From fa8912226bee59067a093e9f9aaf8f6337f74017 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 12 Jan 2021 17:01:48 -0700 Subject: [PATCH 01/78] migrations: add codeinsights migrations (based on migrations/codeintel) Signed-off-by: Stephen Gutekanst --- migrations/README.md | 16 +- .../codeinsights/1000000000_init.down.sql | 1 + .../codeinsights/1000000000_init.up.sql | 1 + migrations/codeinsights/bindata.go | 297 ++++++++++++++++++ migrations/codeinsights/gen.go | 6 + migrations/codeinsights/migrations_test.go | 53 ++++ 6 files changed, 370 insertions(+), 4 deletions(-) create mode 100644 migrations/codeinsights/1000000000_init.down.sql create mode 100644 migrations/codeinsights/1000000000_init.up.sql create mode 100644 migrations/codeinsights/bindata.go create mode 100644 migrations/codeinsights/gen.go create mode 100644 migrations/codeinsights/migrations_test.go diff --git a/migrations/README.md b/migrations/README.md index 13805d570ac3..7b19c4acd56f 100644 --- a/migrations/README.md +++ b/migrations/README.md @@ -4,13 +4,15 @@ The children of this directory contain migrations for each Postgres database ins - `frontend` is the main database (things should go here unless there is a good reason) - `codeintel` is a database containing only processed LSIF data (which can become extremely large) +- `codeinsights` is a TimescaleDB database, containing only Code Insights time series data. The migration path for each database instance is the same and is described below. Each of the database instances described here are deployed separately, but are designed to be _overlayable_ to reduce friction during development. That is, we assume that the names in each database do not overlap so that the same connection parameters can be used for both database instances. 
Each database also has a uniquely named schema versions table: -| database | schema version table name | -| ----------- | ----------------------------- | -| `frontend` | `schema_migrations` | -| `codeintel` | `codeintel_schema_migrations` | +| database | schema version table name | +| -------------- | -------------------------------- | +| `frontend` | `schema_migrations` | +| `codeintel` | `codeintel_schema_migrations` | +| `codeinsights` | `codeinsights_schema_migrations` | Migrations are handled by the [migrate](https://github.com/golang-migrate/migrate/tree/master/cmd/migrate#installation) tool. Migrations get applied automatically at application startup. The CLI tool can also be used to manually test migrations. @@ -83,6 +85,12 @@ Running down migrations in a rollback **should NOT** be necessary if all migrati kubectl exec $(kubectl get pod -l app=pgsql-codeintel -o jsonpath='{.items[0].metadata.name}') -- psql -U sg -c 'SELECT * FROM codeintel_schema_migrations' ``` + **codeinsights database**: + + ``` + kubectl exec $(kubectl get pod -l app=codeinsights-db -o jsonpath='{.items[0].metadata.name}') -- psql -U sg -c 'SELECT * FROM codeinsights_schema_migrations' + ``` + For each dirty database, follow the steps in the _Dirty schema_ section below. 
- For each database `` with the schema version table ``, do the following: diff --git a/migrations/codeinsights/1000000000_init.down.sql b/migrations/codeinsights/1000000000_init.down.sql new file mode 100644 index 000000000000..a7fdf8da43a2 --- /dev/null +++ b/migrations/codeinsights/1000000000_init.down.sql @@ -0,0 +1 @@ +-- empty migration diff --git a/migrations/codeinsights/1000000000_init.up.sql b/migrations/codeinsights/1000000000_init.up.sql new file mode 100644 index 000000000000..a7fdf8da43a2 --- /dev/null +++ b/migrations/codeinsights/1000000000_init.up.sql @@ -0,0 +1 @@ +-- empty migration diff --git a/migrations/codeinsights/bindata.go b/migrations/codeinsights/bindata.go new file mode 100644 index 000000000000..214fc770fa2f --- /dev/null +++ b/migrations/codeinsights/bindata.go @@ -0,0 +1,297 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// 1000000000_init.down.sql (19B) +// 1000000000_init.up.sql (19B) + +package migrations + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi 
bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1000000000_initDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\x48\xcd\x2d\x28\xa9\x54\xc8\xcd\x4c\x2f\x4a\x2c\xc9\xcc\xcf\xe3\x02\x04\x00\x00\xff\xff\x32\x4d\x68\xbd\x13\x00\x00\x00") + +func _1000000000_initDownSqlBytes() ([]byte, error) { + return bindataRead( + __1000000000_initDownSql, + "1000000000_init.down.sql", + ) +} + +func _1000000000_initDownSql() (*asset, error) { + bytes, err := _1000000000_initDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1000000000_init.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0x46, 0xd1, 0x31, 0xb9, 0x68, 0x19, 0xcc, 0x70, 0xb6, 0x7, 0x20, 0x2e, 0x6a, 0x4d, 0xf1, 0xce, 0xd0, 0xc8, 0xda, 0x50, 0xce, 0x8c, 0xee, 0x52, 0x36, 0x80, 0xd0, 0x5a, 0xd2, 0x7a, 0x82}} + return a, nil +} + +var __1000000000_initUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\x48\xcd\x2d\x28\xa9\x54\xc8\xcd\x4c\x2f\x4a\x2c\xc9\xcc\xcf\xe3\x02\x04\x00\x00\xff\xff\x32\x4d\x68\xbd\x13\x00\x00\x00") + +func _1000000000_initUpSqlBytes() ([]byte, error) { + return bindataRead( + __1000000000_initUpSql, + "1000000000_init.up.sql", + ) +} + +func _1000000000_initUpSql() (*asset, error) { + bytes, err := _1000000000_initUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1000000000_init.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0x46, 0xd1, 0x31, 0xb9, 0x68, 0x19, 0xcc, 0x70, 0xb6, 0x7, 0x20, 0x2e, 0x6a, 0x4d, 0xf1, 0xce, 0xd0, 0xc8, 0xda, 0x50, 0xce, 0x8c, 0xee, 0x52, 0x36, 0x80, 0xd0, 0x5a, 0xd2, 0x7a, 0x82}} + return a, nil +} + +// Asset loads and returns the asset for the given name. 
+// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. 
+func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1000000000_init.down.sql": _1000000000_initDownSql, + "1000000000_init.up.sql": _1000000000_initUpSql, +} + +// AssetDebug is true if the assets were built with the debug flag enabled. +const AssetDebug = false + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "1000000000_init.down.sql": {_1000000000_initDownSql, map[string]*bintree{}}, + "1000000000_init.up.sql": {_1000000000_initUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) 
+} diff --git a/migrations/codeinsights/gen.go b/migrations/codeinsights/gen.go new file mode 100644 index 000000000000..bad12fb3ab00 --- /dev/null +++ b/migrations/codeinsights/gen.go @@ -0,0 +1,6 @@ +// Package migrations contains the migration scripts for the DB. +package migrations + +//go:generate env GOBIN=$PWD/.bin GO111MODULE=on go install github.com/kevinburke/go-bindata/go-bindata +//go:generate $PWD/.bin/go-bindata -nometadata -pkg migrations -ignore README.md -ignore .*\.go . +//go:generate gofmt -s -w bindata.go diff --git a/migrations/codeinsights/migrations_test.go b/migrations/codeinsights/migrations_test.go new file mode 100644 index 000000000000..f5cd59f0c178 --- /dev/null +++ b/migrations/codeinsights/migrations_test.go @@ -0,0 +1,53 @@ +package migrations_test + +import ( + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "testing" + + migrations "github.com/sourcegraph/sourcegraph/migrations/codeinsights" +) + +const FirstMigration = 1000000000 + +func TestIDConstraints(t *testing.T) { + ups, err := filepath.Glob("*.up.sql") + if err != nil { + t.Fatal(err) + } + + byID := map[int][]string{} + for _, name := range ups { + id, err := strconv.Atoi(name[:strings.IndexByte(name, '_')]) + if err != nil { + t.Fatalf("failed to parse name %q: %v", name, err) + } + byID[id] = append(byID[id], name) + } + + for id, names := range byID { + // Check if we are using sequential migrations from a certain point. + if _, hasPrev := byID[id-1]; id > FirstMigration && !hasPrev { + t.Errorf("migration with ID %d exists, but previous one (%d) does not", id, id-1) + } + if len(names) > 1 { + t.Errorf("multiple migrations with ID %d: %s", id, strings.Join(names, " ")) + } + } +} + +func TestNeedsGenerate(t *testing.T) { + want, err := filepath.Glob("*.sql") + if err != nil { + t.Fatal(err) + } + got := migrations.AssetNames() + sort.Strings(want) + sort.Strings(got) + if !reflect.DeepEqual(got, want) { + t.Fatal("bindata out of date. 
Please run:\n go generate github.com/sourcegraph/sourcegraph/migrations/...") + } +} From 6014933e9879cbcb63a67fee760a00c0340627c9 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 12 Jan 2021 20:08:20 -0700 Subject: [PATCH 02/78] internal/db: generate separate schema.md files for codeintel and frontend DBs This is needed for us to be able to generate a schema.md file for the new Code Insights DB, which will be a separate TimescaleDB deployment / cannot be part of the same Postgres DB. See #17217 for a more detailed explanation. Signed-off-by: Stephen Gutekanst --- .prettierignore | 1 + internal/db/gen.go | 3 +- internal/db/schema.codeintel.md | 71 ++++++++++++++++++++++++++++++++ internal/db/schema.md | 72 --------------------------------- internal/db/schemadoc/main.go | 15 +++---- 5 files changed, 79 insertions(+), 83 deletions(-) create mode 100644 internal/db/schema.codeintel.md diff --git a/.prettierignore b/.prettierignore index 482ba9852a4b..92d0e17ed239 100644 --- a/.prettierignore +++ b/.prettierignore @@ -5,6 +5,7 @@ client/browser/build **/package.json **/coverage internal/db/schema.md +internal/db/schema.*.md cmd/xlang-python/python-langserver/ package-lock.json package.json diff --git a/internal/db/gen.go b/internal/db/gen.go index 40b222566764..df0b1d5a4a0c 100644 --- a/internal/db/gen.go +++ b/internal/db/gen.go @@ -1,4 +1,5 @@ package db // $PGHOST, $PGUSER, $PGPORT etc. must be set to run this generate script. 
-//go:generate env GO111MODULE=on go run schemadoc/main.go schema.md +//go:generate env GO111MODULE=on go run schemadoc/main.go frontend schema.md +//go:generate env GO111MODULE=on go run schemadoc/main.go codeintel schema.codeintel.md diff --git a/internal/db/schema.codeintel.md b/internal/db/schema.codeintel.md new file mode 100644 index 000000000000..9baeab634d3c --- /dev/null +++ b/internal/db/schema.codeintel.md @@ -0,0 +1,71 @@ +# Table "public.codeintel_schema_migrations" +``` + Column | Type | Modifiers +---------+---------+----------- + version | bigint | not null + dirty | boolean | not null +Indexes: + "codeintel_schema_migrations_pkey" PRIMARY KEY, btree (version) + +``` + +# Table "public.lsif_data_definitions" +``` + Column | Type | Modifiers +------------+---------+----------- + dump_id | integer | not null + scheme | text | not null + identifier | text | not null + data | bytea | +Indexes: + "lsif_data_definitions_pkey" PRIMARY KEY, btree (dump_id, scheme, identifier) + +``` + +# Table "public.lsif_data_documents" +``` + Column | Type | Modifiers +---------+---------+----------- + dump_id | integer | not null + path | text | not null + data | bytea | +Indexes: + "lsif_data_documents_pkey" PRIMARY KEY, btree (dump_id, path) + +``` + +# Table "public.lsif_data_metadata" +``` + Column | Type | Modifiers +-------------------+---------+----------- + dump_id | integer | not null + num_result_chunks | integer | +Indexes: + "lsif_data_metadata_pkey" PRIMARY KEY, btree (dump_id) + +``` + +# Table "public.lsif_data_references" +``` + Column | Type | Modifiers +------------+---------+----------- + dump_id | integer | not null + scheme | text | not null + identifier | text | not null + data | bytea | +Indexes: + "lsif_data_references_pkey" PRIMARY KEY, btree (dump_id, scheme, identifier) + +``` + +# Table "public.lsif_data_result_chunks" +``` + Column | Type | Modifiers +---------+---------+----------- + dump_id | integer | not null + idx | integer | not null + 
data | bytea | +Indexes: + "lsif_data_result_chunks_pkey" PRIMARY KEY, btree (dump_id, idx) + +``` diff --git a/internal/db/schema.md b/internal/db/schema.md index 47c00ddfc3f8..9302d25d109e 100644 --- a/internal/db/schema.md +++ b/internal/db/schema.md @@ -331,17 +331,6 @@ Referenced by: ``` -# Table "public.codeintel_schema_migrations" -``` - Column | Type | Modifiers ----------+---------+----------- - version | bigint | not null - dirty | boolean | not null -Indexes: - "codeintel_schema_migrations_pkey" PRIMARY KEY, btree (version) - -``` - # Table "public.critical_and_site_config" ``` Column | Type | Modifiers @@ -574,67 +563,6 @@ Indexes: ``` -# Table "public.lsif_data_definitions" -``` - Column | Type | Modifiers -------------+---------+----------- - dump_id | integer | not null - scheme | text | not null - identifier | text | not null - data | bytea | -Indexes: - "lsif_data_definitions_pkey" PRIMARY KEY, btree (dump_id, scheme, identifier) - -``` - -# Table "public.lsif_data_documents" -``` - Column | Type | Modifiers ----------+---------+----------- - dump_id | integer | not null - path | text | not null - data | bytea | -Indexes: - "lsif_data_documents_pkey" PRIMARY KEY, btree (dump_id, path) - -``` - -# Table "public.lsif_data_metadata" -``` - Column | Type | Modifiers --------------------+---------+----------- - dump_id | integer | not null - num_result_chunks | integer | -Indexes: - "lsif_data_metadata_pkey" PRIMARY KEY, btree (dump_id) - -``` - -# Table "public.lsif_data_references" -``` - Column | Type | Modifiers -------------+---------+----------- - dump_id | integer | not null - scheme | text | not null - identifier | text | not null - data | bytea | -Indexes: - "lsif_data_references_pkey" PRIMARY KEY, btree (dump_id, scheme, identifier) - -``` - -# Table "public.lsif_data_result_chunks" -``` - Column | Type | Modifiers ----------+---------+----------- - dump_id | integer | not null - idx | integer | not null - data | bytea | -Indexes: - 
"lsif_data_result_chunks_pkey" PRIMARY KEY, btree (dump_id, idx) - -``` - # Table "public.lsif_dirty_repositories" ``` Column | Type | Modifiers diff --git a/internal/db/schemadoc/main.go b/internal/db/schemadoc/main.go index 5fb970598b18..a30201d3aaf0 100644 --- a/internal/db/schemadoc/main.go +++ b/internal/db/schemadoc/main.go @@ -12,7 +12,6 @@ import ( "time" "github.com/sourcegraph/sourcegraph/internal/db/dbconn" - "github.com/sourcegraph/sourcegraph/internal/db/dbutil" "github.com/sourcegraph/sourcegraph/internal/lazyregexp" _ "github.com/lib/pq" @@ -27,7 +26,7 @@ func runIgnoreError(cmd string, args ...string) { // PGPORT, PGUSER etc. env variables must be set to run this script. // // First CLI argument is an optional filename to write the output to. -func generate(log *log.Logger) (string, error) { +func generate(log *log.Logger, databaseName string) (string, error) { const dbname = "schemadoc-gen-temp" var ( @@ -99,12 +98,8 @@ func generate(log *log.Logger) (string, error) { return "", fmt.Errorf("SetupGlobalConnection: %w", err) } - // Migrate the codeintel db on top of the frontend one so we capture - // the schema of both databases. 
- for _, databaseName := range dbutil.DatabaseNames { - if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil { - return "", fmt.Errorf("MigrateDB: %w", err) - } + if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil { + return "", fmt.Errorf("MigrateDB: %w", err) } db, err := dbconn.Open(dataSource) @@ -155,12 +150,12 @@ WHERE table_schema='public' AND table_type='BASE TABLE'; } func main() { - out, err := generate(log.New(os.Stderr, "", log.LstdFlags)) + out, err := generate(log.New(os.Stderr, "", log.LstdFlags), os.Args[1]) if err != nil { log.Fatal(err) } if len(os.Args) > 1 { - if err := ioutil.WriteFile(os.Args[1], []byte(out), 0644); err != nil { + if err := ioutil.WriteFile(os.Args[2], []byte(out), 0644); err != nil { log.Fatal(err) } } else { From 3d7323e23695b50ba2b6c0f16875bc70d301498e Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 12 Jan 2021 20:10:06 -0700 Subject: [PATCH 03/78] internal/db/dbutil: add codeinsights migrations Signed-off-by: Stephen Gutekanst --- internal/db/dbutil/dbutil.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/db/dbutil/dbutil.go b/internal/db/dbutil/dbutil.go index 707e70bb11ed..8e37dca1ce23 100644 --- a/internal/db/dbutil/dbutil.go +++ b/internal/db/dbutil/dbutil.go @@ -24,6 +24,7 @@ import ( "github.com/opentracing/opentracing-go/ext" "github.com/pkg/errors" "github.com/sourcegraph/sourcegraph/internal/trace/ot" + codeinsightsMigrations "github.com/sourcegraph/sourcegraph/migrations/codeinsights" codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel" frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend" ) @@ -148,6 +149,10 @@ var databases = map[string]struct { MigrationsTable: "codeintel_schema_migrations", Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), }, + "codeinsights": { + MigrationsTable: "codeinsights_schema_migrations", + Resource: 
bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), + }, } // DatabaseNames returns the list of database names (configured via `dbutil.databases`).. From 608d656df428ac84e6cd418c857f7bb4460ef932 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 14 Jan 2021 17:00:26 -0700 Subject: [PATCH 04/78] dev: run codeinsights-db (TimescaleDB) Signed-off-by: Stephen Gutekanst --- dev/Procfile | 1 + dev/codeinsights-db.sh | 60 +++++++++++++++++++++++++++++++++++++++++ enterprise/dev/Procfile | 1 + 3 files changed, 62 insertions(+) create mode 100755 dev/codeinsights-db.sh diff --git a/dev/Procfile b/dev/Procfile index 9547f12d4b1b..ac5f5d429b2a 100644 --- a/dev/Procfile +++ b/dev/Procfile @@ -14,6 +14,7 @@ zoekt-indexserver-0: ./dev/zoekt/wrapper indexserver 0 zoekt-indexserver-1: ./dev/zoekt/wrapper indexserver 1 zoekt-webserver-0: ./dev/zoekt/wrapper webserver 0 zoekt-webserver-1: ./dev/zoekt/wrapper webserver 1 +codeinsights-db: ./dev/codeinsights-db.sh keycloak: ./dev/auth-provider/keycloak.sh jaeger: ./dev/jaeger.sh docsite: ./dev/docsite.sh -config doc/docsite.json serve -http=localhost:5080 || echo error starting docsite diff --git a/dev/codeinsights-db.sh b/dev/codeinsights-db.sh new file mode 100755 index 000000000000..1b4ae3fd4cd2 --- /dev/null +++ b/dev/codeinsights-db.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# Description: Code Insights TimescaleDB. + +set -euf -o pipefail +pushd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null + +DISK="${HOME}/.sourcegraph-dev/data/codeinsights-db" +if [ ! 
-e "${DISK}" ]; then + mkdir -p "${DISK}" +fi + +IMAGE=sourcegraph/codeinsights-db:dev +CONTAINER=codeinsights-db +PORT=3370 + +docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER + +# Log file location: since we log outside of the Docker container, we should +# log somewhere that's _not_ ~/.sourcegraph-dev/data/codeinsights-db, since that gets +# volume mounted into the container and therefore has its own ownership +# semantics. +LOGS="${HOME}/.sourcegraph-dev/logs/codeinsights-db" +mkdir -p "${LOGS}" + +# Now for the actual logging. TimescaleDB's output gets sent to stdout and stderr. +# We want to capture that output, but because it's fairly noisy, don't want to +# display it in the normal case. +LOG_FILE="${LOGS}/codeinsights-db.log" + +# Quickly build image +echo "codeinsights-db: building ${IMAGE}..." +IMAGE=${IMAGE} CACHE=true ./docker-images/codeinsights-db/build.sh >"${LOG_FILE}" 2>&1 || + (BUILD_EXIT_CODE=$? && echo "codeinsights-db build failed; dumping log:" && cat "${LOG_FILE}" && exit $BUILD_EXIT_CODE) + +function finish() { + EXIT_CODE=$? + + # Exit code 2 indicates a normal Ctrl-C termination via goreman, so we'll + # only dump the log if it's not 0 _or_ 2. + if [ $EXIT_CODE -ne 0 ] && [ $EXIT_CODE -ne 2 ]; then + echo "codeinsights-db exited with unexpected code ${EXIT_CODE}; dumping log:" + cat "${LOG_FILE}" + fi + + # Ensure that we still return the same code so that goreman can do sensible + # things once this script exits. 
+ return $EXIT_CODE +} + +echo "codeinsights-db: serving on http://localhost:${PORT}" +echo "codeinsights-db: note that logs are piped to ${LOG_FILE}" +docker run --rm \ + --name=${CONTAINER} \ + --cpus=1 \ + --memory=1g \ + -e POSTGRES_PASSWORD=password \ + -p 0.0.0.0:5435:5435 \ + -v "${DISK}":/var/lib/postgresql/data \ + ${IMAGE} >"${LOG_FILE}" 2>&1 || finish diff --git a/enterprise/dev/Procfile b/enterprise/dev/Procfile index f62ecfbfc727..271f7b81d26f 100644 --- a/enterprise/dev/Procfile +++ b/enterprise/dev/Procfile @@ -14,6 +14,7 @@ zoekt-indexserver-0: ./dev/zoekt/wrapper indexserver 0 zoekt-indexserver-1: ./dev/zoekt/wrapper indexserver 1 zoekt-webserver-0: ./dev/zoekt/wrapper webserver 0 zoekt-webserver-1: ./dev/zoekt/wrapper webserver 1 +codeinsights-db: ./dev/codeinsights-db.sh keycloak: ./dev/auth-provider/keycloak.sh jaeger: ./dev/jaeger.sh docsite: ./dev/docsite.sh -config doc/docsite.json serve -http=localhost:5080 || echo error starting docsite From 0bf010e217d67b21217c1eeefa4bd7a2ad142db6 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 14 Jan 2021 17:00:42 -0700 Subject: [PATCH 05/78] docker-images: add codeinsights-db (re-tag TimescaleDB) Signed-off-by: Stephen Gutekanst --- docker-images/codeinsights-db/build.sh | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100755 docker-images/codeinsights-db/build.sh diff --git a/docker-images/codeinsights-db/build.sh b/docker-images/codeinsights-db/build.sh new file mode 100755 index 000000000000..05705cd9793c --- /dev/null +++ b/docker-images/codeinsights-db/build.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -ex +cd "$(dirname "${BASH_SOURCE[0]}")" + +# This merely re-tags the image to match our official Sourcegraph versioning and image naming scheme. 
+docker pull timescale/timescaledb:2.0.0-pg12-oss@sha256:08ea7cda3b6891c1815af449493c322969d8d9cf283a7af501ce22c6672b51a1 +docker tag timescale/timescaledb:2.0.0-pg12-oss@sha256:08ea7cda3b6891c1815af449493c322969d8d9cf283a7af501ce22c6672b51a1 "$IMAGE" From 85d359e0e4494166db25349b8c5d549ec5ffc925 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 15 Jan 2021 19:24:09 -0700 Subject: [PATCH 06/78] add TODOs about places needing updates for timescaledb Signed-off-by: Stephen Gutekanst --- dev/db/squash_migrations.sh | 1 + internal/db/schemadoc/main.go | 1 + 2 files changed, 2 insertions(+) diff --git a/dev/db/squash_migrations.sh b/dev/db/squash_migrations.sh index bdbeb5bb6ae0..91feaf902763 100755 --- a/dev/db/squash_migrations.sh +++ b/dev/db/squash_migrations.sh @@ -67,6 +67,7 @@ DBNAME='squasher' SERVER_VERSION=$(psql --version) if [ "${SERVER_VERSION}" != 9.6 ]; then + # TODO: handling of timescaledb echo "running PostgreSQL 9.6 in docker since local version is ${SERVER_VERSION}" docker image inspect postgres:9.6 >/dev/null || docker pull postgres:9.6 docker rm --force "${DBNAME}" 2>/dev/null || true diff --git a/internal/db/schemadoc/main.go b/internal/db/schemadoc/main.go index a30201d3aaf0..62d787d2db3d 100644 --- a/internal/db/schemadoc/main.go +++ b/internal/db/schemadoc/main.go @@ -46,6 +46,7 @@ func generate(log *log.Logger, databaseName string) (string, error) { runIgnoreError("dropdb", dbname) defer runIgnoreError("dropdb", dbname) } else { + // TODO: need handling for timescaledb here. 
log.Printf("Running PostgreSQL 9.6 in docker since local version is %s", strings.TrimSpace(string(out))) if err := exec.Command("docker", "image", "inspect", "postgres:9.6").Run(); err != nil { log.Println("docker pull postgres9.6") From 728e4c08546a17200ed65e796f4051a79200cd79 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 15 Jan 2021 19:24:37 -0700 Subject: [PATCH 07/78] running personal notes (to be moved to proper dev docs later) Signed-off-by: Stephen Gutekanst --- README.codeinsights.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 README.codeinsights.md diff --git a/README.codeinsights.md b/README.codeinsights.md new file mode 100644 index 000000000000..22242a7abc86 --- /dev/null +++ b/README.codeinsights.md @@ -0,0 +1,37 @@ +## codeinsights-db Docker Image + +We republish the TimescaleDB (open source) Docker image under sourcegraph/codeinsights-db to ensure it uses our standard naming and versioning scheme. This is done in `docker-images/codeinsights-db/`. + +## Getting a psql prompt (dev server) + +```sh +docker exec -it codeinsights-db psql -U postgres +``` + +## Migrations + +Since TimescaleDB is just Postgres (with an extension), we use the same SQL migration framework we use for our other Postgres databases. `migrations/codeinsights` contains the migrations for the Code Insights database, they are executed when the frontend starts up (as is the same with e.g. codeintel DB migrations.) 
+ +### Add a new migration + +To add a new migration, use: + +``` +./dev/db/add_migration.sh codeinsights MIGRATION_NAME +``` + +See [migrations/README.md](migrations/README.md) for more information + +# Random stuff + +## Insert data + +``` +INSERT INTO histogram_events(time,value,metadata,repo_id) VALUES(now(), 0.5, '{"hello": "world"}', 2); +``` + +## Query data + +``` +SELECT * FROM histogram_events ORDER BY time DESC LIMIT 100; +``` From 34778215bc84c2d33f817e4f628de36b34e1e108 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 15 Jan 2021 19:25:09 -0700 Subject: [PATCH 08/78] add intial DB schema Signed-off-by: Stephen Gutekanst --- .../1000000001_initial_schema.down.sql | 5 +++ .../1000000001_initial_schema.up.sql | 32 +++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 migrations/codeinsights/1000000001_initial_schema.down.sql create mode 100644 migrations/codeinsights/1000000001_initial_schema.up.sql diff --git a/migrations/codeinsights/1000000001_initial_schema.down.sql b/migrations/codeinsights/1000000001_initial_schema.down.sql new file mode 100644 index 000000000000..a5cd447a6a55 --- /dev/null +++ b/migrations/codeinsights/1000000001_initial_schema.down.sql @@ -0,0 +1,5 @@ +BEGIN; + +DROP TABLE histogram_events; + +COMMIT; diff --git a/migrations/codeinsights/1000000001_initial_schema.up.sql b/migrations/codeinsights/1000000001_initial_schema.up.sql new file mode 100644 index 000000000000..15d582fccf61 --- /dev/null +++ b/migrations/codeinsights/1000000001_initial_schema.up.sql @@ -0,0 +1,32 @@ +BEGIN; + +CREATE EXTENSION IF NOT EXISTS timescaledb; + +CREATE TABLE histogram_events ( + -- The timestamp of the recorded event. + time TIMESTAMPTZ NOT NULL, + + -- The floating point value at the time of the event. + value double precision NOT NULL, + + -- Metadata about this event, this can be any arbitrary JSON metadata which will be returned + -- when querying events, but cannot be filtered on. 
+ metadata jsonb NOT NULL, + + -- If the event was for a single repository (usually the case) then this field should indicate + -- the repository ID at the time the event was created. Note that the repository may no longer + -- exist / be valid at query time, however. + repo_id integer, + + -- If the event was for a single repository (usually the case) then this field should indicate + -- the repository name at the time the event was created. Note that the repository name may + -- have changed since the event was created (e.g. if the repo was renamed), in which case this + -- describes the outdated repository na,e. + repo_name citext +); + +-- Create hypertable, partitioning histogram events by time. +-- See https://docs.timescale.com/latest/using-timescaledb/hypertables +SELECT create_hypertable('histogram_events', 'time'); + +COMMIT; From 66bf94b73f6ac782c28f6e2a80062e5cef72b357 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 13:16:43 -0700 Subject: [PATCH 09/78] rename histogram_events -> gauge_events; document table Signed-off-by: Stephen Gutekanst --- .../codeinsights/1000000001_initial_schema.up.sql | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/migrations/codeinsights/1000000001_initial_schema.up.sql b/migrations/codeinsights/1000000001_initial_schema.up.sql index 15d582fccf61..7f02137c0708 100644 --- a/migrations/codeinsights/1000000001_initial_schema.up.sql +++ b/migrations/codeinsights/1000000001_initial_schema.up.sql @@ -2,7 +2,14 @@ BEGIN; CREATE EXTENSION IF NOT EXISTS timescaledb; -CREATE TABLE histogram_events ( +-- Records events over time associated with a repository (or none, i.e. globally) where a single +-- numerical value is going arbitrarily up and down. +-- +-- Repository association is based on both repository ID and name. The ID can be used to refer to +-- a specific repository, or lookup the current name of a repository after it has been e.g. renamed. 
+-- The name can be used to refer to the name of the repository at the time of the event's creation, +-- for example to trace the change in a gauge back to a repository being renamed. +CREATE TABLE gauge_events ( -- The timestamp of the recorded event. time TIMESTAMPTZ NOT NULL, @@ -27,6 +34,6 @@ CREATE TABLE histogram_events ( -- Create hypertable, partitioning histogram events by time. -- See https://docs.timescale.com/latest/using-timescaledb/hypertables -SELECT create_hypertable('histogram_events', 'time'); +SELECT create_hypertable('gauge_events', 'time'); COMMIT; From 25482c9ee4a4254eb615cc500087ede1c2fdf07e Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 13:21:54 -0700 Subject: [PATCH 10/78] dev/drop-entire-local-database-and-redis.sh - make it work for codeinsights-db Signed-off-by: Stephen Gutekanst --- dev/drop-entire-local-database-and-redis.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/dev/drop-entire-local-database-and-redis.sh b/dev/drop-entire-local-database-and-redis.sh index d24160fc2689..7e2f81b205b2 100755 --- a/dev/drop-entire-local-database-and-redis.sh +++ b/dev/drop-entire-local-database-and-redis.sh @@ -2,3 +2,4 @@ psql -c "drop schema public cascade; create schema public;" redis-cli -c flushall +rm -rf $HOME/.sourcegraph-dev/data/codeinsights-db/ \ No newline at end of file From 1f755f9d32eb748a8d738288a0a8c9db55cd7bef Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 13:26:13 -0700 Subject: [PATCH 11/78] README: update Signed-off-by: Stephen Gutekanst --- README.codeinsights.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.codeinsights.md b/README.codeinsights.md index 22242a7abc86..046d2314c4cd 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -27,11 +27,11 @@ See [migrations/README.md](migrations/README.md) for more information ## Insert data ``` -INSERT INTO histogram_events(time,value,metadata,repo_id) VALUES(now(), 0.5, '{"hello": 
"world"}', 2); +INSERT INTO gauge_events(time,value,metadata,repo_id) VALUES(now(), 0.5, '{"hello": "world"}', 2); ``` ## Query data ``` -SELECT * FROM histogram_events ORDER BY time DESC LIMIT 100; +SELECT * FROM gauge_events ORDER BY time DESC LIMIT 100; ``` From 314dcfd6186b1644678e9b84a6a1f77fbc7e1a4c Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 15:59:51 -0700 Subject: [PATCH 12/78] DB schema take 2 Signed-off-by: Stephen Gutekanst --- README.codeinsights.md | 95 ++++++++++++++++++- .../1000000001_initial_schema.down.sql | 13 ++- .../1000000001_initial_schema.up.sql | 90 +++++++++++++++--- 3 files changed, 183 insertions(+), 15 deletions(-) diff --git a/README.codeinsights.md b/README.codeinsights.md index 046d2314c4cd..a2c46ff8166b 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -24,14 +24,105 @@ See [migrations/README.md](migrations/README.md) for more information # Random stuff -## Insert data +## Upsert repo names ``` -INSERT INTO gauge_events(time,value,metadata,repo_id) VALUES(now(), 0.5, '{"hello": "world"}', 2); +WITH e AS( + INSERT INTO repo_names(name) + VALUES ('github.com/gorilla/mux-original') + ON CONFLICT DO NOTHING + RETURNING id +) +SELECT * FROM e +UNION + SELECT id FROM repo_names WHERE name='github.com/gorilla/mux-original'; + +WITH e AS( + INSERT INTO repo_names(name) + VALUES ('github.com/gorilla/mux-renamed') + ON CONFLICT DO NOTHING + RETURNING id +) +SELECT * FROM e +UNION + SELECT id FROM repo_names WHERE name='github.com/gorilla/mux-renamed'; +``` + +## Upsert event metadata + +Upsert metadata, getting back ID: + +``` +WITH e AS( + INSERT INTO metadata(metadata) + VALUES ('{"hello": "world", "languages": ["Go", "Python", "Java"]}') + ON CONFLICT DO NOTHING + RETURNING id +) +SELECT * FROM e +UNION + SELECT id FROM metadata WHERE metadata='{"hello": "world", "languages": ["Go", "Python", "Java"]}'; +``` + +## Inserting gauge events + +``` +INSERT INTO gauge_events( + time, + value, + 
metadata_id, + repo_id, + repo_name_id, + original_repo_name_id +) VALUES( + now(), + 0.5, + (SELECT id FROM metadata WHERE metadata = '{"hello": "world", "languages": ["Go", "Python", "Java"]}'), + 2, + (SELECT id FROM repo_names WHERE name = 'github.com/gorilla/mux-renamed'), + (SELECT id FROM repo_names WHERE name = 'github.com/gorilla/mux-original') +); ``` ## Query data +### All data + ``` SELECT * FROM gauge_events ORDER BY time DESC LIMIT 100; ``` + +### Filter by repo name, returning metadata (may be more optimally queried separately) + +``` +SELECT * +FROM gauge_events +JOIN metadata ON metadata.id = metadata_id +WHERE repo_name_id IN ( + SELECT id FROM repo_names WHERE name ~ '.*-renamed' +) +ORDER BY time +DESC LIMIT 100; +``` + +### Filter by metadata containing `{"hello": "world"}` + +``` +SELECT * +FROM gauge_events +JOIN metadata ON metadata.id = metadata_id +WHERE metadata @> '{"hello": "world"}' +ORDER BY time +DESC LIMIT 100; +``` + +### Filter by metadata containing Go languages + +``` +SELECT * +FROM gauge_events +JOIN metadata ON metadata.id = metadata_id +WHERE metadata @> '{"languages": ["Go"]}' +ORDER BY time +DESC LIMIT 100; +``` diff --git a/migrations/codeinsights/1000000001_initial_schema.down.sql b/migrations/codeinsights/1000000001_initial_schema.down.sql index a5cd447a6a55..fd967e3a014d 100644 --- a/migrations/codeinsights/1000000001_initial_schema.down.sql +++ b/migrations/codeinsights/1000000001_initial_schema.down.sql @@ -1,5 +1,16 @@ BEGIN; -DROP TABLE histogram_events; +DROP INDEX IF EXISTS gauge_events_repo_id_btree; +DROP INDEX IF EXISTS gauge_events_repo_name_id_btree; +DROP INDEX IF EXISTS gauge_events_original_repo_name_id_btree; +DROP TABLE IF EXISTS gauge_events; + +DROP INDEX IF EXISTS repo_names_name_unique_idx; +DROP INDEX IF EXISTS repo_names_name_trgm; +DROP TABLE IF EXISTS repo_names; + +DROP INDEX IF EXISTS metadata_metadata_unique_idx; +DROP INDEX IF EXISTS metadata_metadata_gin; +DROP TABLE IF EXISTS metadata; 
COMMIT; diff --git a/migrations/codeinsights/1000000001_initial_schema.up.sql b/migrations/codeinsights/1000000001_initial_schema.up.sql index 7f02137c0708..2d7d61cc22b6 100644 --- a/migrations/codeinsights/1000000001_initial_schema.up.sql +++ b/migrations/codeinsights/1000000001_initial_schema.up.sql @@ -1,6 +1,51 @@ BEGIN; CREATE EXTENSION IF NOT EXISTS timescaledb; +CREATE EXTENSION IF NOT EXISTS pg_trgm; +CREATE EXTENSION IF NOT EXISTS citext; + +-- Records repository names, both historical and present, using a unique repository _name_ ID +-- (unrelated to the repository ID.) +CREATE TABLE repo_names ( + -- The repository _name_ ID. + id bigserial NOT NULL PRIMARY KEY, + + -- The name, trigram-indexed for fast e.g. regexp filtering. + name citext NOT NULL, + + CONSTRAINT check_name_nonempty CHECK ((name OPERATOR(<>) ''::citext)) +); + +-- Enforce that names are unique. +CREATE UNIQUE INDEX repo_names_name_unique_idx ON repo_names(name); + +-- Create trigram indexes for repository name filtering based on e.g. regexps. +CREATE INDEX repo_names_name_trgm ON repo_names USING gin (lower((name)::text) gin_trgm_ops); + + +-- Records arbitrary metadata about events. Stored in a separate table as it is often repeated +-- for multiple events. +CREATE TABLE metadata ( + -- The repository _name_ ID. + id bigserial NOT NULL PRIMARY KEY, + + -- Metadata about this event, this can be any arbitrary JSON metadata which will be returned + -- when querying events, and can be filtered on and grouped using jsonb operators ?, ?&, ?|, + -- and @>. This should be small data only, primary use case is small lists such as: + -- + -- {"java_versions": [...]} + -- {"languages": [...]} + -- {"pull_requests": [...]} + -- {"annotations": [...]} + -- + metadata jsonb NOT NULL +); + +-- Enforce that metadata is unique. +CREATE UNIQUE INDEX metadata_metadata_unique_idx ON metadata(metadata); + +-- Index metadata to optimize WHERE clauses with jsonb ?, ?&, ?|, and @> operators. 
+CREATE INDEX metadata_metadata_gin ON metadata USING GIN (metadata); -- Records events over time associated with a repository (or none, i.e. globally) where a single -- numerical value is going arbitrarily up and down. @@ -16,24 +61,45 @@ CREATE TABLE gauge_events ( -- The floating point value at the time of the event. value double precision NOT NULL, - -- Metadata about this event, this can be any arbitrary JSON metadata which will be returned - -- when querying events, but cannot be filtered on. - metadata jsonb NOT NULL, + -- Associated metadata for this event, if any. + metadata_id integer, - -- If the event was for a single repository (usually the case) then this field should indicate - -- the repository ID at the time the event was created. Note that the repository may no longer - -- exist / be valid at query time, however. + -- The repository ID (from the main application DB) at the time the event was created. Note + -- that the repository may no longer exist / be valid at query time, however. + -- + -- null if the event was not for a single repository (i.e. a global gauge). repo_id integer, - -- If the event was for a single repository (usually the case) then this field should indicate - -- the repository name at the time the event was created. Note that the repository name may - -- have changed since the event was created (e.g. if the repo was renamed), in which case this - -- describes the outdated repository na,e. - repo_name citext + -- The most recently known name for the repository, updated periodically to account for e.g. + -- repository renames. If the repository was deleted, this is still the most recently known + -- name. + -- + -- null if the event was not for a single repository (i.e. a global gauge). + repo_name_id integer, + + -- The repository name as it was known at the time the event was created. It may have been renamed + -- since. + original_repo_name_id integer, + + -- Ensure if one repo association field is specified, all are. 
+ CONSTRAINT check_repo_fields_specifity CHECK ( + ((repo_id IS NULL) AND (repo_name_id IS NULL) AND (original_repo_name_id IS NULL)) + OR + ((repo_id IS NOT NULL) AND (repo_name_id IS NOT NULL) AND (original_repo_name_id IS NOT NULL)) + ), + + FOREIGN KEY (metadata_id) REFERENCES metadata(id) ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (repo_name_id) REFERENCES repo_names(id) ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (original_repo_name_id) REFERENCES repo_names(id) ON DELETE CASCADE DEFERRABLE ); --- Create hypertable, partitioning histogram events by time. +-- Create hypertable, partitioning events by time. -- See https://docs.timescale.com/latest/using-timescaledb/hypertables SELECT create_hypertable('gauge_events', 'time'); +-- Create btree indexes for repository filtering. +CREATE INDEX gauge_events_repo_id_btree ON gauge_events USING btree (repo_id); +CREATE INDEX gauge_events_repo_name_id_btree ON gauge_events USING btree (repo_name_id); +CREATE INDEX gauge_events_original_repo_name_id_btree ON gauge_events USING btree (original_repo_name_id); + COMMIT; From 7f2412b17da2db9d61d1fb0bd8f4e644bab1b2d7 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 16:01:47 -0700 Subject: [PATCH 13/78] note metadata filtering Signed-off-by: Stephen Gutekanst --- README.codeinsights.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.codeinsights.md b/README.codeinsights.md index a2c46ff8166b..70c93407097b 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -126,3 +126,5 @@ WHERE metadata @> '{"languages": ["Go"]}' ORDER BY time DESC LIMIT 100; ``` + +See https://www.postgresql.org/docs/9.6/functions-json.html for more operator possibilities. 
Only ?, ?&, ?|, and @> operators are indexed (gin index) From e1528b68f2990ba301b41e4e800866566d2c1da6 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 16:08:10 -0700 Subject: [PATCH 14/78] scratch the surface of aggregation Signed-off-by: Stephen Gutekanst --- README.codeinsights.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.codeinsights.md b/README.codeinsights.md index 70c93407097b..686860f6c39d 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -128,3 +128,20 @@ DESC LIMIT 100; ``` See https://www.postgresql.org/docs/9.6/functions-json.html for more operator possibilities. Only ?, ?&, ?|, and @> operators are indexed (gin index) + +### Get average/min/max value every 1h for + +``` +SELECT + value, + time_bucket(INTERVAL '1 hour', time) AS bucket, + AVG(value), + MAX(value), + MIN(value) +FROM gauge_events +GROUP BY value, bucket; +``` + +Note: This is not optimized, we can use materialized views to do continuous aggregation. 
+ +See https://docs.timescale.com/latest/using-timescaledb/continuous-aggregates From a3e36bb208fd38055d6be392545c9bb24d30260f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 17:11:13 -0700 Subject: [PATCH 15/78] graphqlbackend: stub resolvers out Signed-off-by: Stephen Gutekanst --- cmd/frontend/enterprise/enterprise.go | 2 + cmd/frontend/graphqlbackend/CODENOTIFY | 4 ++ cmd/frontend/graphqlbackend/graphqlbackend.go | 10 ++++- cmd/frontend/graphqlbackend/insights.go | 42 +++++++++++++++++++ cmd/frontend/graphqlbackend/testing.go | 2 +- cmd/frontend/internal/cli/serve_cmd.go | 2 +- 6 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 cmd/frontend/graphqlbackend/insights.go diff --git a/cmd/frontend/enterprise/enterprise.go b/cmd/frontend/enterprise/enterprise.go index af6a1ad114f8..5ae5f3af029a 100644 --- a/cmd/frontend/enterprise/enterprise.go +++ b/cmd/frontend/enterprise/enterprise.go @@ -20,6 +20,7 @@ type Services struct { AuthzResolver graphqlbackend.AuthzResolver CampaignsResolver graphqlbackend.CampaignsResolver CodeIntelResolver graphqlbackend.CodeIntelResolver + InsightsResolver graphqlbackend.InsightsResolver CodeMonitorsResolver graphqlbackend.CodeMonitorsResolver LicenseResolver graphqlbackend.LicenseResolver } @@ -43,6 +44,7 @@ func DefaultServices() Services { NewExecutorProxyHandler: func() http.Handler { return makeNotFoundHandler("executor proxy") }, AuthzResolver: graphqlbackend.DefaultAuthzResolver, CampaignsResolver: graphqlbackend.DefaultCampaignsResolver, + InsightsResolver: graphqlbackend.DefaultInsightsResolver, CodeMonitorsResolver: graphqlbackend.DefaultCodeMonitorsResolver, LicenseResolver: graphqlbackend.DefaultLicenseResolver, } diff --git a/cmd/frontend/graphqlbackend/CODENOTIFY b/cmd/frontend/graphqlbackend/CODENOTIFY index f9e05e465ffa..9a0a80c6290e 100644 --- a/cmd/frontend/graphqlbackend/CODENOTIFY +++ b/cmd/frontend/graphqlbackend/CODENOTIFY @@ -12,3 +12,7 @@ site_monitoring.go @bobheadxi # 
Campaigns campaigns.go @LawnGnome campaigns.go @eseliger + +# Insights +insights.go @slimsag +insights.go @felixfbecker diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index e44bc73b4b1b..a0fd19ef219b 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -339,11 +339,12 @@ func prometheusGraphQLRequestName(requestName string) string { return "other" } -func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { +func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, insights InsightsResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { resolver := &schemaResolver{ CampaignsResolver: defaultCampaignsResolver{}, AuthzResolver: defaultAuthzResolver{}, CodeIntelResolver: defaultCodeIntelResolver{}, + InsightsResolver: defaultInsightsResolver{}, LicenseResolver: defaultLicenseResolver{}, } if campaigns != nil { @@ -354,6 +355,10 @@ func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz A EnterpriseResolvers.codeIntelResolver = codeIntel resolver.CodeIntelResolver = codeIntel } + if insights != nil { + EnterpriseResolvers.insightsResolver = insights + resolver.InsightsResolver = insights + } if authz != nil { EnterpriseResolvers.authzResolver = authz resolver.AuthzResolver = authz @@ -558,6 +563,7 @@ type schemaResolver struct { CampaignsResolver AuthzResolver CodeIntelResolver + InsightsResolver CodeMonitorsResolver LicenseResolver } @@ -566,12 +572,14 @@ type schemaResolver struct { // in enterprise mode. These resolver instances are nil when running as OSS. 
var EnterpriseResolvers = struct { codeIntelResolver CodeIntelResolver + insightsResolver InsightsResolver authzResolver AuthzResolver campaignsResolver CampaignsResolver codeMonitorsResolver CodeMonitorsResolver licenseResolver LicenseResolver }{ codeIntelResolver: defaultCodeIntelResolver{}, + insightsResolver: defaultInsightsResolver{}, authzResolver: defaultAuthzResolver{}, campaignsResolver: defaultCampaignsResolver{}, codeMonitorsResolver: defaultCodeMonitorsResolver{}, diff --git a/cmd/frontend/graphqlbackend/insights.go b/cmd/frontend/graphqlbackend/insights.go new file mode 100644 index 000000000000..d7c15965a859 --- /dev/null +++ b/cmd/frontend/graphqlbackend/insights.go @@ -0,0 +1,42 @@ +package graphqlbackend + +import ( + "context" + "errors" +) + +// This file just contains stub GraphQL resolvers and data types for Code Insights which merely +// return an error if not running in enterprise mode. The actual resolvers can be found in +// enterprise/internal/insights/resolvers + +type InsightDataPointResolver interface { + DateTime() DateTime + Value() float64 +} + +type PointsArgs struct { + From *DateTime + To *DateTime +} + +type InsightsResolver interface { + // Root resolver + Insights(ctx context.Context) (InsightsResolver, error) + + // Insights type resolvers. 
+ Points(ctx context.Context, args *PointsArgs) ([]InsightDataPointResolver, error) +} + +var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") + +type defaultInsightsResolver struct{} + +func (defaultInsightsResolver) Insights(ctx context.Context) (InsightsResolver, error) { + return nil, insightsOnlyInEnterprise +} + +func (defaultInsightsResolver) Points(ctx context.Context, args *PointsArgs) ([]InsightDataPointResolver, error) { + return nil, insightsOnlyInEnterprise +} + +var DefaultInsightsResolver InsightsResolver = defaultInsightsResolver{} diff --git a/cmd/frontend/graphqlbackend/testing.go b/cmd/frontend/graphqlbackend/testing.go index da750b17a58d..fb73b41f3851 100644 --- a/cmd/frontend/graphqlbackend/testing.go +++ b/cmd/frontend/graphqlbackend/testing.go @@ -17,7 +17,7 @@ func mustParseGraphQLSchema(t *testing.T) *graphql.Schema { t.Helper() parseSchemaOnce.Do(func() { - parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil) + parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil, nil) }) if parseSchemaErr != nil { t.Fatal(parseSchemaErr) diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index edc9305a5dd1..d958e0826b44 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -209,7 +209,7 @@ func Main(enterpriseSetupHook func() enterprise.Services) error { return errors.New("dbconn.Global is nil when trying to parse GraphQL schema") } - schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) + schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.InsightsResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) if err != nil { return err } From 0dafd6e84b683d896fa16828a636be5f35bcf9c9 Mon 
Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 17:11:54 -0700 Subject: [PATCH 16/78] initial GraphQL schema Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/schema.go | 30 ++++++++++++++++++++++ cmd/frontend/graphqlbackend/schema.graphql | 30 ++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index 0ba9fba432b5..505ee8f7c10e 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2397,6 +2397,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! +} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2711,6 +2736,11 @@ type Query { name: String! ): Campaign + """ + Queries code insights + """ + insights(): Insights + """ Looks up a repository by either name or cloneURL. """ diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 47dc8b8990df..1adb60b6d7e4 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2390,6 +2390,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! +} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. 
""" @@ -2704,6 +2729,11 @@ type Query { name: String! ): Campaign + """ + Queries code insights + """ + insights(): Insights + """ Looks up a repository by either name or cloneURL. """ From 8e1e37050e9209ac993bdca4021ca05c9d67ae4f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 19 Jan 2021 17:30:27 -0700 Subject: [PATCH 17/78] enterprise: stub resolvers out --- cmd/frontend/graphqlbackend/insights.go | 8 +++---- enterprise/cmd/frontend/main.go | 2 ++ enterprise/internal/insights/CODENOTIFY | 2 ++ enterprise/internal/insights/insights.go | 14 +++++++++++ .../internal/insights/resolvers/resolver.go | 24 +++++++++++++++++++ 5 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 enterprise/internal/insights/CODENOTIFY create mode 100644 enterprise/internal/insights/insights.go create mode 100644 enterprise/internal/insights/resolvers/resolver.go diff --git a/cmd/frontend/graphqlbackend/insights.go b/cmd/frontend/graphqlbackend/insights.go index d7c15965a859..c038bd98505f 100644 --- a/cmd/frontend/graphqlbackend/insights.go +++ b/cmd/frontend/graphqlbackend/insights.go @@ -9,12 +9,12 @@ import ( // return an error if not running in enterprise mode. The actual resolvers can be found in // enterprise/internal/insights/resolvers -type InsightDataPointResolver interface { +type InsightsDataPointResolver interface { DateTime() DateTime Value() float64 } -type PointsArgs struct { +type InsightsPointsArgs struct { From *DateTime To *DateTime } @@ -24,7 +24,7 @@ type InsightsResolver interface { Insights(ctx context.Context) (InsightsResolver, error) // Insights type resolvers. 
- Points(ctx context.Context, args *PointsArgs) ([]InsightDataPointResolver, error) + Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) } var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") @@ -35,7 +35,7 @@ func (defaultInsightsResolver) Insights(ctx context.Context) (InsightsResolver, return nil, insightsOnlyInEnterprise } -func (defaultInsightsResolver) Points(ctx context.Context, args *PointsArgs) ([]InsightDataPointResolver, error) { +func (defaultInsightsResolver) Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) { return nil, insightsOnlyInEnterprise } diff --git a/enterprise/cmd/frontend/main.go b/enterprise/cmd/frontend/main.go index 5e828120594a..ddc9033b2a69 100644 --- a/enterprise/cmd/frontend/main.go +++ b/enterprise/cmd/frontend/main.go @@ -20,6 +20,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executor" licensing "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/licensing/init" "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/auth" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/graphqlbackend" @@ -35,6 +36,7 @@ var initFunctions = map[string]func(ctx context.Context, enterpriseServices *ent "licensing": licensing.Init, "executor": executor.Init, "codeintel": codeintel.Init, + "insights": insights.Init, "campaigns": campaigns.InitFrontend, "codemonitors": codemonitors.Init, } diff --git a/enterprise/internal/insights/CODENOTIFY b/enterprise/internal/insights/CODENOTIFY new file mode 100644 index 000000000000..363fb0bda7f4 --- /dev/null +++ b/enterprise/internal/insights/CODENOTIFY @@ -0,0 +1,2 @@ +**/* @slimsag +**/* @felixfbecker diff --git a/enterprise/internal/insights/insights.go 
b/enterprise/internal/insights/insights.go new file mode 100644 index 000000000000..05422b7fdf20 --- /dev/null +++ b/enterprise/internal/insights/insights.go @@ -0,0 +1,14 @@ +package insights + +import ( + "context" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" +) + +// Init initializes the given enterpriseServices to include the required resolvers for insights. +func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + enterpriseServices.InsightsResolver = resolvers.New() + return nil +} diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go new file mode 100644 index 000000000000..4bb313fa2bdb --- /dev/null +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -0,0 +1,24 @@ +package resolvers + +import ( + "context" + "errors" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" +) + +// Resolver is the GraphQL resolver of all things related to Insights. 
+type Resolver struct{} + +// New returns a new Resolver whose store uses the given db +func New() graphqlbackend.InsightsResolver { + return &Resolver{} +} + +func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { + return r, nil +} + +func (r *Resolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + return nil, errors.New("not yet implemented") +} From 8db17be20f78aadc549a0da209ed83587532cab3 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 16:53:30 -0700 Subject: [PATCH 18/78] graphql schema: fix typo Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/schema.go | 2 +- cmd/frontend/graphqlbackend/schema.graphql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index 505ee8f7c10e..b671703de737 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2739,7 +2739,7 @@ type Query { """ Queries code insights """ - insights(): Insights + insights: Insights """ Looks up a repository by either name or cloneURL. diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 1adb60b6d7e4..50e912c68b96 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2732,7 +2732,7 @@ type Query { """ Queries code insights """ - insights(): Insights + insights: Insights """ Looks up a repository by either name or cloneURL. 
From 74c18f3b133d28dc48ba4574f7b4511e21285f9e Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 17:06:10 -0700 Subject: [PATCH 19/78] graphql schema: mark as experimental Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/schema.go | 2 +- cmd/frontend/graphqlbackend/schema.graphql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index b671703de737..43f00f834f6d 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2737,7 +2737,7 @@ type Query { ): Campaign """ - Queries code insights + EXPERIMENTAL: Queries code insights """ insights: Insights diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 50e912c68b96..509bbf14413f 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2730,7 +2730,7 @@ type Query { ): Campaign """ - Queries code insights + EXPERIMENTAL: Queries code insights """ insights: Insights From e44cfdd7696f68bd32f76246b1d17b8abe1f1149 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 17:11:12 -0700 Subject: [PATCH 20/78] insights: add GraphQL backend scaffolding This adds the needed GraphQL backend scaffolding for code insights. The actual schema here is not important and will definitely change. I've based this on what we do for Code Intel, Campaigns, and Code Monitoring. It seemed a large enough change (without actually implementing any GraphQL APIs) to warrant sending separately. This way, PRs that implement actual APIs for Code Insights can be reviewed on solely that, rather than all the scaffolding that is required. 
Helps #17221 Signed-off-by: Stephen Gutekanst --- cmd/frontend/enterprise/enterprise.go | 2 + cmd/frontend/graphqlbackend/CODENOTIFY | 4 ++ cmd/frontend/graphqlbackend/graphqlbackend.go | 8 +++- cmd/frontend/graphqlbackend/insights.go | 42 +++++++++++++++++++ cmd/frontend/graphqlbackend/schema.go | 38 +++++++++++++---- cmd/frontend/graphqlbackend/schema.graphql | 38 +++++++++++++---- cmd/frontend/graphqlbackend/testing.go | 2 +- cmd/frontend/internal/cli/serve_cmd.go | 2 +- enterprise/cmd/frontend/main.go | 2 + enterprise/internal/insights/CODENOTIFY | 2 + enterprise/internal/insights/insights.go | 14 +++++++ .../internal/insights/resolvers/resolver.go | 24 +++++++++++ internal/db/dbconn/migration.go | 5 +++ 13 files changed, 164 insertions(+), 19 deletions(-) create mode 100644 cmd/frontend/graphqlbackend/insights.go create mode 100644 enterprise/internal/insights/CODENOTIFY create mode 100644 enterprise/internal/insights/insights.go create mode 100644 enterprise/internal/insights/resolvers/resolver.go diff --git a/cmd/frontend/enterprise/enterprise.go b/cmd/frontend/enterprise/enterprise.go index af6a1ad114f8..5ae5f3af029a 100644 --- a/cmd/frontend/enterprise/enterprise.go +++ b/cmd/frontend/enterprise/enterprise.go @@ -20,6 +20,7 @@ type Services struct { AuthzResolver graphqlbackend.AuthzResolver CampaignsResolver graphqlbackend.CampaignsResolver CodeIntelResolver graphqlbackend.CodeIntelResolver + InsightsResolver graphqlbackend.InsightsResolver CodeMonitorsResolver graphqlbackend.CodeMonitorsResolver LicenseResolver graphqlbackend.LicenseResolver } @@ -43,6 +44,7 @@ func DefaultServices() Services { NewExecutorProxyHandler: func() http.Handler { return makeNotFoundHandler("executor proxy") }, AuthzResolver: graphqlbackend.DefaultAuthzResolver, CampaignsResolver: graphqlbackend.DefaultCampaignsResolver, + InsightsResolver: graphqlbackend.DefaultInsightsResolver, CodeMonitorsResolver: graphqlbackend.DefaultCodeMonitorsResolver, LicenseResolver: 
graphqlbackend.DefaultLicenseResolver, } diff --git a/cmd/frontend/graphqlbackend/CODENOTIFY b/cmd/frontend/graphqlbackend/CODENOTIFY index f9e05e465ffa..9a0a80c6290e 100644 --- a/cmd/frontend/graphqlbackend/CODENOTIFY +++ b/cmd/frontend/graphqlbackend/CODENOTIFY @@ -12,3 +12,7 @@ site_monitoring.go @bobheadxi # Campaigns campaigns.go @LawnGnome campaigns.go @eseliger + +# Insights +insights.go @slimsag +insights.go @felixfbecker diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index 7758d8919c6a..a641cac30328 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -339,11 +339,12 @@ func prometheusGraphQLRequestName(requestName string) string { return "other" } -func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { +func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, insights InsightsResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { resolver := &schemaResolver{ CampaignsResolver: defaultCampaignsResolver{}, AuthzResolver: defaultAuthzResolver{}, CodeIntelResolver: defaultCodeIntelResolver{}, + Insights: defaultInsightsResolver{}, LicenseResolver: defaultLicenseResolver{}, } if campaigns != nil { @@ -354,6 +355,10 @@ func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz A EnterpriseResolvers.codeIntelResolver = codeIntel resolver.CodeIntelResolver = codeIntel } + if insights != nil { + EnterpriseResolvers.insightsResolver = insights + resolver.InsightsResolver = insights + } if authz != nil { EnterpriseResolvers.authzResolver = authz resolver.AuthzResolver = authz @@ -566,6 +571,7 @@ type schemaResolver struct { // in enterprise mode. These resolver instances are nil when running as OSS. 
var EnterpriseResolvers = struct { codeIntelResolver CodeIntelResolver + insightsResolver InsightsResolver authzResolver AuthzResolver campaignsResolver CampaignsResolver codeMonitorsResolver CodeMonitorsResolver diff --git a/cmd/frontend/graphqlbackend/insights.go b/cmd/frontend/graphqlbackend/insights.go new file mode 100644 index 000000000000..c038bd98505f --- /dev/null +++ b/cmd/frontend/graphqlbackend/insights.go @@ -0,0 +1,42 @@ +package graphqlbackend + +import ( + "context" + "errors" +) + +// This file just contains stub GraphQL resolvers and data types for Code Insights which merely +// return an error if not running in enterprise mode. The actual resolvers can be found in +// enterprise/internal/insights/resolvers + +type InsightsDataPointResolver interface { + DateTime() DateTime + Value() float64 +} + +type InsightsPointsArgs struct { + From *DateTime + To *DateTime +} + +type InsightsResolver interface { + // Root resolver + Insights(ctx context.Context) (InsightsResolver, error) + + // Insights type resolvers. + Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) +} + +var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") + +type defaultInsightsResolver struct{} + +func (defaultInsightsResolver) Insights(ctx context.Context) (InsightsResolver, error) { + return nil, insightsOnlyInEnterprise +} + +func (defaultInsightsResolver) Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) { + return nil, insightsOnlyInEnterprise +} + +var DefaultInsightsResolver InsightsResolver = defaultInsightsResolver{} diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index bc7c0eed38c6..43f00f834f6d 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2397,6 +2397,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code.
+""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! +} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2711,6 +2736,11 @@ type Query { name: String! ): Campaign + """ + EXPERIMENTAL: Queries code insights + """ + insights: Insights + """ Looks up a repository by either name or cloneURL. """ @@ -4151,14 +4181,6 @@ type ExternalService implements Node { will contain any errors that occured during the most recent completed sync. """ lastSyncError: String - """ - LastSyncAt is the time the last sync job was run for this code host - """ - lastSyncAt: DateTime! - """ - The timestamp of the next sync job - """ - nextSyncAt: DateTime! } """ diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 39ec0cfe6624..509bbf14413f 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2390,6 +2390,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! +} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2704,6 +2729,11 @@ type Query { name: String! ): Campaign + """ + EXPERIMENTAL: Queries code insights + """ + insights: Insights + """ Looks up a repository by either name or cloneURL. """ @@ -4144,14 +4174,6 @@ type ExternalService implements Node { will contain any errors that occured during the most recent completed sync. 
""" lastSyncError: String - """ - LastSyncAt is the time the last sync job was run for this code host - """ - lastSyncAt: DateTime! - """ - The timestamp of the next sync job - """ - nextSyncAt: DateTime! } """ diff --git a/cmd/frontend/graphqlbackend/testing.go b/cmd/frontend/graphqlbackend/testing.go index da750b17a58d..fb73b41f3851 100644 --- a/cmd/frontend/graphqlbackend/testing.go +++ b/cmd/frontend/graphqlbackend/testing.go @@ -17,7 +17,7 @@ func mustParseGraphQLSchema(t *testing.T) *graphql.Schema { t.Helper() parseSchemaOnce.Do(func() { - parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil) + parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil, nil) }) if parseSchemaErr != nil { t.Fatal(parseSchemaErr) diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index edc9305a5dd1..d958e0826b44 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -209,7 +209,7 @@ func Main(enterpriseSetupHook func() enterprise.Services) error { return errors.New("dbconn.Global is nil when trying to parse GraphQL schema") } - schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) + schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.InsightsResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) if err != nil { return err } diff --git a/enterprise/cmd/frontend/main.go b/enterprise/cmd/frontend/main.go index 5e828120594a..ddc9033b2a69 100644 --- a/enterprise/cmd/frontend/main.go +++ b/enterprise/cmd/frontend/main.go @@ -20,6 +20,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executor" licensing "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/licensing/init" 
"github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/auth" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/graphqlbackend" @@ -35,6 +36,7 @@ var initFunctions = map[string]func(ctx context.Context, enterpriseServices *ent "licensing": licensing.Init, "executor": executor.Init, "codeintel": codeintel.Init, + "insights": insights.Init, "campaigns": campaigns.InitFrontend, "codemonitors": codemonitors.Init, } diff --git a/enterprise/internal/insights/CODENOTIFY b/enterprise/internal/insights/CODENOTIFY new file mode 100644 index 000000000000..363fb0bda7f4 --- /dev/null +++ b/enterprise/internal/insights/CODENOTIFY @@ -0,0 +1,2 @@ +**/* @slimsag +**/* @felixfbecker diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go new file mode 100644 index 000000000000..05422b7fdf20 --- /dev/null +++ b/enterprise/internal/insights/insights.go @@ -0,0 +1,14 @@ +package insights + +import ( + "context" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" +) + +// Init initializes the given enterpriseServices to include the required resolvers for insights. +func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + enterpriseServices.InsightsResolver = resolvers.New() + return nil +} diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go new file mode 100644 index 000000000000..4bb313fa2bdb --- /dev/null +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -0,0 +1,24 @@ +package resolvers + +import ( + "context" + "errors" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" +) + +// Resolver is the GraphQL resolver of all things related to Insights. 
+type Resolver struct{} + +// New returns a new Resolver whose store uses the given db +func New() graphqlbackend.InsightsResolver { + return &Resolver{} +} + +func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { + return r, nil +} + +func (r *Resolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + return nil, errors.New("not yet implemented") +} diff --git a/internal/db/dbconn/migration.go b/internal/db/dbconn/migration.go index 9608efe53967..a3703b827b9d 100644 --- a/internal/db/dbconn/migration.go +++ b/internal/db/dbconn/migration.go @@ -12,6 +12,7 @@ import ( "github.com/inconshreveable/log15" "github.com/pkg/errors" + codeinsightsMigrations "github.com/sourcegraph/sourcegraph/migrations/codeinsights" codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel" frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend" ) @@ -31,6 +32,10 @@ var databases = map[string]struct { MigrationsTable: "codeintel_schema_migrations", Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), }, + "codeinsights": { + MigrationsTable: "codeinsights_schema_migrations", + Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), + }, } // DatabaseNames returns the list of database names (configured via `dbutil.databases`).. 
From 3ecec6c7b20de743ff35cde89b3c30429ae56748 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 17:32:57 -0700 Subject: [PATCH 21/78] remove code added from merge conflict Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/graphqlbackend.go | 32 ++-- internal/db/dbutil/dbutil.go | 171 +----------------- 2 files changed, 20 insertions(+), 183 deletions(-) diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index a0fd19ef219b..7758d8919c6a 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -339,12 +339,11 @@ func prometheusGraphQLRequestName(requestName string) string { return "other" } -func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, insights InsightsResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { +func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { resolver := &schemaResolver{ CampaignsResolver: defaultCampaignsResolver{}, AuthzResolver: defaultAuthzResolver{}, CodeIntelResolver: defaultCodeIntelResolver{}, - InsightsResolver: defaultInsightsResolver{}, LicenseResolver: defaultLicenseResolver{}, } if campaigns != nil { @@ -355,10 +354,6 @@ func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, insight EnterpriseResolvers.codeIntelResolver = codeIntel resolver.CodeIntelResolver = codeIntel } - if insights != nil { - EnterpriseResolvers.insightsResolver = insights - resolver.InsightsResolver = insights - } if authz != nil { EnterpriseResolvers.authzResolver = authz resolver.AuthzResolver = authz @@ -563,7 +558,6 @@ type schemaResolver struct { CampaignsResolver AuthzResolver CodeIntelResolver - InsightsResolver CodeMonitorsResolver LicenseResolver } @@ -572,14 +566,12 @@ type 
schemaResolver struct { // in enterprise mode. These resolver instances are nil when running as OSS. var EnterpriseResolvers = struct { codeIntelResolver CodeIntelResolver - insightsResolver InsightsResolver authzResolver AuthzResolver campaignsResolver CampaignsResolver codeMonitorsResolver CodeMonitorsResolver licenseResolver LicenseResolver }{ codeIntelResolver: defaultCodeIntelResolver{}, - insightsResolver: defaultInsightsResolver{}, authzResolver: defaultAuthzResolver{}, campaignsResolver: defaultCampaignsResolver{}, codeMonitorsResolver: defaultCodeMonitorsResolver{}, @@ -861,6 +853,14 @@ func (r *codeHostRepositoryConnectionResolver) Nodes(ctx context.Context) ([]*co // signal the collector to finish close(results) }() + + // are we allowed to show the user private repos? + allowPrivate, err := allowPrivate(ctx, r.userID) + if err != nil { + r.err = err + return + } + // collect all results r.nodes = []*codeHostRepositoryResolver{} for repos := range results { @@ -869,16 +869,15 @@ func (r *codeHostRepositoryConnectionResolver) Nodes(ctx context.Context) ([]*co if r.query != "" && !strings.Contains(strings.ToLower(repo.Name), r.query) { continue } + if !allowPrivate && repo.Private { + continue + } r.nodes = append(r.nodes, &codeHostRepositoryResolver{ codeHost: svcsByID[repo.CodeHostID], repo: &repo, }) } } - if err != nil { - r.err = err - return - } sort.Slice(r.nodes, func(i, j int) bool { return r.nodes[i].repo.Name < r.nodes[j].repo.Name }) @@ -904,3 +903,10 @@ func (r *codeHostRepositoryResolver) CodeHost(ctx context.Context) *externalServ externalService: r.codeHost, } } + +func allowPrivate(ctx context.Context, userID int32) (bool, error) { + if conf.ExternalServiceUserMode() == conf.ExternalServiceModeAll { + return true, nil + } + return db.Users.HasTag(ctx, userID, db.TagAllowUserExternalServicePrivate) +} diff --git a/internal/db/dbutil/dbutil.go b/internal/db/dbutil/dbutil.go index 1cc75588400d..e4bfae0810ea 100644 --- 
a/internal/db/dbutil/dbutil.go +++ b/internal/db/dbutil/dbutil.go @@ -8,28 +8,15 @@ import ( "encoding/json" "fmt" "net/url" - "os" - "strconv" "strings" "time" - // Register driver - - "github.com/jackc/pgconn" - _ "github.com/lib/pq" - - "github.com/golang-migrate/migrate/v4" - "github.com/golang-migrate/migrate/v4/database/postgres" - bindata "github.com/golang-migrate/migrate/v4/source/go_bindata" "github.com/hashicorp/go-multierror" - "github.com/inconshreveable/log15" + "github.com/jackc/pgconn" "github.com/opentracing/opentracing-go/ext" "github.com/pkg/errors" "github.com/sourcegraph/sourcegraph/internal/trace/ot" - codeinsightsMigrations "github.com/sourcegraph/sourcegraph/migrations/codeinsights" - codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel" - frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend" ) // Transaction calls f within a transaction, rolling back if any error is @@ -80,162 +67,6 @@ type TxBeginner interface { BeginTx(context.Context, *sql.TxOptions) (*sql.Tx, error) } -// NewDB returns a new *sql.DB from the given dsn (data source name). -func NewDB(dsn, app string) (*sql.DB, error) { - cfg, err := url.Parse(dsn) - if err != nil { - return nil, errors.Wrap(err, "failed to parse dsn") - } - - qry := cfg.Query() - - // Force PostgreSQL session timezone to UTC. - qry.Set("timezone", "UTC") - - // Force application name. - qry.Set("application_name", app) - - // Set max open and idle connections - maxOpen, _ := strconv.Atoi(qry.Get("max_conns")) - if maxOpen == 0 { - maxOpen = 30 - } - qry.Del("max_conns") - - cfg.RawQuery = qry.Encode() - db, err := sql.Open("postgres", cfg.String()) - if err != nil { - return nil, errors.Wrap(err, "failed to connect to database") - } - - // On sourcegraph.com we can sometimes startup faster than the - // cloud-sql-proxy. 
- if err := pingDbWithRetry(db, 10); err != nil { - return nil, errors.Wrap(err, "failed to ping database") - } - - db.SetMaxOpenConns(maxOpen) - db.SetMaxIdleConns(maxOpen) - db.SetConnMaxLifetime(time.Minute) - - return db, nil -} - -func pingDbWithRetry(db *sql.DB, attempts int) error { - const maxWait = time.Second - wait := 50 * time.Millisecond - for i := 0; i < attempts-1; i++ { - if err := db.Ping(); err == nil { - return nil - } - time.Sleep(wait) - wait *= 2 - if wait > maxWait { - wait = maxWait - } - } - return db.Ping() -} - -// databases configures the migrations we want based on a database name. This -// configuration includes the name of the migration version table as well as -// the raw migration assets to run to migrate the target schema to a new version. -var databases = map[string]struct { - MigrationsTable string - Resource *bindata.AssetSource -}{ - "frontend": { - MigrationsTable: "schema_migrations", - Resource: bindata.Resource(frontendMigrations.AssetNames(), frontendMigrations.Asset), - }, - "codeintel": { - MigrationsTable: "codeintel_schema_migrations", - Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), - }, - "codeinsights": { - MigrationsTable: "codeinsights_schema_migrations", - Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), - }, -} - -// DatabaseNames returns the list of database names (configured via `dbutil.databases`).. -var DatabaseNames = func() []string { - var names []string - for databaseName := range databases { - names = append(names, databaseName) - } - - return names -}() - -// MigrationTables returns the list of migration table names (configured via `dbutil.databases`). 
-var MigrationTables = func() []string { - var migrationTables []string - for _, db := range databases { - migrationTables = append(migrationTables, db.MigrationsTable) - } - - return migrationTables -}() - -// NewMigrate returns a new configured migration object for the given database name. This database -// name must be present in the `dbutil.databases` map. This migration can be subsequently run by -// invoking `dbutil.DoMigrate`. -func NewMigrate(db *sql.DB, databaseName string) (*migrate.Migrate, error) { - schemaData, ok := databases[databaseName] - if !ok { - return nil, fmt.Errorf("unknown database '%s'", databaseName) - } - - driver, err := postgres.WithInstance(db, &postgres.Config{ - MigrationsTable: schemaData.MigrationsTable, - }) - if err != nil { - return nil, err - } - - d, err := bindata.WithInstance(schemaData.Resource) - if err != nil { - return nil, err - } - - m, err := migrate.NewWithInstance("go-bindata", d, "postgres", driver) - if err != nil { - return nil, err - } - - // In case another process was faster and runs migrations, we will wait - // this long - m.LockTimeout = 5 * time.Minute - if os.Getenv("LOG_MIGRATE_TO_STDOUT") != "" { - m.Log = stdoutLogger{} - } - - return m, nil -} - -// DoMigrate runs all up migrations. -func DoMigrate(m *migrate.Migrate) (err error) { - err = m.Up() - if err == nil || err == migrate.ErrNoChange { - return nil - } - - if os.IsNotExist(err) { - // This should only happen if the DB is ahead of the migrations available - version, dirty, verr := m.Version() - if verr != nil { - return verr - } - if dirty { // this shouldn't happen, but checking anyways - return err - } - log15.Warn("WARNING: Detected an old version of Sourcegraph. The database has migrated to a newer version. If you have applied a rollback, this is expected and you can ignore this warning. 
If not, please contact support@sourcegraph.com for further assistance.", "db_version", version) - return nil - } - return err -} - type stdoutLogger struct{} func (stdoutLogger) Printf(format string, v ...interface{}) { From 7423a50b6af33ce1467da23193bbe62741f9d538 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 17:11:12 -0700 Subject: [PATCH 22/78] insights: add GraphQL backend scaffolding This adds the needed GraphQL backend scaffolding for code insights. The actual schema here is not important and will definitely change. I've based this on what we do for Code Intel, Campaigns, and Code Monitoring. It seemed a large enough change (without actually implementing any GraphQL APIs) to warrant sending separately. This way, PRs that implement actual APIs for Code Insights can be reviewed on solely that, rather than all the scaffolding that is required. Helps #17221 Signed-off-by: Stephen Gutekanst --- cmd/frontend/enterprise/enterprise.go | 2 + cmd/frontend/graphqlbackend/CODENOTIFY | 4 ++ cmd/frontend/graphqlbackend/graphqlbackend.go | 8 +++- cmd/frontend/graphqlbackend/insights.go | 42 +++++++++++++++++++ cmd/frontend/graphqlbackend/schema.go | 30 +++++++++++++ cmd/frontend/graphqlbackend/schema.graphql | 30 +++++++++++++ cmd/frontend/graphqlbackend/testing.go | 2 +- cmd/frontend/internal/cli/serve_cmd.go | 2 +- enterprise/cmd/frontend/main.go | 2 + enterprise/internal/insights/CODENOTIFY | 2 + enterprise/internal/insights/insights.go | 14 +++++++ .../internal/insights/resolvers/resolver.go | 24 +++++++++++ internal/db/dbconn/migration.go | 5 +++ 13 files changed, 164 insertions(+), 3 deletions(-) create mode 100644 cmd/frontend/graphqlbackend/insights.go create mode 100644 enterprise/internal/insights/CODENOTIFY create mode 100644 enterprise/internal/insights/insights.go create mode 100644 enterprise/internal/insights/resolvers/resolver.go diff --git a/cmd/frontend/enterprise/enterprise.go b/cmd/frontend/enterprise/enterprise.go index 
af6a1ad114f8..5ae5f3af029a 100644 --- a/cmd/frontend/enterprise/enterprise.go +++ b/cmd/frontend/enterprise/enterprise.go @@ -20,6 +20,7 @@ type Services struct { AuthzResolver graphqlbackend.AuthzResolver CampaignsResolver graphqlbackend.CampaignsResolver CodeIntelResolver graphqlbackend.CodeIntelResolver + InsightsResolver graphqlbackend.InsightsResolver CodeMonitorsResolver graphqlbackend.CodeMonitorsResolver LicenseResolver graphqlbackend.LicenseResolver } @@ -43,6 +44,7 @@ func DefaultServices() Services { NewExecutorProxyHandler: func() http.Handler { return makeNotFoundHandler("executor proxy") }, AuthzResolver: graphqlbackend.DefaultAuthzResolver, CampaignsResolver: graphqlbackend.DefaultCampaignsResolver, + InsightsResolver: graphqlbackend.DefaultInsightsResolver, CodeMonitorsResolver: graphqlbackend.DefaultCodeMonitorsResolver, LicenseResolver: graphqlbackend.DefaultLicenseResolver, } diff --git a/cmd/frontend/graphqlbackend/CODENOTIFY b/cmd/frontend/graphqlbackend/CODENOTIFY index f9e05e465ffa..9a0a80c6290e 100644 --- a/cmd/frontend/graphqlbackend/CODENOTIFY +++ b/cmd/frontend/graphqlbackend/CODENOTIFY @@ -12,3 +12,7 @@ site_monitoring.go @bobheadxi # Campaigns campaigns.go @LawnGnome campaigns.go @eseliger + +# Insights +insights.go @slimsag +insights.go @felixfbecker diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index 7758d8919c6a..a641cac30328 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -339,11 +339,12 @@ func prometheusGraphQLRequestName(requestName string) string { return "other" } -func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { +func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, insights InsightsResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, 
license LicenseResolver) (*graphql.Schema, error) { resolver := &schemaResolver{ CampaignsResolver: defaultCampaignsResolver{}, AuthzResolver: defaultAuthzResolver{}, CodeIntelResolver: defaultCodeIntelResolver{}, + InsightsResolver: defaultInsightsResolver{}, LicenseResolver: defaultLicenseResolver{}, } if campaigns != nil { @@ -354,6 +355,10 @@ func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz A EnterpriseResolvers.codeIntelResolver = codeIntel resolver.CodeIntelResolver = codeIntel } + if insights != nil { + EnterpriseResolvers.insightsResolver = insights + resolver.InsightsResolver = insights + } if authz != nil { EnterpriseResolvers.authzResolver = authz resolver.AuthzResolver = authz @@ -566,6 +571,7 @@ type schemaResolver struct { // in enterprise mode. These resolver instances are nil when running as OSS. var EnterpriseResolvers = struct { codeIntelResolver CodeIntelResolver + insightsResolver InsightsResolver authzResolver AuthzResolver campaignsResolver CampaignsResolver codeMonitorsResolver CodeMonitorsResolver diff --git a/cmd/frontend/graphqlbackend/insights.go b/cmd/frontend/graphqlbackend/insights.go new file mode 100644 index 000000000000..c038bd98505f --- /dev/null +++ b/cmd/frontend/graphqlbackend/insights.go @@ -0,0 +1,42 @@ +package graphqlbackend + +import ( + "context" + "errors" +) + +// This file just contains stub GraphQL resolvers and data types for Code Insights which merely +// return an error if not running in enterprise mode. The actual resolvers can be found in +// enterprise/internal/insights/resolvers + +type InsightsDataPointResolver interface { + DateTime() DateTime + Value() float64 +} + +type InsightsPointsArgs struct { + From *DateTime + To *DateTime +} + +type InsightsResolver interface { + // Root resolver + Insights(ctx context.Context) (InsightsResolver, error) + + // Insights type resolvers.
+ Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) +} + +var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") + +type defaultInsightsResolver struct{} + +func (defaultInsightsResolver) Insights(ctx context.Context) (InsightsResolver, error) { + return nil, insightsOnlyInEnterprise +} + +func (defaultInsightsResolver) Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) { + return nil, insightsOnlyInEnterprise +} + +var DefaultInsightsResolver InsightsResolver = defaultInsightsResolver{} diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index bc7c0eed38c6..6095534f6b23 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2397,6 +2397,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! +} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2711,6 +2736,11 @@ type Query { name: String! ): Campaign + """ + EXPERIMENTAL: Queries code insights + """ + insights: Insights + """ Looks up a repository by either name or cloneURL. """ diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 39ec0cfe6624..fee75dc7f74f 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2390,6 +2390,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! 
+} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2704,6 +2729,11 @@ type Query { name: String! ): Campaign + """ + EXPERIMENTAL: Queries code insights + """ + insights: Insights + """ Looks up a repository by either name or cloneURL. """ diff --git a/cmd/frontend/graphqlbackend/testing.go b/cmd/frontend/graphqlbackend/testing.go index da750b17a58d..fb73b41f3851 100644 --- a/cmd/frontend/graphqlbackend/testing.go +++ b/cmd/frontend/graphqlbackend/testing.go @@ -17,7 +17,7 @@ func mustParseGraphQLSchema(t *testing.T) *graphql.Schema { t.Helper() parseSchemaOnce.Do(func() { - parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil) + parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil, nil) }) if parseSchemaErr != nil { t.Fatal(parseSchemaErr) diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index edc9305a5dd1..d958e0826b44 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -209,7 +209,7 @@ func Main(enterpriseSetupHook func() enterprise.Services) error { return errors.New("dbconn.Global is nil when trying to parse GraphQL schema") } - schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) + schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.InsightsResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) if err != nil { return err } diff --git a/enterprise/cmd/frontend/main.go b/enterprise/cmd/frontend/main.go index 5e828120594a..ddc9033b2a69 100644 --- a/enterprise/cmd/frontend/main.go +++ b/enterprise/cmd/frontend/main.go 
@@ -20,6 +20,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executor" licensing "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/licensing/init" "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/auth" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/graphqlbackend" @@ -35,6 +36,7 @@ var initFunctions = map[string]func(ctx context.Context, enterpriseServices *ent "licensing": licensing.Init, "executor": executor.Init, "codeintel": codeintel.Init, + "insights": insights.Init, "campaigns": campaigns.InitFrontend, "codemonitors": codemonitors.Init, } diff --git a/enterprise/internal/insights/CODENOTIFY b/enterprise/internal/insights/CODENOTIFY new file mode 100644 index 000000000000..363fb0bda7f4 --- /dev/null +++ b/enterprise/internal/insights/CODENOTIFY @@ -0,0 +1,2 @@ +**/* @slimsag +**/* @felixfbecker diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go new file mode 100644 index 000000000000..05422b7fdf20 --- /dev/null +++ b/enterprise/internal/insights/insights.go @@ -0,0 +1,14 @@ +package insights + +import ( + "context" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" +) + +// Init initializes the given enterpriseServices to include the required resolvers for insights. 
+func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + enterpriseServices.InsightsResolver = resolvers.New() + return nil +} diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go new file mode 100644 index 000000000000..4bb313fa2bdb --- /dev/null +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -0,0 +1,24 @@ +package resolvers + +import ( + "context" + "errors" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" +) + +// Resolver is the GraphQL resolver of all things related to Insights. +type Resolver struct{} + +// New returns a new Resolver whose store uses the given db +func New() graphqlbackend.InsightsResolver { + return &Resolver{} +} + +func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { + return r, nil +} + +func (r *Resolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + return nil, errors.New("not yet implemented") +} diff --git a/internal/db/dbconn/migration.go b/internal/db/dbconn/migration.go index 9608efe53967..a3703b827b9d 100644 --- a/internal/db/dbconn/migration.go +++ b/internal/db/dbconn/migration.go @@ -12,6 +12,7 @@ import ( "github.com/inconshreveable/log15" "github.com/pkg/errors" + codeinsightsMigrations "github.com/sourcegraph/sourcegraph/migrations/codeinsights" codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel" frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend" ) @@ -31,6 +32,10 @@ var databases = map[string]struct { MigrationsTable: "codeintel_schema_migrations", Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), }, + "codeinsights": { + MigrationsTable: "codeinsights_schema_migrations", + Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), + }, } // DatabaseNames returns 
the list of database names (configured via `dbutil.databases`).. From f807ff5c55579f9b1b80e9833945efda4540c2a2 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 17:11:12 -0700 Subject: [PATCH 23/78] insights: add GraphQL backend scaffolding This adds the needed GraphQL backend scaffolding for code insights. The actual schema here is not important and will definitely change. I've based this on what we do for Code Intel, Campaigns, and Code Monitoring. It seemed a large enough change (without actually implementing any GraphQL APIs) to warrant sending separately. This way, PRs that implement actual APIs for Code Insights can be reviewed on solely that, rather than all the scaffolding that is required. Helps #17221 Signed-off-by: Stephen Gutekanst --- cmd/frontend/enterprise/enterprise.go | 2 + cmd/frontend/graphqlbackend/CODENOTIFY | 4 ++ cmd/frontend/graphqlbackend/graphqlbackend.go | 9 +++- cmd/frontend/graphqlbackend/insights.go | 42 +++++++++++++++++++ cmd/frontend/graphqlbackend/schema.go | 30 +++++++++++++ cmd/frontend/graphqlbackend/schema.graphql | 30 +++++++++++++ cmd/frontend/graphqlbackend/testing.go | 2 +- cmd/frontend/internal/cli/serve_cmd.go | 2 +- enterprise/cmd/frontend/main.go | 2 + enterprise/internal/insights/CODENOTIFY | 2 + enterprise/internal/insights/insights.go | 14 +++++++ .../internal/insights/resolvers/resolver.go | 24 +++++++++++ internal/db/dbconn/migration.go | 5 +++ 13 files changed, 165 insertions(+), 3 deletions(-) create mode 100644 cmd/frontend/graphqlbackend/insights.go create mode 100644 enterprise/internal/insights/CODENOTIFY create mode 100644 enterprise/internal/insights/insights.go create mode 100644 enterprise/internal/insights/resolvers/resolver.go diff --git a/cmd/frontend/enterprise/enterprise.go b/cmd/frontend/enterprise/enterprise.go index af6a1ad114f8..5ae5f3af029a 100644 --- a/cmd/frontend/enterprise/enterprise.go +++ b/cmd/frontend/enterprise/enterprise.go @@ -20,6 +20,7 @@ type Services struct { 
AuthzResolver graphqlbackend.AuthzResolver CampaignsResolver graphqlbackend.CampaignsResolver CodeIntelResolver graphqlbackend.CodeIntelResolver + InsightsResolver graphqlbackend.InsightsResolver CodeMonitorsResolver graphqlbackend.CodeMonitorsResolver LicenseResolver graphqlbackend.LicenseResolver } @@ -43,6 +44,7 @@ func DefaultServices() Services { NewExecutorProxyHandler: func() http.Handler { return makeNotFoundHandler("executor proxy") }, AuthzResolver: graphqlbackend.DefaultAuthzResolver, CampaignsResolver: graphqlbackend.DefaultCampaignsResolver, + InsightsResolver: graphqlbackend.DefaultInsightsResolver, CodeMonitorsResolver: graphqlbackend.DefaultCodeMonitorsResolver, LicenseResolver: graphqlbackend.DefaultLicenseResolver, } diff --git a/cmd/frontend/graphqlbackend/CODENOTIFY b/cmd/frontend/graphqlbackend/CODENOTIFY index f9e05e465ffa..9a0a80c6290e 100644 --- a/cmd/frontend/graphqlbackend/CODENOTIFY +++ b/cmd/frontend/graphqlbackend/CODENOTIFY @@ -12,3 +12,7 @@ site_monitoring.go @bobheadxi # Campaigns campaigns.go @LawnGnome campaigns.go @eseliger + +# Insights +insights.go @slimsag +insights.go @felixfbecker diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index 7758d8919c6a..475e796e2ee9 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -339,11 +339,12 @@ func prometheusGraphQLRequestName(requestName string) string { return "other" } -func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { +func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, insights InsightsResolver, authz AuthzResolver, codeMonitors CodeMonitorsResolver, license LicenseResolver) (*graphql.Schema, error) { resolver := &schemaResolver{ CampaignsResolver: defaultCampaignsResolver{}, AuthzResolver: defaultAuthzResolver{}, 
CodeIntelResolver: defaultCodeIntelResolver{}, + InsightsResolver: defaultInsightsResolver{}, LicenseResolver: defaultLicenseResolver{}, } if campaigns != nil { @@ -354,6 +355,10 @@ func NewSchema(campaigns CampaignsResolver, codeIntel CodeIntelResolver, authz A EnterpriseResolvers.codeIntelResolver = codeIntel resolver.CodeIntelResolver = codeIntel } + if insights != nil { + EnterpriseResolvers.insightsResolver = insights + resolver.InsightsResolver = insights + } if authz != nil { EnterpriseResolvers.authzResolver = authz resolver.AuthzResolver = authz @@ -558,6 +563,7 @@ type schemaResolver struct { CampaignsResolver AuthzResolver CodeIntelResolver + InsightsResolver CodeMonitorsResolver LicenseResolver } @@ -566,6 +572,7 @@ type schemaResolver struct { // in enterprise mode. These resolver instances are nil when running as OSS. var EnterpriseResolvers = struct { codeIntelResolver CodeIntelResolver + insightsResolver InsightsResolver authzResolver AuthzResolver campaignsResolver CampaignsResolver codeMonitorsResolver CodeMonitorsResolver diff --git a/cmd/frontend/graphqlbackend/insights.go b/cmd/frontend/graphqlbackend/insights.go new file mode 100644 index 000000000000..c038bd98505f --- /dev/null +++ b/cmd/frontend/graphqlbackend/insights.go @@ -0,0 +1,42 @@ +package graphqlbackend + +import ( + "context" + "errors" +) + +// This file just contains stub GraphQL resolvers and data types for Code Insights which merely +// return an error if not running in enterprise mode. The actual resolvers can be found in +// enterprise/internal/insights/resolvers + +type InsightsDataPointResolver interface { + DateTime() DateTime + Value() float64 +} + +type InsightsPointsArgs struct { + From *DateTime + To *DateTime +} + +type InsightsResolver interface { + // Root resolver + Insights(ctx context.Context) (InsightsResolver, error) + + // Insights type resolvers. 
+ Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) +} + +var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") + +type defaultInsightsResolver struct{} + +func (defaultInsightsResolver) Insights(ctx context.Context) (InsightsResolver, error) { + return nil, insightsOnlyInEnterprise +} + +func (defaultInsightsResolver) Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) { + return nil, insightsOnlyInEnterprise +} + +var DefaultInsightsResolver InsightsResolver = defaultInsightsResolver{} diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index bc7c0eed38c6..6095534f6b23 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2397,6 +2397,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! +} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2711,6 +2736,11 @@ type Query { name: String! ): Campaign + """ + EXPERIMENTAL: Queries code insights + """ + insights: Insights + """ Looks up a repository by either name or cloneURL. """ diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 39ec0cfe6624..fee75dc7f74f 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2390,6 +2390,31 @@ type ChangesetEventConnection { pageInfo: PageInfo! } +""" +Insights about code. +""" +type Insights { + """ + Data points over a time range (inclusive) + """ + points(from: DateTime, to: DateTime): [InsightDataPoint!]! 
+} + +""" +A code insight data point. +""" +type InsightDataPoint { + """ + The time of this data point. + """ + dateTime: DateTime! + + """ + The value of the insight at this point in time. + """ + value: Float! +} + """ A new external service. """ @@ -2704,6 +2729,11 @@ type Query { name: String! ): Campaign + """ + EXPERIMENTAL: Queries code insights + """ + insights: Insights + """ Looks up a repository by either name or cloneURL. """ diff --git a/cmd/frontend/graphqlbackend/testing.go b/cmd/frontend/graphqlbackend/testing.go index da750b17a58d..fb73b41f3851 100644 --- a/cmd/frontend/graphqlbackend/testing.go +++ b/cmd/frontend/graphqlbackend/testing.go @@ -17,7 +17,7 @@ func mustParseGraphQLSchema(t *testing.T) *graphql.Schema { t.Helper() parseSchemaOnce.Do(func() { - parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil) + parsedSchema, parseSchemaErr = NewSchema(nil, nil, nil, nil, nil, nil) }) if parseSchemaErr != nil { t.Fatal(parseSchemaErr) diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index edc9305a5dd1..d958e0826b44 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -209,7 +209,7 @@ func Main(enterpriseSetupHook func() enterprise.Services) error { return errors.New("dbconn.Global is nil when trying to parse GraphQL schema") } - schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) + schema, err := graphqlbackend.NewSchema(enterprise.CampaignsResolver, enterprise.CodeIntelResolver, enterprise.InsightsResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver) if err != nil { return err } diff --git a/enterprise/cmd/frontend/main.go b/enterprise/cmd/frontend/main.go index 5e828120594a..ddc9033b2a69 100644 --- a/enterprise/cmd/frontend/main.go +++ b/enterprise/cmd/frontend/main.go 
@@ -20,6 +20,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executor" licensing "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/licensing/init" "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/auth" _ "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/graphqlbackend" @@ -35,6 +36,7 @@ var initFunctions = map[string]func(ctx context.Context, enterpriseServices *ent "licensing": licensing.Init, "executor": executor.Init, "codeintel": codeintel.Init, + "insights": insights.Init, "campaigns": campaigns.InitFrontend, "codemonitors": codemonitors.Init, } diff --git a/enterprise/internal/insights/CODENOTIFY b/enterprise/internal/insights/CODENOTIFY new file mode 100644 index 000000000000..363fb0bda7f4 --- /dev/null +++ b/enterprise/internal/insights/CODENOTIFY @@ -0,0 +1,2 @@ +**/* @slimsag +**/* @felixfbecker diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go new file mode 100644 index 000000000000..05422b7fdf20 --- /dev/null +++ b/enterprise/internal/insights/insights.go @@ -0,0 +1,14 @@ +package insights + +import ( + "context" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" +) + +// Init initializes the given enterpriseServices to include the required resolvers for insights. 
+func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + enterpriseServices.InsightsResolver = resolvers.New() + return nil +} diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go new file mode 100644 index 000000000000..4bb313fa2bdb --- /dev/null +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -0,0 +1,24 @@ +package resolvers + +import ( + "context" + "errors" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" +) + +// Resolver is the GraphQL resolver of all things related to Insights. +type Resolver struct{} + +// New returns a new Resolver whose store uses the given db +func New() graphqlbackend.InsightsResolver { + return &Resolver{} +} + +func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { + return r, nil +} + +func (r *Resolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + return nil, errors.New("not yet implemented") +} diff --git a/internal/db/dbconn/migration.go b/internal/db/dbconn/migration.go index 9608efe53967..a3703b827b9d 100644 --- a/internal/db/dbconn/migration.go +++ b/internal/db/dbconn/migration.go @@ -12,6 +12,7 @@ import ( "github.com/inconshreveable/log15" "github.com/pkg/errors" + codeinsightsMigrations "github.com/sourcegraph/sourcegraph/migrations/codeinsights" codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel" frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend" ) @@ -31,6 +32,10 @@ var databases = map[string]struct { MigrationsTable: "codeintel_schema_migrations", Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), }, + "codeinsights": { + MigrationsTable: "codeinsights_schema_migrations", + Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), + }, } // DatabaseNames returns 
the list of database names (configured via `dbutil.databases`).. From ab84a46171c84b613cd2fdf2f346135648374d54 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 17:59:18 -0700 Subject: [PATCH 24/78] graphql schema: fix merge conflict Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/schema.graphql | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index 509bbf14413f..fee75dc7f74f 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -4174,6 +4174,14 @@ type ExternalService implements Node { will contain any errors that occured during the most recent completed sync. """ lastSyncError: String + """ + LastSyncAt is the time the last sync job was run for this code host + """ + lastSyncAt: DateTime! + """ + The timestamp of the next sync job + """ + nextSyncAt: DateTime! } """ From 0df0f1ad8322f6abd5284691072b7977083c705a Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 18:02:44 -0700 Subject: [PATCH 25/78] fix test Signed-off-by: Stephen Gutekanst --- .../cmd/frontend/internal/authz/resolvers/resolver_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/cmd/frontend/internal/authz/resolvers/resolver_test.go b/enterprise/cmd/frontend/internal/authz/resolvers/resolver_test.go index f768bdbfd57b..fec01fffd935 100644 --- a/enterprise/cmd/frontend/internal/authz/resolvers/resolver_test.go +++ b/enterprise/cmd/frontend/internal/authz/resolvers/resolver_test.go @@ -45,7 +45,7 @@ func mustParseGraphQLSchema(t *testing.T, db *sql.DB) *graphql.Schema { t.Helper() parseSchemaOnce.Do(func() { - parsedSchema, parseSchemaErr = graphqlbackend.NewSchema(nil, nil, NewResolver(db, clock), nil, nil) + parsedSchema, parseSchemaErr = graphqlbackend.NewSchema(nil, nil, nil, NewResolver(db, clock), nil, nil) }) if parseSchemaErr != nil { 
t.Fatal(parseSchemaErr) From 9a62c9d7100efc2c2c3a4298c676d43a505db5b4 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 20 Jan 2021 18:05:33 -0700 Subject: [PATCH 26/78] update test NewSchema calls Signed-off-by: Stephen Gutekanst --- .../resolvers/campaign_connection_test.go | 4 ++-- .../campaigns/resolvers/campaign_spec_test.go | 2 +- .../campaigns/resolvers/campaign_test.go | 2 +- .../changeset_apply_preview_connection_test.go | 2 +- .../resolvers/changeset_apply_preview_test.go | 2 +- .../resolvers/changeset_connection_test.go | 2 +- .../campaigns/resolvers/changeset_counts_test.go | 2 +- .../resolvers/changeset_event_connection_test.go | 2 +- .../resolvers/changeset_spec_connection_test.go | 2 +- .../campaigns/resolvers/changeset_spec_test.go | 2 +- .../campaigns/resolvers/changeset_test.go | 2 +- .../resolvers/code_host_connection_test.go | 2 +- .../campaigns/resolvers/permissions_test.go | 4 ++-- .../campaigns/resolvers/resolver_test.go | 16 ++++++++-------- .../codemonitors/resolvers/resolvers_test.go | 4 ++-- .../licensing/resolvers/resolvers_test.go | 2 +- 16 files changed, 26 insertions(+), 26 deletions(-) diff --git a/enterprise/internal/campaigns/resolvers/campaign_connection_test.go b/enterprise/internal/campaigns/resolvers/campaign_connection_test.go index 56e3e4a983fb..f9cbc3230e80 100644 --- a/enterprise/internal/campaigns/resolvers/campaign_connection_test.go +++ b/enterprise/internal/campaigns/resolvers/campaign_connection_test.go @@ -77,7 +77,7 @@ func TestCampaignConnectionResolver(t *testing.T) { t.Fatal(err) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -191,7 +191,7 @@ func TestCampaignsListing(t *testing.T) { store := store.New(dbconn.Global) r := &Resolver{store: store} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, 
nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/campaign_spec_test.go b/enterprise/internal/campaigns/resolvers/campaign_spec_test.go index e5025776862f..08f201bed69e 100644 --- a/enterprise/internal/campaigns/resolvers/campaign_spec_test.go +++ b/enterprise/internal/campaigns/resolvers/campaign_spec_test.go @@ -83,7 +83,7 @@ func TestCampaignSpecResolver(t *testing.T) { t.Fatal(err) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/campaign_test.go b/enterprise/internal/campaigns/resolvers/campaign_test.go index cafcf67a7560..c864791cfc49 100644 --- a/enterprise/internal/campaigns/resolvers/campaign_test.go +++ b/enterprise/internal/campaigns/resolvers/campaign_test.go @@ -60,7 +60,7 @@ func TestCampaignResolver(t *testing.T) { t.Fatal(err) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/changeset_apply_preview_connection_test.go b/enterprise/internal/campaigns/resolvers/changeset_apply_preview_connection_test.go index 64333b81a7b3..95010b1d69ef 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_apply_preview_connection_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_apply_preview_connection_test.go @@ -70,7 +70,7 @@ func TestChangesetApplyPreviewConnectionResolver(t *testing.T) { changesetSpecs = append(changesetSpecs, s) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git 
a/enterprise/internal/campaigns/resolvers/changeset_apply_preview_test.go b/enterprise/internal/campaigns/resolvers/changeset_apply_preview_test.go index 8c1b84aadefd..ef5bcdc4b31b 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_apply_preview_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_apply_preview_test.go @@ -98,7 +98,7 @@ func TestChangesetApplyPreviewResolver(t *testing.T) { OwnedByCampaign: campaign.ID, }) - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/changeset_connection_test.go b/enterprise/internal/campaigns/resolvers/changeset_connection_test.go index f6bf0fe266a0..c2496868de77 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_connection_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_connection_test.go @@ -110,7 +110,7 @@ func TestChangesetConnectionResolver(t *testing.T) { addChangeset(t, ctx, cstore, changeset3, campaign.ID) addChangeset(t, ctx, cstore, changeset4, campaign.ID) - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/changeset_counts_test.go b/enterprise/internal/campaigns/resolvers/changeset_counts_test.go index bbe89bb374df..eb1797fea6c3 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_counts_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_counts_test.go @@ -170,7 +170,7 @@ func TestChangesetCountsOverTimeIntegration(t *testing.T) { } } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff 
--git a/enterprise/internal/campaigns/resolvers/changeset_event_connection_test.go b/enterprise/internal/campaigns/resolvers/changeset_event_connection_test.go index d25c634de610..06765328f8c4 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_event_connection_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_event_connection_test.go @@ -96,7 +96,7 @@ func TestChangesetEventConnectionResolver(t *testing.T) { addChangeset(t, ctx, cstore, changeset, campaign.ID) - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/changeset_spec_connection_test.go b/enterprise/internal/campaigns/resolvers/changeset_spec_connection_test.go index 86d8c300e533..b4ed135f1fc9 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_spec_connection_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_spec_connection_test.go @@ -70,7 +70,7 @@ func TestChangesetSpecConnectionResolver(t *testing.T) { changesetSpecs = append(changesetSpecs, s) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/changeset_spec_test.go b/enterprise/internal/campaigns/resolvers/changeset_spec_test.go index 08dd52f72d2b..53f571abbe3f 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_spec_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_spec_test.go @@ -63,7 +63,7 @@ func TestChangesetSpecResolver(t *testing.T) { t.Fatal(err) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git 
a/enterprise/internal/campaigns/resolvers/changeset_test.go b/enterprise/internal/campaigns/resolvers/changeset_test.go index a60a4e274922..37219021cfe3 100644 --- a/enterprise/internal/campaigns/resolvers/changeset_test.go +++ b/enterprise/internal/campaigns/resolvers/changeset_test.go @@ -176,7 +176,7 @@ func TestChangesetResolver(t *testing.T) { // Associate the changeset with a campaign, so it's considered in syncer logic. addChangeset(t, ctx, cstore, syncedGitHubChangeset, campaign.ID) - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/code_host_connection_test.go b/enterprise/internal/campaigns/resolvers/code_host_connection_test.go index cd16a0ab5ab8..79656842f905 100644 --- a/enterprise/internal/campaigns/resolvers/code_host_connection_test.go +++ b/enterprise/internal/campaigns/resolvers/code_host_connection_test.go @@ -52,7 +52,7 @@ func TestCodeHostConnectionResolver(t *testing.T) { t.Fatal(err) } - s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(&Resolver{store: cstore}, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/permissions_test.go b/enterprise/internal/campaigns/resolvers/permissions_test.go index 8392e50d4484..a8925e42d5bf 100644 --- a/enterprise/internal/campaigns/resolvers/permissions_test.go +++ b/enterprise/internal/campaigns/resolvers/permissions_test.go @@ -38,7 +38,7 @@ func TestPermissionLevels(t *testing.T) { cstore := store.New(dbconn.Global) sr := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(sr, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(sr, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -758,7 +758,7 @@ func TestRepositoryPermissions(t *testing.T) { cstore := 
store.New(dbconn.Global) sr := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(sr, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(sr, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/campaigns/resolvers/resolver_test.go b/enterprise/internal/campaigns/resolvers/resolver_test.go index 2bb626ac677b..0401a6fc45f9 100644 --- a/enterprise/internal/campaigns/resolvers/resolver_test.go +++ b/enterprise/internal/campaigns/resolvers/resolver_test.go @@ -34,7 +34,7 @@ import ( func TestNullIDResilience(t *testing.T) { sr := &Resolver{store: store.New(dbconn.Global)} - s, err := graphqlbackend.NewSchema(sr, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(sr, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -118,7 +118,7 @@ func TestCreateCampaignSpec(t *testing.T) { } r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -276,7 +276,7 @@ func TestCreateChangesetSpec(t *testing.T) { } r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -387,7 +387,7 @@ func TestApplyCampaign(t *testing.T) { } r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -512,7 +512,7 @@ func TestCreateCampaign(t *testing.T) { } r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -586,7 +586,7 @@ func TestMoveCampaign(t *testing.T) { } r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) 
if err != nil { t.Fatal(err) } @@ -803,7 +803,7 @@ func TestCreateCampaignsCredential(t *testing.T) { cstore := store.New(dbconn.Global) r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } @@ -867,7 +867,7 @@ func TestDeleteCampaignsCredential(t *testing.T) { cstore := store.New(dbconn.Global) r := &Resolver{store: cstore} - s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil) + s, err := graphqlbackend.NewSchema(r, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/codemonitors/resolvers/resolvers_test.go b/enterprise/internal/codemonitors/resolvers/resolvers_test.go index 8f3adc4d0056..4d3512164582 100644 --- a/enterprise/internal/codemonitors/resolvers/resolvers_test.go +++ b/enterprise/internal/codemonitors/resolvers/resolvers_test.go @@ -387,7 +387,7 @@ func TestQueryMonitor(t *testing.T) { t.Fatal(err) } - schema, err := graphqlbackend.NewSchema(nil, nil, nil, r, nil) + schema, err := graphqlbackend.NewSchema(nil, nil, nil, nil, r, nil) if err != nil { t.Fatal(err) } @@ -623,7 +623,7 @@ func TestEditCodeMonitor(t *testing.T) { // Update the code monitor. // We update all fields, delete one action, and add a new action. 
- schema, err := graphqlbackend.NewSchema(nil, nil, nil, r, nil) + schema, err := graphqlbackend.NewSchema(nil, nil, nil, nil, r, nil) if err != nil { t.Fatal(err) } diff --git a/enterprise/internal/licensing/resolvers/resolvers_test.go b/enterprise/internal/licensing/resolvers/resolvers_test.go index a76bbc40c417..7f5e098aae6b 100644 --- a/enterprise/internal/licensing/resolvers/resolvers_test.go +++ b/enterprise/internal/licensing/resolvers/resolvers_test.go @@ -13,7 +13,7 @@ import ( func TestEnterpriseLicenseHasFeature(t *testing.T) { r := &LicenseResolver{} - schema, err := graphqlbackend.NewSchema(nil, nil, nil, nil, r) + schema, err := graphqlbackend.NewSchema(nil, nil, nil, nil, nil, r) if err != nil { t.Fatal(err) } From 93916706044afb88f89208f38300c26d2e4e75f5 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 21 Jan 2021 23:32:41 -0700 Subject: [PATCH 27/78] run database migrations Signed-off-by: Stephen Gutekanst --- cmd/frontend/internal/cli/config.go | 7 +++--- enterprise/internal/insights/insights.go | 28 ++++++++++++++++++++++++ internal/conf/conftypes/conftypes.go | 5 +++++ 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/cmd/frontend/internal/cli/config.go b/cmd/frontend/internal/cli/config.go index 69d1e721128b..5ad82bd9fe60 100644 --- a/cmd/frontend/internal/cli/config.go +++ b/cmd/frontend/internal/cli/config.go @@ -341,9 +341,10 @@ func serviceConnections() conftypes.ServiceConnections { } serviceConnectionsVal = conftypes.ServiceConnections{ - GitServers: gitServers(), - PostgresDSN: dbutil.PostgresDSN("", username, os.Getenv), - CodeIntelPostgresDSN: dbutil.PostgresDSN("codeintel", username, os.Getenv), + GitServers: gitServers(), + PostgresDSN: dbutil.PostgresDSN("", username, os.Getenv), + CodeIntelPostgresDSN: dbutil.PostgresDSN("codeintel", username, os.Getenv), + CodeInsightsTimescaleDSN: dbutil.PostgresDSN("codeinsights", username, os.Getenv), } // We set this envvar in development to disable the following check 
diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index 05422b7fdf20..2d75ed4d44fe 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -2,13 +2,41 @@ package insights import ( "context" + "database/sql" + "fmt" + "log" "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" + "github.com/sourcegraph/sourcegraph/internal/conf" + "github.com/sourcegraph/sourcegraph/internal/db/dbconn" ) // Init initializes the given enterpriseServices to include the required resolvers for insights. func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + _, err := initializeCodeInsightsDB() + if err != nil { + return err + } enterpriseServices.InsightsResolver = resolvers.New() return nil } + +func initializeCodeInsightsDB() (*sql.DB, error) { + timescaleDSN := conf.Get().ServiceConnections.CodeInsightsTimescaleDSN + conf.Watch(func() { + if newDSN := conf.Get().ServiceConnections.CodeInsightsTimescaleDSN; timescaleDSN != newDSN { + log.Fatalf("Detected codeinsights database DSN change, restarting to take effect: %s", newDSN) + } + }) + + db, err := dbconn.New(timescaleDSN, "") + if err != nil { + return nil, fmt.Errorf("Failed to connect to codeinsights database: %s", err) + } + + if err := dbconn.MigrateDB(db, "codeinsights"); err != nil { + return nil, fmt.Errorf("Failed to perform codeinsights database migration: %s", err) + } + return db, nil +} diff --git a/internal/conf/conftypes/conftypes.go b/internal/conf/conftypes/conftypes.go index 5ffa44a5a02e..d0caad56e11f 100644 --- a/internal/conf/conftypes/conftypes.go +++ b/internal/conf/conftypes/conftypes.go @@ -19,6 +19,11 @@ type ServiceConnections struct { // code intel database. 
// eg: "postgres://sg@pgsql/sourcegraph_codeintel?sslmode=false" CodeIntelPostgresDSN string `json:"codeIntelPostgresDSN"` + + // CodeInsightsTimescaleDSN is the TimescaleDB data source name for the + // code insights database. + // eg: "postgres://sg@pgsql/sourcegraph_codeintel?sslmode=false" + CodeInsightsTimescaleDSN string `json:"codeinsightsTimescaleDSN"` } // RawUnified is the unparsed variant of conf.Unified. From ecbe2385ea26cacb7c5190fffd1757b83841da53 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 21 Jan 2021 23:32:58 -0700 Subject: [PATCH 28/78] dev: correct codeinsights-db port; add connection info Signed-off-by: Stephen Gutekanst --- dev/codeinsights-db.sh | 4 ++-- dev/start.sh | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/dev/codeinsights-db.sh b/dev/codeinsights-db.sh index 1b4ae3fd4cd2..f7c803cc557e 100755 --- a/dev/codeinsights-db.sh +++ b/dev/codeinsights-db.sh @@ -12,7 +12,7 @@ fi IMAGE=sourcegraph/codeinsights-db:dev CONTAINER=codeinsights-db -PORT=3370 +PORT=5435 docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER @@ -55,6 +55,6 @@ docker run --rm \ --cpus=1 \ --memory=1g \ -e POSTGRES_PASSWORD=password \ - -p 0.0.0.0:5435:5435 \ + -p 0.0.0.0:${PORT}:5432 \ -v "${DISK}":/var/lib/postgresql/data \ ${IMAGE} >"${LOG_FILE}" 2>&1 || finish diff --git a/dev/start.sh b/dev/start.sh index cbc19e0432a6..92edf1568391 100755 --- a/dev/start.sh +++ b/dev/start.sh @@ -61,6 +61,10 @@ export CODEINTEL_PGSSLMODE="${PGSSLMODE:-}" export CODEINTEL_PGDATASOURCE="${PGDATASOURCE:-}" export CODEINTEL_PG_ALLOW_SINGLE_DB=true +# Code Insights uses a separate database, because it's easier to run TimescaleDB in +# Docker than install as a Postgres extension in dev environments. 
+export CODEINSIGHTS_PGDATASOURCE=postgres://postgres:password@127.0.0.1:5435 + # Default to "info" level debugging, and "condensed" log format (nice for human readers) export SRC_LOG_LEVEL=${SRC_LOG_LEVEL:-info} export SRC_LOG_FORMAT=${SRC_LOG_FORMAT:-condensed} From 1b89ccf48436204491816d8648714793310b2b49 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 22 Jan 2021 12:47:52 -0700 Subject: [PATCH 29/78] run database migrations --- enterprise/internal/insights/insights.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index 2d75ed4d44fe..af2abc64ab71 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -22,6 +22,8 @@ func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { return nil } +// initializeCodeInsightsDB connects to and initializes the Code Insights Timescale DB, running +// database migrations before returning. func initializeCodeInsightsDB() (*sql.DB, error) { timescaleDSN := conf.Get().ServiceConnections.CodeInsightsTimescaleDSN conf.Watch(func() { From 96e54f54a9b571eac4fa04dd28ef3de15f258039 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 22 Jan 2021 15:47:00 -0700 Subject: [PATCH 30/78] internal/db: do not run TimescaleDB migrations against singleton database In our DB testing code we make an assumption that we can run all database migrations against a single testing database. This holds true for frontend and codeintel migrations, because we intentionally designed codeintel migrations to be ran in the same DB for testing/dev purposes - but this does not hold true for Code Insights which runs a separate TimescaleDB instance (it is easier to run it separately in Docker than install it as a Postgres extension in all of our dev/testing environments.) This change merely makes it so we don't run TimescaleDB migrations against our singleton testing/dev Postgres DB. 
Signed-off-by: Stephen Gutekanst --- internal/db/dbconn/migration.go | 14 +++++++++----- internal/db/dbstore_db_test.go | 4 ++-- internal/db/dbtest/dbtest.go | 2 +- internal/db/dbtesting/dbtesting.go | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/internal/db/dbconn/migration.go b/internal/db/dbconn/migration.go index a3703b827b9d..4e088e787497 100644 --- a/internal/db/dbconn/migration.go +++ b/internal/db/dbconn/migration.go @@ -22,6 +22,7 @@ import ( // the raw migration assets to run to migrate the target schema to a new version. var databases = map[string]struct { MigrationsTable string + TimescaleDB bool Resource *bindata.AssetSource }{ "frontend": { @@ -33,18 +34,21 @@ var databases = map[string]struct { Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), }, "codeinsights": { + TimescaleDB: true, MigrationsTable: "codeinsights_schema_migrations", Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), }, } -// DatabaseNames returns the list of database names (configured via `dbutil.databases`).. -var DatabaseNames = func() []string { +// PostgresDatabaseNames is the list of database names (configured via `dbutil.databases`) that are +// vanilla Postgres (not TimescaleDB). 
+var PostgresDatabaseNames = func() []string { var names []string - for databaseName := range databases { - names = append(names, databaseName) + for databaseName, info := range databases { + if !info.TimescaleDB { + names = append(names, databaseName) + } } - return names }() diff --git a/internal/db/dbstore_db_test.go b/internal/db/dbstore_db_test.go index 6db7a3784f42..1ab2dbfe3fe6 100644 --- a/internal/db/dbstore_db_test.go +++ b/internal/db/dbstore_db_test.go @@ -17,14 +17,14 @@ func TestMigrations(t *testing.T) { db := dbtesting.GetDB(t) migrate := func() { - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { if err := dbconn.MigrateDB(db, databaseName); err != nil { t.Errorf("error running initial migrations: %s", err) } } } - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { t.Run(databaseName, func(t *testing.T) { // Dropping a squash schema _all_ the way down just drops the entire public // schema. 
Because we have a "combined" database that runs migrations for diff --git a/internal/db/dbtest/dbtest.go b/internal/db/dbtest/dbtest.go index 4530d45ed269..7dc33e26e4b1 100644 --- a/internal/db/dbtest/dbtest.go +++ b/internal/db/dbtest/dbtest.go @@ -65,7 +65,7 @@ func NewDB(t testing.TB, dsn string) *sql.DB { config.Path = "/" + dbname testDB := dbConn(t, config) - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { m, err := dbconn.NewMigrate(testDB, databaseName) if err != nil { t.Fatalf("failed to construct migrations: %s", err) diff --git a/internal/db/dbtesting/dbtesting.go b/internal/db/dbtesting/dbtesting.go index e4f181c371b6..17009f803e79 100644 --- a/internal/db/dbtesting/dbtesting.go +++ b/internal/db/dbtesting/dbtesting.go @@ -159,7 +159,7 @@ func initTest(nameSuffix string) error { return err } - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil { return err } From ad13779cc723c89d8d8b4974ba3dcdc218479b54 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 25 Jan 2021 15:39:19 -0700 Subject: [PATCH 31/78] store WIP Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/insights.go | 4 +- .../internal/insights/resolvers/resolver.go | 10 +++-- .../insights/store/integration_test.go | 45 +++++++++++++++++++ enterprise/internal/insights/store/store.go | 41 +++++++++++++++++ 4 files changed, 95 insertions(+), 5 deletions(-) create mode 100644 enterprise/internal/insights/store/integration_test.go create mode 100644 enterprise/internal/insights/store/store.go diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index af2abc64ab71..7ece4c9d4d25 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -14,11 +14,11 @@ import ( // Init initializes the given 
enterpriseServices to include the required resolvers for insights. func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { - _, err := initializeCodeInsightsDB() + db, err := initializeCodeInsightsDB() if err != nil { return err } - enterpriseServices.InsightsResolver = resolvers.New() + enterpriseServices.InsightsResolver = resolvers.New(db) return nil } diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index 4bb313fa2bdb..ed8b10d0b2e3 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -5,14 +5,18 @@ import ( "errors" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/store" + "github.com/sourcegraph/sourcegraph/internal/db/dbutil" ) // Resolver is the GraphQL resolver of all things related to Insights. -type Resolver struct{} +type Resolver struct { + store *store.Store +} // New returns a new Resolver whose store uses the given db -func New() graphqlbackend.InsightsResolver { - return &Resolver{} +func New(db dbutil.DB) graphqlbackend.InsightsResolver { + return &Resolver{store: store.New(db)} } func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { diff --git a/enterprise/internal/insights/store/integration_test.go b/enterprise/internal/insights/store/integration_test.go new file mode 100644 index 000000000000..74260919eeae --- /dev/null +++ b/enterprise/internal/insights/store/integration_test.go @@ -0,0 +1,45 @@ +package store + +import ( + "context" + "database/sql" + "os" + "os/user" + "testing" + + "github.com/sourcegraph/sourcegraph/internal/db/dbconn" + "github.com/sourcegraph/sourcegraph/internal/db/dbutil" + "github.com/sourcegraph/sourcegraph/internal/timeutil" +) + +func TestIntegration(t *testing.T) { + if testing.Short() { + t.Skip() + } + + t.Parallel() + + 
getTimescaleDB := func(t testing.TB) *sql.DB { + // Setup TimescaleDB for testing. + username := "" + if user, err := user.Current(); err == nil { + username = user.Username + } + timescaleDSN := dbutil.PostgresDSN("codeinsights", username, os.Getenv) + db, err := dbconn.New(timescaleDSN, "insights-test-"+t.Name()) + if err != nil { + t.Fatalf("Failed to connect to codeinsights database: %s", err) + } + if err := dbconn.MigrateDB(db, "codeinsights"); err != nil { + t.Fatalf("Failed to perform codeinsights database migration: %s", err) + } + return db + } + + t.Run("Integration", func(t *testing.T) { + ctx := context.Background() + clock := timeutil.Now + store := NewWithClock(getTimescaleDB(t), clock) + t.Run("Insights", func(t *testing.T) { testInsights(t, ctx, store, clock) }) + }) +} diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go new file mode 100644 index 000000000000..87ecab642e20 --- /dev/null +++ b/enterprise/internal/insights/store/store.go @@ -0,0 +1,41 @@ +package store + +import ( + "database/sql" + "time" + + "github.com/sourcegraph/sourcegraph/internal/db/basestore" + "github.com/sourcegraph/sourcegraph/internal/db/dbutil" + "github.com/sourcegraph/sourcegraph/internal/timeutil" +) + +// Store exposes methods to read and write code insights domain models from +// persistent storage. +type Store struct { + *basestore.Store + now func() time.Time +} + +// New returns a new Store backed by the given db. +func New(db dbutil.DB) *Store { + return NewWithClock(db, timeutil.Now) +} + +// NewWithClock returns a new Store backed by the given db and +// clock for timestamps. +func NewWithClock(db dbutil.DB, clock func() time.Time) *Store { + return &Store{Store: basestore.NewWithDB(db, sql.TxOptions{}), now: clock} +} + +var _ basestore.ShareableStore = &Store{} + +// Handle returns the underlying transactable database handle. +// Needed to implement the ShareableStore interface. 
+func (s *Store) Handle() *basestore.TransactableHandle { return s.Store.Handle() } + +// With creates a new Store with the given basestore.Shareable store as the +// underlying basestore.Store. +// Needed to implement the basestore.Store interface +func (s *Store) With(other basestore.ShareableStore) *Store { + return &Store{Store: s.Store.With(other), now: s.now} +} From 0e0b8162e1a4ae727654b317d9cf418adf728a18 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 25 Jan 2021 15:39:37 -0700 Subject: [PATCH 32/78] WIP Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/insights.go | 5 +- .../internal/insights/resolvers/resolver.go | 14 +- .../internal/insights/store/insights_test.go | 616 ++++++++++++++++++ .../insights/store/integration_test.go | 7 +- enterprise/internal/insights/store/store.go | 127 +++- 5 files changed, 760 insertions(+), 9 deletions(-) create mode 100644 enterprise/internal/insights/store/insights_test.go diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index 7ece4c9d4d25..d2490f9873ec 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -14,11 +14,12 @@ import ( // Init initializes the given enterpriseServices to include the required resolvers for insights. 
func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { - db, err := initializeCodeInsightsDB() + timescale, err := initializeCodeInsightsDB() if err != nil { return err } - enterpriseServices.InsightsResolver = resolvers.New(db) + postgres := dbconn.Global + enterpriseServices.InsightsResolver = resolvers.New(timescale, postgres) return nil } diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index ed8b10d0b2e3..d17e4e286c2d 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -6,20 +6,26 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/store" + "github.com/sourcegraph/sourcegraph/internal/db" "github.com/sourcegraph/sourcegraph/internal/db/dbutil" ) // Resolver is the GraphQL resolver of all things related to Insights. type Resolver struct { - store *store.Store + store *store.Store + settingStore *db.SettingStore } -// New returns a new Resolver whose store uses the given db -func New(db dbutil.DB) graphqlbackend.InsightsResolver { - return &Resolver{store: store.New(db)} +// New returns a new Resolver whose store uses the given Timescale and Postgres DBs. 
+func New(timescale, postgres dbutil.DB) graphqlbackend.InsightsResolver { + return &Resolver{ + store: store.New(timescale), + settingsStore: db.Settings(postgres), + } } func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { + // TODO: locate insights from user, org, global settings using r.settingStore.ListAll() return r, nil } diff --git a/enterprise/internal/insights/store/insights_test.go b/enterprise/internal/insights/store/insights_test.go new file mode 100644 index 000000000000..3c3ecf7db355 --- /dev/null +++ b/enterprise/internal/insights/store/insights_test.go @@ -0,0 +1,616 @@ +package store + +import ( + "context" + "testing" + "time" + /* + "fmt" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/keegancsmith/sqlf" + + "github.com/sourcegraph/go-diff/diff" + + ct "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/testing" + "github.com/sourcegraph/sourcegraph/internal/actor" + "github.com/sourcegraph/sourcegraph/internal/campaigns" + "github.com/sourcegraph/sourcegraph/internal/db" + "github.com/sourcegraph/sourcegraph/internal/extsvc" + */) + +func testInsights(t *testing.T, ctx context.Context, s *Store, clock func() time.Time) { + /* + cs := make([]*campaigns.Campaign, 0, 3) + + t.Run("Create", func(t *testing.T) { + for i := 0; i < cap(cs); i++ { + c := &campaigns.Campaign{ + Name: fmt.Sprintf("test-campaign-%d", i), + Description: "All the Javascripts are belong to us", + + InitialApplierID: int32(i) + 50, + LastAppliedAt: clock.Now(), + LastApplierID: int32(i) + 99, + + CampaignSpecID: 1742 + int64(i), + ClosedAt: clock.Now(), + } + + if i == 0 { + // Check for nullability of fields by not setting them + c.ClosedAt = time.Time{} + } + + if i%2 == 0 { + c.NamespaceOrgID = int32(i) + 23 + } else { + c.NamespaceUserID = c.InitialApplierID + } + + want := c.Clone() + have := c + + err := s.CreateCampaign(ctx, have) + if err != nil { + t.Fatal(err) + } + + if have.ID == 0 { + t.Fatal("ID 
should not be zero") + } + + want.ID = have.ID + want.CreatedAt = clock.Now() + want.UpdatedAt = clock.Now() + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + + cs = append(cs, c) + } + }) + + t.Run("Count", func(t *testing.T) { + count, err := s.CountCampaigns(ctx, CountCampaignsOpts{}) + if err != nil { + t.Fatal(err) + } + + if have, want := count, len(cs); have != want { + t.Fatalf("have count: %d, want: %d", have, want) + } + + changeset := ct.CreateChangeset(t, ctx, s, ct.TestChangesetOpts{ + Campaigns: []campaigns.CampaignAssoc{{CampaignID: cs[0].ID}}, + }) + + count, err = s.CountCampaigns(ctx, CountCampaignsOpts{ChangesetID: changeset.ID}) + if err != nil { + t.Fatal(err) + } + + if have, want := count, 1; have != want { + t.Fatalf("have count: %d, want: %d", have, want) + } + + t.Run("OnlyForAuthor set", func(t *testing.T) { + for _, c := range cs { + count, err = s.CountCampaigns(ctx, CountCampaignsOpts{InitialApplierID: c.InitialApplierID}) + if err != nil { + t.Fatal(err) + } + if have, want := count, 1; have != want { + t.Fatalf("Incorrect number of campaigns counted, want=%d have=%d", want, have) + } + } + }) + + t.Run("NamespaceUserID", func(t *testing.T) { + wantCounts := map[int32]int{} + for _, c := range cs { + if c.NamespaceUserID == 0 { + continue + } + wantCounts[c.NamespaceUserID] += 1 + } + if len(wantCounts) == 0 { + t.Fatalf("No campaigns with NamespaceUserID") + } + + for userID, want := range wantCounts { + have, err := s.CountCampaigns(ctx, CountCampaignsOpts{NamespaceUserID: userID}) + if err != nil { + t.Fatal(err) + } + + if have != want { + t.Fatalf("campaigns count for NamespaceUserID=%d wrong. 
want=%d, have=%d", userID, want, have) + } + } + }) + + t.Run("NamespaceOrgID", func(t *testing.T) { + wantCounts := map[int32]int{} + for _, c := range cs { + if c.NamespaceOrgID == 0 { + continue + } + wantCounts[c.NamespaceOrgID] += 1 + } + if len(wantCounts) == 0 { + t.Fatalf("No campaigns with NamespaceOrgID") + } + + for orgID, want := range wantCounts { + have, err := s.CountCampaigns(ctx, CountCampaignsOpts{NamespaceOrgID: orgID}) + if err != nil { + t.Fatal(err) + } + + if have != want { + t.Fatalf("campaigns count for NamespaceOrgID=%d wrong. want=%d, have=%d", orgID, want, have) + } + } + }) + }) + + t.Run("List", func(t *testing.T) { + t.Run("By ChangesetID", func(t *testing.T) { + for i := 1; i <= len(cs); i++ { + changeset := ct.CreateChangeset(t, ctx, s, ct.TestChangesetOpts{ + Campaigns: []campaigns.CampaignAssoc{{CampaignID: cs[i-1].ID}}, + }) + opts := ListCampaignsOpts{ChangesetID: changeset.ID} + + ts, next, err := s.ListCampaigns(ctx, opts) + if err != nil { + t.Fatal(err) + } + + if have, want := next, int64(0); have != want { + t.Fatalf("opts: %+v: have next %v, want %v", opts, have, want) + } + + have, want := ts, cs[i-1:i] + if len(have) != len(want) { + t.Fatalf("listed %d campaigns, want: %d", len(have), len(want)) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatalf("opts: %+v, diff: %s", opts, diff) + } + } + }) + + // The campaigns store returns the campaigns in reversed order. 
+ reversedCampaigns := make([]*campaigns.Campaign, len(cs)) + for i, c := range cs { + reversedCampaigns[len(cs)-i-1] = c + } + + t.Run("With Limit", func(t *testing.T) { + for i := 1; i <= len(reversedCampaigns); i++ { + cs, next, err := s.ListCampaigns(ctx, ListCampaignsOpts{LimitOpts: LimitOpts{Limit: i}}) + if err != nil { + t.Fatal(err) + } + + { + have, want := next, int64(0) + if i < len(reversedCampaigns) { + want = reversedCampaigns[i].ID + } + + if have != want { + t.Fatalf("limit: %v: have next %v, want %v", i, have, want) + } + } + + { + have, want := cs, reversedCampaigns[:i] + if len(have) != len(want) { + t.Fatalf("listed %d campaigns, want: %d", len(have), len(want)) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + } + } + }) + + t.Run("With Cursor", func(t *testing.T) { + var cursor int64 + for i := 1; i <= len(reversedCampaigns); i++ { + opts := ListCampaignsOpts{Cursor: cursor, LimitOpts: LimitOpts{Limit: 1}} + have, next, err := s.ListCampaigns(ctx, opts) + if err != nil { + t.Fatal(err) + } + + want := reversedCampaigns[i-1 : i] + if diff := cmp.Diff(have, want); diff != "" { + t.Fatalf("opts: %+v, diff: %s", opts, diff) + } + + cursor = next + } + }) + + filterTests := []struct { + name string + state campaigns.CampaignState + want []*campaigns.Campaign + }{ + { + name: "Any", + state: campaigns.CampaignStateAny, + want: reversedCampaigns, + }, + { + name: "Closed", + state: campaigns.CampaignStateClosed, + want: reversedCampaigns[:len(reversedCampaigns)-1], + }, + { + name: "Open", + state: campaigns.CampaignStateOpen, + want: cs[0:1], + }, + } + + for _, tc := range filterTests { + t.Run("ListCampaigns State "+tc.name, func(t *testing.T) { + have, _, err := s.ListCampaigns(ctx, ListCampaignsOpts{State: tc.state}) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(have, tc.want); diff != "" { + t.Fatal(diff) + } + }) + } + + t.Run("ListCampaigns OnlyForAuthor set", func(t *testing.T) { + for _, c := range cs 
{ + have, next, err := s.ListCampaigns(ctx, ListCampaignsOpts{InitialApplierID: c.InitialApplierID}) + if err != nil { + t.Fatal(err) + } + if next != 0 { + t.Fatal("Next value was true, but false expected") + } + if have, want := len(have), 1; have != want { + t.Fatalf("Incorrect number of campaigns returned, want=%d have=%d", want, have) + } + if diff := cmp.Diff(have[0], c); diff != "" { + t.Fatal(diff) + } + } + }) + + t.Run("ListCampaigns by NamespaceUserID", func(t *testing.T) { + for _, c := range cs { + if c.NamespaceUserID == 0 { + continue + } + opts := ListCampaignsOpts{NamespaceUserID: c.NamespaceUserID} + have, _, err := s.ListCampaigns(ctx, opts) + if err != nil { + t.Fatal(err) + } + + for _, haveCampaign := range have { + if have, want := haveCampaign.NamespaceUserID, opts.NamespaceUserID; have != want { + t.Fatalf("campaign has wrong NamespaceUserID. want=%d, have=%d", want, have) + } + } + } + }) + + t.Run("ListCampaigns by NamespaceOrgID", func(t *testing.T) { + for _, c := range cs { + if c.NamespaceOrgID == 0 { + continue + } + opts := ListCampaignsOpts{NamespaceOrgID: c.NamespaceOrgID} + have, _, err := s.ListCampaigns(ctx, opts) + if err != nil { + t.Fatal(err) + } + + for _, haveCampaign := range have { + if have, want := haveCampaign.NamespaceOrgID, opts.NamespaceOrgID; have != want { + t.Fatalf("campaign has wrong NamespaceOrgID. 
want=%d, have=%d", want, have) + } + } + } + }) + }) + + t.Run("Update", func(t *testing.T) { + for _, c := range cs { + c.Name += "-updated" + c.Description += "-updated" + c.InitialApplierID++ + c.ClosedAt = c.ClosedAt.Add(5 * time.Second) + + if c.NamespaceUserID != 0 { + c.NamespaceUserID++ + } + + if c.NamespaceOrgID != 0 { + c.NamespaceOrgID++ + } + + clock.Add(1 * time.Second) + + want := c + want.UpdatedAt = clock.Now() + + have := c.Clone() + if err := s.UpdateCampaign(ctx, have); err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + } + }) + + t.Run("Get", func(t *testing.T) { + t.Run("ByID", func(t *testing.T) { + want := cs[0] + opts := GetCampaignOpts{ID: want.ID} + + have, err := s.GetCampaign(ctx, opts) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + }) + + t.Run("ByCampaignSpecID", func(t *testing.T) { + want := cs[0] + opts := GetCampaignOpts{CampaignSpecID: want.CampaignSpecID} + + have, err := s.GetCampaign(ctx, opts) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + }) + + t.Run("ByName", func(t *testing.T) { + want := cs[0] + + have, err := s.GetCampaign(ctx, GetCampaignOpts{Name: want.Name}) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + }) + + t.Run("ByNamespaceUserID", func(t *testing.T) { + for _, c := range cs { + if c.NamespaceUserID == 0 { + continue + } + + want := c + opts := GetCampaignOpts{NamespaceUserID: c.NamespaceUserID} + + have, err := s.GetCampaign(ctx, opts) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + } + }) + + t.Run("ByNamespaceOrgID", func(t *testing.T) { + for _, c := range cs { + if c.NamespaceOrgID == 0 { + continue + } + + want := c + opts := GetCampaignOpts{NamespaceOrgID: c.NamespaceOrgID} + + have, err := s.GetCampaign(ctx, 
opts) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + } + }) + + t.Run("NoResults", func(t *testing.T) { + opts := GetCampaignOpts{ID: 0xdeadbeef} + + _, have := s.GetCampaign(ctx, opts) + want := ErrNoResults + + if have != want { + t.Fatalf("have err %v, want %v", have, want) + } + }) + }) + + t.Run("GetCampaignDiffStat", func(t *testing.T) { + userID := ct.CreateTestUser(t, false).ID + userCtx := actor.WithActor(ctx, actor.FromUser(userID)) + repoStore := db.ReposWith(s) + esStore := db.ExternalServicesWith(s) + repo := ct.TestRepo(t, esStore, extsvc.KindGitHub) + repo.Private = true + if err := repoStore.Create(ctx, repo); err != nil { + t.Fatal(err) + } + + campaignID := cs[0].ID + var testDiffStatCount int32 = 10 + ct.CreateChangeset(t, ctx, s, ct.TestChangesetOpts{ + Repo: repo.ID, + Campaigns: []campaigns.CampaignAssoc{{CampaignID: campaignID}}, + DiffStatAdded: testDiffStatCount, + DiffStatChanged: testDiffStatCount, + DiffStatDeleted: testDiffStatCount, + }) + + { + want := &diff.Stat{ + Added: testDiffStatCount, + Changed: testDiffStatCount, + Deleted: testDiffStatCount, + } + opts := GetCampaignDiffStatOpts{CampaignID: campaignID} + have, err := s.GetCampaignDiffStat(userCtx, opts) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + } + + // Now revoke repo access, and check that we don't see it in the diff stat anymore. 
+ ct.MockRepoPermissions(t, 0, repo.ID) + { + want := &diff.Stat{ + Added: 0, + Changed: 0, + Deleted: 0, + } + opts := GetCampaignDiffStatOpts{CampaignID: campaignID} + have, err := s.GetCampaignDiffStat(userCtx, opts) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(have, want); diff != "" { + t.Fatal(diff) + } + } + }) + + t.Run("Delete", func(t *testing.T) { + for i := range cs { + err := s.DeleteCampaign(ctx, cs[i].ID) + if err != nil { + t.Fatal(err) + } + + count, err := s.CountCampaigns(ctx, CountCampaignsOpts{}) + if err != nil { + t.Fatal(err) + } + + if have, want := count, len(cs)-(i+1); have != want { + t.Fatalf("have count: %d, want: %d", have, want) + } + } + }) + } + + func testUserDeleteCascades(t *testing.T, ctx context.Context, s *Store, clock ct.Clock) { + orgID := ct.InsertTestOrg(t, "user-delete-cascades") + user := ct.CreateTestUser(t, false) + + t.Run("User delete", func(t *testing.T) { + // Set up two campaigns and specs: one in the user's namespace (which + // should be deleted when the user is hard deleted), and one that is + // merely created by the user (which should remain). 
+ ownedSpec := &campaigns.CampaignSpec{ + NamespaceUserID: user.ID, + UserID: user.ID, + } + if err := s.CreateCampaignSpec(ctx, ownedSpec); err != nil { + t.Fatal(err) + } + + unownedSpec := &campaigns.CampaignSpec{ + NamespaceOrgID: orgID, + UserID: user.ID, + } + if err := s.CreateCampaignSpec(ctx, unownedSpec); err != nil { + t.Fatal(err) + } + + ownedCampaign := &campaigns.Campaign{ + Name: "owned", + NamespaceUserID: user.ID, + InitialApplierID: user.ID, + LastApplierID: user.ID, + LastAppliedAt: clock.Now(), + CampaignSpecID: ownedSpec.ID, + } + if err := s.CreateCampaign(ctx, ownedCampaign); err != nil { + t.Fatal(err) + } + + unownedCampaign := &campaigns.Campaign{ + Name: "unowned", + NamespaceOrgID: orgID, + InitialApplierID: user.ID, + LastApplierID: user.ID, + LastAppliedAt: clock.Now(), + CampaignSpecID: ownedSpec.ID, + } + if err := s.CreateCampaign(ctx, unownedCampaign); err != nil { + t.Fatal(err) + } + + // Now we'll try actually deleting the user. + if err := s.Exec(ctx, sqlf.Sprintf( + "DELETE FROM users WHERE id = %s", + user.ID, + )); err != nil { + t.Fatal(err) + } + + // We should now have the unowned campaign still be valid, but the + // owned campaign should have gone away. + cs, _, err := s.ListCampaigns(ctx, ListCampaignsOpts{}) + if err != nil { + t.Fatal(err) + } + if len(cs) != 1 { + t.Errorf("unexpected number of campaigns: have %d; want %d", len(cs), 1) + } + if cs[0].ID != unownedCampaign.ID { + t.Errorf("unexpected campaign: %+v", cs[0]) + } + + // Both campaign specs should still be in place, at least until we add + // a foreign key constraint to campaign_specs.namespace_user_id. 
+ specs, _, err := s.ListCampaignSpecs(ctx, ListCampaignSpecsOpts{}) + if err != nil { + t.Fatal(err) + } + if len(specs) != 2 { + t.Errorf("unexpected number of campaign specs: have %d; want %d", len(specs), 2) + } + }) + */ +} diff --git a/enterprise/internal/insights/store/integration_test.go b/enterprise/internal/insights/store/integration_test.go index 74260919eeae..fdeade0f60f5 100644 --- a/enterprise/internal/insights/store/integration_test.go +++ b/enterprise/internal/insights/store/integration_test.go @@ -5,9 +5,11 @@ import ( "database/sql" "os" "os/user" + "strings" "testing" "github.com/sourcegraph/sourcegraph/internal/db/dbconn" + "github.com/sourcegraph/sourcegraph/internal/db/dbtesting" "github.com/sourcegraph/sourcegraph/internal/db/dbutil" "github.com/sourcegraph/sourcegraph/internal/timeutil" ) @@ -26,7 +28,7 @@ func TestIntegration(t *testing.T) { username = user.Username } timescaleDSN := dbutil.PostgresDSN("codeinsights", username, os.Getenv) - db, err := dbconn.New(timescaleDSN, "insights-test-"+t.Name()) + db, err := dbconn.New(timescaleDSN, "insights-test-"+strings.Replace(t.Name(), "/", "_", -1)) if err != nil { t.Fatalf("Failed to connect to codeinsights database: %s", err) } @@ -35,11 +37,12 @@ func TestIntegration(t *testing.T) { } return db } + getPostgresDB := dbtesting.GetDB t.Run("Integration", func(t *testing.T) { ctx := context.Background() clock := timeutil.Now - store := NewWithClock(getTimescaleDB(t), clock) + store := NewWithClock(getTimescaleDB(t), getPostgresDB(), clock) t.Run("Insights", func(t *testing.T) { testInsights(t, ctx, store, clock) }) }) } diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 87ecab642e20..12b0ce6a13b5 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -16,7 +16,7 @@ type Store struct { now func() time.Time } -// New returns a new Store backed by the given db. 
+// New returns a new Store backed by the given Timescale db. func New(db dbutil.DB) *Store { return NewWithClock(db, timeutil.Now) } @@ -39,3 +39,128 @@ func (s *Store) Handle() *basestore.TransactableHandle { return s.Store.Handle() func (s *Store) With(other basestore.ShareableStore) *Store { return &Store{Store: s.Store.With(other), now: s.now} } + +/* +// Transact creates a new transaction. +// It's required to implement this method and wrap the Transact method of the +// underlying basestore.Store. +func (s *Store) Transact(ctx context.Context) (*Store, error) { + txBase, err := s.Store.Transact(ctx) + if err != nil { + return nil, err + } + return &Store{Store: txBase, now: s.now}, nil +} + +func (s *Store) query(ctx context.Context, q *sqlf.Query, sc scanFunc) error { + rows, err := s.Store.Query(ctx, q) + if err != nil { + return err + } + return scanAll(rows, sc) +} + +func (s *Store) queryCount(ctx context.Context, q *sqlf.Query) (int, error) { + count, ok, err := basestore.ScanFirstInt(s.Query(ctx, q)) + if err != nil || !ok { + return count, err + } + return count, nil +} + +// scanner captures the Scan method of sql.Rows and sql.Row +type scanner interface { + Scan(dst ...interface{}) error +} + +// a scanFunc scans one or more rows from a scanner, returning +// the last id column scanned and the count of scanned rows. 
+type scanFunc func(scanner) (err error) + +func scanAll(rows *sql.Rows, scan scanFunc) (err error) { + defer func() { err = basestore.CloseRows(rows, err) }() + + for rows.Next() { + if err = scan(rows); err != nil { + return err + } + } + + return rows.Err() +} +*/ + +/* +func jsonbColumn(metadata interface{}) (msg json.RawMessage, err error) { + switch m := metadata.(type) { + case nil: + msg = json.RawMessage("{}") + case string: + msg = json.RawMessage(m) + case []byte: + msg = m + case json.RawMessage: + msg = m + default: + msg, err = json.MarshalIndent(m, " ", " ") + } + return +} + +func jsonSetColumn(ids []int64) ([]byte, error) { + set := make(map[int64]*struct{}, len(ids)) + for _, id := range ids { + set[id] = nil + } + return json.Marshal(set) +} + +func nullInt32Column(n int32) *int32 { + if n == 0 { + return nil + } + return &n +} + +func nullInt64Column(n int64) *int64 { + if n == 0 { + return nil + } + return &n +} + +func nullTimeColumn(t time.Time) *time.Time { + if t.IsZero() { + return nil + } + return &t +} + +func nullStringColumn(s string) *string { + if s == "" { + return nil + } + return &s +} + +type LimitOpts struct { + Limit int +} + +func (o LimitOpts) DBLimit() int { + if o.Limit == 0 { + return o.Limit + } + // We always request one item more than actually requested, to determine the next ID for pagination. + // The store should make sure to strip the last element in a result set, if len(rs) == o.DBLimit(). 
+ return o.Limit + 1 +} +g +func (o LimitOpts) ToDB() string { + var limitClause string + if o.Limit > 0 { + limitClause = fmt.Sprintf("LIMIT %d", o.DBLimit()) + } + return limitClause +} +*/ From 600a8562d187c8326a48318c6a668b44aada0887 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 22 Jan 2021 15:47:00 -0700 Subject: [PATCH 33/78] internal/db: do not run TimescaleDB migrations against singleton database In our DB testing code we make an assumption that we can run all database migrations against a single testing database. This holds true for frontend and codeintel migrations, because we intentionally designed codeintel migrations to be ran in the same DB for testing/dev purposes - but this does not hold true for Code Insights which runs a separate TimescaleDB instance (it is easier to run it separately in Docker than install it as a Postgres extension in all of our dev/testing environments.) This change merely makes it so we don't run TimescaleDB migrations against our singleton testing/dev Postgres DB. Signed-off-by: Stephen Gutekanst --- internal/database/dbconn/migration.go | 14 +++++++++----- internal/database/dbstore_db_test.go | 4 ++-- internal/database/dbtest/dbtest.go | 2 +- internal/database/dbtesting/dbtesting.go | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/internal/database/dbconn/migration.go b/internal/database/dbconn/migration.go index a3703b827b9d..4e088e787497 100644 --- a/internal/database/dbconn/migration.go +++ b/internal/database/dbconn/migration.go @@ -22,6 +22,7 @@ import ( // the raw migration assets to run to migrate the target schema to a new version. 
var databases = map[string]struct { MigrationsTable string + TimescaleDB bool Resource *bindata.AssetSource }{ "frontend": { @@ -33,18 +34,21 @@ var databases = map[string]struct { Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), }, "codeinsights": { + TimescaleDB: true, MigrationsTable: "codeinsights_schema_migrations", Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), }, } -// DatabaseNames returns the list of database names (configured via `dbutil.databases`).. -var DatabaseNames = func() []string { +// PostgresDatabaseNames is the list of database names (configured via `dbutil.databases`) that are +// vanilla Postgres (not TimescaleDB). +var PostgresDatabaseNames = func() []string { var names []string - for databaseName := range databases { - names = append(names, databaseName) + for databaseName, info := range databases { + if !info.TimescaleDB { + names = append(names, databaseName) + } } - return names }() diff --git a/internal/database/dbstore_db_test.go b/internal/database/dbstore_db_test.go index 534df545f577..dee594306dbd 100644 --- a/internal/database/dbstore_db_test.go +++ b/internal/database/dbstore_db_test.go @@ -17,14 +17,14 @@ func TestMigrations(t *testing.T) { db := dbtesting.GetDB(t) migrate := func() { - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { if err := dbconn.MigrateDB(db, databaseName); err != nil { t.Errorf("error running initial migrations: %s", err) } } } - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { t.Run(databaseName, func(t *testing.T) { // Dropping a squash schema _all_ the way down just drops the entire public // schema. 
Because we have a "combined" database that runs migrations for diff --git a/internal/database/dbtest/dbtest.go b/internal/database/dbtest/dbtest.go index 5e9360802b4b..03e462a30ea2 100644 --- a/internal/database/dbtest/dbtest.go +++ b/internal/database/dbtest/dbtest.go @@ -65,7 +65,7 @@ func NewDB(t testing.TB, dsn string) *sql.DB { config.Path = "/" + dbname testDB := dbConn(t, config) - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { m, err := dbconn.NewMigrate(testDB, databaseName) if err != nil { t.Fatalf("failed to construct migrations: %s", err) diff --git a/internal/database/dbtesting/dbtesting.go b/internal/database/dbtesting/dbtesting.go index 3a83f98dcbaf..1c44f0d05e55 100644 --- a/internal/database/dbtesting/dbtesting.go +++ b/internal/database/dbtesting/dbtesting.go @@ -159,7 +159,7 @@ func initTest(nameSuffix string) error { return err } - for _, databaseName := range dbconn.DatabaseNames { + for _, databaseName := range dbconn.PostgresDatabaseNames { if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil { return err } From 22634277aafb7a831aa8030364803e72ea9de8d8 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 25 Jan 2021 18:55:00 -0700 Subject: [PATCH 34/78] internal/database: make "single database" usage explicit Signed-off-by: Stephen Gutekanst --- internal/database/dbconn/migration.go | 76 ++++++++++-------------- internal/database/dbstore_db_test.go | 18 +++--- internal/database/dbtest/dbtest.go | 7 ++- internal/database/dbtesting/dbtesting.go | 12 ++-- 4 files changed, 53 insertions(+), 60 deletions(-) diff --git a/internal/database/dbconn/migration.go b/internal/database/dbconn/migration.go index 4e088e787497..98942f9bc861 100644 --- a/internal/database/dbconn/migration.go +++ b/internal/database/dbconn/migration.go @@ -17,50 +17,39 @@ import ( frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend" ) -// databases configures the 
migrations we want based on a database name. This -// configuration includes the name of the migration version table as well as -// the raw migration assets to run to migrate the target schema to a new version. -var databases = map[string]struct { +// Database describes one of our Postgres (or Postgres-like) databases. +type Database struct { + // Name is the name of the database. + Name string + + // MigrationsTable is the migrations SQL table name. MigrationsTable string - TimescaleDB bool - Resource *bindata.AssetSource -}{ - "frontend": { + + // Resource describes the raw migration assets to run to migrate the target schema to a new + // version. + Resource *bindata.AssetSource + + // TargetsTimescaleDB indicates if the database targets TimescaleDB. Otherwise, Postgres. + TargetsTimescaleDB bool +} + +var ( + Frontend = &Database{ MigrationsTable: "schema_migrations", Resource: bindata.Resource(frontendMigrations.AssetNames(), frontendMigrations.Asset), - }, - "codeintel": { + } + + CodeIntel = &Database{ MigrationsTable: "codeintel_schema_migrations", Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), - }, - "codeinsights": { - TimescaleDB: true, - MigrationsTable: "codeinsights_schema_migrations", - Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), - }, -} - -// PostgresDatabaseNames is the list of database names (configured via `dbutil.databases`) that are -// vanilla Postgres (not TimescaleDB). -var PostgresDatabaseNames = func() []string { - var names []string - for databaseName, info := range databases { - if !info.TimescaleDB { - names = append(names, databaseName) - } - } - return names -}() - -// MigrationTables returns the list of migration table names (configured via `dbutil.databases`). 
-var MigrationTables = func() []string { - var migrationTables []string - for _, db := range databases { - migrationTables = append(migrationTables, db.MigrationsTable) } - return migrationTables -}() + CodeInsights = &Database{ + TargetsTimescaleDB: true, + MigrationsTable: "codeinsights_schema_migrations", + Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), + } +) func MigrateDB(db *sql.DB, databaseName string) error { m, err := NewMigrate(db, databaseName) @@ -73,17 +62,12 @@ func MigrateDB(db *sql.DB, databaseName string) error { return nil } -// NewMigrate returns a new configured migration object for the given database name. This database -// name must be present in the `dbconn.databases` map. This migration can be subsequently run by -// invoking `dbconn.DoMigrate`. -func NewMigrate(db *sql.DB, databaseName string) (*migrate.Migrate, error) { - schemaData, ok := databases[databaseName] - if !ok { - return nil, fmt.Errorf("unknown database '%s'", databaseName) - } +// NewMigrate returns a new configured migration object for the given database. The migration can +// be subsequently run by invoking `dbconn.DoMigrate`. 
+func NewMigrate(db *sql.DB, database *Database) (*migrate.Migrate, error) { driver, err := postgres.WithInstance(db, &postgres.Config{ - MigrationsTable: schemaData.MigrationsTable, + MigrationsTable: database.MigrationsTable, }) if err != nil { return nil, err diff --git a/internal/database/dbstore_db_test.go b/internal/database/dbstore_db_test.go index dee594306dbd..9729ed8b46f2 100644 --- a/internal/database/dbstore_db_test.go +++ b/internal/database/dbstore_db_test.go @@ -17,15 +17,19 @@ func TestMigrations(t *testing.T) { db := dbtesting.GetDB(t) migrate := func() { - for _, databaseName := range dbconn.PostgresDatabaseNames { - if err := dbconn.MigrateDB(db, databaseName); err != nil { - t.Errorf("error running initial migrations: %s", err) - } + if err := dbconn.MigrateDB(db, dbconn.Frontend); err != nil { + t.Errorf("error running initial migrations: %s", err) + } + if err := dbconn.MigrateDB(db, dbconn.CodeIntel); err != nil { + t.Errorf("error running initial migrations: %s", err) } } - for _, databaseName := range dbconn.PostgresDatabaseNames { - t.Run(databaseName, func(t *testing.T) { + for _, database := range []*dbconn.Database{ + dbconn.Frontend, + dbconn.CodeIntel, + } { + t.Run(database.Name, func(t *testing.T) { // Dropping a squash schema _all_ the way down just drops the entire public // schema. Because we have a "combined" database that runs migrations for // multiple disjoint schemas in development environments, migrating all the @@ -33,7 +37,7 @@ func TestMigrations(t *testing.T) { // migrations, so we prep our tests by re-migrating up on each iteration. 
migrate() - m, err := dbconn.NewMigrate(db, databaseName) + m, err := dbconn.NewMigrate(db, database) if err != nil { t.Errorf("error constructing migrations: %s", err) } diff --git a/internal/database/dbtest/dbtest.go b/internal/database/dbtest/dbtest.go index 03e462a30ea2..bec16ff1d408 100644 --- a/internal/database/dbtest/dbtest.go +++ b/internal/database/dbtest/dbtest.go @@ -65,8 +65,11 @@ func NewDB(t testing.TB, dsn string) *sql.DB { config.Path = "/" + dbname testDB := dbConn(t, config) - for _, databaseName := range dbconn.PostgresDatabaseNames { - m, err := dbconn.NewMigrate(testDB, databaseName) + for _, database := range []*dbconn.Database{ + dbconn.Frontend, + dbconn.CodeIntel, + } { + m, err := dbconn.NewMigrate(testDB, database) if err != nil { t.Fatalf("failed to construct migrations: %s", err) } diff --git a/internal/database/dbtesting/dbtesting.go b/internal/database/dbtesting/dbtesting.go index 1c44f0d05e55..f5ea12525a51 100644 --- a/internal/database/dbtesting/dbtesting.go +++ b/internal/database/dbtesting/dbtesting.go @@ -97,9 +97,8 @@ func emptyDBPreserveSchema(t testing.TB, d *sql.DB) { } var conds []string - for _, migrationTable := range dbconn.MigrationTables { - conds = append(conds, fmt.Sprintf("table_name != '%s'", migrationTable)) - } + conds = append(conds, fmt.Sprintf("table_name != '%s'", dbconn.Frontend.MigrationsTable)) + conds = append(conds, fmt.Sprintf("table_name != '%s'", dbconn.CodeInsights.MigrationsTable)) rows, err := d.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND " + strings.Join(conds, " AND ")) if err != nil { @@ -159,8 +158,11 @@ func initTest(nameSuffix string) error { return err } - for _, databaseName := range dbconn.PostgresDatabaseNames { - if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil { + for _, database := range []*dbconn.Database{ + dbconn.Frontend, + dbconn.CodeIntel, + } { + if err := dbconn.MigrateDB(dbconn.Global, 
database); err != nil { return err } } From 701851aedb688d82f36a59d42680282507f9e2c4 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 25 Jan 2021 19:11:50 -0700 Subject: [PATCH 35/78] update MigrateDB callers (+ more type safety) Signed-off-by: Stephen Gutekanst --- cmd/frontend/internal/cli/serve_cmd.go | 2 +- .../frontend/internal/codeintel/services.go | 2 +- .../cmd/precise-code-intel-worker/main.go | 2 +- internal/database/dbconn/migration.go | 4 +-- internal/database/schemadoc/main.go | 30 +++++++++---------- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index 07ab81b82294..a2a8cf775989 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -108,7 +108,7 @@ func InitDB() (*sql.DB, error) { return dbconn.Global, nil } - if err := dbconn.MigrateDB(dbconn.Global, "frontend"); err != nil { + if err := dbconn.MigrateDB(dbconn.Global, dbconn.Frontend); err != nil { return nil, err } diff --git a/enterprise/cmd/frontend/internal/codeintel/services.go b/enterprise/cmd/frontend/internal/codeintel/services.go index 386ff4b311ab..f6b7d19e08f0 100644 --- a/enterprise/cmd/frontend/internal/codeintel/services.go +++ b/enterprise/cmd/frontend/internal/codeintel/services.go @@ -94,7 +94,7 @@ func mustInitializeCodeIntelDB() *sql.DB { log.Fatalf("Failed to connect to codeintel database: %s", err) } - if err := dbconn.MigrateDB(db, "codeintel"); err != nil { + if err := dbconn.MigrateDB(db, dbconn.CodeIntel); err != nil { log.Fatalf("Failed to perform codeintel database migration: %s", err) } diff --git a/enterprise/cmd/precise-code-intel-worker/main.go b/enterprise/cmd/precise-code-intel-worker/main.go index f18c6540567b..f9b8923bf529 100644 --- a/enterprise/cmd/precise-code-intel-worker/main.go +++ b/enterprise/cmd/precise-code-intel-worker/main.go @@ -149,7 +149,7 @@ func mustInitializeCodeIntelDB() *sql.DB { 
log.Fatalf("Failed to connect to codeintel database: %s", err) } - if err := dbconn.MigrateDB(db, "codeintel"); err != nil { + if err := dbconn.MigrateDB(db, dbconn.CodeIntel); err != nil { log.Fatalf("Failed to perform codeintel database migration: %s", err) } diff --git a/internal/database/dbconn/migration.go b/internal/database/dbconn/migration.go index 98942f9bc861..d8398efa3699 100644 --- a/internal/database/dbconn/migration.go +++ b/internal/database/dbconn/migration.go @@ -51,8 +51,8 @@ var ( } ) -func MigrateDB(db *sql.DB, databaseName string) error { - m, err := NewMigrate(db, databaseName) +func MigrateDB(db *sql.DB, database *Database) error { + m, err := NewMigrate(db, database) if err != nil { return err } diff --git a/internal/database/schemadoc/main.go b/internal/database/schemadoc/main.go index 0c69f50b8db2..52985fcc3068 100644 --- a/internal/database/schemadoc/main.go +++ b/internal/database/schemadoc/main.go @@ -35,9 +35,9 @@ var logger = log.New(os.Stderr, "", log.LstdFlags) var versionRe = lazyregexp.New(fmt.Sprintf(`\b%s\b`, regexp.QuoteMeta("9.6"))) -var databases = map[string]string{ - "frontend": "schema.md", - "codeintel": "schema.codeintel.md", +var databases = map[*dbconn.Database]string{ + dbconn.Frontend: "schema.md", + dbconn.CodeIntel: "schema.codeintel.md", } // This script generates markdown formatted output containing descriptions of @@ -61,8 +61,8 @@ func mainErr() error { func mainLocal() error { dataSourcePrefix := "dbname=" + databaseNamePrefix - for databaseName, destinationFile := range databases { - if err := generateAndWrite(databaseName, dataSourcePrefix+databaseName, nil, destinationFile); err != nil { + for database, destinationFile := range databases { + if err := generateAndWrite(database, dataSourcePrefix+database.Name, nil, destinationFile); err != nil { return err } } @@ -81,8 +81,8 @@ func mainContainer() error { dataSourcePrefix := "postgres://postgres@127.0.0.1:5433/postgres?dbname=" + databaseNamePrefix - for 
databaseName, destinationFile := range databases { - if err := generateAndWrite(databaseName, dataSourcePrefix+databaseName, prefix, destinationFile); err != nil { + for database, destinationFile := range databases { + if err := generateAndWrite(database, dataSourcePrefix+database.Name, prefix, destinationFile); err != nil { return err } } @@ -90,20 +90,20 @@ func mainContainer() error { return nil } -func generateAndWrite(databaseName, dataSource string, commandPrefix []string, destinationFile string) error { +func generateAndWrite(database *dbconn.Database, dataSource string, commandPrefix []string, destinationFile string) error { run := runWithPrefix(commandPrefix) // Try to drop a database if it already exists - _, _ = run(true, "dropdb", databaseNamePrefix+databaseName) + _, _ = run(true, "dropdb", databaseNamePrefix+database.Name) // Let's also try to clean up after ourselves - defer func() { _, _ = run(true, "dropdb", databaseNamePrefix+databaseName) }() + defer func() { _, _ = run(true, "dropdb", databaseNamePrefix+database.Name) }() - if out, err := run(false, "createdb", databaseNamePrefix+databaseName); err != nil { + if out, err := run(false, "createdb", databaseNamePrefix+database.Name); err != nil { return errors.Wrap(err, fmt.Sprintf("run: %s", out)) } - out, err := generateInternal(databaseName, dataSource, run) + out, err := generateInternal(database, dataSource, run) if err != nil { return err } @@ -153,7 +153,7 @@ func startDocker() (commandPrefix []string, shutdown func(), _ error) { return []string{"docker", "exec", "-u", "postgres", containerName}, shutdown, nil } -func generateInternal(databaseName, dataSource string, run runFunc) (string, error) { +func generateInternal(database *dbconn.Database, dataSource string, run runFunc) (string, error) { db, err := dbconn.NewRaw(dataSource) if err != nil { return "", errors.Wrap(err, "NewRaw") @@ -164,7 +164,7 @@ func generateInternal(databaseName, dataSource string, run runFunc) (string, err } }() - 
if err := dbconn.MigrateDB(db, databaseName); err != nil { + if err := dbconn.MigrateDB(db, database); err != nil { return "", errors.Wrap(err, "MigrateDB") } @@ -197,7 +197,7 @@ func generateInternal(databaseName, dataSource string, run runFunc) (string, err for table := range ch { logger.Println("describe", table) - doc, err := describeTable(db, databaseName, table, run) + doc, err := describeTable(db, database.Name, table, run) if err != nil { logger.Fatalf("error: %s", err) continue From e44e1b2ae7163ed54790cf47e3efa5e432517098 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 25 Jan 2021 19:18:43 -0700 Subject: [PATCH 36/78] fixup Signed-off-by: Stephen Gutekanst --- internal/database/dbconn/migration.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/database/dbconn/migration.go b/internal/database/dbconn/migration.go index d8398efa3699..0077a350cd07 100644 --- a/internal/database/dbconn/migration.go +++ b/internal/database/dbconn/migration.go @@ -73,7 +73,7 @@ func NewMigrate(db *sql.DB, database *Database) (*migrate.Migrate, error) { return nil, err } - d, err := bindata.WithInstance(schemaData.Resource) + d, err := bindata.WithInstance(database.Resource) if err != nil { return nil, err } From 54b2835918b44d132515d8f8786f1ab984ac7401 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 26 Jan 2021 20:04:29 -0700 Subject: [PATCH 37/78] fix DB names (caught by test) Signed-off-by: Stephen Gutekanst --- internal/database/dbconn/migration.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/database/dbconn/migration.go b/internal/database/dbconn/migration.go index 0077a350cd07..abaf3078c468 100644 --- a/internal/database/dbconn/migration.go +++ b/internal/database/dbconn/migration.go @@ -35,16 +35,19 @@ type Database struct { var ( Frontend = &Database{ + Name: "frontend", MigrationsTable: "schema_migrations", Resource: bindata.Resource(frontendMigrations.AssetNames(), frontendMigrations.Asset), } 
CodeIntel = &Database{ + Name: "codeintel", MigrationsTable: "codeintel_schema_migrations", Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset), } CodeInsights = &Database{ + Name: "codeinsights", TargetsTimescaleDB: true, MigrationsTable: "codeinsights_schema_migrations", Resource: bindata.Resource(codeinsightsMigrations.AssetNames(), codeinsightsMigrations.Asset), From 53e33ecf97a6605c1a032a1968f324486074c512 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 26 Jan 2021 20:13:23 -0700 Subject: [PATCH 38/78] store WIP Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/internal/insights/store/integration_test.go b/enterprise/internal/insights/store/integration_test.go index fdeade0f60f5..dc38d40b37c5 100644 --- a/enterprise/internal/insights/store/integration_test.go +++ b/enterprise/internal/insights/store/integration_test.go @@ -32,7 +32,7 @@ func TestIntegration(t *testing.T) { if err != nil { t.Fatalf("Failed to connect to codeinsights database: %s", err) } - if err := dbconn.MigrateDB(db, "codeinsights"); err != nil { + if err := dbconn.MigrateDB(db, dbconn.CodeInsights); err != nil { t.Fatalf("Failed to perform codeinsights database migration: %s", err) } return db From 5fb7f1425490509a2294fb7bd25b6c54d2798b3f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 27 Jan 2021 00:51:23 -0700 Subject: [PATCH 39/78] fix typo Signed-off-by: Stephen Gutekanst --- internal/database/dbtesting/dbtesting.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/database/dbtesting/dbtesting.go b/internal/database/dbtesting/dbtesting.go index f5ea12525a51..20aa96d67b4f 100644 --- a/internal/database/dbtesting/dbtesting.go +++ b/internal/database/dbtesting/dbtesting.go @@ -98,7 +98,7 @@ func emptyDBPreserveSchema(t testing.TB, d *sql.DB) { var conds []string conds = append(conds, 
fmt.Sprintf("table_name != '%s'", dbconn.Frontend.MigrationsTable)) - conds = append(conds, fmt.Sprintf("table_name != '%s'", dbconn.CodeInsights.MigrationsTable)) + conds = append(conds, fmt.Sprintf("table_name != '%s'", dbconn.CodeIntel.MigrationsTable)) rows, err := d.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND " + strings.Join(conds, " AND ")) if err != nil { From 5e5aa5a7e0beea7d8c1a0a8623aff3d56d3ee068 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 27 Jan 2021 01:10:48 -0700 Subject: [PATCH 40/78] fixup store+resolvers Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/insights.go | 4 ++-- enterprise/internal/insights/resolvers/resolver.go | 10 +++++----- enterprise/internal/insights/store/integration_test.go | 8 +++----- enterprise/internal/insights/store/store.go | 4 ++-- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index d2490f9873ec..6b02ca949c1b 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -9,7 +9,7 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" "github.com/sourcegraph/sourcegraph/internal/conf" - "github.com/sourcegraph/sourcegraph/internal/db/dbconn" + "github.com/sourcegraph/sourcegraph/internal/database/dbconn" ) // Init initializes the given enterpriseServices to include the required resolvers for insights. 
@@ -38,7 +38,7 @@ func initializeCodeInsightsDB() (*sql.DB, error) { return nil, fmt.Errorf("Failed to connect to codeinsights database: %s", err) } - if err := dbconn.MigrateDB(db, "codeinsights"); err != nil { + if err := dbconn.MigrateDB(db, dbconn.CodeInsights); err != nil { return nil, fmt.Errorf("Failed to perform codeinsights database migration: %s", err) } return db, nil diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index d17e4e286c2d..ef6feef6b1b2 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -6,21 +6,21 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/store" - "github.com/sourcegraph/sourcegraph/internal/db" - "github.com/sourcegraph/sourcegraph/internal/db/dbutil" + "github.com/sourcegraph/sourcegraph/internal/database" + "github.com/sourcegraph/sourcegraph/internal/database/dbutil" ) // Resolver is the GraphQL resolver of all things related to Insights. type Resolver struct { store *store.Store - settingStore *db.SettingStore + settingStore *database.SettingStore } // New returns a new Resolver whose store uses the given Timescale and Postgres DBs. 
func New(timescale, postgres dbutil.DB) graphqlbackend.InsightsResolver { return &Resolver{ - store: store.New(timescale), - settingsStore: db.Settings(postgres), + store: store.New(timescale), + settingStore: database.Settings(postgres), } } diff --git a/enterprise/internal/insights/store/integration_test.go b/enterprise/internal/insights/store/integration_test.go index dc38d40b37c5..610b510860d8 100644 --- a/enterprise/internal/insights/store/integration_test.go +++ b/enterprise/internal/insights/store/integration_test.go @@ -8,9 +8,8 @@ import ( "strings" "testing" - "github.com/sourcegraph/sourcegraph/internal/db/dbconn" - "github.com/sourcegraph/sourcegraph/internal/db/dbtesting" - "github.com/sourcegraph/sourcegraph/internal/db/dbutil" + "github.com/sourcegraph/sourcegraph/internal/database/dbconn" + "github.com/sourcegraph/sourcegraph/internal/database/dbutil" "github.com/sourcegraph/sourcegraph/internal/timeutil" ) @@ -37,12 +36,11 @@ func TestIntegration(t *testing.T) { } return db } - getPostgresDB := dbtesting.GetDB t.Run("Integration", func(t *testing.T) { ctx := context.Background() clock := timeutil.Now - store := NewWithClock(getTimescaleDB(t), getPostgresDB(), clock) + store := NewWithClock(getTimescaleDB(t), clock) t.Run("Insights", func(t *testing.T) { testInsights(t, ctx, store, clock) }) }) } diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 12b0ce6a13b5..8fa9d497acf1 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -4,8 +4,8 @@ import ( "database/sql" "time" - "github.com/sourcegraph/sourcegraph/internal/db/basestore" - "github.com/sourcegraph/sourcegraph/internal/db/dbutil" + "github.com/sourcegraph/sourcegraph/internal/database/basestore" + "github.com/sourcegraph/sourcegraph/internal/database/dbutil" "github.com/sourcegraph/sourcegraph/internal/timeutil" ) From ef43acc755909023aa5222293339685bfbcd411e Mon Sep 17 00:00:00 
2001 From: Stephen Gutekanst Date: Thu, 28 Jan 2021 15:04:35 -0700 Subject: [PATCH 41/78] generate Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/schema.go | 8 ++++ migrations/codeinsights/bindata.go | 54 +++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index c3e41b12a432..477654a4bb30 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -4204,6 +4204,14 @@ type ExternalService implements Node { will contain any errors that occured during the most recent completed sync. """ lastSyncError: String + """ + LastSyncAt is the time the last sync job was run for this code host + """ + lastSyncAt: DateTime! + """ + The timestamp of the next sync job + """ + nextSyncAt: DateTime! } """ diff --git a/migrations/codeinsights/bindata.go b/migrations/codeinsights/bindata.go index 214fc770fa2f..badebe403986 100644 --- a/migrations/codeinsights/bindata.go +++ b/migrations/codeinsights/bindata.go @@ -2,6 +2,8 @@ // sources: // 1000000000_init.down.sql (19B) // 1000000000_init.up.sql (19B) +// 1000000001_initial_schema.down.sql (471B) +// 1000000001_initial_schema.up.sql (4.428kB) package migrations @@ -110,6 +112,46 @@ func _1000000000_initUpSql() (*asset, error) { return a, nil } +var __1000000001_initial_schemaDownSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x8f\xc1\xaa\xc2\x30\x10\x45\xf7\xf9\x8a\xfc\x47\x56\xaf\xaf\x51\x02\xb6\x15\x9b\x45\x77\x61\xa4\x43\x08\x98\x54\xd3\x89\xf8\xf9\x22\x62\x05\x65\x50\x77\xb3\xb8\xe7\x9e\x3b\x95\x5e\x9b\x56\x09\x51\xef\xba\xad\x34\x6d\xad\x07\x69\x56\x52\x0f\xa6\xb7\xbd\xf4\x50\x3c\x3a\x3c\x63\xa2\xd9\x65\x3c\x4e\x2e\x8c\x6e\x4f\x19\x51\x7d\x0b\x24\x88\xf8\x13\x35\xe5\xe0\x43\x82\x03\x8f\xdb\xbf\x6a\xa3\x19\x9c\xfb\x64\x29\x9b\xef\x95\x25\x85\x53\xb9\x35\x5f\x98\x4d\xaf\x00\x65\x1f\x19\xff\x33\xca\xd9\x23\x12\x8c\x40\xe0\x96\xe3\xa3\xff\x1d\xf1\x21\x31\x03\x1e\x11\x25\xc4\x7f\xd7\x34\xc6\x2a\x71\x0d\x00\x00\xff\xff\x0d\x60\xa7\x10\xd7\x01\x00\x00") + +func _1000000001_initial_schemaDownSqlBytes() ([]byte, error) { + return bindataRead( + __1000000001_initial_schemaDownSql, + "1000000001_initial_schema.down.sql", + ) +} + +func _1000000001_initial_schemaDownSql() (*asset, error) { + bytes, err := _1000000001_initial_schemaDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1000000001_initial_schema.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x46, 0x11, 0x7d, 0x48, 0x3a, 0xe0, 0x31, 0xc5, 0x3a, 0x2e, 0xd9, 0xa8, 0x64, 0xed, 0xe9, 0x73, 0xfc, 0x74, 0x4d, 0xdc, 0x8e, 0x2f, 0x32, 0xf6, 0xcf, 0x71, 0xfc, 0x39, 0x9b, 0xc1, 0x92, 0xca}} + return a, nil +} + +var __1000000001_initial_schemaUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x57\x5f\x73\xdb\xb8\x11\x7f\xd7\xa7\xd8\xb9\x87\x5a\x9a\xa1\xe9\x77\xbb\x73\xa9\x22\x31\x39\xf6\x6c\x2a\x95\x98\x69\xae\x9d\x8e\x06\x22\x57\x24\x2e\x20\xc0\x00\xa0\x64\xf5\xcf\x77\xef\x2c\x40\x52\xa4\x2c\x27\x99\x4c\xeb\x17\x6b\x88\xc5\x6f\x7f\xfb\x7f\xf1\x36\x7a\x1f\x27\x0f\x93\xc9\x62\x1d\xcd\xd3\x08\xa2\x4f\x69\x94\x6c\xe2\x55\x02\xf1\x3b\x48\x56\x29\x44\x9f\xe2\x4d\xba\x01\xcb\x2b\x34\x19\x13\x98\xef\x1e\xbe\x25\x5b\x17\x5b\xab\x8b\xea\x9b\x72\x19\xb7\xf8\x6c\x1f\x26\x93\xdb\x5b\x58\x63\xa6\x74\x6e\x40\x63\xad\x0c\xb7\x4a\x9f\x40\xb2\x0a\x4d\x00\x3b\x65\x4b\x28\xb9\xb1\x4a\xf3\x8c\x09\x60\x32\x87\x5a\xa3\x41\x69\x03\x68\x0c\x97\x05\x30\x68\x24\xff\xd2\xe0\xf0\xf6\x96\xae\x6f\x21\x5e\x12\xfa\xb4\x91\x1a\x05\xb3\x98\x83\x55\x60\xcb\x91\x64\xbc\x0c\x67\x1d\xd5\x74\xfe\xf6\x31\x72\x87\xee\xbe\x81\xe9\x04\x00\xe0\xf6\x16\xd2\xf2\x3a\x7c\xe8\x04\x78\x0e\x3b\x5e\x18\xd4\x9c\x09\x67\x62\xf2\xf1\xf1\x11\x3e\xac\xe3\xa7\xf9\xfa\x37\xf8\x35\xfa\x2d\x98\x0c\x81\xe8\x72\x00\x56\xf3\x42\xb3\xea\x96\xcb\x1c\x9f\x31\x87\xbd\xd2\xb0\x67\xc6\x02\x86\x45\x08\x1a\x0b\x7c\xae\x61\xcf\x85\x45\xcd\x65\xe1\x15\xd1\xcd\xd6\x71\xbd\x9e\x16\x7b\xb1\x4a\x36\xe9\x7a\x1e\x27\x29\x64\x25\x66\x9f\x3d\x45\xa9\x24\x56\xb5\x3d\xc1\xe2\x97\x68\xf1\x2b\x4c\xa7\x0e\x61\xf5\x21\x5a\xcf\xd3\xd5\x7a\xfa\xc7\x9f\x67\x70\x73\x73\x7f\xef\x21\x67\xb3\xc9\xcc\xc7\x23\x92\x7b\xa5\x33\x04\x5b\x32\xeb\x23\x01\x4c\x63\xeb\xe7\xb0\x73\xd7\xc7\x24\xfe\xcb\xc7\x08\xe2\x64\x19\x7d\x1a\x78\xcd\x6b\xf6\xb2\x5b\x9e\x3f\xc3\x2a\x19\x9c\x3a\x06\xad\x9a\x85\x46\x66\xb1\xf3\x04\x78\x4f\x18\xe7\x89\x8b\x4c\x38\x3b\x02\x76\xcc\x60\x0e\x4a\x0e\xfd\x64\x7a\x4e\xd7\xc9\x50\x42\x8e\x69\xc0\xc7\x4d\x9c\xbc\x87\x82\x4b\x98\x0a\x75\x44\xed\x5d\x33\xbb\xbf\x77\x9e\xa0\x03\x77\x6b\xab\x6a\x43\x6c\x87\x59\xca\xf4\x8e\x5b\xcd\xf4\x09\x2a\xb4\x2c\x67\x96\x01\xdb\xa9\xc6\x02\x1e\x50\x5a\x13\xc2\xc6\x2a\x8d\x39\x70\x09\x0c\x0c\xd6\x4c\x3b\x2b\xd9\x4e\x20\x30\x03\xdc\x02\x37\xa0\xf6\x16
\x25\x11\x22\x17\xe4\x04\x4f\x66\x57\x8d\xb0\xbc\x16\xd8\x41\x8d\x33\xb3\x57\xf7\xbf\xcf\xcb\xa7\xb1\x25\xb6\xe4\xc6\x73\x08\xfc\xef\x8c\x49\xd8\x21\x30\x79\x1a\x98\xff\xe7\xcd\x2a\x39\x93\x3a\x96\x3c\x2b\xe1\xc8\x85\x20\x49\x8d\xb6\xd1\x12\xf3\x4e\xc1\xb1\x44\x09\x5f\x1a\xd4\x27\x0a\xa2\x37\x2f\x70\xe5\xdc\x42\xfb\x08\xfb\xd0\xd2\xe7\x42\xab\xa6\xc6\xbc\x2d\xf2\xdf\x8d\x92\x3b\x50\x35\x6a\x66\x95\x36\xf0\x26\x80\x37\x7f\x08\xe0\xcd\xbf\x83\x4e\x01\xdd\xf9\xd3\xcf\x21\xa4\x44\xd7\x94\xaa\x11\x39\xc1\x9a\x8a\x09\x01\x8e\xa0\x92\xe2\x14\x40\xad\x79\x45\xe4\x1b\x83\x90\x31\x83\x14\x0c\x2f\x24\xb8\xb1\x06\x4c\x93\x95\xc0\xcc\x7d\x8b\xdb\xc1\xc3\xbf\x7e\xfa\x9d\x1d\xd8\xf6\x80\xda\x70\x25\xcd\x4f\xf7\xf0\xf7\x30\x0c\xff\xf1\x9f\x81\x80\x60\xb2\x68\x58\x81\x74\x48\x7f\x2f\x04\xea\x46\x88\xad\xc6\x2f\x0d\x1a\x7b\x15\x81\x49\xa9\x2c\xb3\xad\x82\x0b\x04\xf7\xaf\x77\xb7\xf7\x48\x17\xd5\xab\x95\xdb\xcb\x72\xf3\xd5\xda\xed\xe4\xb6\xfd\x8f\x71\xf5\x76\x9f\xa7\xdd\x8f\x56\x59\x4c\x05\x7b\xd6\x62\x15\xa8\xda\xf2\x8a\xff\x13\xe1\xaf\xbf\x44\xeb\x08\x32\xc1\x1a\x83\x06\x8e\xdc\x96\x2d\xe1\x73\xe0\xda\x88\x9d\x83\x7a\x51\xc4\x2f\x59\x51\xb5\x0e\x33\xce\xd7\xf0\xfb\x38\x81\x4b\x66\x5d\xa9\xfa\x3c\x03\x75\x40\xed\x86\x18\x30\x63\x54\xc6\xdd\x2c\x70\xa4\xd8\xb0\x7c\xa6\x4a\x03\x75\xcc\x00\x78\x88\x21\x14\x42\xed\x98\x10\xa7\x19\x25\xaf\x46\x2a\x66\x2e\x0b\x81\xa4\x40\x36\x15\xfa\x89\x74\x60\xa2\x71\x49\x54\x28\x37\x8d\xda\xea\xe0\xe2\x04\x4d\xed\x6c\xcc\xd5\x51\x86\x93\xdb\x5b\x4f\xac\xd7\xd6\x51\xe1\x4a\xd2\xf5\xbe\xaf\xb9\x91\x37\x9a\x50\x0e\x85\x4a\x3b\x74\x05\x1f\x2f\xbb\x9a\x69\x8c\x9f\x69\x1a\xf7\x64\xa0\x22\x0d\x0c\x4c\x8d\x19\xdf\xf3\x6c\x00\x12\x80\xd2\x20\x94\xfa\xdc\xd4\x6e\x00\x66\x8d\xd6\x28\x7d\x6f\x07\xb5\x1f\xbb\x81\xed\x2d\x6a\x6a\x53\x25\x33\xb0\x43\xec\x5b\x2d\x49\xe7\x64\x49\x3f\xc6\x5e\x23\xe2\x94\x74\xe0\x17\x13\x97\x59\xf7\xc5\x85\xa3\x3d\x75\x61\xba\x31\x90\xd1\x40\xe0\x4a\x06\x5d\x3f\xc4\x67\x56\x51\x3b\x24\x44\xcd\x5c\x5e\x23\x64\x25\x93\x05\xfa\xf6\x5a\xb0\xa6
\x40\xd8\xb1\xec\x33\xc9\x8c\xcc\xd8\x21\xc5\xa3\x67\x3d\xea\xa4\xee\xda\xb6\xcd\x8e\x51\x37\x75\xab\x8e\x65\x55\x7d\x66\x4e\xa9\x84\xb9\x27\xe9\x1b\xab\xe3\x9e\xc6\x4f\xd1\x26\x9d\x3f\x7d\x48\xff\x76\x39\x8c\x5b\xac\xbd\x50\xcc\x12\x89\x5a\x71\x69\xdb\x4c\x79\xcd\x7c\x8f\xec\x65\x72\xd5\xd0\xb0\xa8\x35\x66\x9c\xba\xcd\x15\xfc\xf9\x39\x91\xfb\x7a\x20\x8f\x0d\x3b\x37\xdf\x53\xc3\x0e\x47\x7d\x63\xcb\x69\x30\x59\x2c\x50\x8f\xc9\x8e\x33\x6e\xba\xd7\xaa\x72\xe4\x2a\x46\x8e\xae\x6b\xc1\x33\x9f\xab\xcb\xb7\xb3\x91\x11\xbd\x05\x70\x64\x6d\x08\x31\x0f\x21\x51\x16\x3b\x7c\xd7\x8d\x2e\xd2\xa0\x62\x27\x90\x0a\x84\x92\x05\x52\xa0\xb9\xb1\x70\x47\xb9\x74\x60\x82\xe7\xa4\xc1\x4d\x0b\xa7\x23\x80\x52\x1d\xf1\x80\x3a\xbc\xe8\xca\xb2\x11\x82\xcc\x1c\x73\x90\xca\x3a\x5f\x74\x05\x3b\xaa\x71\x57\xda\xac\x2d\x6e\x9f\x08\x33\x0f\xeb\xb6\x83\xd7\xdc\x53\x29\x63\x29\x17\x50\x5a\x71\x82\xcf\x52\x1d\x65\xbb\x99\x38\xa7\xe3\xa8\xd8\x9a\x3a\x77\x91\xa9\x51\x73\x95\x53\x9b\x10\x27\x97\x9f\x59\xa6\x1a\xe9\xc9\x51\x4d\x75\x0a\x06\xfc\x7c\xbe\x9a\x10\xe2\x17\x85\x43\xa6\xe5\x28\xd0\x62\xde\x4e\x65\x1a\x5d\x96\x06\xae\xbd\xce\xb0\x77\x13\xf5\x8e\xff\xbb\xeb\xdc\xf2\xf1\x1d\xe9\xe5\xdc\xe6\x17\x21\x52\xe9\x7d\xf9\x1d\x19\x15\x5b\x97\x34\x25\x3b\xa0\x6f\x4b\x6d\x6d\x77\x6a\x0c\x97\x59\x6b\xa6\xd2\xbc\xe0\x92\xd1\xb0\xfd\x1a\xb1\x48\x9a\x46\x23\x79\x41\x49\x4f\x71\xd4\x95\xf7\x1c\x45\xee\x9c\xec\x3b\x2a\xf9\x9d\x16\x05\xa6\x5b\x35\x2f\x96\x6e\xa7\xce\x5d\x33\xdb\xf6\xd2\x79\xf9\x76\x57\xe8\x6f\x3a\xed\x52\x2d\xde\xb8\xb2\x9e\xc1\x3c\x59\xc2\x74\x44\x76\x7c\x74\xdd\xa0\x4e\x66\xd6\x43\xaf\xd6\xaf\x68\x69\x1b\xc8\x6b\x9a\xc6\xc7\xaf\x6b\xeb\xe4\xbc\xc6\x59\xeb\xca\x77\xab\x75\x14\xbf\x4f\x68\xa5\x3c\x8f\xe2\x2d\xcf\x67\xb0\x8e\xde\x45\xeb\x28\x59\x44\x9b\xf3\x16\x41\xdf\x57\x09\x2c\xa3\xc7\x28\x8d\x60\x31\xdf\x2c\xe6\xcb\x08\x96\x24\xb9\xa6\xee\x1c\xbc\xc4\x1c\xf2\x18\x81\x0e\x1e\x16\x3f\x00\x7b\xd5\xce\x1f\xc5\x9f\x8c\x5f\x35\xe5\xa9\x46\xed\x96\xfe\x00\x6a\xa6\x2d\xa7\x94\x3a\xef\xbe\xb0\xf3\xad\xcd\x8d
\xd3\x0d\x22\x94\xd6\xd6\xe6\xfe\xee\x2e\x57\x99\x09\xfb\x07\x77\x98\xa9\xea\x8e\xde\xae\xc6\xde\xb9\x4d\xf8\x76\xf0\x16\xbf\x3b\xeb\x30\x93\x4d\xf4\x18\x2d\xd2\xb6\x5a\xb6\xe7\x93\xe9\xcd\x70\xd8\xdd\x04\x70\x43\x08\x37\x63\xb2\x3b\xab\x11\x5f\x7b\x80\x0d\x1e\xa1\xa3\x05\x6d\x88\xbb\x6d\x73\x6d\xeb\x91\x56\xc9\x78\xc4\xfa\x4d\xcd\x9f\x75\x69\x39\x7b\xf8\x16\x5c\x1b\x90\xef\xc6\xec\x02\xf8\x35\xe0\xab\x21\xff\x1e\x0d\xd7\x73\xe5\x61\x32\x59\xac\x9e\x9e\xe2\xf4\x61\xf2\xdf\x00\x00\x00\xff\xff\x04\xb2\x25\xad\x4c\x11\x00\x00") + +func _1000000001_initial_schemaUpSqlBytes() ([]byte, error) { + return bindataRead( + __1000000001_initial_schemaUpSql, + "1000000001_initial_schema.up.sql", + ) +} + +func _1000000001_initial_schemaUpSql() (*asset, error) { + bytes, err := _1000000001_initial_schemaUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1000000001_initial_schema.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x92, 0xb4, 0x34, 0xea, 0xec, 0x2b, 0x56, 0x72, 0xb, 0x7c, 0xa2, 0xb2, 0x6d, 0xae, 0x73, 0x94, 0x87, 0x93, 0x31, 0x3b, 0x53, 0xce, 0x4d, 0x8d, 0x96, 0xd1, 0x35, 0xdc, 0xdf, 0xa, 0x86, 0x2e}} + return a, nil +} + // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. @@ -201,8 +243,10 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "1000000000_init.down.sql": _1000000000_initDownSql, - "1000000000_init.up.sql": _1000000000_initUpSql, + "1000000000_init.down.sql": _1000000000_initDownSql, + "1000000000_init.up.sql": _1000000000_initUpSql, + "1000000001_initial_schema.down.sql": _1000000001_initial_schemaDownSql, + "1000000001_initial_schema.up.sql": _1000000001_initial_schemaUpSql, } // AssetDebug is true if the assets were built with the debug flag enabled. 
@@ -249,8 +293,10 @@ type bintree struct { } var _bintree = &bintree{nil, map[string]*bintree{ - "1000000000_init.down.sql": {_1000000000_initDownSql, map[string]*bintree{}}, - "1000000000_init.up.sql": {_1000000000_initUpSql, map[string]*bintree{}}, + "1000000000_init.down.sql": {_1000000000_initDownSql, map[string]*bintree{}}, + "1000000000_init.up.sql": {_1000000000_initUpSql, map[string]*bintree{}}, + "1000000001_initial_schema.down.sql": {_1000000001_initial_schemaDownSql, map[string]*bintree{}}, + "1000000001_initial_schema.up.sql": {_1000000001_initial_schemaUpSql, map[string]*bintree{}}, }} // RestoreAsset restores an asset under the given directory. From b1583dd3d840d97ca7480e2394e5363de4799eaf Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 28 Jan 2021 15:11:56 -0700 Subject: [PATCH 42/78] settings schema Signed-off-by: Stephen Gutekanst --- schema/schema.go | 18 ++++++++++++ schema/settings.schema.json | 52 +++++++++++++++++++++++++++++++++++ schema/settings_stringdata.go | 52 +++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+) diff --git a/schema/schema.go b/schema/schema.go index db024211de62..0272f540063b 100644 --- a/schema/schema.go +++ b/schema/schema.go @@ -739,6 +739,24 @@ type ImportChangesets struct { // Repository description: The repository name as configured on your Sourcegraph instance. Repository string `json:"repository"` } +type Insight struct { + // Description description: The description of this insight + Description string `json:"description"` + // Series description: Series of data to show for this insight + Series []*InsightSeries `json:"series"` + // Title description: The short title of this insight + Title string `json:"title"` +} +type InsightSeries struct { + // Label description: The label to use for the series in the graph. + Label string `json:"label"` + // RepositoriesList description: Performs a search query and shows the number of results returned. 
+ RepositoriesList []interface{} `json:"repositoriesList,omitempty"` + // Search description: Performs a search query and shows the number of results returned. + Search string `json:"search,omitempty"` + // Webhook description: (not yet supported) Fetch data from a webhook URL. + Webhook string `json:"webhook,omitempty"` +} // Log description: Configuration for logging and alerting, including to external services. type Log struct { diff --git a/schema/settings.schema.json b/schema/settings.schema.json index de88b92eddbe..6a11ffc31b39 100644 --- a/schema/settings.schema.json +++ b/schema/settings.schema.json @@ -323,7 +323,59 @@ } } }, + "insights": { + "description": "EXPERIMENTAL: Code Insights", + "type": "array", + "items": { + "$ref": "#/definitions/Insight" + } + }, "definitions": { + "Insight": { + "type": "object", + "additionalProperties": false, + "required": ["title", "description", "series"], + "properties": { + "title": { + "type": "string", + "description": "The short title of this insight" + }, + "description": { + "type": "string", + "description": "The description of this insight" + }, + "series": { + "type": "array", + "description": "Series of data to show for this insight", + "items": { + "$ref": "#/definitions/InsightSeries" + } + } + } + }, + "InsightSeries": { + "type": "object", + "additionalProperties": false, + "required": ["label"], + "properties": { + "label": { + "type": "string", + "description": "The label to use for the series in the graph." + }, + "repositoriesList": { + "type": "array", + "description": "Performs a search query and shows the number of results returned." + }, + "search": { + "type": "string", + "description": "Performs a search query and shows the number of results returned." + }, + "webhook": { + "type": "string", + "description": "(not yet supported) Fetch data from a webhook URL." 
+ } + } + }, "SearchScope": { "type": "object", "additionalProperties": false, diff --git a/schema/settings_stringdata.go b/schema/settings_stringdata.go index d28c8fd57a51..659c725c5002 100644 --- a/schema/settings_stringdata.go +++ b/schema/settings_stringdata.go @@ -328,7 +328,59 @@ const SettingsSchemaJSON = `{ } } }, + "insights": { + "description": "EXPERIMENTAL: Code Insights", + "type": "array", + "items": { + "$ref": "#/definitions/Insight" + } + }, "definitions": { + "Insight": { + "type": "object", + "additionalProperties": false, + "required": ["title", "description", "series"], + "properties": { + "title": { + "type": "string", + "description": "The short title of this insight" + }, + "description": { + "type": "string", + "description": "The description of this insight" + }, + "series": { + "type": "array", + "description": "Series of data to show for this insight", + "items": { + "$ref": "#/definitions/InsightSeries" + } + } + } + }, + "InsightSeries": { + "type": "object", + "additionalProperties": false, + "required": ["label"], + "properties": { + "label": { + "type": "string", + "description": "The label to use for the series in the graph." + }, + "repositoriesList": { + "type": "array", + "description": "Performs a search query and shows the number of results returned." + }, + "search": { + "type": "string", + "description": "Performs a search query and shows the number of results returned." + }, + "webhook": { + "type": "string", + "description": "(not yet supported) Fetch data from a webhook URL." 
+ } + } + }, "SearchScope": { "type": "object", "additionalProperties": false, From 04117f2d94b58675b2f6447dd2e96d78558ebdbc Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 28 Jan 2021 15:27:36 -0700 Subject: [PATCH 43/78] graphql schema: add Connection --- cmd/frontend/graphqlbackend/schema.go | 51 ++++++++++++++++++++-- cmd/frontend/graphqlbackend/schema.graphql | 51 ++++++++++++++++++++-- 2 files changed, 96 insertions(+), 6 deletions(-) diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index 477654a4bb30..dfed4fb9606d 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -2421,9 +2421,54 @@ type ChangesetEventConnection { } """ -Insights about code. +A list of insights. """ -type Insights { +type InsightConnection { + """ + A list of insights. + """ + nodes: [Insight!]! + + """ + The total number of insights in the connection. + """ + totalCount: Int! + + """ + Pagination information. + """ + pageInfo: PageInfo! +} + +""" +An insight about code. +""" +type Insight { + """ + The short title of the insight. + """ + title: String! + + """ + The description of the insight. + """ + description: String! + + """ + Data points over a time range (inclusive) + """ + series: [InsightsSeries!]! +} + +""" +A series of data about a code insight. +""" +type InsightsSeries { + """ + The label used to describe this series of data points. + """ + label: String! + """ Data points over a time range (inclusive) """ @@ -2762,7 +2807,7 @@ type Query { """ EXPERIMENTAL: Queries code insights """ - insights: Insights + insights: InsightConnection """ Looks up a repository by either name or cloneURL. 
diff --git a/cmd/frontend/graphqlbackend/schema.graphql b/cmd/frontend/graphqlbackend/schema.graphql index d4aa57441174..3de22879c134 100755 --- a/cmd/frontend/graphqlbackend/schema.graphql +++ b/cmd/frontend/graphqlbackend/schema.graphql @@ -2414,9 +2414,54 @@ type ChangesetEventConnection { } """ -Insights about code. +A list of insights. """ -type Insights { +type InsightConnection { + """ + A list of insights. + """ + nodes: [Insight!]! + + """ + The total number of insights in the connection. + """ + totalCount: Int! + + """ + Pagination information. + """ + pageInfo: PageInfo! +} + +""" +An insight about code. +""" +type Insight { + """ + The short title of the insight. + """ + title: String! + + """ + The description of the insight. + """ + description: String! + + """ + Data points over a time range (inclusive) + """ + series: [InsightsSeries!]! +} + +""" +A series of data about a code insight. +""" +type InsightsSeries { + """ + The label used to describe this series of data points. + """ + label: String! + """ Data points over a time range (inclusive) """ @@ -2755,7 +2800,7 @@ type Query { """ EXPERIMENTAL: Queries code insights """ - insights: Insights + insights: InsightConnection """ Looks up a repository by either name or cloneURL. 
From 8aae68dd11716632f130e8bef8a234c5bce5910f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 28 Jan 2021 23:53:32 -0700 Subject: [PATCH 44/78] update GraphQL stubs to match new schema Signed-off-by: Stephen Gutekanst --- cmd/frontend/graphqlbackend/insights.go | 32 +++++++--- .../resolvers/insight_connection_resolver.go | 61 +++++++++++++++++++ .../insights/resolvers/insight_resolver.go | 22 +++++++ .../resolvers/insight_series_resolver.go | 20 ++++++ .../internal/insights/resolvers/resolver.go | 17 +++--- 5 files changed, 133 insertions(+), 19 deletions(-) create mode 100644 enterprise/internal/insights/resolvers/insight_connection_resolver.go create mode 100644 enterprise/internal/insights/resolvers/insight_resolver.go create mode 100644 enterprise/internal/insights/resolvers/insight_series_resolver.go diff --git a/cmd/frontend/graphqlbackend/insights.go b/cmd/frontend/graphqlbackend/insights.go index c038bd98505f..071d47efc429 100644 --- a/cmd/frontend/graphqlbackend/insights.go +++ b/cmd/frontend/graphqlbackend/insights.go @@ -3,6 +3,8 @@ package graphqlbackend import ( "context" "errors" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" ) // This file just contains stub GraphQL resolvers and data types for Code Insights which merely @@ -19,23 +21,33 @@ type InsightsPointsArgs struct { To *DateTime } -type InsightsResolver interface { - // Root resolver - Insights(ctx context.Context) (InsightsResolver, error) - - // Insights type resolvers. 
+type InsightSeriesResolver interface { + Label() string Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) } -var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") +type InsightResolver interface { + Title() string + Description() string + Series() []InsightSeriesResolver +} -type defaultInsightsResolver struct{} +type InsightConnectionResolver interface { + Nodes(ctx context.Context) ([]InsightResolver, error) + TotalCount(ctx context.Context) (int32, error) + PageInfo(ctx context.Context) (*graphqlutil.PageInfo, error) +} -func (defaultInsightsResolver) Insights(ctx context.Context) (InsightsResolver, error) { - return nil, insightsOnlyInEnterprise +// InsightsResolver is the root resolver. +type InsightsResolver interface { + Insights(ctx context.Context) (InsightConnectionResolver, error) } -func (defaultInsightsResolver) Points(ctx context.Context, args *InsightsPointsArgs) ([]InsightsDataPointResolver, error) { +var insightsOnlyInEnterprise = errors.New("insights are only available in enterprise") + +type defaultInsightsResolver struct{} + +func (defaultInsightsResolver) Insights(ctx context.Context) (InsightConnectionResolver, error) { return nil, insightsOnlyInEnterprise } diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver.go b/enterprise/internal/insights/resolvers/insight_connection_resolver.go new file mode 100644 index 000000000000..c1719b9b37e7 --- /dev/null +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver.go @@ -0,0 +1,61 @@ +package resolvers + +import ( + "context" + "errors" + "strconv" + "sync" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/internal/database" +) + +var _ graphqlbackend.InsightConnectionResolver = 
&insightConnectionResolver{} + +type insightConnectionResolver struct { + store *store.Store + settingStore *database.SettingStore + + // cache results because they are used by multiple fields + once sync.Once + insights []graphqlbackend.InsightResolver + next int64 + err error +} + +func (r *insightConnectionResolver) Nodes(ctx context.Context) ([]graphqlbackend.InsightResolver, error) { + nodes, _, err := r.compute(ctx) + if err != nil { + return nil, err + } + resolvers := make([]graphqlbackend.InsightResolver, 0, len(nodes)) + for _, insight := range nodes { + resolvers = append(resolvers, &insightResolver{store: r.store, insight: insight}) + } + return resolvers, nil +} + +func (r *insightConnectionResolver) TotalCount(ctx context.Context) (int32, error) { + return 0, errors.New("not yet implemented") +} + +func (r *insightConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil.PageInfo, error) { + _, next, err := r.compute(ctx) + if err != nil { + return nil, err + } + if next != 0 { + return graphqlutil.NextPageCursor(strconv.Itoa(int(next))), nil + } + return graphqlutil.HasNextPage(false), nil +} + +func (r *insightConnectionResolver) compute(ctx context.Context) ([]graphqlbackend.InsightResolver, int64, error) { + r.once.Do(func() { + // TODO: populate r.insights, r.next, r.err + // TODO: locate insights from user, org, global settings using r.settingStore.ListAll() + }) + return r.insights, r.next, r.err +} diff --git a/enterprise/internal/insights/resolvers/insight_resolver.go b/enterprise/internal/insights/resolvers/insight_resolver.go new file mode 100644 index 000000000000..4cd06283f38a --- /dev/null +++ b/enterprise/internal/insights/resolvers/insight_resolver.go @@ -0,0 +1,22 @@ +package resolvers + +import ( + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" +) + +var _ graphqlbackend.InsightResolver = &insightResolver{} + +type insightResolver struct 
{ + store *store.Store + insight graphqlbackend.InsightResolver +} + +func (r *insightResolver) Title() string { return r.insight.Title() } + +func (r *insightResolver) Description() string { return r.insight.Description() } + +func (r *insightResolver) Series() []graphqlbackend.InsightSeriesResolver { + // TODO: locate time series from r.store DB. + return nil +} diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver.go b/enterprise/internal/insights/resolvers/insight_series_resolver.go new file mode 100644 index 000000000000..f5531900f36a --- /dev/null +++ b/enterprise/internal/insights/resolvers/insight_series_resolver.go @@ -0,0 +1,20 @@ +package resolvers + +import ( + "context" + "errors" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" +) + +var _ graphqlbackend.InsightSeriesResolver = &insightSeriesResolver{} + +type insightSeriesResolver struct { + label string +} + +func (r *insightSeriesResolver) Label() string { return r.label } + +func (r *insightSeriesResolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + return nil, errors.New("not yet implemented") +} diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index ef6feef6b1b2..2a48dabf3abf 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -2,14 +2,15 @@ package resolvers import ( "context" - "errors" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" - "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/store" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/database/dbutil" ) +var _ graphqlbackend.InsightsResolver = &Resolver{} + // Resolver is the GraphQL resolver of all things related to 
Insights. type Resolver struct { store *store.Store @@ -24,11 +25,9 @@ func New(timescale, postgres dbutil.DB) graphqlbackend.InsightsResolver { } } -func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightsResolver, error) { - // TODO: locate insights from user, org, global settings using r.settingStore.ListAll() - return r, nil -} - -func (r *Resolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { - return nil, errors.New("not yet implemented") +func (r *Resolver) Insights(ctx context.Context) (graphqlbackend.InsightConnectionResolver, error) { + return &insightConnectionResolver{ + store: r.store, + settingStore: r.settingStore, + }, nil } From dc06e678136bde27ebdefbf4e27bd9ac11180e13 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 28 Jan 2021 23:53:51 -0700 Subject: [PATCH 45/78] store: fix test Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/insights_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/enterprise/internal/insights/store/insights_test.go b/enterprise/internal/insights/store/insights_test.go index b6a92d00b49b..5ec101083b72 100644 --- a/enterprise/internal/insights/store/insights_test.go +++ b/enterprise/internal/insights/store/insights_test.go @@ -18,8 +18,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/campaigns" "github.com/sourcegraph/sourcegraph/internal/db" "github.com/sourcegraph/sourcegraph/internal/extsvc" - */ -) + */) func testInsights(t *testing.T, ctx context.Context, s *Store, clock func() time.Time) { // TODO: write tests against the store once it is implemented @@ -616,4 +615,4 @@ func testInsights(t *testing.T, ctx context.Context, s *Store, clock func() time } }) */ -) +} From b1026e4c08ebd0a4e91eac1d6b4c6d142699464c Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 00:02:14 -0700 Subject: [PATCH 46/78] improve testing experience 
Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/integration_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/enterprise/internal/insights/store/integration_test.go b/enterprise/internal/insights/store/integration_test.go index 6dcd17bb21b8..928a86ebbce2 100644 --- a/enterprise/internal/insights/store/integration_test.go +++ b/enterprise/internal/insights/store/integration_test.go @@ -22,6 +22,9 @@ func TestIntegration(t *testing.T) { getTimescaleDB := func(t testing.TB) *sql.DB { // Setup TimescaleDB for testing. + if os.Getenv("CODEINSIGHTS_PGDATASOURCE") == "" { + os.Setenv("CODEINSIGHTS_PGDATASOURCE", "postgres://postgres:password@127.0.0.1:5435/postgres") + } username := "" if user, err := user.Current(); err == nil { username = user.Username @@ -33,7 +36,6 @@ func TestIntegration(t *testing.T) { t.Log("README: To run these tests you need to have the codeinsights TimescaleDB running:") t.Log("") t.Log("$ ./dev/codeinsights-db.sh &") - t.Log("$ export CODEINSIGHTS_PGDATASOURCE=postgres://postgres:password@127.0.0.1:5435/postgres") t.Log("") t.Log("Or skip them with 'go test -short'") t.Log("") From aa3be53e844ff19fb64aefb838d372316fc13fdb Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 00:32:33 -0700 Subject: [PATCH 47/78] settings schema fixup Signed-off-by: Stephen Gutekanst --- schema/schema.go | 2 ++ schema/settings.schema.json | 14 +++++++------- schema/settings_stringdata.go | 14 +++++++------- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/schema/schema.go b/schema/schema.go index 0272f540063b..c6632c9bcd1e 100644 --- a/schema/schema.go +++ b/schema/schema.go @@ -1109,6 +1109,8 @@ type Settings struct { ExperimentalFeatures *SettingsExperimentalFeatures `json:"experimentalFeatures,omitempty"` // Extensions description: The Sourcegraph extensions to use. Enable an extension by adding a property `"my/extension": true` (where `my/extension` is the extension ID). 
Override a previously enabled extension and disable it by setting its value to `false`. Extensions map[string]bool `json:"extensions,omitempty"` + // Insights description: EXPERIMENTAL: Code Insights + Insights []*Insight `json:"insights,omitempty"` // Motd description: DEPRECATED: Use `notices` instead. // // An array (often with just one element) of messages to display at the top of all pages, including for unauthenticated users. Users may dismiss a message (and any message with the same string value will remain dismissed for the user). diff --git a/schema/settings.schema.json b/schema/settings.schema.json index 6a11ffc31b39..135f7d4e368a 100644 --- a/schema/settings.schema.json +++ b/schema/settings.schema.json @@ -321,13 +321,13 @@ "!go": { "pointer": true } - } - }, - "insights": { - "description": "EXPERIMENTAL: Code Insights", - "type": "array", - "items": { - "$ref": "#/definitions/Insight" + }, + "insights": { + "description": "EXPERIMENTAL: Code Insights", + "type": "array", + "items": { + "$ref": "#/definitions/Insight" + } } }, "definitions": { diff --git a/schema/settings_stringdata.go b/schema/settings_stringdata.go index 659c725c5002..61bb4aeccb08 100644 --- a/schema/settings_stringdata.go +++ b/schema/settings_stringdata.go @@ -326,13 +326,13 @@ const SettingsSchemaJSON = `{ "!go": { "pointer": true } - } - }, - "insights": { - "description": "EXPERIMENTAL: Code Insights", - "type": "array", - "items": { - "$ref": "#/definitions/Insight" + }, + "insights": { + "description": "EXPERIMENTAL: Code Insights", + "type": "array", + "items": { + "$ref": "#/definitions/Insight" + } } }, "definitions": { From 6ad3eb94111b7060f8cdf68d10792ac378888aee Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 00:33:05 -0700 Subject: [PATCH 48/78] insights: resolvers: fetch insights from global user settings Signed-off-by: Stephen Gutekanst --- .../resolvers/insight_connection_resolver.go | 32 ++++++++++++++++--- 
.../insights/resolvers/insight_resolver.go | 7 ++-- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver.go b/enterprise/internal/insights/resolvers/insight_connection_resolver.go index c1719b9b37e7..6c7462f63f32 100644 --- a/enterprise/internal/insights/resolvers/insight_connection_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver.go @@ -9,7 +9,10 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/database" + "github.com/sourcegraph/sourcegraph/internal/jsonc" + "github.com/sourcegraph/sourcegraph/schema" ) var _ graphqlbackend.InsightConnectionResolver = &insightConnectionResolver{} @@ -20,7 +23,7 @@ type insightConnectionResolver struct { // cache results because they are used by multiple fields once sync.Once - insights []graphqlbackend.InsightResolver + insights []*schema.Insight next int64 err error } @@ -52,10 +55,31 @@ func (r *insightConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil. return graphqlutil.HasNextPage(false), nil } -func (r *insightConnectionResolver) compute(ctx context.Context) ([]graphqlbackend.InsightResolver, int64, error) { +func (r *insightConnectionResolver) compute(ctx context.Context) ([]*schema.Insight, int64, error) { r.once.Do(func() { - // TODO: populate r.insights, r.next, r.err - // TODO: locate insights from user, org, global settings using r.settingStore.ListAll() + // Get latest Global user settings. + // + // FUTURE: include user/org settings. 
+ subject := api.SettingsSubject{Site: true} + globalSettingsRaw, err := r.settingStore.GetLatest(ctx, subject) + if err != nil { + r.err = err + return + } + globalSettings, err := parseUserSettings(globalSettingsRaw) + r.insights = globalSettings.Insights }) return r.insights, r.next, r.err } + +func parseUserSettings(settings *api.Settings) (*schema.Settings, error) { + if settings == nil { + // Settings have never been saved for this subject; equivalent to `{}`. + return &schema.Settings{}, nil + } + var v schema.Settings + if err := jsonc.Unmarshal(settings.Contents, &v); err != nil { + return nil, err + } + return &v, nil +} diff --git a/enterprise/internal/insights/resolvers/insight_resolver.go b/enterprise/internal/insights/resolvers/insight_resolver.go index 4cd06283f38a..9e4c9a5ee0a7 100644 --- a/enterprise/internal/insights/resolvers/insight_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_resolver.go @@ -3,18 +3,19 @@ package resolvers import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/schema" ) var _ graphqlbackend.InsightResolver = &insightResolver{} type insightResolver struct { store *store.Store - insight graphqlbackend.InsightResolver + insight *schema.Insight } -func (r *insightResolver) Title() string { return r.insight.Title() } +func (r *insightResolver) Title() string { return r.insight.Title } -func (r *insightResolver) Description() string { return r.insight.Description() } +func (r *insightResolver) Description() string { return r.insight.Description } func (r *insightResolver) Series() []graphqlbackend.InsightSeriesResolver { // TODO: locate time series from r.store DB. 
From 1c53ca999c992e65d25735f2144d75a5bd4e65a4 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 00:36:27 -0700 Subject: [PATCH 49/78] resolvers: implement TotalCount Signed-off-by: Stephen Gutekanst --- .../insights/resolvers/insight_connection_resolver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver.go b/enterprise/internal/insights/resolvers/insight_connection_resolver.go index 6c7462f63f32..0f563989ea24 100644 --- a/enterprise/internal/insights/resolvers/insight_connection_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver.go @@ -2,7 +2,6 @@ package resolvers import ( "context" - "errors" "strconv" "sync" @@ -41,7 +40,8 @@ func (r *insightConnectionResolver) Nodes(ctx context.Context) ([]graphqlbackend } func (r *insightConnectionResolver) TotalCount(ctx context.Context) (int32, error) { - return 0, errors.New("not yet implemented") + insights, _, err := r.compute(ctx) + return int32(len(insights)), err } func (r *insightConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil.PageInfo, error) { From 36285a62ee89801d9b0e945c0bdb810e42528f4a Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 00:42:46 -0700 Subject: [PATCH 50/78] resolvers: pass through series schema/settings data Signed-off-by: Stephen Gutekanst --- .../internal/insights/resolvers/insight_resolver.go | 8 ++++++-- .../insights/resolvers/insight_series_resolver.go | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/enterprise/internal/insights/resolvers/insight_resolver.go b/enterprise/internal/insights/resolvers/insight_resolver.go index 9e4c9a5ee0a7..9d5ba38a272f 100644 --- a/enterprise/internal/insights/resolvers/insight_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_resolver.go @@ -18,6 +18,10 @@ func (r *insightResolver) Title() string { return r.insight.Title } func (r 
*insightResolver) Description() string { return r.insight.Description } func (r *insightResolver) Series() []graphqlbackend.InsightSeriesResolver { - // TODO: locate time series from r.store DB. - return nil + series := r.insight.Series + resolvers := make([]graphqlbackend.InsightSeriesResolver, 0, len(series)) + for _, series := range series { + resolvers = append(resolvers, &insightSeriesResolver{store: r.store, series: series}) + } + return resolvers } diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver.go b/enterprise/internal/insights/resolvers/insight_series_resolver.go index f5531900f36a..2872a1567836 100644 --- a/enterprise/internal/insights/resolvers/insight_series_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_series_resolver.go @@ -5,16 +5,20 @@ import ( "errors" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/schema" ) var _ graphqlbackend.InsightSeriesResolver = &insightSeriesResolver{} type insightSeriesResolver struct { - label string + store *store.Store + series *schema.InsightSeries } -func (r *insightSeriesResolver) Label() string { return r.label } +func (r *insightSeriesResolver) Label() string { return r.series.Label } func (r *insightSeriesResolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + // TODO: locate time series from r.store DB. 
return nil, errors.New("not yet implemented") } From c5050c6b14546a52b037dd796f3223395bf5e96b Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 00:44:08 -0700 Subject: [PATCH 51/78] resolvers: keep track of which tests are needed Signed-off-by: Stephen Gutekanst --- .../internal/insights/resolvers/insight_resolver_test.go | 3 +++ .../insights/resolvers/insights_connection_resolver_test.go | 3 +++ enterprise/internal/insights/resolvers/resolver_test.go | 3 +++ 3 files changed, 9 insertions(+) create mode 100644 enterprise/internal/insights/resolvers/insight_resolver_test.go create mode 100644 enterprise/internal/insights/resolvers/insights_connection_resolver_test.go create mode 100644 enterprise/internal/insights/resolvers/resolver_test.go diff --git a/enterprise/internal/insights/resolvers/insight_resolver_test.go b/enterprise/internal/insights/resolvers/insight_resolver_test.go new file mode 100644 index 000000000000..b88996ff58b3 --- /dev/null +++ b/enterprise/internal/insights/resolvers/insight_resolver_test.go @@ -0,0 +1,3 @@ +package resolvers + +// TODO(slimsag) diff --git a/enterprise/internal/insights/resolvers/insights_connection_resolver_test.go b/enterprise/internal/insights/resolvers/insights_connection_resolver_test.go new file mode 100644 index 000000000000..b88996ff58b3 --- /dev/null +++ b/enterprise/internal/insights/resolvers/insights_connection_resolver_test.go @@ -0,0 +1,3 @@ +package resolvers + +// TODO(slimsag) diff --git a/enterprise/internal/insights/resolvers/resolver_test.go b/enterprise/internal/insights/resolvers/resolver_test.go new file mode 100644 index 000000000000..b88996ff58b3 --- /dev/null +++ b/enterprise/internal/insights/resolvers/resolver_test.go @@ -0,0 +1,3 @@ +package resolvers + +// TODO(slimsag) From fdb48f559a572ce10cf4887c6d67c2b460de09d1 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 02:06:56 -0700 Subject: [PATCH 52/78] rename gauge_events -> series_points Signed-off-by: 
Stephen Gutekanst --- README.codeinsights.md | 12 ++++++------ .../codeinsights/1000000001_initial_schema.down.sql | 8 ++++---- .../codeinsights/1000000001_initial_schema.up.sql | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.codeinsights.md b/README.codeinsights.md index 686860f6c39d..1cd67dfd95d6 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -67,7 +67,7 @@ UNION ## Inserting gauge events ``` -INSERT INTO gauge_events( +INSERT INTO series_points( time, value, metadata_id, @@ -89,14 +89,14 @@ INSERT INTO gauge_events( ### All data ``` -SELECT * FROM gauge_events ORDER BY time DESC LIMIT 100; +SELECT * FROM series_points ORDER BY time DESC LIMIT 100; ``` ### Filter by repo name, returning metadata (may be more optimally queried separately) ``` SELECT * -FROM gauge_events +FROM series_points JOIN metadata ON metadata.id = metadata_id WHERE repo_name_id IN ( SELECT id FROM repo_names WHERE name ~ '.*-renamed' @@ -109,7 +109,7 @@ DESC LIMIT 100; ``` SELECT * -FROM gauge_events +FROM series_points JOIN metadata ON metadata.id = metadata_id WHERE metadata @> '{"hello": "world"}' ORDER BY time @@ -120,7 +120,7 @@ DESC LIMIT 100; ``` SELECT * -FROM gauge_events +FROM series_points JOIN metadata ON metadata.id = metadata_id WHERE metadata @> '{"languages": ["Go"]}' ORDER BY time @@ -138,7 +138,7 @@ SELECT AVG(value), MAX(value), MIN(value) -FROM gauge_events +FROM series_points GROUP BY value, bucket; ``` diff --git a/migrations/codeinsights/1000000001_initial_schema.down.sql b/migrations/codeinsights/1000000001_initial_schema.down.sql index fd967e3a014d..a86646b1cf4b 100644 --- a/migrations/codeinsights/1000000001_initial_schema.down.sql +++ b/migrations/codeinsights/1000000001_initial_schema.down.sql @@ -1,9 +1,9 @@ BEGIN; -DROP INDEX IF EXISTS gauge_events_repo_id_btree; -DROP INDEX IF EXISTS gauge_events_repo_name_id_btree; -DROP INDEX IF EXISTS gauge_events_original_repo_name_id_btree; -DROP TABLE IF EXISTS 
gauge_events; +DROP INDEX IF EXISTS series_points_repo_id_btree; +DROP INDEX IF EXISTS series_points_repo_name_id_btree; +DROP INDEX IF EXISTS series_points_original_repo_name_id_btree; +DROP TABLE IF EXISTS series_points; DROP INDEX IF EXISTS repo_names_name_unique_idx; DROP INDEX IF EXISTS repo_names_name_trgm; diff --git a/migrations/codeinsights/1000000001_initial_schema.up.sql b/migrations/codeinsights/1000000001_initial_schema.up.sql index 2d7d61cc22b6..baeef9fef1b1 100644 --- a/migrations/codeinsights/1000000001_initial_schema.up.sql +++ b/migrations/codeinsights/1000000001_initial_schema.up.sql @@ -54,7 +54,7 @@ CREATE INDEX metadata_metadata_gin ON metadata USING GIN (metadata); -- a specific repository, or lookup the current name of a repository after it has been e.g. renamed. -- The name can be used to refer to the name of the repository at the time of the event's creation, -- for example to trace the change in a gauge back to a repository being renamed. -CREATE TABLE gauge_events ( +CREATE TABLE series_points ( -- The timestamp of the recorded event. time TIMESTAMPTZ NOT NULL, @@ -95,11 +95,11 @@ CREATE TABLE gauge_events ( -- Create hypertable, partitioning events by time. -- See https://docs.timescale.com/latest/using-timescaledb/hypertables -SELECT create_hypertable('gauge_events', 'time'); +SELECT create_hypertable('series_points', 'time'); -- Create btree indexes for repository filtering. 
-CREATE INDEX gauge_events_repo_id_btree ON gauge_events USING btree (repo_id); -CREATE INDEX gauge_events_repo_name_id_btree ON gauge_events USING btree (repo_name_id); -CREATE INDEX gauge_events_original_repo_name_id_btree ON gauge_events USING btree (original_repo_name_id); +CREATE INDEX series_points_repo_id_btree ON series_points USING btree (repo_id); +CREATE INDEX series_points_repo_name_id_btree ON series_points USING btree (repo_name_id); +CREATE INDEX series_points_original_repo_name_id_btree ON series_points USING btree (original_repo_name_id); COMMIT; From e5eb37d5141898397612a40ca30d9e5e9519f709 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 02:09:05 -0700 Subject: [PATCH 53/78] store: fetch data points Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store.go | 147 +++++++------------- 1 file changed, 52 insertions(+), 95 deletions(-) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 8fa9d497acf1..645ef9ee4b7d 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -1,9 +1,13 @@ package store import ( + "context" "database/sql" + "fmt" "time" + "github.com/keegancsmith/sqlf" + "github.com/sourcegraph/sourcegraph/internal/database/basestore" "github.com/sourcegraph/sourcegraph/internal/database/dbutil" "github.com/sourcegraph/sourcegraph/internal/timeutil" @@ -40,16 +44,55 @@ func (s *Store) With(other basestore.ShareableStore) *Store { return &Store{Store: s.Store.With(other), now: s.now} } -/* -// Transact creates a new transaction. -// It's required to implement this method and wrap the Transact method of the -// underlying basestore.Store. 
-func (s *Store) Transact(ctx context.Context) (*Store, error) { - txBase, err := s.Store.Transact(ctx) - if err != nil { - return nil, err +type SeriesPointsOpts struct { + Limit int +} + +type SeriesPoint struct { + Time time.Time + Value float64 +} + +func (s *Store) SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]SeriesPoint, error) { + points := make([]SeriesPoint, 0, opts.Limit) + err := s.query(ctx, seriesPointsQuery(opts), func(sc scanner) error { + var point SeriesPoint + err := sc.Scan( + &point.Time, + &point.Value, + ) + if err != nil { + return err + } + points = append(points, point) + return nil + }) + return points, err +} + +var seriesPointsQueryFmtstr = ` +-- source: enterprise/internal/insights/store/series_points.go +SELECT time, value FROM series_points +WHERE %s +ORDER BY time DESC +` + +func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { + joins := []*sqlf.Query{} + preds := []*sqlf.Query{} + + if len(preds) == 0 { + preds = append(preds, sqlf.Sprintf("TRUE")) + } + limitClause := "" + if opts.Limit > 0 { + limitClause = fmt.Sprintf("LIMIT %d", opts.Limit) } - return &Store{Store: txBase, now: s.now}, nil + return sqlf.Sprintf( + seriesPointsQueryFmtstr+limitClause, + sqlf.Join(joins, "\n"), + sqlf.Join(preds, "\n AND "), + ) } func (s *Store) query(ctx context.Context, q *sqlf.Query, sc scanFunc) error { @@ -60,14 +103,6 @@ func (s *Store) query(ctx context.Context, q *sqlf.Query, sc scanFunc) error { return scanAll(rows, sc) } -func (s *Store) queryCount(ctx context.Context, q *sqlf.Query) (int, error) { - count, ok, err := basestore.ScanFirstInt(s.Query(ctx, q)) - if err != nil || !ok { - return count, err - } - return count, nil -} - // scanner captures the Scan method of sql.Rows and sql.Row type scanner interface { Scan(dst ...interface{}) error @@ -79,88 +114,10 @@ type scanFunc func(scanner) (err error) func scanAll(rows *sql.Rows, scan scanFunc) (err error) { defer func() { err = basestore.CloseRows(rows, err) }() 
- for rows.Next() { if err = scan(rows); err != nil { return err } } - return rows.Err() } -*/ - -/* -func jsonbColumn(metadata interface{}) (msg json.RawMessage, err error) { - switch m := metadata.(type) { - case nil: - msg = json.RawMessage("{}") - case string: - msg = json.RawMessage(m) - case []byte: - msg = m - case json.RawMessage: - msg = m - default: - msg, err = json.MarshalIndent(m, " ", " ") - } - return -} - -func jsonSetColumn(ids []int64) ([]byte, error) { - set := make(map[int64]*struct{}, len(ids)) - for _, id := range ids { - set[id] = nil - } - return json.Marshal(set) -} - -func nullInt32Column(n int32) *int32 { - if n == 0 { - return nil - } - return &n -} - -func nullInt64Column(n int64) *int64 { - if n == 0 { - return nil - } - return &n -} - -func nullTimeColumn(t time.Time) *time.Time { - if t.IsZero() { - return nil - } - return &t -} - -func nullStringColumn(s string) *string { - if s == "" { - return nil - } - return &s -} - -type LimitOpts struct { - Limit int -} - -func (o LimitOpts) DBLimit() int { - if o.Limit == 0 { - return o.Limit - } - // We always request one item more than actually requested, to determine the next ID for pagination. - // The store should make sure to strip the last element in a result set, if len(rs) == o.DBLimit(). 
- return o.Limit + 1 -} -g -func (o LimitOpts) ToDB() string { - var limitClause string - if o.Limit > 0 { - limitClause = fmt.Sprintf("LIMIT %d", o.DBLimit()) - } - return limitClause -} -*/ From f367fe9e651ea918c8f18775e046feaf8cdbb8ca Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 02:13:32 -0700 Subject: [PATCH 54/78] DB schema: add series_id Signed-off-by: Stephen Gutekanst --- migrations/codeinsights/1000000001_initial_schema.up.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/migrations/codeinsights/1000000001_initial_schema.up.sql b/migrations/codeinsights/1000000001_initial_schema.up.sql index baeef9fef1b1..5cd0dde4229b 100644 --- a/migrations/codeinsights/1000000001_initial_schema.up.sql +++ b/migrations/codeinsights/1000000001_initial_schema.up.sql @@ -55,6 +55,10 @@ CREATE INDEX metadata_metadata_gin ON metadata USING GIN (metadata); -- The name can be used to refer to the name of the repository at the time of the event's creation, -- for example to trace the change in a gauge back to a repository being renamed. CREATE TABLE series_points ( + -- A unique identifier for the series of data being recorded. This is not an ID from another + -- table, but rather just a unique identifier. + series_id integer, + -- The timestamp of the recorded event. 
time TIMESTAMPTZ NOT NULL, From 1f5fec85efabb5930cd6b4883c5d4083febc0528 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 02:13:47 -0700 Subject: [PATCH 55/78] store: add series_id filtering Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 645ef9ee4b7d..60a196247bef 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -45,7 +45,8 @@ func (s *Store) With(other basestore.ShareableStore) *Store { } type SeriesPointsOpts struct { - Limit int + SeriesID *int32 + Limit int } type SeriesPoint struct { @@ -81,6 +82,10 @@ func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { joins := []*sqlf.Query{} preds := []*sqlf.Query{} + if opts.SeriesID != nil { + preds = append(preds, sqlf.Sprintf("series_id = %s", *opts.SeriesID)) + } + if len(preds) == 0 { preds = append(preds, sqlf.Sprintf("TRUE")) } From 41dc093998e12057f5d20bcf2eb964f485833d0d Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 11:48:19 -0700 Subject: [PATCH 56/78] migrations: generate Signed-off-by: Stephen Gutekanst --- migrations/codeinsights/bindata.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/migrations/codeinsights/bindata.go b/migrations/codeinsights/bindata.go index badebe403986..28dceb89f22f 100644 --- a/migrations/codeinsights/bindata.go +++ b/migrations/codeinsights/bindata.go @@ -2,8 +2,8 @@ // sources: // 1000000000_init.down.sql (19B) // 1000000000_init.up.sql (19B) -// 1000000001_initial_schema.down.sql (471B) -// 1000000001_initial_schema.up.sql (4.428kB) +// 1000000001_initial_schema.down.sql (475B) +// 1000000001_initial_schema.up.sql (4.608kB) package migrations @@ -112,7 +112,7 @@ func _1000000000_initUpSql() (*asset, error) { return a, nil } -var 
__1000000001_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x8f\xc1\xaa\xc2\x30\x10\x45\xf7\xf9\x8a\xfc\x47\x56\xaf\xaf\x51\x02\xb6\x15\x9b\x45\x77\x61\xa4\x43\x08\x98\x54\xd3\x89\xf8\xf9\x22\x62\x05\x65\x50\x77\xb3\xb8\xe7\x9e\x3b\x95\x5e\x9b\x56\x09\x51\xef\xba\xad\x34\x6d\xad\x07\x69\x56\x52\x0f\xa6\xb7\xbd\xf4\x50\x3c\x3a\x3c\x63\xa2\xd9\x65\x3c\x4e\x2e\x8c\x6e\x4f\x19\x51\x7d\x0b\x24\x88\xf8\x13\x35\xe5\xe0\x43\x82\x03\x8f\xdb\xbf\x6a\xa3\x19\x9c\xfb\x64\x29\x9b\xef\x95\x25\x85\x53\xb9\x35\x5f\x98\x4d\xaf\x00\x65\x1f\x19\xff\x33\xca\xd9\x23\x12\x8c\x40\xe0\x96\xe3\xa3\xff\x1d\xf1\x21\x31\x03\x1e\x11\x25\xc4\x7f\xd7\x34\xc6\x2a\x71\x0d\x00\x00\xff\xff\x0d\x60\xa7\x10\xd7\x01\x00\x00") +var __1000000001_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x8f\x41\x0a\xc2\x30\x10\x45\xf7\x39\x45\xee\x91\x95\xb5\x51\x02\xb6\x15\x9b\x45\x77\x21\xd2\x21\x0c\x98\xa4\x26\x29\x78\x7c\x11\xb1\x82\x32\x68\x77\xb3\xf8\xef\xff\x37\x95\xdc\xab\x56\x30\x56\x9f\xba\x23\x57\x6d\x2d\x07\xae\x76\x5c\x0e\xaa\xd7\x3d\xcf\x90\x10\xb2\x99\x22\x86\x92\x4d\x82\x29\x1a\x1c\xcd\xb9\x24\x00\xf1\x37\x11\xac\x87\x75\x58\x4c\xe8\x30\xd8\x0b\xcd\xeb\x4d\x75\x90\x14\x4f\x7d\xb3\xb4\xe5\x67\xe7\x1c\xf0\x3a\x3f\xaa\x6f\x84\xd5\x27\x50\x92\xf3\x84\xc0\x3b\x4a\xad\x7b\x28\x76\xb4\xc5\x9a\xe5\xf8\xb9\xff\x8d\x38\x0c\x84\xc0\x2b\x22\x18\xdb\x76\x4d\xa3\xb4\x60\xf7\x00\x00\x00\xff\xff\xe1\xb5\x30\xdc\xdb\x01\x00\x00") func _1000000001_initial_schemaDownSqlBytes() ([]byte, error) { return bindataRead( @@ -128,11 +128,11 @@ func _1000000001_initial_schemaDownSql() (*asset, error) { } info := bindataFileInfo{name: "1000000001_initial_schema.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x46, 0x11, 0x7d, 0x48, 0x3a, 0xe0, 0x31, 0xc5, 0x3a, 0x2e, 0xd9, 0xa8, 0x64, 0xed, 0xe9, 0x73, 0xfc, 0x74, 0x4d, 0xdc, 0x8e, 0x2f, 0x32, 0xf6, 0xcf, 0x71, 0xfc, 0x39, 0x9b, 0xc1, 0x92, 
0xca}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2c, 0xd2, 0x4, 0x8c, 0x45, 0xac, 0xd5, 0xee, 0x57, 0x8f, 0xba, 0x64, 0x82, 0x72, 0x50, 0x52, 0x95, 0x91, 0x23, 0xf5, 0x42, 0xf6, 0x3, 0x65, 0xb, 0x7, 0x31, 0xdc, 0xe5, 0x12, 0x12, 0x91}} return a, nil } -var __1000000001_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x57\x5f\x73\xdb\xb8\x11\x7f\xd7\xa7\xd8\xb9\x87\x5a\x9a\xa1\xe9\x77\xbb\x73\xa9\x22\x31\x39\xf6\x6c\x2a\x95\x98\x69\xae\x9d\x8e\x06\x22\x57\x24\x2e\x20\xc0\x00\xa0\x64\xf5\xcf\x77\xef\x2c\x40\x52\xa4\x2c\x27\x99\x4c\xeb\x17\x6b\x88\xc5\x6f\x7f\xfb\x7f\xf1\x36\x7a\x1f\x27\x0f\x93\xc9\x62\x1d\xcd\xd3\x08\xa2\x4f\x69\x94\x6c\xe2\x55\x02\xf1\x3b\x48\x56\x29\x44\x9f\xe2\x4d\xba\x01\xcb\x2b\x34\x19\x13\x98\xef\x1e\xbe\x25\x5b\x17\x5b\xab\x8b\xea\x9b\x72\x19\xb7\xf8\x6c\x1f\x26\x93\xdb\x5b\x58\x63\xa6\x74\x6e\x40\x63\xad\x0c\xb7\x4a\x9f\x40\xb2\x0a\x4d\x00\x3b\x65\x4b\x28\xb9\xb1\x4a\xf3\x8c\x09\x60\x32\x87\x5a\xa3\x41\x69\x03\x68\x0c\x97\x05\x30\x68\x24\xff\xd2\xe0\xf0\xf6\x96\xae\x6f\x21\x5e\x12\xfa\xb4\x91\x1a\x05\xb3\x98\x83\x55\x60\xcb\x91\x64\xbc\x0c\x67\x1d\xd5\x74\xfe\xf6\x31\x72\x87\xee\xbe\x81\xe9\x04\x00\xe0\xf6\x16\xd2\xf2\x3a\x7c\xe8\x04\x78\x0e\x3b\x5e\x18\xd4\x9c\x09\x67\x62\xf2\xf1\xf1\x11\x3e\xac\xe3\xa7\xf9\xfa\x37\xf8\x35\xfa\x2d\x98\x0c\x81\xe8\x72\x00\x56\xf3\x42\xb3\xea\x96\xcb\x1c\x9f\x31\x87\xbd\xd2\xb0\x67\xc6\x02\x86\x45\x08\x1a\x0b\x7c\xae\x61\xcf\x85\x45\xcd\x65\xe1\x15\xd1\xcd\xd6\x71\xbd\x9e\x16\x7b\xb1\x4a\x36\xe9\x7a\x1e\x27\x29\x64\x25\x66\x9f\x3d\x45\xa9\x24\x56\xb5\x3d\xc1\xe2\x97\x68\xf1\x2b\x4c\xa7\x0e\x61\xf5\x21\x5a\xcf\xd3\xd5\x7a\xfa\xc7\x9f\x67\x70\x73\x73\x7f\xef\x21\x67\xb3\xc9\xcc\xc7\x23\x92\x7b\xa5\x33\x04\x5b\x32\xeb\x23\x01\x4c\x63\xeb\xe7\xb0\x73\xd7\xc7\x24\xfe\xcb\xc7\x08\xe2\x64\x19\x7d\x1a\x78\xcd\x6b\xf6\xb2\x5b\x9e\x3f\xc3\x2a\x19\x9c\x3a\x06\xad\x9a\x85\x46\x66\xb1\xf3\x04\x78\x4f\x18\xe7\x89\x8b\x4c\x38\x3b\x02\x76\xcc\x60\x0e\x4a\x0e\xfd\x64\x7a\x4e\xd7\xc9\x
50\x42\x8e\x69\xc0\xc7\x4d\x9c\xbc\x87\x82\x4b\x98\x0a\x75\x44\xed\x5d\x33\xbb\xbf\x77\x9e\xa0\x03\x77\x6b\xab\x6a\x43\x6c\x87\x59\xca\xf4\x8e\x5b\xcd\xf4\x09\x2a\xb4\x2c\x67\x96\x01\xdb\xa9\xc6\x02\x1e\x50\x5a\x13\xc2\xc6\x2a\x8d\x39\x70\x09\x0c\x0c\xd6\x4c\x3b\x2b\xd9\x4e\x20\x30\x03\xdc\x02\x37\xa0\xf6\x16\x25\x11\x22\x17\xe4\x04\x4f\x66\x57\x8d\xb0\xbc\x16\xd8\x41\x8d\x33\xb3\x57\xf7\xbf\xcf\xcb\xa7\xb1\x25\xb6\xe4\xc6\x73\x08\xfc\xef\x8c\x49\xd8\x21\x30\x79\x1a\x98\xff\xe7\xcd\x2a\x39\x93\x3a\x96\x3c\x2b\xe1\xc8\x85\x20\x49\x8d\xb6\xd1\x12\xf3\x4e\xc1\xb1\x44\x09\x5f\x1a\xd4\x27\x0a\xa2\x37\x2f\x70\xe5\xdc\x42\xfb\x08\xfb\xd0\xd2\xe7\x42\xab\xa6\xc6\xbc\x2d\xf2\xdf\x8d\x92\x3b\x50\x35\x6a\x66\x95\x36\xf0\x26\x80\x37\x7f\x08\xe0\xcd\xbf\x83\x4e\x01\xdd\xf9\xd3\xcf\x21\xa4\x44\xd7\x94\xaa\x11\x39\xc1\x9a\x8a\x09\x01\x8e\xa0\x92\xe2\x14\x40\xad\x79\x45\xe4\x1b\x83\x90\x31\x83\x14\x0c\x2f\x24\xb8\xb1\x06\x4c\x93\x95\xc0\xcc\x7d\x8b\xdb\xc1\xc3\xbf\x7e\xfa\x9d\x1d\xd8\xf6\x80\xda\x70\x25\xcd\x4f\xf7\xf0\xf7\x30\x0c\xff\xf1\x9f\x81\x80\x60\xb2\x68\x58\x81\x74\x48\x7f\x2f\x04\xea\x46\x88\xad\xc6\x2f\x0d\x1a\x7b\x15\x81\x49\xa9\x2c\xb3\xad\x82\x0b\x04\xf7\xaf\x77\xb7\xf7\x48\x17\xd5\xab\x95\xdb\xcb\x72\xf3\xd5\xda\xed\xe4\xb6\xfd\x8f\x71\xf5\x76\x9f\xa7\xdd\x8f\x56\x59\x4c\x05\x7b\xd6\x62\x15\xa8\xda\xf2\x8a\xff\x13\xe1\xaf\xbf\x44\xeb\x08\x32\xc1\x1a\x83\x06\x8e\xdc\x96\x2d\xe1\x73\xe0\xda\x88\x9d\x83\x7a\x51\xc4\x2f\x59\x51\xb5\x0e\x33\xce\xd7\xf0\xfb\x38\x81\x4b\x66\x5d\xa9\xfa\x3c\x03\x75\x40\xed\x86\x18\x30\x63\x54\xc6\xdd\x2c\x70\xa4\xd8\xb0\x7c\xa6\x4a\x03\x75\xcc\x00\x78\x88\x21\x14\x42\xed\x98\x10\xa7\x19\x25\xaf\x46\x2a\x66\x2e\x0b\x81\xa4\x40\x36\x15\xfa\x89\x74\x60\xa2\x71\x49\x54\x28\x37\x8d\xda\xea\xe0\xe2\x04\x4d\xed\x6c\xcc\xd5\x51\x86\x93\xdb\x5b\x4f\xac\xd7\xd6\x51\xe1\x4a\xd2\xf5\xbe\xaf\xb9\x91\x37\x9a\x50\x0e\x85\x4a\x3b\x74\x05\x1f\x2f\xbb\x9a\x69\x8c\x9f\x69\x1a\xf7\x64\xa0\x22\x0d\x0c\x4c\x8d\x19\xdf\xf3\x6c\x00\x12\x80\xd2\x20\x94\xfa\xdc\xd4\x
6e\x00\x66\x8d\xd6\x28\x7d\x6f\x07\xb5\x1f\xbb\x81\xed\x2d\x6a\x6a\x53\x25\x33\xb0\x43\xec\x5b\x2d\x49\xe7\x64\x49\x3f\xc6\x5e\x23\xe2\x94\x74\xe0\x17\x13\x97\x59\xf7\xc5\x85\xa3\x3d\x75\x61\xba\x31\x90\xd1\x40\xe0\x4a\x06\x5d\x3f\xc4\x67\x56\x51\x3b\x24\x44\xcd\x5c\x5e\x23\x64\x25\x93\x05\xfa\xf6\x5a\xb0\xa6\x40\xd8\xb1\xec\x33\xc9\x8c\xcc\xd8\x21\xc5\xa3\x67\x3d\xea\xa4\xee\xda\xb6\xcd\x8e\x51\x37\x75\xab\x8e\x65\x55\x7d\x66\x4e\xa9\x84\xb9\x27\xe9\x1b\xab\xe3\x9e\xc6\x4f\xd1\x26\x9d\x3f\x7d\x48\xff\x76\x39\x8c\x5b\xac\xbd\x50\xcc\x12\x89\x5a\x71\x69\xdb\x4c\x79\xcd\x7c\x8f\xec\x65\x72\xd5\xd0\xb0\xa8\x35\x66\x9c\xba\xcd\x15\xfc\xf9\x39\x91\xfb\x7a\x20\x8f\x0d\x3b\x37\xdf\x53\xc3\x0e\x47\x7d\x63\xcb\x69\x30\x59\x2c\x50\x8f\xc9\x8e\x33\x6e\xba\xd7\xaa\x72\xe4\x2a\x46\x8e\xae\x6b\xc1\x33\x9f\xab\xcb\xb7\xb3\x91\x11\xbd\x05\x70\x64\x6d\x08\x31\x0f\x21\x51\x16\x3b\x7c\xd7\x8d\x2e\xd2\xa0\x62\x27\x90\x0a\x84\x92\x05\x52\xa0\xb9\xb1\x70\x47\xb9\x74\x60\x82\xe7\xa4\xc1\x4d\x0b\xa7\x23\x80\x52\x1d\xf1\x80\x3a\xbc\xe8\xca\xb2\x11\x82\xcc\x1c\x73\x90\xca\x3a\x5f\x74\x05\x3b\xaa\x71\x57\xda\xac\x2d\x6e\x9f\x08\x33\x0f\xeb\xb6\x83\xd7\xdc\x53\x29\x63\x29\x17\x50\x5a\x71\x82\xcf\x52\x1d\x65\xbb\x99\x38\xa7\xe3\xa8\xd8\x9a\x3a\x77\x91\xa9\x51\x73\x95\x53\x9b\x10\x27\x97\x9f\x59\xa6\x1a\xe9\xc9\x51\x4d\x75\x0a\x06\xfc\x7c\xbe\x9a\x10\xe2\x17\x85\x43\xa6\xe5\x28\xd0\x62\xde\x4e\x65\x1a\x5d\x96\x06\xae\xbd\xce\xb0\x77\x13\xf5\x8e\xff\xbb\xeb\xdc\xf2\xf1\x1d\xe9\xe5\xdc\xe6\x17\x21\x52\xe9\x7d\xf9\x1d\x19\x15\x5b\x97\x34\x25\x3b\xa0\x6f\x4b\x6d\x6d\x77\x6a\x0c\x97\x59\x6b\xa6\xd2\xbc\xe0\x92\xd1\xb0\xfd\x1a\xb1\x48\x9a\x46\x23\x79\x41\x49\x4f\x71\xd4\x95\xf7\x1c\x45\xee\x9c\xec\x3b\x2a\xf9\x9d\x16\x05\xa6\x5b\x35\x2f\x96\x6e\xa7\xce\x5d\x33\xdb\xf6\xd2\x79\xf9\x76\x57\xe8\x6f\x3a\xed\x52\x2d\xde\xb8\xb2\x9e\xc1\x3c\x59\xc2\x74\x44\x76\x7c\x74\xdd\xa0\x4e\x66\xd6\x43\xaf\xd6\xaf\x68\x69\x1b\xc8\x6b\x9a\xc6\xc7\xaf\x6b\xeb\xe4\xbc\xc6\x59\xeb\xca\x77\xab\x75\x14\xbf\x4f\x68\xa5\x3c\x
8f\xe2\x2d\xcf\x67\xb0\x8e\xde\x45\xeb\x28\x59\x44\x9b\xf3\x16\x41\xdf\x57\x09\x2c\xa3\xc7\x28\x8d\x60\x31\xdf\x2c\xe6\xcb\x08\x96\x24\xb9\xa6\xee\x1c\xbc\xc4\x1c\xf2\x18\x81\x0e\x1e\x16\x3f\x00\x7b\xd5\xce\x1f\xc5\x9f\x8c\x5f\x35\xe5\xa9\x46\xed\x96\xfe\x00\x6a\xa6\x2d\xa7\x94\x3a\xef\xbe\xb0\xf3\xad\xcd\x8d\xd3\x0d\x22\x94\xd6\xd6\xe6\xfe\xee\x2e\x57\x99\x09\xfb\x07\x77\x98\xa9\xea\x8e\xde\xae\xc6\xde\xb9\x4d\xf8\x76\xf0\x16\xbf\x3b\xeb\x30\x93\x4d\xf4\x18\x2d\xd2\xb6\x5a\xb6\xe7\x93\xe9\xcd\x70\xd8\xdd\x04\x70\x43\x08\x37\x63\xb2\x3b\xab\x11\x5f\x7b\x80\x0d\x1e\xa1\xa3\x05\x6d\x88\xbb\x6d\x73\x6d\xeb\x91\x56\xc9\x78\xc4\xfa\x4d\xcd\x9f\x75\x69\x39\x7b\xf8\x16\x5c\x1b\x90\xef\xc6\xec\x02\xf8\x35\xe0\xab\x21\xff\x1e\x0d\xd7\x73\xe5\x61\x32\x59\xac\x9e\x9e\xe2\xf4\x61\xf2\xdf\x00\x00\x00\xff\xff\x04\xb2\x25\xad\x4c\x11\x00\x00") +var __1000000001_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x58\x5b\x73\xdb\xba\x11\x7e\xd7\xaf\xd8\x39\x0f\xb5\x34\x23\xd3\xef\x76\xe7\xa4\x8a\xc4\xe4\xb0\xc7\xa6\x52\x49\x99\xe6\xb4\xd3\xd1\x40\xe4\x8a\x44\x02\x02\x0c\x00\x5a\x56\x2f\xff\xbd\xb3\x00\x78\xb3\xe5\x24\x73\xa6\xf5\x8b\x35\xc2\xe2\xdb\x6f\xef\x0b\xbd\x8d\xdf\x27\xe9\xdd\x64\xb2\xdc\xc4\x8b\x5d\x0c\xf1\xa7\x5d\x9c\x6e\x93\x75\x0a\xc9\x3b\x48\xd7\x3b\x88\x3f\x25\xdb\xdd\x16\x2c\xaf\xd0\x64\x4c\x60\x7e\xb8\xfb\x9e\x6c\x5d\xec\xad\x2e\xaa\xef\xca\x65\xdc\xe2\x93\xbd\x9b\x4c\xae\xaf\x61\x83\x99\xd2\xb9\x01\x8d\xb5\x32\xdc\x2a\x7d\x06\xc9\x2a\x34\x73\x38\x28\x5b\x42\xc9\x8d\x55\x9a\x67\x4c\x00\x93\x39\xd4\x1a\x0d\x4a\x3b\x87\xc6\x70\x59\x00\x83\x46\xf2\xaf\x0d\x0e\x6f\xef\xe9\xfa\x1e\x92\x15\xa1\x4f\x1b\xa9\x51\x30\x8b\x39\x58\x05\xb6\x1c\x49\x26\xab\x68\xd6\x52\xdd\x2d\xde\xde\xc7\xee\xd0\xdd\x37\x30\x9d\x00\x00\x5c\x5f\xc3\xae\xbc\x0c\x1f\x39\x01\x9e\xc3\x81\x17\x06\x35\x67\xc2\x99\x98\x7e\xbc\xbf\x87\x0f\x9b\xe4\x61\xb1\xf9\x0d\x7e\x8d\x7f\x9b\x4f\x86\x40\x74\x79\x0e\x56\xf3\x42\xb3\xea\x9a\xcb\x1c\x9f\x30\x87\xa3\xd2\x70\x64\xc6\x02\x46\x45\x04\x1
a\x0b\x7c\xaa\xe1\xc8\x85\x45\xcd\x65\xe1\x15\xd1\xcd\xe0\xb8\x4e\x4f\xc0\x5e\xae\xd3\xed\x6e\xb3\x48\xd2\x1d\x64\x25\x66\x5f\x3c\x45\xa9\x24\x56\xb5\x3d\xc3\xf2\x97\x78\xf9\x2b\x4c\xa7\x0e\x61\xfd\x21\xde\x2c\x76\xeb\xcd\xf4\x8f\x3f\xcf\xe0\xea\xea\xf6\xd6\x43\xce\x66\x93\x99\x8f\x47\x2c\x8f\x4a\x67\x08\xb6\x64\xd6\x47\x02\x98\xc6\xe0\xe7\xa8\x75\xd7\xc7\x34\xf9\xcb\xc7\x18\x92\x74\x15\x7f\x1a\x78\xcd\x6b\xf6\xb2\x7b\x9e\x3f\xc1\x3a\x1d\x9c\x3a\x06\x41\xcd\x52\x23\xb3\xd8\x7a\x02\xbc\x27\x8c\xf3\xc4\xb3\x4c\xe8\x1d\x01\x07\x66\x30\x07\x25\x87\x7e\x32\x1d\xa7\xcb\x64\x28\x21\xc7\x34\xe0\xe3\x36\x49\xdf\x43\xc1\x25\x4c\x85\x3a\xa1\xf6\xae\x99\xdd\xde\x3a\x4f\xd0\x81\xbb\xb5\x57\xb5\x21\xb6\xc3\x2c\x65\xfa\xc0\xad\x66\xfa\x0c\x15\x5a\x96\x33\xcb\x80\x1d\x54\x63\x01\x1f\x51\x5a\x13\xc1\xd6\x2a\x8d\x39\x70\x09\x0c\x0c\xd6\x4c\x3b\x2b\xd9\x41\x20\x30\x03\xdc\x02\x37\xa0\x8e\x16\x25\x11\x22\x17\xe4\x04\x4f\x66\x57\x8d\xb0\xbc\x16\xd8\x42\x8d\x33\xb3\x53\xf7\xbf\xcf\xcb\x87\xb1\x25\xb6\xe4\xc6\x73\x98\xfb\xcf\x19\x93\x70\x40\x60\xf2\x3c\x30\xff\xcf\xdb\x75\xda\x93\x3a\x95\x3c\x2b\xe1\xc4\x85\x20\x49\x8d\xb6\xd1\x12\xf3\x56\xc1\xa9\x44\x09\x5f\x1b\xd4\x67\x0a\xa2\x37\x6f\xee\xca\x39\x40\xfb\x08\xfb\xd0\xd2\xd7\x85\x56\x4d\x8d\x79\x28\xf2\xcf\x46\xc9\x03\xa8\x1a\x35\xb3\x4a\x1b\x78\x33\x87\x37\x7f\x98\xc3\x9b\x7f\xcf\x5b\x05\x74\xe7\x4f\x3f\x47\xb0\x23\xba\xa6\x54\x8d\xc8\x09\xd6\x54\x4c\x08\x70\x04\x95\x14\xe7\x39\xd4\x9a\x57\x44\xbe\x31\x08\x19\x33\x48\xc1\xf0\x42\x82\x1b\x6b\xc0\x34\x59\x09\xcc\xdc\x06\xdc\x16\x1e\xfe\xf5\xd3\x67\xf6\xc8\xf6\x8f\xa8\x0d\x57\xd2\xfc\x74\x0b\x7f\x8f\xa2\xe8\x1f\xff\x19\x08\x08\x26\x8b\x86\x15\x48\x87\xf4\xf7\x42\xa0\x6e\x84\xd8\x6b\xfc\xda\xa0\xb1\x17\x11\x98\x94\xca\x32\x1b\x14\x3c\x43\x70\xff\x3a\x77\x7b\x8f\xb4\x51\xbd\x58\xb9\x9d\x2c\x37\xdf\xac\xdd\x56\x6e\xdf\x7d\x18\x57\x6f\xfb\xf5\xb4\xfd\x10\x94\x25\x54\xb0\xbd\x16\xab\x40\xd5\x96\x57\xfc\x9f\x08\x7f\xfd\x25\xde\xc4\x90\x09\xd6\x18\x34\x70\xe2\xb6\x0c\x84\xfb\xc0\x85\x88\xf5\x41\x7
d\x56\xc4\x2f\x59\x51\xb5\x0e\x33\xce\xd7\xf0\xfb\x24\x85\xe7\xcc\xda\x52\xf5\x79\x06\xea\x11\xb5\x1b\x62\xc0\x8c\x51\x19\x77\xb3\xc0\x91\x62\xc3\xf2\x99\x2a\x0d\xd4\x31\xe7\xc0\x23\x8c\xa0\x10\xea\xc0\x84\x38\xcf\x28\x79\x35\x52\x31\x73\x59\x08\x24\x05\xb2\xa9\xd0\x4f\xa4\x47\x26\x1a\x97\x44\x85\x72\xd3\x28\x54\x07\x17\x67\x68\x6a\x67\x63\xae\x4e\x32\x9a\x5c\x5f\x7b\x62\x9d\xb6\x96\x0a\x57\x92\xae\x77\x7d\xcd\x8d\xbc\xd1\x84\x72\x28\x54\xda\x91\x2b\xf8\x64\xd5\xd6\x4c\x63\xfc\x4c\xd3\x78\x24\x03\x15\x69\x60\x60\x6a\xcc\xf8\x91\x67\x03\x90\x39\x28\x0d\x42\xa9\x2f\x4d\xed\x06\x60\xd6\x68\x8d\xd2\xf7\x76\x50\xc7\xb1\x1b\xd8\xd1\xa2\xa6\x36\x55\x32\x03\x07\xc4\xae\xd5\x92\x74\x4e\x96\x74\x63\xec\x35\x22\x4e\x49\x0b\xfe\x6c\xe2\x32\xeb\xbe\x71\xe1\x08\xa7\x2e\x4c\x57\x06\x32\x1a\x08\x5c\xc9\x79\xdb\x0f\xf1\x89\x55\xd4\x0e\x09\x51\x33\x97\xd7\x08\x59\xc9\x64\x81\xbe\xbd\x16\xac\x29\x10\x0e\x2c\xfb\x42\x32\x23\x33\x0e\x48\xf1\xe8\x58\x8f\x3a\x29\xf5\x44\x34\xfb\x5a\x71\x4a\x8f\xae\x9d\x2e\xda\x55\x82\xe7\x28\x2d\x3f\x72\xd4\x8e\x06\x69\xf5\x57\x88\xb1\xcb\xbd\x16\x9d\xf2\x0c\xf3\xd0\x76\xb8\x01\xa9\x2c\x30\x49\x31\x3a\x6a\x55\x01\x93\xca\x96\xa8\x5b\x05\x6e\x08\xcc\xe1\xd0\x58\xd0\x8c\x0e\xe0\x73\x63\x6c\xbf\xc2\xf4\x7a\x7d\x07\x0f\x3c\x39\x0d\x13\x8b\x05\xea\xf1\x26\xe1\xf6\x32\xcb\xaa\xba\x77\xb3\xe7\xe3\x3d\xea\x31\x9c\xa3\x77\xc9\x43\xbc\xdd\x2d\x1e\x3e\xec\xfe\xf6\x7c\x73\x08\x58\x47\xa1\x98\x25\x9b\x9c\x53\x42\x5a\xbf\x16\x2b\x8f\xec\x65\x72\xd5\xd0\x64\xab\x35\x66\x9c\x5a\xe3\x05\xfc\x45\x5f\x75\x5d\xf1\x7a\xbf\xf6\x63\x86\x1f\x69\xba\x44\xa3\x26\xf7\xaa\xe1\xe3\xf2\x98\x3a\x57\x13\xb9\x8a\x51\x56\xd4\xb5\xe0\x99\x2f\xac\xd5\xdb\xd9\xc8\x88\xce\x02\x38\xb1\x90\x6f\x14\xbd\x54\x59\xec\x62\x54\x86\x0b\x03\x25\x15\x3b\x83\x54\x20\x94\x2c\x90\xb2\x92\x1b\x0b\x37\x94\xf8\x8f\x4c\xf0\x9c\x34\xb8\xd1\xe6\x74\xcc\xa1\x54\x27\x7c\x6c\x43\xd8\x8f\x10\xd9\x08\x41\x66\x8e\x39\x50\xc2\x90\x2f\xda\xee\x32\x6a\x48\xae\x0f\xb1\xd0\x89\x7c\xb2\xcf\x3c\xac\x5b\x65\x5e\x73\x4f\xa5\x8
c\xa5\x5c\x40\x69\xc5\x19\xbe\x48\x75\x92\x61\x8d\x0a\xc9\x3c\xec\x0c\x4d\x9d\xbb\xc8\xd4\xa8\xb9\xca\xa9\xa7\x89\xb3\x2b\xa6\x2c\x53\x8d\xf4\xe4\xa8\x01\xb4\x0a\x06\xfc\x7c\x71\x99\x08\x92\x17\x55\x4e\xa6\xe5\x28\xd0\x62\x1e\x56\x08\x9a\xb3\x96\xb6\x03\x7b\x99\x61\xe7\x26\x6a\x74\xff\x77\xd7\xb9\x4d\xe9\x07\xd2\xcb\xb9\xcd\x6f\x6d\xa4\xd2\xfb\xf2\x07\x32\x2a\xb1\x2e\x69\x4a\xf6\x88\xbe\x87\x86\x46\xd4\xaa\x31\x5c\x66\xc1\x4c\xa5\x79\xc1\x25\xa3\xcd\xe0\x5b\xc4\x62\x69\x1a\x8d\xe4\x05\x25\x3d\xc5\xd1\x08\x39\x72\x14\xb9\x73\xb2\x6f\xff\xe4\x77\xda\x6a\x98\x0e\x6a\x5e\xbc\x10\x9c\x3a\x77\xcd\xec\xc3\xa5\xfe\xa5\xe0\xae\xd0\xdf\x74\xda\xa6\x5a\xb2\x75\x65\x3d\x83\x45\xba\x82\xe9\x88\xec\xf8\xe8\xb2\x41\xad\xcc\xac\x83\x5e\x6f\x5e\xd1\x12\x1a\xc8\x6b\x9a\xc6\xc7\xaf\x6b\x6b\xe5\xbc\xc6\x59\x70\xe5\xbb\xf5\x26\x4e\xde\xa7\xb4\xff\xf6\x7b\xc3\x9e\xe7\x33\xd8\xc4\xef\xe2\x4d\x9c\x2e\xe3\x6d\xbf\xf2\xd0\xf7\xeb\x14\x56\xf1\x7d\xbc\x8b\x61\xb9\xd8\x2e\x17\xab\x18\x56\x24\xb9\xa1\x51\x32\x7f\x89\x39\xe4\x31\x02\x1d\xbc\x82\x7e\x07\xec\x45\x3b\x7f\x2f\xfe\x64\xfc\x04\x2b\xcf\x35\xea\x30\x9c\x6a\xa6\x2d\xa7\x94\xea\x17\x75\x38\xf8\xd6\xe6\x66\xff\x16\x11\x4a\x6b\x6b\x73\x7b\x73\x93\xab\xcc\x44\xdd\xaf\x03\x51\xa6\xaa\x1b\x7a\x68\x1b\x7b\xe3\xd6\xf6\xeb\xc1\x0f\x07\x37\xbd\x0e\x33\xd9\xc6\xf7\xf1\x72\x17\xaa\x65\xdf\x9f\x4c\xaf\x46\x93\xf9\x6a\x0e\x57\x04\x71\x35\x66\x7b\xb0\x1a\xf1\xb5\xe7\xe2\xe0\xc9\x3c\x5a\x27\x47\xc0\xfb\x90\x6d\x7b\x0f\xb5\x4e\x9f\x6d\x04\x7e\xb3\xf4\x87\x6d\x66\xce\xee\xbe\x0b\x18\x82\xf2\xe3\xa8\x6d\x14\xbf\x09\x7d\x31\xf0\x3f\xa4\xe3\x72\xca\xdc\x4d\x26\xcb\xf5\xc3\x43\xb2\xbb\x9b\xfc\x37\x00\x00\xff\xff\x60\x0c\x23\xbc\x00\x12\x00\x00") func _1000000001_initial_schemaUpSqlBytes() ([]byte, error) { return bindataRead( @@ -148,7 +148,7 @@ func _1000000001_initial_schemaUpSql() (*asset, error) { } info := bindataFileInfo{name: "1000000001_initial_schema.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: 
info, digest: [32]uint8{0x92, 0xb4, 0x34, 0xea, 0xec, 0x2b, 0x56, 0x72, 0xb, 0x7c, 0xa2, 0xb2, 0x6d, 0xae, 0x73, 0x94, 0x87, 0x93, 0x31, 0x3b, 0x53, 0xce, 0x4d, 0x8d, 0x96, 0xd1, 0x35, 0xdc, 0xdf, 0xa, 0x86, 0x2e}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbd, 0x80, 0xef, 0x57, 0xde, 0xe0, 0x13, 0x78, 0xc8, 0xad, 0x78, 0x9, 0x5, 0xa6, 0x95, 0x19, 0x55, 0xb0, 0x53, 0xd7, 0x79, 0x95, 0xbb, 0xe5, 0xed, 0xaa, 0xda, 0xc5, 0x43, 0xb1, 0xc, 0x63}} return a, nil } From 87e493448401b03b867335bd199f743e6b070687 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 11:48:38 -0700 Subject: [PATCH 57/78] README: inserting fake data; global settings Signed-off-by: Stephen Gutekanst --- README.codeinsights.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/README.codeinsights.md b/README.codeinsights.md index 1cd67dfd95d6..1536a4641c65 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -84,6 +84,46 @@ INSERT INTO series_points( ); ``` +## Inserting fake data + +``` +INSERT INTO series_points( + time, + value, + metadata_id, + repo_id, + repo_name_id, + original_repo_name_id) +SELECT time, + random()*80 - 40, + (SELECT id FROM metadata WHERE metadata = '{"hello": "world", "languages": ["Go", "Python", "Java"]}'), + 2, + (SELECT id FROM repo_names WHERE name = 'github.com/gorilla/mux-renamed'), + (SELECT id FROM repo_names WHERE name = 'github.com/gorilla/mux-original') + FROM generate_series(TIMESTAMP '2020-01-01 00:00:00', TIMESTAMP '2020-06-01 00:00:00', INTERVAL '10 min') AS time; +``` + +## Example Global Settings + +``` + "insights": [ + { + "title": "fmt usage", + "description": "fmt.Errorf/fmt.Printf usage", + "series": [ + { + "label": "fmt.Errorf", + "search": "errorf", + }, + { + "label": "printf", + "search": "fmt.Printf", + } + ] + } + ] +``` + ## Query data ### All data From 6615f35d42368c5695b9a4a49897522de829d7f1 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: 
Fri, 29 Jan 2021 11:49:21 -0700 Subject: [PATCH 58/78] insights: store/resolvers: implement series data point querying Signed-off-by: Stephen Gutekanst --- .../resolvers/insight_series_resolver.go | 32 +++++++++++++++++-- enterprise/internal/insights/store/store.go | 9 ++++-- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver.go b/enterprise/internal/insights/resolvers/insight_series_resolver.go index 2872a1567836..39c8e446651e 100644 --- a/enterprise/internal/insights/resolvers/insight_series_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_series_resolver.go @@ -2,7 +2,6 @@ package resolvers import ( "context" - "errors" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" @@ -19,6 +18,33 @@ type insightSeriesResolver struct { func (r *insightSeriesResolver) Label() string { return r.series.Label } func (r *insightSeriesResolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { - // TODO: locate time series from r.store DB. 
- return nil, errors.New("not yet implemented") + var opts store.SeriesPointsOpts + opts.SeriesID = nil // FUTURE: TODO: set opts.SeriesID to effective hash of r.series + if args.From != nil { + opts.From = &args.From.Time + } + if args.To != nil { + opts.To = &args.To.Time + } + // FUTURE: Pass through opts.Limit + + points, err := r.store.SeriesPoints(ctx, opts) + if err != nil { + return nil, err + } + resolvers := make([]graphqlbackend.InsightsDataPointResolver, 0, len(points)) + for _, point := range points { + resolvers = append(resolvers, insightsDataPointResolver{point}) + } + return resolvers, nil +} + +var _ graphqlbackend.InsightsDataPointResolver = insightsDataPointResolver{} + +type insightsDataPointResolver struct{ p store.SeriesPoint } + +func (i insightsDataPointResolver) DateTime() graphqlbackend.DateTime { + return graphqlbackend.DateTime{Time: i.p.Time} } + +func (i insightsDataPointResolver) Value() float64 { return i.p.Value } diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 60a196247bef..94b812789372 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -46,6 +46,7 @@ func (s *Store) With(other basestore.ShareableStore) *Store { type SeriesPointsOpts struct { SeriesID *int32 + From, To *time.Time Limit int } @@ -79,12 +80,17 @@ ORDER BY time DESC ` func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { - joins := []*sqlf.Query{} preds := []*sqlf.Query{} if opts.SeriesID != nil { preds = append(preds, sqlf.Sprintf("series_id = %s", *opts.SeriesID)) } + if opts.From != nil { + preds = append(preds, sqlf.Sprintf("time > %s", *opts.From)) + } + if opts.To != nil { + preds = append(preds, sqlf.Sprintf("time < %s", *opts.To)) + } if len(preds) == 0 { preds = append(preds, sqlf.Sprintf("TRUE")) @@ -95,7 +101,6 @@ func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { } return sqlf.Sprintf( seriesPointsQueryFmtstr+limitClause, 
- sqlf.Join(joins, "\n"), sqlf.Join(preds, "\n AND "), ) } From 8db18bdf56fe492400e3a589b531204c0bb7fa7f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 29 Jan 2021 17:30:39 -0700 Subject: [PATCH 59/78] WIP repo-updater integration Signed-off-by: Stephen Gutekanst --- enterprise/cmd/repo-updater/main.go | 2 + .../insights/background/background.go | 28 ++ .../internal/insights/background/graphql.go | 203 ++++++++++++ .../internal/insights/background/metrics.go | 84 +++++ .../internal/insights/background/workers.go | 289 ++++++++++++++++++ .../insights/background/workers_test.go | 125 ++++++++ enterprise/internal/insights/insights.go | 25 +- 7 files changed, 750 insertions(+), 6 deletions(-) create mode 100644 enterprise/internal/insights/background/background.go create mode 100644 enterprise/internal/insights/background/graphql.go create mode 100644 enterprise/internal/insights/background/metrics.go create mode 100644 enterprise/internal/insights/background/workers.go create mode 100644 enterprise/internal/insights/background/workers_test.go diff --git a/enterprise/cmd/repo-updater/main.go b/enterprise/cmd/repo-updater/main.go index f8718be58799..bce8c07f573d 100644 --- a/enterprise/cmd/repo-updater/main.go +++ b/enterprise/cmd/repo-updater/main.go @@ -16,6 +16,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" codemonitorsBackground "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors/background" edb "github.com/sourcegraph/sourcegraph/enterprise/internal/database" + insightsBackground "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/bac" "github.com/sourcegraph/sourcegraph/internal/actor" ossAuthz "github.com/sourcegraph/sourcegraph/internal/authz" "github.com/sourcegraph/sourcegraph/internal/conf" @@ -48,6 +49,7 @@ func enterpriseInit( ctx := actor.WithInternalActor(context.Background()) codemonitorsBackground.StartBackgroundJobs(ctx, db) + insightsBackground.StartBackgroundJobs(ctx, db) 
campaigns.InitBackgroundJobs(ctx, db, cf, server) diff --git a/enterprise/internal/insights/background/background.go b/enterprise/internal/insights/background/background.go new file mode 100644 index 000000000000..747e34633bba --- /dev/null +++ b/enterprise/internal/insights/background/background.go @@ -0,0 +1,28 @@ +package background + +import ( + "context" + "database/sql" + + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" + "github.com/sourcegraph/sourcegraph/internal/goroutine" +) + +func StartBackgroundJobs(ctx context.Context, db *sql.DB) { + resolver := insights.InitResolver(ctx, db) + + triggerMetrics := newMetricsForTriggerQueries() + actionMetrics := newActionMetrics() + + routines := []goroutine.BackgroundRoutine{ + /* + newTriggerQueryEnqueuer(ctx, resolver), + newTriggerJobsLogDeleter(ctx, resolver), + newTriggerQueryRunner(ctx, resolver, triggerMetrics), + newTriggerQueryResetter(ctx, resolver, triggerMetrics), + newActionRunner(ctx, resolver, actionMetrics), + newActionJobResetter(ctx, resolver, actionMetrics), + */ + } + go goroutine.MonitorBackgroundRoutines(ctx, routines...) +} diff --git a/enterprise/internal/insights/background/graphql.go b/enterprise/internal/insights/background/graphql.go new file mode 100644 index 000000000000..303c91908027 --- /dev/null +++ b/enterprise/internal/insights/background/graphql.go @@ -0,0 +1,203 @@ +package background + +// TODO: likely worth extracting to shared package? 
+ +/* +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "net/url" + "runtime" + "time" + + "github.com/sourcegraph/sourcegraph/internal/api" + + "golang.org/x/net/context/ctxhttp" + + "github.com/pkg/errors" +) + +type graphQLQuery struct { + Query string `json:"query"` + Variables interface{} `json:"variables"` +} + +const gqlSearchQuery = `query Search( + $query: String!, +) { + search(query: $query) { + results { + approximateResultCount + limitHit + cloning { name } + timedout { name } + results { + __typename + ... on FileMatch { + resource + limitHit + lineMatches { + preview + lineNumber + offsetAndLengths + } + } + ... on CommitSearchResult { + refs { + name + displayName + prefix + repository { + name + } + } + sourceRefs { + name + displayName + prefix + repository { + name + } + } + messagePreview { + value + highlights { + line + character + length + } + } + diffPreview { + value + highlights { + line + character + length + } + } + commit { + repository { + name + } + oid + abbreviatedOID + author { + person { + displayName + avatarURL + } + date + } + message + } + } + } + alert { + title + description + proposedQueries { + description + query + } + } + } + } +}` + +type gqlSearchVars struct { + Query string `json:"query"` +} + +type gqlSearchResponse struct { + Data struct { + Search struct { + Results struct { + ApproximateResultCount string + Cloning []*api.Repo + Timedout []*api.Repo + Results []interface{} + } + } + } + Errors []interface{} +} + +func search(ctx context.Context, query string) (*gqlSearchResponse, error) { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(graphQLQuery{ + Query: gqlSearchQuery, + Variables: gqlSearchVars{Query: query}, + }) + if err != nil { + return nil, errors.Wrap(err, "Encode") + } + + url, err := gqlURL("Search") + if err != nil { + return nil, errors.Wrap(err, "constructing frontend URL") + } + + resp, err := ctxhttp.Post(ctx, nil, url, "application/json", &buf) + if err != nil { + 
return nil, errors.Wrap(err, "Post") + } + defer resp.Body.Close() + + var res *gqlSearchResponse + if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { + return nil, errors.Wrap(err, "Decode") + } + if len(res.Errors) > 0 { + return res, fmt.Errorf("graphql: errors: %v", res.Errors) + } + return res, nil +} + +func gqlURL(queryName string) (string, error) { + u, err := url.Parse(api.InternalClient.URL) + if err != nil { + return "", err + } + u.Path = "/.internal/graphql" + u.RawQuery = queryName + return u.String(), nil +} + +// extractTime extracts the time from the given search result. +func extractTime(result interface{}) (t *time.Time, err error) { + // Use recover because we assume the data structure here a lot, for less + // error checking. + defer func() { + if r := recover(); r != nil { + // Same as net/http + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + log.Printf("failed to extract time from search result: %v\n%s", r, buf) + err = fmt.Errorf("failed to extract time from search result") + } + }() + + m := result.(map[string]interface{}) + typeName := m["__typename"].(string) + switch typeName { + case "CommitSearchResult": + commit := m["commit"].(map[string]interface{}) + author := commit["author"].(map[string]interface{}) + date := author["date"].(string) + + // This relies on the date format that our API returns. It was previously broken + // and should be checked first in case date extraction stops working. 
+ t, err := time.Parse(time.RFC3339, date) + if err != nil { + return nil, err + } + return &t, nil + default: + return nil, fmt.Errorf("unexpected result __typename %q", typeName) + } +} +*/ \ No newline at end of file diff --git a/enterprise/internal/insights/background/metrics.go b/enterprise/internal/insights/background/metrics.go new file mode 100644 index 000000000000..47d6f2ebb149 --- /dev/null +++ b/enterprise/internal/insights/background/metrics.go @@ -0,0 +1,84 @@ +package background + +import ( + "github.com/inconshreveable/log15" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + + "github.com/sourcegraph/sourcegraph/internal/observation" + "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/internal/workerutil" +) + +type insightsMetrics struct { + workerMetrics workerutil.WorkerMetrics + resets prometheus.Counter + resetFailures prometheus.Counter + errors prometheus.Counter +} + +func newMetricsForTriggerQueries() insightsMetrics { + observationContext := &observation.Context{ + Logger: log15.Root(), + Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, + Registerer: prometheus.DefaultRegisterer, + } + + resetFailures := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_query_reset_failures_total", + Help: "The number of reset failures.", + }) + observationContext.Registerer.MustRegister(resetFailures) + + resets := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_query_resets_total", + Help: "The number of records reset.", + }) + observationContext.Registerer.MustRegister(resets) + + errors := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_query_errors_total", + Help: "The number of errors that occur during job.", + }) + observationContext.Registerer.MustRegister(errors) + + return insightsMetrics{ + workerMetrics: workerutil.NewMetrics(observationContext, "insights_trigger_queries", nil), + resets: 
resets, + resetFailures: resetFailures, + errors: errors, + } +} + +func newActionMetrics() insightsMetrics { + observationContext := &observation.Context{ + Logger: log15.Root(), + Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, + Registerer: prometheus.DefaultRegisterer, + } + + resetFailures := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_action_reset_failures_total", + Help: "The number of reset failures.", + }) + observationContext.Registerer.MustRegister(resetFailures) + + resets := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_action_resets_total", + Help: "The number of records reset.", + }) + observationContext.Registerer.MustRegister(resets) + + errors := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_action_errors_total", + Help: "The number of errors that occur during job.", + }) + observationContext.Registerer.MustRegister(errors) + + return insightsMetrics{ + workerMetrics: workerutil.NewMetrics(observationContext, "insights_actions", nil), + resets: resets, + resetFailures: resetFailures, + errors: errors, + } +} diff --git a/enterprise/internal/insights/background/workers.go b/enterprise/internal/insights/background/workers.go new file mode 100644 index 000000000000..a505c601354a --- /dev/null +++ b/enterprise/internal/insights/background/workers.go @@ -0,0 +1,289 @@ +package background + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/inconshreveable/log15" + "github.com/keegancsmith/sqlf" + + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/email" + "github.com/sourcegraph/sourcegraph/internal/goroutine" + "github.com/sourcegraph/sourcegraph/internal/workerutil" + "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" + dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" +) + +const ( + jobLogsRetentionInDays int = 7 +) + 
+func newTriggerQueryRunner(ctx context.Context, s *store.Store, metrics insightsMetrics) *workerutil.Worker { + options := workerutil.WorkerOptions{ + Name: "code_insights_trigger_jobs_worker", + NumHandlers: 1, + Interval: 5 * time.Second, + Metrics: metrics.workerMetrics, + } + worker := dbworker.NewWorker(ctx, createDBWorkerStoreForTriggerJobs(s), &queryRunner{s}, options) + return worker +} + +func newTriggerQueryEnqueuer(ctx context.Context, store *store.Store) goroutine.BackgroundRoutine { + enqueueActive := goroutine.NewHandlerWithErrorMessage( + "code_insights_trigger_query_enqueuer", + func(ctx context.Context) error { + return store.EnqueueTriggerQueries(ctx) + }) + return goroutine.NewPeriodicGoroutine(ctx, 1*time.Minute, enqueueActive) +} + +func newTriggerQueryResetter(ctx context.Context, s *store.Store, metrics insightsMetrics) *dbworker.Resetter { + workerStore := createDBWorkerStoreForTriggerJobs(s) + + options := dbworker.ResetterOptions{ + Name: "code_insights_trigger_jobs_worker_resetter", + Interval: 1 * time.Minute, + Metrics: dbworker.ResetterMetrics{ + Errors: metrics.errors, + RecordResetFailures: metrics.resetFailures, + RecordResets: metrics.resets, + }, + } + return dbworker.NewResetter(workerStore, options) +} + +func newTriggerJobsLogDeleter(ctx context.Context, store *store.Store) goroutine.BackgroundRoutine { + deleteLogs := goroutine.NewHandlerWithErrorMessage( + "code_insights_trigger_jobs_log_deleter", + func(ctx context.Context) error { + // Delete obsolete logs. + err := store.DeleteObsoleteJobLogs(ctx) + if err != nil { + return err + } + // Delete old logs. 
+ err = store.DeleteOldJobLogs(ctx, jobLogsRetentionInDays) + if err != nil { + return err + } + return nil + }) + return goroutine.NewPeriodicGoroutine(ctx, 60*time.Minute, deleteLogs) +} + +func newActionRunner(ctx context.Context, s *store.Store, metrics insightsMetrics) *workerutil.Worker { + options := workerutil.WorkerOptions{ + Name: "code_insights_action_jobs_worker", + NumHandlers: 1, + Interval: 5 * time.Second, + Metrics: metrics.workerMetrics, + } + worker := dbworker.NewWorker(ctx, createDBWorkerStoreForActionJobs(s), &actionRunner{s}, options) + return worker +} + +func newActionJobResetter(ctx context.Context, s *store.Store, metrics insightsMetrics) *dbworker.Resetter { + workerStore := createDBWorkerStoreForActionJobs(s) + + options := dbworker.ResetterOptions{ + Name: "code_insights_action_jobs_worker_resetter", + Interval: 1 * time.Minute, + Metrics: dbworker.ResetterMetrics{ + Errors: metrics.errors, + RecordResetFailures: metrics.resetFailures, + RecordResets: metrics.resets, + }, + } + return dbworker.NewResetter(workerStore, options) +} + +/* +func createDBWorkerStoreForTriggerJobs(s *store.Store) dbworkerstore.Store { + return dbworkerstore.New(s.Handle(), dbworkerstore.Options{ + Name: "code_insights_trigger_jobs_worker_store", + TableName: "cm_trigger_jobs", + ColumnExpressions: cm.TriggerJobsColumns, + Scan: cm.ScanTriggerJobs, + StalledMaxAge: 60 * time.Second, + RetryAfter: 10 * time.Second, + MaxNumRetries: 3, + OrderByExpression: sqlf.Sprintf("id"), + }) +} + +func createDBWorkerStoreForActionJobs(s *store.Store) dbworkerstore.Store { + return dbworkerstore.New(s.Handle(), dbworkerstore.Options{ + Name: "code_insights_action_jobs_worker_store", + TableName: "cm_action_jobs", + ColumnExpressions: cm.ActionJobsColumns, + Scan: cm.ScanActionJobs, + StalledMaxAge: 60 * time.Second, + RetryAfter: 10 * time.Second, + MaxNumRetries: 3, + OrderByExpression: sqlf.Sprintf("id"), + }) +} + +type queryRunner struct { + *store.Store +} + +func (r 
*queryRunner) Handle(ctx context.Context, workerStore dbworkerstore.Store, record workerutil.Record) (err error) { + defer func() { + if err != nil { + log15.Error("queryRunner.Handle", "error", err) + } + }() + + s := r.Store.With(workerStore) + + var q *cm.MonitorQuery + q, err = s.GetQueryByRecordID(ctx, record.RecordID()) + if err != nil { + return err + } + newQuery := newQueryWithAfterFilter(q) + + // Search. + var results *gqlSearchResponse + results, err = search(ctx, newQuery) + if err != nil { + return err + } + var numResults int + if results != nil { + numResults = len(results.Data.Search.Results.Results) + } + if numResults > 0 { + err := s.EnqueueActionEmailsForQueryIDInt64(ctx, q.Id, record.RecordID()) + if err != nil { + return fmt.Errorf("store.EnqueueActionEmailsForQueryIDInt64: %w", err) + } + } + // Log next_run and latest_result to table cm_queries. + newLatestResult := latestResultTime(q.LatestResult, results, err) + err = s.SetTriggerQueryNextRun(ctx, q.Id, s.Clock()().Add(5*time.Minute), newLatestResult.UTC()) + if err != nil { + return err + } + // Log the actual query we ran and whether we got any new results. 
+ err = s.LogSearch(ctx, newQuery, numResults, record.RecordID()) + if err != nil { + return fmt.Errorf("LogSearch: %w", err) + + } + return nil +} + +type actionRunner struct { + *store.Store +} + +func (r *actionRunner) Handle(ctx context.Context, workerStore dbworkerstore.Store, record workerutil.Record) (err error) { + log15.Info("actionRunner.Handle starting") + defer func() { + if err != nil { + log15.Error("actionRunner.Handle", "error", err) + } + }() + + s := r.Store.With(workerStore) + + var ( + j *cm.ActionJob + m *cm.ActionJobMetadata + e *cm.MonitorEmail + recs []*cm.Recipient + data *email.TemplateDataNewSearchResults + ) + + var ok bool + j, ok = record.(*cm.ActionJob) + if !ok { + return fmt.Errorf("type assertion failed") + } + + m, err = s.GetActionJobMetadata(ctx, record.RecordID()) + if err != nil { + return fmt.Errorf("store.GetActionJobMetadata: %w", err) + } + + e, err = s.ActionEmailByIDInt64(ctx, j.Email) + if err != nil { + return fmt.Errorf("store.ActionEmailByIDInt64: %w", err) + } + + recs, err = s.AllRecipientsForEmailIDInt64(ctx, j.Email) + if err != nil { + return fmt.Errorf("store.AllRecipientsForEmailIDInt64: %w", err) + } + + data, err = email.NewTemplateDataForNewSearchResults(ctx, m.Description, m.Query, e, zeroOrVal(m.NumResults)) + if err != nil { + return fmt.Errorf("email.NewTemplateDataForNewSearchResults: %w", err) + } + for _, rec := range recs { + if rec.NamespaceOrgID != nil { + // TODO (stefan): Send emails to org members. + continue + } + if rec.NamespaceUserID == nil { + return fmt.Errorf("nil recipient") + } + err = email.SendEmailForNewSearchResult(ctx, *rec.NamespaceUserID, data) + if err != nil { + return err + } + } + return nil +} + +// newQueryWithAfterFilter constructs a new query which finds search results +// introduced after the last time we queried. 
+func newQueryWithAfterFilter(q *cm.MonitorQuery) string { + // For q.LatestResult = nil we return a query string without after: filter, which + // effectively triggers actions immediately provided the query returns any + // results. + if q.LatestResult == nil { + return q.QueryString + } + // ATTENTION: This is a stop gap. Add(time.Second) is necessary because currently + // the after: filter is implemented as "at OR after". If we didn't add a second + // here, we would send out emails for every run, always showing at least the last + // result. This means there is non-zero chance that we miss results whenever + // commits have a timestamp equal to the value of :after but arrive after this + // job has run. + afterTime := (*q.LatestResult).UTC().Add(time.Second).Format(time.RFC3339) + return strings.Join([]string{q.QueryString, fmt.Sprintf(`after:"%s"`, afterTime)}, " ") +} + +func latestResultTime(previousLastResult *time.Time, v *gqlSearchResponse, searchErr error) time.Time { + if searchErr != nil || len(v.Data.Search.Results.Results) == 0 { + // Error performing the search, or there were no results. Assume the + // previous info's result time. + if previousLastResult != nil { + return *previousLastResult + } + return time.Now() + } + + // Results are ordered chronologically, so first result is the latest. + t, err := extractTime(v.Data.Search.Results.Results[0]) + if err != nil { + // Error already logged by extractTime. 
+ return time.Now() + } + return *t +} + +func zeroOrVal(i *int) int { + if i == nil { + return 0 + } + return *i +} +*/ \ No newline at end of file diff --git a/enterprise/internal/insights/background/workers_test.go b/enterprise/internal/insights/background/workers_test.go new file mode 100644 index 000000000000..5595227ac4d9 --- /dev/null +++ b/enterprise/internal/insights/background/workers_test.go @@ -0,0 +1,125 @@ +package background + +// TODO + +/* +import ( + "context" + "net/url" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/graph-gophers/graphql-go/relay" + + "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors" + "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors/email" + "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors/resolvers" + "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors/storetest" + "github.com/sourcegraph/sourcegraph/internal/database/dbconn" + "github.com/sourcegraph/sourcegraph/internal/database/dbtesting" +) + +func init() { + dbtesting.DBNameSuffix = "codemonitorsbackground" +} + +func TestActionRunner(t *testing.T) { + if testing.Short() { + t.Skip() + } + + externalURL := "https://www.sourcegraph.com" + testQuery := "test patternType:literal" + + // Mocks. + got := email.TemplateDataNewSearchResults{} + email.MockSendEmailForNewSearchResult = func(ctx context.Context, userID int32, data *email.TemplateDataNewSearchResults) error { + got = *data + return nil + } + email.MockExternalURL = func() *url.URL { + externalURL, _ := url.Parse("https://www.sourcegraph.com") + return externalURL + } + + // Create a TestStore. 
+ var err error + dbtesting.SetupGlobalTestDB(t) + now := time.Now() + clock := func() time.Time { return now } + s := codemonitors.NewStoreWithClock(dbconn.Global, clock) + ctx, ts := storetest.NewTestStoreWithStore(t, s) + + tests := []struct { + name string + numResults int + wantNumResultsText string + }{ + { + name: "5 results", + numResults: 5, + wantNumResultsText: "There were 5 new search results for your query", + }, + { + name: "1 result", + numResults: 1, + wantNumResultsText: "There was 1 new search result for your query", + }, + } + + want := email.TemplateDataNewSearchResults{ + Priority: "New", + SearchURL: externalURL + "/search?q=test+patternType%3Aliteral&utm_source=code-monitoring-email", + Description: "test description", + CodeMonitorURL: externalURL + "/code-monitoring/" + string(relay.MarshalID(resolvers.MonitorKind, 1)) + "?utm_source=code-monitoring-email", + } + + var ( + queryID int64 = 1 + triggerEvent = 1 + record *codemonitors.ActionJob + ) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + //Empty database, preserve schema. + dbtesting.SetupGlobalTestDB(t) + + _, _, _, userCtx := storetest.NewTestUser(ctx, t) + + // Run a complete pipeline from creation of a code monitor to sending of an email. 
+ _, err = ts.InsertTestMonitor(userCtx, t) + if err != nil { + t.Fatal(err) + } + err = ts.EnqueueTriggerQueries(ctx) + if err != nil { + t.Fatal(err) + } + err = ts.LogSearch(ctx, testQuery, tt.numResults, triggerEvent) + if err != nil { + t.Fatal(err) + } + err = ts.EnqueueActionEmailsForQueryIDInt64(ctx, queryID, triggerEvent) + if err != nil { + t.Fatal(err) + } + record, err = ts.ActionJobForIDInt(ctx, 1) + if err != nil { + t.Fatal(err) + } + + a := actionRunner{s} + err = a.Handle(ctx, createDBWorkerStoreForActionJobs(s), record) + if err != nil { + t.Fatal(err) + } + + want.NumberOfResultsWithDetail = tt.wantNumResultsText + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("diff: %s", diff) + } + }) + } +} +*/ \ No newline at end of file diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index 8086549a8a0c..0457a23491b2 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -9,6 +9,7 @@ import ( "strconv" "github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise" + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers" "github.com/sourcegraph/sourcegraph/internal/conf" "github.com/sourcegraph/sourcegraph/internal/database/dbconn" @@ -16,27 +17,39 @@ import ( // Init initializes the given enterpriseServices to include the required resolvers for insights. func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + resolver, err := InitResolver(ctx) + if err != nil { + return err + } + if resolver == nil { + return nil // e.g. code insights is disabled or not supported in this deployment type. + } + enterpriseServices.InsightsResolver = resolver + return nil +} + +// InitResolver connects to and initializes TimescaleDB and returns an initialized resolver. 
+func InitResolver(ctx context.Context) (graphqlbackend.InsightsResolver, error) { if !conf.IsDev(conf.DeployType()) { // Code Insights is not yet deployed to non-dev/testing instances. We don't yet have // TimescaleDB in those deployments. https://github.com/sourcegraph/sourcegraph/issues/17218 - return nil + return nil, nil } if conf.IsDeployTypeSingleDockerContainer(conf.DeployType()) { // Code insights is not supported in single-container Docker demo deployments. - return nil + return nil, nil } if v, _ := strconv.ParseBool(os.Getenv("DISABLE_CODE_INSIGHTS")); v { // Dev option for disabling code insights. Helpful if e.g. you have issues running the // codeinsights-db or don't want to spend resources on it. - return nil + return nil, nil } timescale, err := initializeCodeInsightsDB() if err != nil { - return err + return nil, err } postgres := dbconn.Global - enterpriseServices.InsightsResolver = resolvers.New(timescale, postgres) - return nil + return resolvers.New(timescale, postgres), nil } // initializeCodeInsightsDB connects to and initializes the Code Insights Timescale DB, running From 4d14ef4a6506d535b534365f61ff50c5f11b09cb Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 3 Feb 2021 16:59:40 -0700 Subject: [PATCH 60/78] WIP Signed-off-by: Stephen Gutekanst --- enterprise/cmd/repo-updater/main.go | 2 + .../insights/background/background.go | 41 +++++ .../internal/insights/background/graphql.go | 102 +++++++++++ .../internal/insights/background/metrics.go | 48 +++++ .../insights/background/query_runner.go | 75 ++++++++ .../internal/insights/background/workers.go | 170 ++++++++++++++++++ .../insights/background/workers_test.go | 3 + enterprise/internal/insights/insights.go | 25 ++- .../internal/insights/resolvers/resolver.go | 8 +- 9 files changed, 463 insertions(+), 11 deletions(-) create mode 100644 enterprise/internal/insights/background/background.go create mode 100644 enterprise/internal/insights/background/graphql.go create mode 100644 
enterprise/internal/insights/background/metrics.go create mode 100644 enterprise/internal/insights/background/query_runner.go create mode 100644 enterprise/internal/insights/background/workers.go create mode 100644 enterprise/internal/insights/background/workers_test.go diff --git a/enterprise/cmd/repo-updater/main.go b/enterprise/cmd/repo-updater/main.go index f8718be58799..bce8c07f573d 100644 --- a/enterprise/cmd/repo-updater/main.go +++ b/enterprise/cmd/repo-updater/main.go @@ -16,6 +16,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" codemonitorsBackground "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors/background" edb "github.com/sourcegraph/sourcegraph/enterprise/internal/database" + insightsBackground "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/bac" "github.com/sourcegraph/sourcegraph/internal/actor" ossAuthz "github.com/sourcegraph/sourcegraph/internal/authz" "github.com/sourcegraph/sourcegraph/internal/conf" @@ -48,6 +49,7 @@ func enterpriseInit( ctx := actor.WithInternalActor(context.Background()) codemonitorsBackground.StartBackgroundJobs(ctx, db) + insightsBackground.StartBackgroundJobs(ctx, db) campaigns.InitBackgroundJobs(ctx, db, cf, server) diff --git a/enterprise/internal/insights/background/background.go b/enterprise/internal/insights/background/background.go new file mode 100644 index 000000000000..c7035062f93a --- /dev/null +++ b/enterprise/internal/insights/background/background.go @@ -0,0 +1,41 @@ +package background + +import ( + "context" + "database/sql" + "log" + + "github.com/inconshreveable/log15" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" + "github.com/sourcegraph/sourcegraph/internal/goroutine" + "github.com/sourcegraph/sourcegraph/internal/observation" + "github.com/sourcegraph/sourcegraph/internal/trace" +) + +func StartBackgroundJobs(ctx 
context.Context, db *sql.DB) { + resolver, err := insights.InitResolver(ctx, db) + if err != nil { + // e.g. migration failed, DB unavailable, etc. code insights is non-functional so we do not + // want to continue. + log.Fatal("failed to initialize code insights (set DISABLE_CODE_INSIGHTS=true if needed)", err) + } + + // Create metrics for recording information about background jobs. + observationContext := &observation.Context{ + Logger: log15.Root(), + Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, + Registerer: prometheus.DefaultRegisterer, + } + metrics := newMetrics(observationContext) + + // Start background goroutines for all of our workers. + routines := []goroutine.BackgroundRoutine{ + newInsightEnqueuer(ctx, resolver.Store), + newQueryRunner(ctx, resolver.Store, resolver, metrics), // TODO(slimsag): should not store in TimescaleDB + newQueryRunnerResetter(ctx, resolver.Store, metrics), // TODO(slimsag): should not store in TimescaleDB + } + go goroutine.MonitorBackgroundRoutines(ctx, routines...) 
+} diff --git a/enterprise/internal/insights/background/graphql.go b/enterprise/internal/insights/background/graphql.go new file mode 100644 index 000000000000..4ae31cf2e10f --- /dev/null +++ b/enterprise/internal/insights/background/graphql.go @@ -0,0 +1,102 @@ +package background + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/sourcegraph/sourcegraph/internal/api" + + "golang.org/x/net/context/ctxhttp" + + "github.com/pkg/errors" +) + +type graphQLQuery struct { + Query string `json:"query"` + Variables interface{} `json:"variables"` +} + +const gqlSearchQuery = `query Search( + $query: String!, +) { + search(query: $query, ) { + results { + limitHit + cloning { name } + missing { name } + timedout { name } + matchCount + alert { + title + description + } + } + } +}` + +type gqlSearchVars struct { + Query string `json:"query"` +} + +type gqlSearchResponse struct { + Data struct { + Search struct { + Results struct { + LimitHit bool + Cloning []*api.Repo + Missing []*api.Repo + Timedout []*api.Repo + MatchCount int + Alert struct { + Title string + Description string + } + } + } + } + Errors []interface{} +} + +func search(ctx context.Context, query string) (*gqlSearchResponse, error) { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(graphQLQuery{ + Query: gqlSearchQuery, + Variables: gqlSearchVars{Query: query}, + }) + if err != nil { + return nil, errors.Wrap(err, "Encode") + } + + url, err := gqlURL("Search") + if err != nil { + return nil, errors.Wrap(err, "constructing frontend URL") + } + + resp, err := ctxhttp.Post(ctx, nil, url, "application/json", &buf) + if err != nil { + return nil, errors.Wrap(err, "Post") + } + defer resp.Body.Close() + + var res *gqlSearchResponse + if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { + return nil, errors.Wrap(err, "Decode") + } + if len(res.Errors) > 0 { + return res, fmt.Errorf("graphql: errors: %v", res.Errors) + } + return res, nil +} + +func 
gqlURL(queryName string) (string, error) { + u, err := url.Parse(api.InternalClient.URL) + if err != nil { + return "", err + } + u.Path = "/.internal/graphql" + u.RawQuery = queryName + return u.String(), nil +} diff --git a/enterprise/internal/insights/background/metrics.go b/enterprise/internal/insights/background/metrics.go new file mode 100644 index 000000000000..5abe53feda75 --- /dev/null +++ b/enterprise/internal/insights/background/metrics.go @@ -0,0 +1,48 @@ +package background + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/sourcegraph/sourcegraph/internal/observation" + "github.com/sourcegraph/sourcegraph/internal/workerutil" + "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" +) + +// metrics describes all Prometheus metrics to be recorded during the background execution of +// workers. +type metrics struct { + // workerMetrics records worker operations & number of jobs. + workerMetrics workerutil.WorkerMetrics + + // resetterMetrics records the number of jobs that got reset because workers timed out / took + // too long. 
+ resetterMetrics dbworker.ResetterMetrics +} + +func newMetrics(observationContext *observation.Context) *metrics { + workerResets := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_worker_resets_total", + Help: "The number of times work took too long and was reset for retry later.", + }) + observationContext.Registerer.MustRegister(workerResets) + + workerResetFailures := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_worker_reset_failures_total", + Help: "The number of times work took too long so many times that retries will no longer happen.", + }) + observationContext.Registerer.MustRegister(workerResetFailures) + + workerErrors := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "src_insights_worker_errors_total", + Help: "The number of errors that occurred during a worker job.", + }) + + return &metrics{ + workerMetrics: workerutil.NewMetrics(observationContext, "insights", nil), + resetterMetrics: dbworker.ResetterMetrics{ + RecordResets: workerResets, + RecordResetFailures: workerResetFailures, + Errors: workerErrors, + }, + } +} diff --git a/enterprise/internal/insights/background/query_runner.go b/enterprise/internal/insights/background/query_runner.go new file mode 100644 index 000000000000..5917244bb9e7 --- /dev/null +++ b/enterprise/internal/insights/background/query_runner.go @@ -0,0 +1,75 @@ +package background + +import ( + "context" + + "github.com/inconshreveable/log15" + + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/internal/workerutil" + "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" + dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" +) + +var _ dbworker.Handler = &queryRunner{} + +// queryRunner implements the dbworker.Handler interface by executing search queries and inserting +// insights about them to the insights database. 
+type queryRunner struct { + workerStore *store.Store // TODO(slimsag): should not create in TimescaleDB + insightsStore *store.Store +} + +func (r *queryRunner) Handle(ctx context.Context, workerStore dbworkerstore.Store, record workerutil.Record) (err error) { + defer func() { + if err != nil { + log15.Error("insights.queryRunner.Handle", "error", err) + } + }() + + s := r.workerStore.With(workerStore) + + // TODO(slimsag): get query from work queue similar to below: + var q = struct { + ID int + }{} + newQuery := "errorf" + /* + var q *cm.MonitorQuery + q, err = s.GetQueryByRecordID(ctx, record.RecordID()) + if err != nil { + return err + } + */ + + // Search. + var results *gqlSearchResponse + results, err = search(ctx, newQuery) + if err != nil { + return err + } + var matchCount int + if results != nil { + matchCount = results.Data.Search.Results.MatchCount + } + // TODO(slimsag): record result count to insights DB + + // TODO(slimsag): implement equivilent? + _ = s + _ = matchCount + _ = q + /* + // Log next_run and latest_result to table cm_queries. + newLatestResult := latestResultTime(q.LatestResult, results, err) + err = s.SetTriggerQueryNextRun(ctx, q.Id, s.Clock()().Add(5*time.Minute), newLatestResult.UTC()) + if err != nil { + return err + } + // Log the actual query we ran and whether we got any new results. 
+ err = s.LogSearch(ctx, newQuery, numResults, record.RecordID()) + if err != nil { + return fmt.Errorf("LogSearch: %w", err) + } + */ + return nil +} diff --git a/enterprise/internal/insights/background/workers.go b/enterprise/internal/insights/background/workers.go new file mode 100644 index 000000000000..5fe52133f6a7 --- /dev/null +++ b/enterprise/internal/insights/background/workers.go @@ -0,0 +1,170 @@ +package background + +import ( + "context" + "database/sql" + "time" + + "github.com/keegancsmith/sqlf" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/internal/database/basestore" + "github.com/sourcegraph/sourcegraph/internal/goroutine" + "github.com/sourcegraph/sourcegraph/internal/workerutil" + "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" + dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" +) + +// newInsightEnqueuer returns a background goroutine which will periodically find all of the search +// and webhook insights across all user settings, and enqueue work for the query runner and webhook +// runner workers to perform. +func newInsightEnqueuer(ctx context.Context, store *store.Store) goroutine.BackgroundRoutine { + // TODO: 1 minute may be too slow? hmm + return goroutine.NewPeriodicGoroutine(ctx, 1*time.Minute, goroutine.NewHandlerWithErrorMessage( + "insights_enqueuer", + func(ctx context.Context) error { + // TODO: needs metrics + // TODO: similar to EnqueueTriggerQueries, actually enqueue work + return nil + }, + )) +} + +// newQueryRunner returns a worker that will execute search queries and insert information about +// the results into the code insights database. 
+func newQueryRunner(ctx context.Context, insightsStore *store.Store, resolver graphqlbackend.InsightsResolver, metrics *metrics) *workerutil.Worker { + store := createDBWorkerStoreForInsightsJobs(insightsStore) // TODO(slimsag): should not create in TimescaleDB + options := workerutil.WorkerOptions{ + Name: "insights_query_runner_worker", + NumHandlers: 1, + Interval: 5 * time.Second, + Metrics: metrics.workerMetrics, + } + worker := dbworker.NewWorker(ctx, store, &queryRunner{ + workerStore: insightsStore, // TODO(slimsag): should not create in TimescaleDB + insightsStore: insightsStore, + }, options) + return worker +} + +// newQueryRunnerResetter returns a worker that will reset pending query runner jobs if they take +// too long to complete. +func newQueryRunnerResetter(ctx context.Context, s *store.Store, metrics *metrics) *dbworker.Resetter { + store := createDBWorkerStoreForInsightsJobs(s) // TODO(slimsag): should not create in TimescaleDB + options := dbworker.ResetterOptions{ + Name: "code_insights_trigger_jobs_worker_resetter", + Interval: 1 * time.Minute, + Metrics: metrics.resetterMetrics, + } + return dbworker.NewResetter(store, options) +} + +func createDBWorkerStoreForInsightsJobs(s *store.Store) dbworkerstore.Store { + return dbworkerstore.New(s.Handle(), dbworkerstore.Options{ + Name: "insights_trigger_jobs_worker_store", + TableName: "insights_trigger_jobs", + // TODO(slimsag): table names + ColumnExpressions: InsightsJobsColumns, + Scan: ScanInsightsJobs, + + // We will let a search query or webhook run for up to 60s. After that, it times out and + // retries in 10s. If 3 timeouts occur, it is not retried. + StalledMaxAge: 60 * time.Second, + RetryAfter: 10 * time.Second, + MaxNumRetries: 3, + OrderByExpression: sqlf.Sprintf("id"), + }) +} + +// TODO(slimsag): move to a insights/dbworkerstore package? + +type InsightsJobs struct { + // TODO(slimsag): all these columns are wrong. 
+ Id int + Query int64 + + // The query we ran including after: filter. + QueryString *string + + // Whether we got any results. + Results *bool + NumResults *int + + // Fields demanded for any dbworker. + State string + FailureMessage *string + StartedAt *time.Time + FinishedAt *time.Time + ProcessAfter *time.Time + NumResets int32 + NumFailures int32 + LogContents *string +} + +func (r *InsightsJobs) RecordID() int { + return r.Id +} + +func ScanInsightsJobs(rows *sql.Rows, err error) (workerutil.Record, bool, error) { + records, err := scanInsightsJobs(rows, err) + if err != nil { + return &InsightsJobs{}, false, err + } + return records[0], true, nil +} + +func scanInsightsJobs(rows *sql.Rows, err error) ([]*InsightsJobs, error) { + if err != nil { + return nil, err + } + defer func() { err = basestore.CloseRows(rows, err) }() + var ms []*InsightsJobs + for rows.Next() { + m := &InsightsJobs{} + if err := rows.Scan( + // TODO(slimsag): all these columns are wrong. + &m.Id, + &m.Query, + &m.QueryString, + &m.Results, + &m.NumResults, + &m.State, + &m.FailureMessage, + &m.StartedAt, + &m.FinishedAt, + &m.ProcessAfter, + &m.NumResets, + &m.NumFailures, + &m.LogContents, + ); err != nil { + return nil, err + } + ms = append(ms, m) + } + if err != nil { + return nil, err + } + // Rows.Err will report the last error encountered by Rows.Scan. + if err := rows.Err(); err != nil { + return nil, err + } + return ms, nil +} + +var InsightsJobsColumns = []*sqlf.Query{ + // TODO(slimsag): all these columns are wrong. 
+ sqlf.Sprintf("cm_trigger_jobs.id"), + sqlf.Sprintf("cm_trigger_jobs.query"), + sqlf.Sprintf("cm_trigger_jobs.query_string"), + sqlf.Sprintf("cm_trigger_jobs.results"), + sqlf.Sprintf("cm_trigger_jobs.num_results"), + sqlf.Sprintf("cm_trigger_jobs.state"), + sqlf.Sprintf("cm_trigger_jobs.failure_message"), + sqlf.Sprintf("cm_trigger_jobs.started_at"), + sqlf.Sprintf("cm_trigger_jobs.finished_at"), + sqlf.Sprintf("cm_trigger_jobs.process_after"), + sqlf.Sprintf("cm_trigger_jobs.num_resets"), + sqlf.Sprintf("cm_trigger_jobs.num_failures"), + sqlf.Sprintf("cm_trigger_jobs.log_contents"), +} diff --git a/enterprise/internal/insights/background/workers_test.go b/enterprise/internal/insights/background/workers_test.go new file mode 100644 index 000000000000..2f7b889258cc --- /dev/null +++ b/enterprise/internal/insights/background/workers_test.go @@ -0,0 +1,3 @@ +package background + +// TODO(slimsag) diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index 8086549a8a0c..a9ef628d09b6 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -16,27 +16,38 @@ import ( // Init initializes the given enterpriseServices to include the required resolvers for insights. func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { + resolver, err := InitResolver(ctx, dbconn.Global) + if err != nil { + return err + } + if resolver == nil { + return nil // e.g. code insights is disabled or not supported in this deployment type. + } + enterpriseServices.InsightsResolver = resolver + return nil +} + +// InitResolver connects to and initializes TimescaleDB and returns an initialized resolver. +func InitResolver(ctx context.Context, postgresAppDB *sql.DB) (*resolvers.Resolver, error) { if !conf.IsDev(conf.DeployType()) { // Code Insights is not yet deployed to non-dev/testing instances. We don't yet have // TimescaleDB in those deployments. 
https://github.com/sourcegraph/sourcegraph/issues/17218 - return nil + return nil, nil } if conf.IsDeployTypeSingleDockerContainer(conf.DeployType()) { // Code insights is not supported in single-container Docker demo deployments. - return nil + return nil, nil } if v, _ := strconv.ParseBool(os.Getenv("DISABLE_CODE_INSIGHTS")); v { // Dev option for disabling code insights. Helpful if e.g. you have issues running the // codeinsights-db or don't want to spend resources on it. - return nil + return nil, nil } timescale, err := initializeCodeInsightsDB() if err != nil { - return err + return nil, err } - postgres := dbconn.Global - enterpriseServices.InsightsResolver = resolvers.New(timescale, postgres) - return nil + return resolvers.New(timescale, postgresAppDB), nil } // initializeCodeInsightsDB connects to and initializes the Code Insights Timescale DB, running diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index f4660cba316d..04cc97843322 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -5,21 +5,21 @@ import ( "errors" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" - "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/store" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/database/dbutil" ) // Resolver is the GraphQL resolver of all things related to Insights. type Resolver struct { - store *store.Store + Store *store.Store settingStore *database.SettingStore } // New returns a new Resolver whose store uses the given Timescale and Postgres DBs. 
-func New(timescale, postgres dbutil.DB) graphqlbackend.InsightsResolver { +func New(timescale, postgres dbutil.DB) *Resolver { return &Resolver{ - store: store.New(timescale), + Store: store.New(timescale), settingStore: database.Settings(postgres), } } From 5d2ab43fadd6d7ac0fb0a5cc0b139484ce504db0 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 8 Feb 2021 13:32:40 -0700 Subject: [PATCH 61/78] insights: add GraphQL resolvers + extensive tests This change adds the GraphQL resolvers + extensive tests for them. I used my [autogold](github.com/hexops/autogold) package for some of the tests as I've seen some other folks here use it already (otherwise I wouldn't out of bias.) The resolvers here pull information from the global settings, with user/org settings (which requires some auth handling) deferred to later. The resolvers are not yet hooked up to the DB store / TimescaleDB for pulling information, that will come next. Fixes #17221 Helps #17218 Signed-off-by: Stephen Gutekanst --- .../resolvers/insight_connection_resolver.go | 67 +++++- .../insight_connection_resolver_test.go | 196 ++++++++++++++++++ .../insights/resolvers/insight_resolver.go | 22 -- .../resolvers/insight_series_resolver.go | 19 +- .../resolvers/insight_series_resolver_test.go | 70 +++++++ .../insights/resolvers/resolver_test.go | 2 +- 6 files changed, 345 insertions(+), 31 deletions(-) create mode 100644 enterprise/internal/insights/resolvers/insight_connection_resolver_test.go delete mode 100644 enterprise/internal/insights/resolvers/insight_resolver.go create mode 100644 enterprise/internal/insights/resolvers/insight_series_resolver_test.go diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver.go b/enterprise/internal/insights/resolvers/insight_connection_resolver.go index c1719b9b37e7..32da5f37b4e2 100644 --- a/enterprise/internal/insights/resolvers/insight_connection_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver.go @@ 
-2,14 +2,16 @@ package resolvers import ( "context" - "errors" "strconv" "sync" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/database" + "github.com/sourcegraph/sourcegraph/internal/jsonc" + "github.com/sourcegraph/sourcegraph/schema" ) var _ graphqlbackend.InsightConnectionResolver = &insightConnectionResolver{} @@ -18,9 +20,13 @@ type insightConnectionResolver struct { store *store.Store settingStore *database.SettingStore + // We use our own mock here because database.Mocks.Settings.GetLatest is a global which means + // we could not run our tests in parallel. + mocksSettingsGetLatest func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) + // cache results because they are used by multiple fields once sync.Once - insights []graphqlbackend.InsightResolver + insights []*schema.Insight next int64 err error } @@ -38,7 +44,8 @@ func (r *insightConnectionResolver) Nodes(ctx context.Context) ([]graphqlbackend } func (r *insightConnectionResolver) TotalCount(ctx context.Context) (int32, error) { - return 0, errors.New("not yet implemented") + insights, _, err := r.compute(ctx) + return int32(len(insights)), err } func (r *insightConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil.PageInfo, error) { @@ -52,10 +59,58 @@ func (r *insightConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil. 
return graphqlutil.HasNextPage(false), nil } -func (r *insightConnectionResolver) compute(ctx context.Context) ([]graphqlbackend.InsightResolver, int64, error) { +func (r *insightConnectionResolver) compute(ctx context.Context) ([]*schema.Insight, int64, error) { r.once.Do(func() { - // TODO: populate r.insights, r.next, r.err - // TODO: locate insights from user, org, global settings using r.settingStore.ListAll() + settingsGetLatest := r.settingStore.GetLatest + if r.mocksSettingsGetLatest != nil { + settingsGetLatest = r.mocksSettingsGetLatest + } + + // Get latest Global user settings. + // + // FUTURE: include user/org settings. + subject := api.SettingsSubject{Site: true} + globalSettingsRaw, err := settingsGetLatest(ctx, subject) + if err != nil { + r.err = err + return + } + globalSettings, err := parseUserSettings(globalSettingsRaw) + r.insights = globalSettings.Insights }) return r.insights, r.next, r.err } + +func parseUserSettings(settings *api.Settings) (*schema.Settings, error) { + if settings == nil { + // Settings have never been saved for this subject; equivalent to `{}`. + return &schema.Settings{}, nil + } + var v schema.Settings + if err := jsonc.Unmarshal(settings.Contents, &v); err != nil { + return nil, err + } + return &v, nil +} + +// InsightResolver is also defined here as it is covered by the same tests. 
+ +var _ graphqlbackend.InsightResolver = &insightResolver{} + +type insightResolver struct { + store *store.Store + insight *schema.Insight +} + +func (r *insightResolver) Title() string { return r.insight.Title } + +func (r *insightResolver) Description() string { return r.insight.Description } + +func (r *insightResolver) Series() []graphqlbackend.InsightSeriesResolver { + series := r.insight.Series + resolvers := make([]graphqlbackend.InsightSeriesResolver, 0, len(series)) + for _, series := range series { + resolvers = append(resolvers, &insightSeriesResolver{store: r.store, series: series}) + } + return resolvers +} diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go b/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go new file mode 100644 index 000000000000..f5d281534e92 --- /dev/null +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go @@ -0,0 +1,196 @@ +package resolvers + +import ( + "context" + "testing" + "time" + + "github.com/hexops/autogold" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/backend" + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" + insightsdbtesting "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/dbtesting" + "github.com/sourcegraph/sourcegraph/internal/api" + "github.com/sourcegraph/sourcegraph/internal/database/dbtesting" + "github.com/sourcegraph/sourcegraph/schema" +) + +// Note: You can `go test ./resolvers -update` to update the expected `want` values in these tests. +// See https://github.com/hexops/autogold for more information. 
+ +var testRealGlobalSettings = &api.Settings{ID: 1, Contents: `{ + "insights": [ + { + "title": "fmt usage", + "description": "fmt.Errorf/fmt.Printf usage", + "series": [ + { + "label": "fmt.Errorf", + "search": "errorf", + }, + { + "label": "printf", + "search": "fmt.Printf", + } + ] + }, + { + "title": "gitserver usage", + "description": "gitserver exec & close usage", + "series": [ + { + "label": "exec", + "search": "gitserver.Exec", + }, + { + "label": "close", + "search": "gitserver.Close", + } + ] + } + ] + } +`} + +// TestResolver_InsightConnection tests that the InsightConnection GraphQL resolver works. +func TestResolver_InsightConnection(t *testing.T) { + if testing.Short() { + t.Skip() + } + //t.Parallel() // TODO: dbtesting.GetDB is not parallel-safe, yuck. + + testSetup := func(t *testing.T) (context.Context, graphqlbackend.InsightConnectionResolver) { + // Setup the GraphQL resolver. + ctx := backend.WithAuthzBypass(context.Background()) + now := time.Now().UTC().Truncate(time.Microsecond) + clock := func() time.Time { return now } + timescale, cleanup := insightsdbtesting.TimescaleDB(t) + defer cleanup() + postgres := dbtesting.GetDB(t) + resolver := newWithClock(timescale, postgres, clock) + + // Create the insights connection resolver. + conn, err := resolver.Insights(ctx) + if err != nil { + t.Fatal(err) + } + conn.(*insightConnectionResolver).mocksSettingsGetLatest = func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) { + if !subject.Site { // TODO: future: site is an extremely poor name for "global settings", we should change this. 
+ t.Fatal("expected only to request settings from global user settings") + } + return testRealGlobalSettings, nil + } + return ctx, conn + } + + t.Run("TotalCount", func(t *testing.T) { + ctx, conn := testSetup(t) + totalCount, err := conn.TotalCount(ctx) + if err != nil { + t.Fatal(err) + } + if totalCount != 2 { + t.Fatal("incorrect length") + } + }) + + t.Run("PageInfo", func(t *testing.T) { + // TODO: future: our pagination support is non-existent. Currently we just return all + // insights, regardless of how many you ask for. + ctx, conn := testSetup(t) + gotPageInfo, err := conn.PageInfo(ctx) + if err != nil { + t.Fatal(err) + } + autogold.Want("PageInfo", &graphqlutil.PageInfo{}).Equal(t, gotPageInfo) + }) + + t.Run("Nodes", func(t *testing.T) { + ctx, conn := testSetup(t) + nodes, err := conn.Nodes(ctx) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatal("incorrect length") + } + autogold.Want("first insight", map[string]interface{}{"description": "fmt.Errorf/fmt.Printf usage", "title": "fmt usage"}).Equal(t, map[string]interface{}{ + "title": nodes[0].Title(), + "description": nodes[0].Description(), + }) + // TODO(slimsag): put series length into map (autogold bug, omits the field for some reason?) 
+ autogold.Want("first insight: series length", int(2)).Equal(t, len(nodes[0].Series())) + + autogold.Want("second insight", map[string]interface{}{"description": "gitserver exec & close usage", "title": "gitserver usage"}).Equal(t, map[string]interface{}{ + "title": nodes[1].Title(), + "description": nodes[1].Description(), + }) + autogold.Want("second insight: series length", int(2)).Equal(t, len(nodes[1].Series())) + }) +} + +func Test_parseUserSettings(t *testing.T) { + tests := []struct { + name string + input *api.Settings + want autogold.Value + }{ + { + name: "nil", + input: nil, + want: autogold.Want("nil", [2]interface{}{&schema.Settings{}, nil}), + }, + { + name: "empty", + input: &api.Settings{ + Contents: "{}", + }, + want: autogold.Want("empty", [2]interface{}{&schema.Settings{}, nil}), + }, + { + name: "real", + input: testRealGlobalSettings, + want: autogold.Want("real", [2]interface{}{ + &schema.Settings{Insights: []*schema.Insight{ + &schema.Insight{ + Description: "fmt.Errorf/fmt.Printf usage", + Series: []*schema.InsightSeries{ + &schema.InsightSeries{ + Label: "fmt.Errorf", + Search: "errorf", + }, + &schema.InsightSeries{ + Label: "printf", + Search: "fmt.Printf", + }, + }, + Title: "fmt usage", + }, + &schema.Insight{ + Description: "gitserver exec & close usage", + Series: []*schema.InsightSeries{ + &schema.InsightSeries{ + Label: "exec", + Search: "gitserver.Exec", + }, + &schema.InsightSeries{ + Label: "close", + Search: "gitserver.Close", + }, + }, + Title: "gitserver usage", + }, + }}, + nil, + }), + }, + } + for _, tst := range tests { + t.Run(tst.name, func(t *testing.T) { + got, err := parseUserSettings(tst.input) + tst.want.Equal(t, [2]interface{}{got, err}) + }) + } + +} diff --git a/enterprise/internal/insights/resolvers/insight_resolver.go b/enterprise/internal/insights/resolvers/insight_resolver.go deleted file mode 100644 index 4cd06283f38a..000000000000 --- a/enterprise/internal/insights/resolvers/insight_resolver.go +++ 
/dev/null @@ -1,22 +0,0 @@ -package resolvers - -import ( - "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" - "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" -) - -var _ graphqlbackend.InsightResolver = &insightResolver{} - -type insightResolver struct { - store *store.Store - insight graphqlbackend.InsightResolver -} - -func (r *insightResolver) Title() string { return r.insight.Title() } - -func (r *insightResolver) Description() string { return r.insight.Description() } - -func (r *insightResolver) Series() []graphqlbackend.InsightSeriesResolver { - // TODO: locate time series from r.store DB. - return nil -} diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver.go b/enterprise/internal/insights/resolvers/insight_series_resolver.go index f5531900f36a..034d80fa43ee 100644 --- a/enterprise/internal/insights/resolvers/insight_series_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_series_resolver.go @@ -3,18 +3,33 @@ package resolvers import ( "context" "errors" + "time" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" + "github.com/sourcegraph/sourcegraph/schema" ) var _ graphqlbackend.InsightSeriesResolver = &insightSeriesResolver{} type insightSeriesResolver struct { - label string + store *store.Store + series *schema.InsightSeries } -func (r *insightSeriesResolver) Label() string { return r.label } +func (r *insightSeriesResolver) Label() string { return r.series.Label } func (r *insightSeriesResolver) Points(ctx context.Context, args *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { + // TODO(slimsag): future: use r.store to query insights data points return nil, errors.New("not yet implemented") } + +var _ graphqlbackend.InsightsDataPointResolver = insightsDataPointResolver{} + +type insightsDataPointResolver struct{} + +func (i 
insightsDataPointResolver) DateTime() graphqlbackend.DateTime { + return graphqlbackend.DateTime{Time: time.Now()} // TODO(slimsag): future: use actual data. +} + +func (i insightsDataPointResolver) Value() float64 { return 0 } // TODO(slimsag): future: use actual data. diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go new file mode 100644 index 000000000000..a444b873c34e --- /dev/null +++ b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go @@ -0,0 +1,70 @@ +package resolvers + +import ( + "context" + "testing" + "time" + + "github.com/hexops/autogold" + + "github.com/sourcegraph/sourcegraph/cmd/frontend/backend" + "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" + insightsdbtesting "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/dbtesting" + "github.com/sourcegraph/sourcegraph/internal/api" + "github.com/sourcegraph/sourcegraph/internal/database/dbtesting" +) + +// TestResolver_InsightSeries tests that the InsightSeries GraphQL resolver works. +func TestResolver_InsightSeries(t *testing.T) { + if testing.Short() { + t.Skip() + } + t.Parallel() + + testSetup := func(t *testing.T) (context.Context, [][]graphqlbackend.InsightSeriesResolver) { + // Setup the GraphQL resolver. + ctx := backend.WithAuthzBypass(context.Background()) + now := time.Now().UTC().Truncate(time.Microsecond) + clock := func() time.Time { return now } + timescale, cleanup := insightsdbtesting.TimescaleDB(t) + defer cleanup() + postgres := dbtesting.GetDB(t) + resolver := newWithClock(timescale, postgres, clock) + + // Create the insights connection resolver and query series. 
+ conn, err := resolver.Insights(ctx) + if err != nil { + t.Fatal(err) + } + conn.(*insightConnectionResolver).mocksSettingsGetLatest = func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) { + if !subject.Site { // TODO: future: site is an extremely poor name for "global settings", we should change this. + t.Fatal("expected only to request settings from global user settings") + } + return testRealGlobalSettings, nil + } + nodes, err := conn.Nodes(ctx) + if err != nil { + t.Fatal(err) + } + var series [][]graphqlbackend.InsightSeriesResolver + for _, node := range nodes { + series = append(series, node.Series()) + } + return ctx, series + } + + t.Run("metadata", func(t *testing.T) { + _, insights := testSetup(t) + autogold.Want("insights length", int(2)).Equal(t, len(insights)) + + autogold.Want("insights[0].length", int(2)).Equal(t, len(insights[0])) + autogold.Want("insights[0].series[0].Label", "fmt.Errorf").Equal(t, insights[0][0].Label()) + autogold.Want("insights[0].series[1].Label", "printf").Equal(t, insights[0][1].Label()) + + autogold.Want("insights[1].length", int(2)).Equal(t, len(insights[1])) + autogold.Want("insights[1].series[0].Label", "exec").Equal(t, insights[1][0].Label()) + autogold.Want("insights[1].series[1].Label", "close").Equal(t, insights[1][1].Label()) + }) +} + +// TODO(slimsag) diff --git a/enterprise/internal/insights/resolvers/resolver_test.go b/enterprise/internal/insights/resolvers/resolver_test.go index 5a1ed05a1240..95d542917e8c 100644 --- a/enterprise/internal/insights/resolvers/resolver_test.go +++ b/enterprise/internal/insights/resolvers/resolver_test.go @@ -20,7 +20,7 @@ func TestResolver_Insights(t *testing.T) { if testing.Short() { t.Skip() } - t.Parallel() + //t.Parallel() // TODO: dbtesting.GetDB is not parallel-safe, yuck. 
ctx := backend.WithAuthzBypass(context.Background()) now := time.Now().UTC().Truncate(time.Microsecond) From 501614add0a466af9886ea835125858718cacfaa Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 8 Feb 2021 13:43:37 -0700 Subject: [PATCH 62/78] gofmt Signed-off-by: Stephen Gutekanst --- .../resolvers/insight_connection_resolver_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go b/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go index f5d281534e92..81b06477474d 100644 --- a/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go @@ -153,28 +153,28 @@ func Test_parseUserSettings(t *testing.T) { input: testRealGlobalSettings, want: autogold.Want("real", [2]interface{}{ &schema.Settings{Insights: []*schema.Insight{ - &schema.Insight{ + { Description: "fmt.Errorf/fmt.Printf usage", Series: []*schema.InsightSeries{ - &schema.InsightSeries{ + { Label: "fmt.Errorf", Search: "errorf", }, - &schema.InsightSeries{ + { Label: "printf", Search: "fmt.Printf", }, }, Title: "fmt usage", }, - &schema.Insight{ + { Description: "gitserver exec & close usage", Series: []*schema.InsightSeries{ - &schema.InsightSeries{ + { Label: "exec", Search: "gitserver.Exec", }, - &schema.InsightSeries{ + { Label: "close", Search: "gitserver.Close", }, From e1819fb3c5a652381b2679c037d2466ed4c842c2 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 8 Feb 2021 14:10:37 -0700 Subject: [PATCH 63/78] insights: add initial DB schema This change adds the initial code insights DB schema. 
It is generally well-thought-out and I did a fair amount of analysis on whether or not to use more involved TimescaleDB features like [compression](https://github.com/sourcegraph/sourcegraph/pull/17227#issuecomment-773704129) instead of / in addition to storing repo names as IDs, for example. I am sure we will change the schema as we learn more, but this generally seems sufficient/appropriate for now. Helps #17218 Signed-off-by: Stephen Gutekanst --- .../1000000001_initial_schema.down.sql | 16 +++ .../1000000001_initial_schema.up.sql | 109 ++++++++++++++++++ migrations/codeinsights/bindata.go | 54 ++++++++- 3 files changed, 175 insertions(+), 4 deletions(-) create mode 100644 migrations/codeinsights/1000000001_initial_schema.down.sql create mode 100644 migrations/codeinsights/1000000001_initial_schema.up.sql diff --git a/migrations/codeinsights/1000000001_initial_schema.down.sql b/migrations/codeinsights/1000000001_initial_schema.down.sql new file mode 100644 index 000000000000..a86646b1cf4b --- /dev/null +++ b/migrations/codeinsights/1000000001_initial_schema.down.sql @@ -0,0 +1,16 @@ +BEGIN; + +DROP INDEX IF EXISTS series_points_repo_id_btree; +DROP INDEX IF EXISTS series_points_repo_name_id_btree; +DROP INDEX IF EXISTS series_points_original_repo_name_id_btree; +DROP TABLE IF EXISTS series_points; + +DROP INDEX IF EXISTS repo_names_name_unique_idx; +DROP INDEX IF EXISTS repo_names_name_trgm; +DROP TABLE IF EXISTS repo_names; + +DROP INDEX IF EXISTS metadata_metadata_unique_idx; +DROP INDEX IF EXISTS metadata_metadata_gin; +DROP TABLE IF EXISTS metadata; + +COMMIT; diff --git a/migrations/codeinsights/1000000001_initial_schema.up.sql b/migrations/codeinsights/1000000001_initial_schema.up.sql new file mode 100644 index 000000000000..5cd0dde4229b --- /dev/null +++ b/migrations/codeinsights/1000000001_initial_schema.up.sql @@ -0,0 +1,109 @@ +BEGIN; + +CREATE EXTENSION IF NOT EXISTS timescaledb; +CREATE EXTENSION IF NOT EXISTS pg_trgm; +CREATE EXTENSION IF NOT EXISTS 
citext;
+
+-- Records repository names, both historical and present, using a unique repository _name_ ID
+-- (unrelated to the repository ID.)
+CREATE TABLE repo_names (
+    -- The repository _name_ ID.
+    id bigserial NOT NULL PRIMARY KEY,
+
+    -- The name, trigram-indexed for fast e.g. regexp filtering.
+    name citext NOT NULL,
+
+    CONSTRAINT check_name_nonempty CHECK ((name OPERATOR(<>) ''::citext))
+);
+
+-- Enforce that names are unique.
+CREATE UNIQUE INDEX repo_names_name_unique_idx ON repo_names(name);
+
+-- Create trigram indexes for repository name filtering based on e.g. regexps.
+CREATE INDEX repo_names_name_trgm ON repo_names USING gin (lower((name)::text) gin_trgm_ops);
+
+
+-- Records arbitrary metadata about events. Stored in a separate table as it is often repeated
+-- for multiple events.
+CREATE TABLE metadata (
+    -- The metadata ID.
+    id bigserial NOT NULL PRIMARY KEY,
+
+    -- Metadata about this event, this can be any arbitrary JSON metadata which will be returned
+    -- when querying events, and can be filtered on and grouped using jsonb operators ?, ?&, ?|,
+    -- and @>. This should be small data only, primary use case is small lists such as:
+    --
+    -- {"java_versions": [...]}
+    -- {"languages": [...]}
+    -- {"pull_requests": [...]}
+    -- {"annotations": [...]}
+    --
+    metadata jsonb NOT NULL
+);
+
+-- Enforce that metadata is unique.
+CREATE UNIQUE INDEX metadata_metadata_unique_idx ON metadata(metadata);
+
+-- Index metadata to optimize WHERE clauses with jsonb ?, ?&, ?|, and @> operators.
+CREATE INDEX metadata_metadata_gin ON metadata USING GIN (metadata);
+
+-- Records events over time associated with a repository (or none, i.e. globally) where a single
+-- numerical value is going arbitrarily up and down.
+--
+-- Repository association is based on both repository ID and name. The ID can be used to refer to
+-- a specific repository, or lookup the current name of a repository after it has been e.g. renamed.
+-- The name can be used to refer to the name of the repository at the time of the event's creation, +-- for example to trace the change in a gauge back to a repository being renamed. +CREATE TABLE series_points ( + -- A unique identifier for the series of data being recorded. This is not an ID from another + -- table, but rather just a unique identifier. + series_id integer, + + -- The timestamp of the recorded event. + time TIMESTAMPTZ NOT NULL, + + -- The floating point value at the time of the event. + value double precision NOT NULL, + + -- Associated metadata for this event, if any. + metadata_id integer, + + -- The repository ID (from the main application DB) at the time the event was created. Note + -- that the repository may no longer exist / be valid at query time, however. + -- + -- null if the event was not for a single repository (i.e. a global gauge). + repo_id integer, + + -- The most recently known name for the repository, updated periodically to account for e.g. + -- repository renames. If the repository was deleted, this is still the most recently known + -- name. + -- + -- null if the event was not for a single repository (i.e. a global gauge). + repo_name_id integer, + + -- The repository name as it was known at the time the event was created. It may have been renamed + -- since. + original_repo_name_id integer, + + -- Ensure if one repo association field is specified, all are. + CONSTRAINT check_repo_fields_specifity CHECK ( + ((repo_id IS NULL) AND (repo_name_id IS NULL) AND (original_repo_name_id IS NULL)) + OR + ((repo_id IS NOT NULL) AND (repo_name_id IS NOT NULL) AND (original_repo_name_id IS NOT NULL)) + ), + + FOREIGN KEY (metadata_id) REFERENCES metadata(id) ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (repo_name_id) REFERENCES repo_names(id) ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (original_repo_name_id) REFERENCES repo_names(id) ON DELETE CASCADE DEFERRABLE +); + +-- Create hypertable, partitioning events by time. 
+-- See https://docs.timescale.com/latest/using-timescaledb/hypertables +SELECT create_hypertable('series_points', 'time'); + +-- Create btree indexes for repository filtering. +CREATE INDEX series_points_repo_id_btree ON series_points USING btree (repo_id); +CREATE INDEX series_points_repo_name_id_btree ON series_points USING btree (repo_name_id); +CREATE INDEX series_points_original_repo_name_id_btree ON series_points USING btree (original_repo_name_id); + +COMMIT; diff --git a/migrations/codeinsights/bindata.go b/migrations/codeinsights/bindata.go index 214fc770fa2f..28dceb89f22f 100644 --- a/migrations/codeinsights/bindata.go +++ b/migrations/codeinsights/bindata.go @@ -2,6 +2,8 @@ // sources: // 1000000000_init.down.sql (19B) // 1000000000_init.up.sql (19B) +// 1000000001_initial_schema.down.sql (475B) +// 1000000001_initial_schema.up.sql (4.608kB) package migrations @@ -110,6 +112,46 @@ func _1000000000_initUpSql() (*asset, error) { return a, nil } +var __1000000001_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x8f\x41\x0a\xc2\x30\x10\x45\xf7\x39\x45\xee\x91\x95\xb5\x51\x02\xb6\x15\x9b\x45\x77\x21\xd2\x21\x0c\x98\xa4\x26\x29\x78\x7c\x11\xb1\x82\x32\x68\x77\xb3\xf8\xef\xff\x37\x95\xdc\xab\x56\x30\x56\x9f\xba\x23\x57\x6d\x2d\x07\xae\x76\x5c\x0e\xaa\xd7\x3d\xcf\x90\x10\xb2\x99\x22\x86\x92\x4d\x82\x29\x1a\x1c\xcd\xb9\x24\x00\xf1\x37\x11\xac\x87\x75\x58\x4c\xe8\x30\xd8\x0b\xcd\xeb\x4d\x75\x90\x14\x4f\x7d\xb3\xb4\xe5\x67\xe7\x1c\xf0\x3a\x3f\xaa\x6f\x84\xd5\x27\x50\x92\xf3\x84\xc0\x3b\x4a\xad\x7b\x28\x76\xb4\xc5\x9a\xe5\xf8\xb9\xff\x8d\x38\x0c\x84\xc0\x2b\x22\x18\xdb\x76\x4d\xa3\xb4\x60\xf7\x00\x00\x00\xff\xff\xe1\xb5\x30\xdc\xdb\x01\x00\x00") + +func _1000000001_initial_schemaDownSqlBytes() ([]byte, error) { + return bindataRead( + __1000000001_initial_schemaDownSql, + "1000000001_initial_schema.down.sql", + ) +} + +func _1000000001_initial_schemaDownSql() (*asset, error) { + bytes, err := _1000000001_initial_schemaDownSqlBytes() + if 
err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1000000001_initial_schema.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2c, 0xd2, 0x4, 0x8c, 0x45, 0xac, 0xd5, 0xee, 0x57, 0x8f, 0xba, 0x64, 0x82, 0x72, 0x50, 0x52, 0x95, 0x91, 0x23, 0xf5, 0x42, 0xf6, 0x3, 0x65, 0xb, 0x7, 0x31, 0xdc, 0xe5, 0x12, 0x12, 0x91}} + return a, nil +} + +var __1000000001_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x58\x5b\x73\xdb\xba\x11\x7e\xd7\xaf\xd8\x39\x0f\xb5\x34\x23\xd3\xef\x76\xe7\xa4\x8a\xc4\xe4\xb0\xc7\xa6\x52\x49\x99\xe6\xb4\xd3\xd1\x40\xe4\x8a\x44\x02\x02\x0c\x00\x5a\x56\x2f\xff\xbd\xb3\x00\x78\xb3\xe5\x24\x73\xa6\xf5\x8b\x35\xc2\xe2\xdb\x6f\xef\x0b\xbd\x8d\xdf\x27\xe9\xdd\x64\xb2\xdc\xc4\x8b\x5d\x0c\xf1\xa7\x5d\x9c\x6e\x93\x75\x0a\xc9\x3b\x48\xd7\x3b\x88\x3f\x25\xdb\xdd\x16\x2c\xaf\xd0\x64\x4c\x60\x7e\xb8\xfb\x9e\x6c\x5d\xec\xad\x2e\xaa\xef\xca\x65\xdc\xe2\x93\xbd\x9b\x4c\xae\xaf\x61\x83\x99\xd2\xb9\x01\x8d\xb5\x32\xdc\x2a\x7d\x06\xc9\x2a\x34\x73\x38\x28\x5b\x42\xc9\x8d\x55\x9a\x67\x4c\x00\x93\x39\xd4\x1a\x0d\x4a\x3b\x87\xc6\x70\x59\x00\x83\x46\xf2\xaf\x0d\x0e\x6f\xef\xe9\xfa\x1e\x92\x15\xa1\x4f\x1b\xa9\x51\x30\x8b\x39\x58\x05\xb6\x1c\x49\x26\xab\x68\xd6\x52\xdd\x2d\xde\xde\xc7\xee\xd0\xdd\x37\x30\x9d\x00\x00\x5c\x5f\xc3\xae\xbc\x0c\x1f\x39\x01\x9e\xc3\x81\x17\x06\x35\x67\xc2\x99\x98\x7e\xbc\xbf\x87\x0f\x9b\xe4\x61\xb1\xf9\x0d\x7e\x8d\x7f\x9b\x4f\x86\x40\x74\x79\x0e\x56\xf3\x42\xb3\xea\x9a\xcb\x1c\x9f\x30\x87\xa3\xd2\x70\x64\xc6\x02\x46\x45\x04\x1a\x0b\x7c\xaa\xe1\xc8\x85\x45\xcd\x65\xe1\x15\xd1\xcd\xe0\xb8\x4e\x4f\xc0\x5e\xae\xd3\xed\x6e\xb3\x48\xd2\x1d\x64\x25\x66\x5f\x3c\x45\xa9\x24\x56\xb5\x3d\xc3\xf2\x97\x78\xf9\x2b\x4c\xa7\x0e\x61\xfd\x21\xde\x2c\x76\xeb\xcd\xf4\x8f\x3f\xcf\xe0\xea\xea\xf6\xd6\x43\xce\x66\x93\x99\x8f\x47\x2c\x8f\x4a\x67\x08\xb6\x64\xd6\x47\x02\x98\xc6\xe0\xe7\xa8\x75\xd7\xc7\x34\xf9\xcb\xc7\x18\x92\x74\x15\x7f\x1a\x78\xcd\x6b
\xf6\xb2\x7b\x9e\x3f\xc1\x3a\x1d\x9c\x3a\x06\x41\xcd\x52\x23\xb3\xd8\x7a\x02\xbc\x27\x8c\xf3\xc4\xb3\x4c\xe8\x1d\x01\x07\x66\x30\x07\x25\x87\x7e\x32\x1d\xa7\xcb\x64\x28\x21\xc7\x34\xe0\xe3\x36\x49\xdf\x43\xc1\x25\x4c\x85\x3a\xa1\xf6\xae\x99\xdd\xde\x3a\x4f\xd0\x81\xbb\xb5\x57\xb5\x21\xb6\xc3\x2c\x65\xfa\xc0\xad\x66\xfa\x0c\x15\x5a\x96\x33\xcb\x80\x1d\x54\x63\x01\x1f\x51\x5a\x13\xc1\xd6\x2a\x8d\x39\x70\x09\x0c\x0c\xd6\x4c\x3b\x2b\xd9\x41\x20\x30\x03\xdc\x02\x37\xa0\x8e\x16\x25\x11\x22\x17\xe4\x04\x4f\x66\x57\x8d\xb0\xbc\x16\xd8\x42\x8d\x33\xb3\x53\xf7\xbf\xcf\xcb\x87\xb1\x25\xb6\xe4\xc6\x73\x98\xfb\xcf\x19\x93\x70\x40\x60\xf2\x3c\x30\xff\xcf\xdb\x75\xda\x93\x3a\x95\x3c\x2b\xe1\xc4\x85\x20\x49\x8d\xb6\xd1\x12\xf3\x56\xc1\xa9\x44\x09\x5f\x1b\xd4\x67\x0a\xa2\x37\x6f\xee\xca\x39\x40\xfb\x08\xfb\xd0\xd2\xd7\x85\x56\x4d\x8d\x79\x28\xf2\xcf\x46\xc9\x03\xa8\x1a\x35\xb3\x4a\x1b\x78\x33\x87\x37\x7f\x98\xc3\x9b\x7f\xcf\x5b\x05\x74\xe7\x4f\x3f\x47\xb0\x23\xba\xa6\x54\x8d\xc8\x09\xd6\x54\x4c\x08\x70\x04\x95\x14\xe7\x39\xd4\x9a\x57\x44\xbe\x31\x08\x19\x33\x48\xc1\xf0\x42\x82\x1b\x6b\xc0\x34\x59\x09\xcc\xdc\x06\xdc\x16\x1e\xfe\xf5\xd3\x67\xf6\xc8\xf6\x8f\xa8\x0d\x57\xd2\xfc\x74\x0b\x7f\x8f\xa2\xe8\x1f\xff\x19\x08\x08\x26\x8b\x86\x15\x48\x87\xf4\xf7\x42\xa0\x6e\x84\xd8\x6b\xfc\xda\xa0\xb1\x17\x11\x98\x94\xca\x32\x1b\x14\x3c\x43\x70\xff\x3a\x77\x7b\x8f\xb4\x51\xbd\x58\xb9\x9d\x2c\x37\xdf\xac\xdd\x56\x6e\xdf\x7d\x18\x57\x6f\xfb\xf5\xb4\xfd\x10\x94\x25\x54\xb0\xbd\x16\xab\x40\xd5\x96\x57\xfc\x9f\x08\x7f\xfd\x25\xde\xc4\x90\x09\xd6\x18\x34\x70\xe2\xb6\x0c\x84\xfb\xc0\x85\x88\xf5\x41\x7d\x56\xc4\x2f\x59\x51\xb5\x0e\x33\xce\xd7\xf0\xfb\x24\x85\xe7\xcc\xda\x52\xf5\x79\x06\xea\x11\xb5\x1b\x62\xc0\x8c\x51\x19\x77\xb3\xc0\x91\x62\xc3\xf2\x99\x2a\x0d\xd4\x31\xe7\xc0\x23\x8c\xa0\x10\xea\xc0\x84\x38\xcf\x28\x79\x35\x52\x31\x73\x59\x08\x24\x05\xb2\xa9\xd0\x4f\xa4\x47\x26\x1a\x97\x44\x85\x72\xd3\x28\x54\x07\x17\x67\x68\x6a\x67\x63\xae\x4e\x32\x9a\x5c\x5f\x7b\x62\x9d\xb6\x96\x0a\x57\x92\xae\x77\x7d
\xcd\x8d\xbc\xd1\x84\x72\x28\x54\xda\x91\x2b\xf8\x64\xd5\xd6\x4c\x63\xfc\x4c\xd3\x78\x24\x03\x15\x69\x60\x60\x6a\xcc\xf8\x91\x67\x03\x90\x39\x28\x0d\x42\xa9\x2f\x4d\xed\x06\x60\xd6\x68\x8d\xd2\xf7\x76\x50\xc7\xb1\x1b\xd8\xd1\xa2\xa6\x36\x55\x32\x03\x07\xc4\xae\xd5\x92\x74\x4e\x96\x74\x63\xec\x35\x22\x4e\x49\x0b\xfe\x6c\xe2\x32\xeb\xbe\x71\xe1\x08\xa7\x2e\x4c\x57\x06\x32\x1a\x08\x5c\xc9\x79\xdb\x0f\xf1\x89\x55\xd4\x0e\x09\x51\x33\x97\xd7\x08\x59\xc9\x64\x81\xbe\xbd\x16\xac\x29\x10\x0e\x2c\xfb\x42\x32\x23\x33\x0e\x48\xf1\xe8\x58\x8f\x3a\x29\xf5\x44\x34\xfb\x5a\x71\x4a\x8f\xae\x9d\x2e\xda\x55\x82\xe7\x28\x2d\x3f\x72\xd4\x8e\x06\x69\xf5\x57\x88\xb1\xcb\xbd\x16\x9d\xf2\x0c\xf3\xd0\x76\xb8\x01\xa9\x2c\x30\x49\x31\x3a\x6a\x55\x01\x93\xca\x96\xa8\x5b\x05\x6e\x08\xcc\xe1\xd0\x58\xd0\x8c\x0e\xe0\x73\x63\x6c\xbf\xc2\xf4\x7a\x7d\x07\x0f\x3c\x39\x0d\x13\x8b\x05\xea\xf1\x26\xe1\xf6\x32\xcb\xaa\xba\x77\xb3\xe7\xe3\x3d\xea\x31\x9c\xa3\x77\xc9\x43\xbc\xdd\x2d\x1e\x3e\xec\xfe\xf6\x7c\x73\x08\x58\x47\xa1\x98\x25\x9b\x9c\x53\x42\x5a\xbf\x16\x2b\x8f\xec\x65\x72\xd5\xd0\x64\xab\x35\x66\x9c\x5a\xe3\x05\xfc\x45\x5f\x75\x5d\xf1\x7a\xbf\xf6\x63\x86\x1f\x69\xba\x44\xa3\x26\xf7\xaa\xe1\xe3\xf2\x98\x3a\x57\x13\xb9\x8a\x51\x56\xd4\xb5\xe0\x99\x2f\xac\xd5\xdb\xd9\xc8\x88\xce\x02\x38\xb1\x90\x6f\x14\xbd\x54\x59\xec\x62\x54\x86\x0b\x03\x25\x15\x3b\x83\x54\x20\x94\x2c\x90\xb2\x92\x1b\x0b\x37\x94\xf8\x8f\x4c\xf0\x9c\x34\xb8\xd1\xe6\x74\xcc\xa1\x54\x27\x7c\x6c\x43\xd8\x8f\x10\xd9\x08\x41\x66\x8e\x39\x50\xc2\x90\x2f\xda\xee\x32\x6a\x48\xae\x0f\xb1\xd0\x89\x7c\xb2\xcf\x3c\xac\x5b\x65\x5e\x73\x4f\xa5\x8c\xa5\x5c\x40\x69\xc5\x19\xbe\x48\x75\x92\x61\x8d\x0a\xc9\x3c\xec\x0c\x4d\x9d\xbb\xc8\xd4\xa8\xb9\xca\xa9\xa7\x89\xb3\x2b\xa6\x2c\x53\x8d\xf4\xe4\xa8\x01\xb4\x0a\x06\xfc\x7c\x71\x99\x08\x92\x17\x55\x4e\xa6\xe5\x28\xd0\x62\x1e\x56\x08\x9a\xb3\x96\xb6\x03\x7b\x99\x61\xe7\x26\x6a\x74\xff\x77\xd7\xb9\x4d\xe9\x07\xd2\xcb\xb9\xcd\x6f\x6d\xa4\xd2\xfb\xf2\x07\x32\x2a\xb1\x2e\x69\x4a\xf6\x88\xbe\x87\x86\x46\xd4\xaa
\x31\x5c\x66\xc1\x4c\xa5\x79\xc1\x25\xa3\xcd\xe0\x5b\xc4\x62\x69\x1a\x8d\xe4\x05\x25\x3d\xc5\xd1\x08\x39\x72\x14\xb9\x73\xb2\x6f\xff\xe4\x77\xda\x6a\x98\x0e\x6a\x5e\xbc\x10\x9c\x3a\x77\xcd\xec\xc3\xa5\xfe\xa5\xe0\xae\xd0\xdf\x74\xda\xa6\x5a\xb2\x75\x65\x3d\x83\x45\xba\x82\xe9\x88\xec\xf8\xe8\xb2\x41\xad\xcc\xac\x83\x5e\x6f\x5e\xd1\x12\x1a\xc8\x6b\x9a\xc6\xc7\xaf\x6b\x6b\xe5\xbc\xc6\x59\x70\xe5\xbb\xf5\x26\x4e\xde\xa7\xb4\xff\xf6\x7b\xc3\x9e\xe7\x33\xd8\xc4\xef\xe2\x4d\x9c\x2e\xe3\x6d\xbf\xf2\xd0\xf7\xeb\x14\x56\xf1\x7d\xbc\x8b\x61\xb9\xd8\x2e\x17\xab\x18\x56\x24\xb9\xa1\x51\x32\x7f\x89\x39\xe4\x31\x02\x1d\xbc\x82\x7e\x07\xec\x45\x3b\x7f\x2f\xfe\x64\xfc\x04\x2b\xcf\x35\xea\x30\x9c\x6a\xa6\x2d\xa7\x94\xea\x17\x75\x38\xf8\xd6\xe6\x66\xff\x16\x11\x4a\x6b\x6b\x73\x7b\x73\x93\xab\xcc\x44\xdd\xaf\x03\x51\xa6\xaa\x1b\x7a\x68\x1b\x7b\xe3\xd6\xf6\xeb\xc1\x0f\x07\x37\xbd\x0e\x33\xd9\xc6\xf7\xf1\x72\x17\xaa\x65\xdf\x9f\x4c\xaf\x46\x93\xf9\x6a\x0e\x57\x04\x71\x35\x66\x7b\xb0\x1a\xf1\xb5\xe7\xe2\xe0\xc9\x3c\x5a\x27\x47\xc0\xfb\x90\x6d\x7b\x0f\xb5\x4e\x9f\x6d\x04\x7e\xb3\xf4\x87\x6d\x66\xce\xee\xbe\x0b\x18\x82\xf2\xe3\xa8\x6d\x14\xbf\x09\x7d\x31\xf0\x3f\xa4\xe3\x72\xca\xdc\x4d\x26\xcb\xf5\xc3\x43\xb2\xbb\x9b\xfc\x37\x00\x00\xff\xff\x60\x0c\x23\xbc\x00\x12\x00\x00") + +func _1000000001_initial_schemaUpSqlBytes() ([]byte, error) { + return bindataRead( + __1000000001_initial_schemaUpSql, + "1000000001_initial_schema.up.sql", + ) +} + +func _1000000001_initial_schemaUpSql() (*asset, error) { + bytes, err := _1000000001_initial_schemaUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1000000001_initial_schema.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbd, 0x80, 0xef, 0x57, 0xde, 0xe0, 0x13, 0x78, 0xc8, 0xad, 0x78, 0x9, 0x5, 0xa6, 0x95, 0x19, 0x55, 0xb0, 0x53, 0xd7, 0x79, 0x95, 0xbb, 0xe5, 0xed, 0xaa, 0xda, 0xc5, 0x43, 0xb1, 0xc, 0x63}} + return a, nil +} + // Asset 
loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. @@ -201,8 +243,10 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "1000000000_init.down.sql": _1000000000_initDownSql, - "1000000000_init.up.sql": _1000000000_initUpSql, + "1000000000_init.down.sql": _1000000000_initDownSql, + "1000000000_init.up.sql": _1000000000_initUpSql, + "1000000001_initial_schema.down.sql": _1000000001_initial_schemaDownSql, + "1000000001_initial_schema.up.sql": _1000000001_initial_schemaUpSql, } // AssetDebug is true if the assets were built with the debug flag enabled. @@ -249,8 +293,10 @@ type bintree struct { } var _bintree = &bintree{nil, map[string]*bintree{ - "1000000000_init.down.sql": {_1000000000_initDownSql, map[string]*bintree{}}, - "1000000000_init.up.sql": {_1000000000_initUpSql, map[string]*bintree{}}, + "1000000000_init.down.sql": {_1000000000_initDownSql, map[string]*bintree{}}, + "1000000000_init.up.sql": {_1000000000_initUpSql, map[string]*bintree{}}, + "1000000001_initial_schema.down.sql": {_1000000001_initial_schemaDownSql, map[string]*bintree{}}, + "1000000001_initial_schema.up.sql": {_1000000001_initial_schemaUpSql, map[string]*bintree{}}, }} // RestoreAsset restores an asset under the given directory. 
From 5f76b744bc2cf055c5569f40b286e02b596954a1 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 9 Feb 2021 19:10:07 -0700 Subject: [PATCH 64/78] fix merge conflicts Signed-off-by: Stephen Gutekanst --- .../internal/insights/resolvers/insight_resolver_test.go | 3 --- .../insights/resolvers/insights_connection_resolver_test.go | 3 --- enterprise/internal/insights/store/store.go | 2 ++ 3 files changed, 2 insertions(+), 6 deletions(-) delete mode 100644 enterprise/internal/insights/resolvers/insight_resolver_test.go delete mode 100644 enterprise/internal/insights/resolvers/insights_connection_resolver_test.go diff --git a/enterprise/internal/insights/resolvers/insight_resolver_test.go b/enterprise/internal/insights/resolvers/insight_resolver_test.go deleted file mode 100644 index b88996ff58b3..000000000000 --- a/enterprise/internal/insights/resolvers/insight_resolver_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package resolvers - -// TODO(slimsag) diff --git a/enterprise/internal/insights/resolvers/insights_connection_resolver_test.go b/enterprise/internal/insights/resolvers/insights_connection_resolver_test.go deleted file mode 100644 index b88996ff58b3..000000000000 --- a/enterprise/internal/insights/resolvers/insights_connection_resolver_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package resolvers - -// TODO(slimsag) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 4e2509252420..6662d77c0e61 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -52,6 +52,8 @@ func (s *Store) With(other basestore.ShareableStore) *Store { return &Store{Store: s.Store.With(other), now: s.now} } +var _ Interface = &Store{} + // SeriesPoint describes a single insights' series data point. 
type SeriesPoint struct { Time time.Time From 274a5cbe03dc88b1391ce92765d03efe1d89738f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 9 Feb 2021 19:32:14 -0700 Subject: [PATCH 65/78] background WIP Signed-off-by: Stephen Gutekanst --- .../insights/background/background.go | 17 ++++++--- .../internal/insights/background/workers.go | 5 +-- enterprise/internal/insights/insights.go | 35 +++++++------------ .../internal/insights/resolvers/resolver.go | 3 +- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/enterprise/internal/insights/background/background.go b/enterprise/internal/insights/background/background.go index c7035062f93a..f7c873124672 100644 --- a/enterprise/internal/insights/background/background.go +++ b/enterprise/internal/insights/background/background.go @@ -10,18 +10,27 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" "github.com/sourcegraph/sourcegraph/internal/goroutine" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" ) func StartBackgroundJobs(ctx context.Context, db *sql.DB) { - resolver, err := insights.InitResolver(ctx, db) + // Create a connection to TimescaleDB, so we can record results. + timescale, err := insights.InitializeCodeInsightsDB() if err != nil { // e.g. migration failed, DB unavailable, etc. code insights is non-functional so we do not // want to continue. + // + // In some situations (i.e. if the frontend is running migrations), this will be expected + // and we should restart until the frontend finishes - the exact same way repo updater would + // behave if the frontend had not yet migrated the main app DB. 
log.Fatal("failed to initialize code insights (set DISABLE_CODE_INSIGHTS=true if needed)", err) } + store := store.New(timescale) + + // TODO(slimsag): introduce workerstore // Create metrics for recording information about background jobs. observationContext := &observation.Context{ @@ -33,9 +42,9 @@ func StartBackgroundJobs(ctx context.Context, db *sql.DB) { // Start background goroutines for all of our workers. routines := []goroutine.BackgroundRoutine{ - newInsightEnqueuer(ctx, resolver.Store), - newQueryRunner(ctx, resolver.Store, resolver, metrics), // TODO(slimsag): should not store in TimescaleDB - newQueryRunnerResetter(ctx, resolver.Store, metrics), // TODO(slimsag): should not store in TimescaleDB + newInsightEnqueuer(ctx, store), + newQueryRunner(ctx, store, metrics), // TODO(slimsag): should not store in TimescaleDB + newQueryRunnerResetter(ctx, store, metrics), // TODO(slimsag): should not store in TimescaleDB } go goroutine.MonitorBackgroundRoutines(ctx, routines...) } diff --git a/enterprise/internal/insights/background/workers.go b/enterprise/internal/insights/background/workers.go index 5fe52133f6a7..b7e82dbb7e27 100644 --- a/enterprise/internal/insights/background/workers.go +++ b/enterprise/internal/insights/background/workers.go @@ -7,7 +7,6 @@ import ( "github.com/keegancsmith/sqlf" - "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" "github.com/sourcegraph/sourcegraph/internal/database/basestore" "github.com/sourcegraph/sourcegraph/internal/goroutine" @@ -33,7 +32,9 @@ func newInsightEnqueuer(ctx context.Context, store *store.Store) goroutine.Backg // newQueryRunner returns a worker that will execute search queries and insert information about // the results into the code insights database. 
-func newQueryRunner(ctx context.Context, insightsStore *store.Store, resolver graphqlbackend.InsightsResolver, metrics *metrics) *workerutil.Worker { +// +// TODO(slimsag): needs main app DB for settings discovery +func newQueryRunner(ctx context.Context, insightsStore *store.Store, metrics *metrics) *workerutil.Worker { store := createDBWorkerStoreForInsightsJobs(insightsStore) // TODO(slimsag): should not create in TimescaleDB options := workerutil.WorkerOptions{ Name: "insights_query_runner_worker", diff --git a/enterprise/internal/insights/insights.go b/enterprise/internal/insights/insights.go index a9ef628d09b6..a12a5c1ff970 100644 --- a/enterprise/internal/insights/insights.go +++ b/enterprise/internal/insights/insights.go @@ -16,43 +16,34 @@ import ( // Init initializes the given enterpriseServices to include the required resolvers for insights. func Init(ctx context.Context, enterpriseServices *enterprise.Services) error { - resolver, err := InitResolver(ctx, dbconn.Global) - if err != nil { - return err - } - if resolver == nil { - return nil // e.g. code insights is disabled or not supported in this deployment type. - } - enterpriseServices.InsightsResolver = resolver - return nil -} - -// InitResolver connects to and initializes TimescaleDB and returns an initialized resolver. -func InitResolver(ctx context.Context, postgresAppDB *sql.DB) (*resolvers.Resolver, error) { if !conf.IsDev(conf.DeployType()) { // Code Insights is not yet deployed to non-dev/testing instances. We don't yet have // TimescaleDB in those deployments. https://github.com/sourcegraph/sourcegraph/issues/17218 - return nil, nil + return nil } if conf.IsDeployTypeSingleDockerContainer(conf.DeployType()) { // Code insights is not supported in single-container Docker demo deployments. - return nil, nil + return nil } if v, _ := strconv.ParseBool(os.Getenv("DISABLE_CODE_INSIGHTS")); v { // Dev option for disabling code insights. Helpful if e.g. 
you have issues running the // codeinsights-db or don't want to spend resources on it. - return nil, nil + return nil } - timescale, err := initializeCodeInsightsDB() + timescale, err := InitializeCodeInsightsDB() if err != nil { - return nil, err + return err } - return resolvers.New(timescale, postgresAppDB), nil + postgres := dbconn.Global + enterpriseServices.InsightsResolver = resolvers.New(timescale, postgres) + return nil } -// initializeCodeInsightsDB connects to and initializes the Code Insights Timescale DB, running -// database migrations before returning. -func initializeCodeInsightsDB() (*sql.DB, error) { +// InitializeCodeInsightsDB connects to and initializes the Code Insights Timescale DB, running +// database migrations before returning. It is safe to call from multiple services/containers (in +// which case, one's migration will win and the other caller will receive an error and should exit +// and restart until the other finishes.) +func InitializeCodeInsightsDB() (*sql.DB, error) { timescaleDSN := conf.Get().ServiceConnections.CodeInsightsTimescaleDSN conf.Watch(func() { if newDSN := conf.Get().ServiceConnections.CodeInsightsTimescaleDSN; timescaleDSN != newDSN { diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index 1f1133ab0a5d..9393afc873f8 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -20,8 +20,7 @@ type Resolver struct { } // New returns a new Resolver whose store uses the given Timescale and Postgres DBs. 
-func New(timescale, postgres dbutil.DB) graphqlbackend.InsightsResolver { - func New(timescale, postgres dbutil.DB) *Resolver { +func New(timescale, postgres dbutil.DB) *Resolver { return newWithClock(timescale, postgres, timeutil.Now) } From 29e0c41f6b84d856457ba31b62080f923e0720e6 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Tue, 9 Feb 2021 19:45:57 -0700 Subject: [PATCH 66/78] worker WIP Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/resolvers/resolver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/internal/insights/resolvers/resolver.go b/enterprise/internal/insights/resolvers/resolver.go index 9393afc873f8..aacd3fbd2fa4 100644 --- a/enterprise/internal/insights/resolvers/resolver.go +++ b/enterprise/internal/insights/resolvers/resolver.go @@ -20,7 +20,7 @@ type Resolver struct { } // New returns a new Resolver whose store uses the given Timescale and Postgres DBs. -func New(timescale, postgres dbutil.DB) *Resolver { +func New(timescale, postgres dbutil.DB) graphqlbackend.InsightsResolver { return newWithClock(timescale, postgres, timeutil.Now) } From ae76295ff28de48e45ae65c6b43c66238b15fd08 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 10 Feb 2021 16:28:24 -0700 Subject: [PATCH 67/78] background WIP Signed-off-by: Stephen Gutekanst --- enterprise/cmd/repo-updater/main.go | 2 +- enterprise/internal/insights/background/workers.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/enterprise/cmd/repo-updater/main.go b/enterprise/cmd/repo-updater/main.go index bce8c07f573d..48b595315e82 100644 --- a/enterprise/cmd/repo-updater/main.go +++ b/enterprise/cmd/repo-updater/main.go @@ -16,7 +16,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns" codemonitorsBackground "github.com/sourcegraph/sourcegraph/enterprise/internal/codemonitors/background" edb "github.com/sourcegraph/sourcegraph/enterprise/internal/database" - insightsBackground 
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/bac" + insightsBackground "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/background" "github.com/sourcegraph/sourcegraph/internal/actor" ossAuthz "github.com/sourcegraph/sourcegraph/internal/authz" "github.com/sourcegraph/sourcegraph/internal/conf" diff --git a/enterprise/internal/insights/background/workers.go b/enterprise/internal/insights/background/workers.go index b7e82dbb7e27..b77e2e767403 100644 --- a/enterprise/internal/insights/background/workers.go +++ b/enterprise/internal/insights/background/workers.go @@ -35,14 +35,14 @@ func newInsightEnqueuer(ctx context.Context, store *store.Store) goroutine.Backg // // TODO(slimsag): needs main app DB for settings discovery func newQueryRunner(ctx context.Context, insightsStore *store.Store, metrics *metrics) *workerutil.Worker { - store := createDBWorkerStoreForInsightsJobs(insightsStore) // TODO(slimsag): should not create in TimescaleDB + workerStore := createDBWorkerStoreForInsightsJobs(insightsStore) // TODO(slimsag): should not create in TimescaleDB options := workerutil.WorkerOptions{ Name: "insights_query_runner_worker", NumHandlers: 1, Interval: 5 * time.Second, Metrics: metrics.workerMetrics, } - worker := dbworker.NewWorker(ctx, store, &queryRunner{ + worker := dbworker.NewWorker(ctx, workerStore, &queryRunner{ workerStore: insightsStore, // TODO(slimsag): should not create in TimescaleDB insightsStore: insightsStore, }, options) @@ -52,13 +52,13 @@ func newQueryRunner(ctx context.Context, insightsStore *store.Store, metrics *me // newQueryRunnerResetter returns a worker that will reset pending query runner jobs if they take // too long to complete. 
func newQueryRunnerResetter(ctx context.Context, s *store.Store, metrics *metrics) *dbworker.Resetter { - store := createDBWorkerStoreForInsightsJobs(s) // TODO(slimsag): should not create in TimescaleDB + workerStore := createDBWorkerStoreForInsightsJobs(s) // TODO(slimsag): should not create in TimescaleDB options := dbworker.ResetterOptions{ Name: "code_insights_trigger_jobs_worker_resetter", Interval: 1 * time.Minute, Metrics: metrics.resetterMetrics, } - return dbworker.NewResetter(store, options) + return dbworker.NewResetter(workerStore, options) } func createDBWorkerStoreForInsightsJobs(s *store.Store) dbworkerstore.Store { From 2e5263769008918c907b1a1e29844c06ea5b5e63 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 16:26:48 -0700 Subject: [PATCH 68/78] Update README.codeinsights.md --- README.codeinsights.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.codeinsights.md b/README.codeinsights.md index 1536a4641c65..fd31f4609d2e 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -103,6 +103,23 @@ SELECT time, FROM generate_series(TIMESTAMP '2020-01-01 00:00:00', TIMESTAMP '2020-06-01 00:00:00', INTERVAL '10 min') AS time; ``` +## Querying all data + +``` +SELECT series_id, + time, + value, + m.metadata, + repo_id, + repo_name.name, + original_repo_name.name +FROM series_points p +INNER JOIN metadata m ON p.metadata_id = m.id +INNER JOIN repo_names repo_name on p.repo_name_id = repo_name.id +INNER JOIN repo_names original_repo_name on p.original_repo_name_id = original_repo_name.id +ORDER BY time DESC; +``` + ## Example Global Settings ``` From e3a4e2194fea6625bf6dbfe3753e8e9676de2376 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 16:34:15 -0700 Subject: [PATCH 69/78] insights: store: query metadata & other minor improvements * Query metadata for points. * Improve formatting of test data. 
* Change incorrect `Series *int32` to `Series *string` * Add TODOs for improved filtering abilities in the future. Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store.go | 27 +++++++++++++++---- .../internal/insights/store/store_test.go | 15 +++++------ 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 6662d77c0e61..fbfc395d8248 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -55,15 +55,27 @@ func (s *Store) With(other basestore.ShareableStore) *Store { var _ Interface = &Store{} // SeriesPoint describes a single insights' series data point. +// +// Some fields that could be queried (series ID, repo ID/names) are omitted as they are primarily +// only useful for filtering the data you get back, and would inflate the data size considerably +// otherwise. type SeriesPoint struct { - Time time.Time - Value float64 + Time time.Time + Value float64 + Metadata []byte +} + +func (s *SeriesPoint) String() string { + return fmt.Sprintf("SeriesPoint{Time: %q, Value: %v, Metadata: %s}", s.Time, s.Value, s.Metadata) } // SeriesPointsOpts describes options for querying insights' series data points. type SeriesPointsOpts struct { // SeriesID is the unique series ID to query, if non-nil. - SeriesID *int32 + SeriesID *string + + // TODO(slimsag): Add ability to filter based on repo ID, name, original name. + // TODO(slimsag): Add ability to do limited filtering based on metadata. // Time ranges to query from/to, if non-nil. 
From, To *time.Time @@ -80,6 +92,7 @@ func (s *Store) SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]Seri err := sc.Scan( &point.Time, &point.Value, + &point.Metadata, ) if err != nil { return err @@ -91,8 +104,12 @@ func (s *Store) SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]Seri } var seriesPointsQueryFmtstr = ` --- source: enterprise/internal/insights/store/series_points.go -SELECT time, value FROM series_points +-- source: enterprise/internal/insights/store/store.go:SeriesPoints +SELECT time, + value, + m.metadata +FROM series_points p +INNER JOIN metadata m ON p.metadata_id = m.id WHERE %s ORDER BY time DESC ` diff --git a/enterprise/internal/insights/store/store_test.go b/enterprise/internal/insights/store/store_test.go index bff028c047cb..814d7b1cad24 100644 --- a/enterprise/internal/insights/store/store_test.go +++ b/enterprise/internal/insights/store/store_test.go @@ -2,7 +2,6 @@ package store import ( "context" - "fmt" "testing" "time" @@ -73,8 +72,8 @@ SELECT time, t.Fatal(err) } autogold.Want("SeriesPoints(2).len", int(913)).Equal(t, len(points)) - autogold.Want("SeriesPoints(2)[len()-1]", "{Time:2020-01-01 00:00:00 +0000 UTC Value:-20.00716650672132}").Equal(t, fmt.Sprintf("%+v", points[len(points)-1])) - autogold.Want("SeriesPoints(2)[0]", "{Time:2020-06-01 00:00:00 +0000 UTC Value:-37.8750440811433}").Equal(t, fmt.Sprintf("%+v", points[0])) + autogold.Want("SeriesPoints(2)[len()-1].String()", `SeriesPoint{Time: "2020-01-01 00:00:00 +0000 UTC", Value: -20.00716650672132, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[len(points)-1].String()) + autogold.Want("SeriesPoints(2)[0].String()", `SeriesPoint{Time: "2020-06-01 00:00:00 +0000 UTC", Value: -37.8750440811433, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[0].String()) }) t.Run("subset of data", func(t *testing.T) { @@ -87,8 +86,8 @@ SELECT time, t.Fatal(err) } 
autogold.Want("SeriesPoints(3).len", int(551)).Equal(t, len(points)) - autogold.Want("SeriesPoints(3)[0]", "{Time:2020-05-31 20:00:00 +0000 UTC Value:-11.269436460802638}").Equal(t, fmt.Sprintf("%+v", points[0])) - autogold.Want("SeriesPoints(3)[len()-1]", "{Time:2020-03-01 04:00:00 +0000 UTC Value:35.85710033014749}").Equal(t, fmt.Sprintf("%+v", points[len(points)-1])) + autogold.Want("SeriesPoints(3)[0].String()", `SeriesPoint{Time: "2020-05-31 20:00:00 +0000 UTC", Value: -11.269436460802638, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[0].String()) + autogold.Want("SeriesPoints(3)[len()-1].String()", `SeriesPoint{Time: "2020-03-01 04:00:00 +0000 UTC", Value: 35.85710033014749, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[len(points)-1].String()) }) t.Run("latest 3 points", func(t *testing.T) { @@ -100,9 +99,9 @@ SELECT time, t.Fatal(err) } autogold.Want("SeriesPoints(4).len", int(3)).Equal(t, len(points)) - autogold.Want("SeriesPoints(4)[0]", "{Time:2020-06-01 00:00:00 +0000 UTC Value:-37.8750440811433}").Equal(t, fmt.Sprintf("%+v", points[0])) - autogold.Want("SeriesPoints(4)[1]", "{Time:2020-05-31 20:00:00 +0000 UTC Value:-11.269436460802638}").Equal(t, fmt.Sprintf("%+v", points[1])) - autogold.Want("SeriesPoints(4)[2]", "{Time:2020-05-31 16:00:00 +0000 UTC Value:17.838503552871998}").Equal(t, fmt.Sprintf("%+v", points[2])) + autogold.Want("SeriesPoints(4)[0].String()", `SeriesPoint{Time: "2020-06-01 00:00:00 +0000 UTC", Value: -37.8750440811433, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[0].String()) + autogold.Want("SeriesPoints(4)[1].String()", `SeriesPoint{Time: "2020-05-31 20:00:00 +0000 UTC", Value: -11.269436460802638, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[1].String()) + autogold.Want("SeriesPoints(4)[2].String()", `SeriesPoint{Time: "2020-05-31 16:00:00 +0000 UTC", Value: 
17.838503552871998, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[2].String()) }) } From 139e77d30675831eb05135c7b41038459b5bedef Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 16:53:54 -0700 Subject: [PATCH 70/78] update resolver data Signed-off-by: Stephen Gutekanst --- .../internal/insights/resolvers/insight_series_resolver_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go index 2c143b9be85b..b8100f9cfafe 100644 --- a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go +++ b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go @@ -110,6 +110,6 @@ func TestResolver_InsightSeries(t *testing.T) { if err != nil { t.Fatal(err) } - autogold.Want("insights[0][0].Points mocked", "[{p:{Time:{wall:0 ext:63271811045 loc:} Value:1}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:2}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:3}}]").Equal(t, fmt.Sprintf("%+v", points)) + autogold.Want("insights[0][0].Points mocked", "[{p:{Time:{wall:0 ext:63271811045 loc:} Value:1 Metadata:[]}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:2 Metadata:[]}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:3 Metadata:[]}}]").Equal(t, fmt.Sprintf("%+v", points)) }) } From 70c85f817a68fcda60c5e772f57f4bfd4dca1cf7 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 16:52:31 -0700 Subject: [PATCH 71/78] insights: store: add support for recording data points This PR adds support for the store to record data points. 
Stacked on top of #18254 Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store.go | 117 ++++++++++++++++++ .../internal/insights/store/store_test.go | 56 +++++++++ 2 files changed, 173 insertions(+) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index fbfc395d8248..8b5e7ace8b36 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -3,11 +3,14 @@ package store import ( "context" "database/sql" + "encoding/json" "fmt" "time" "github.com/keegancsmith/sqlf" + "github.com/pkg/errors" + "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/database/basestore" "github.com/sourcegraph/sourcegraph/internal/database/dbutil" "github.com/sourcegraph/sourcegraph/internal/timeutil" @@ -17,6 +20,7 @@ import ( // for actual API usage. type Interface interface { SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]SeriesPoint, error) + RecordSeriesPoint(ctx context.Context, v RecordSeriesPointArgs) error } var _ Interface = &Store{} @@ -140,6 +144,119 @@ func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { ) } +// RecordSeriesPointArgs describes arguments for the RecordSeriesPoint method. +type RecordSeriesPointArgs struct { + // SeriesID is the unique series ID to query. It should describe the series of data uniquely, + // but is not a DB table primary key ID. + SeriesID string + + // Point is the actual data point recorded and at what time. + Point SeriesPoint + + // Repository name and DB ID to associate with this data point, if any. + // + // Both must be specified if one is specified. + RepoName *string + RepoID *api.RepoID + + // Metadata contains arbitrary JSON metadata to associate with the data point, if any. + // + // See the DB schema comments for intended use cases. This should generally be small, + // low-cardinality data to avoid inflating the table. 
+ Metadata interface{} +} + +// RecordSeriesPoint records a data point for the specfied series ID (which is a unique ID for the +// series, not a DB table primary key ID). +func (s *Store) RecordSeriesPoint(ctx context.Context, v RecordSeriesPointArgs) (err error) { + // Start transaction. + var txStore *basestore.Store + txStore, err = s.Transact(ctx) + if err != nil { + return err + } + defer func() { err = txStore.Done(err) }() + + if (v.RepoName != nil && v.RepoID == nil) || (v.RepoID != nil && v.RepoName == nil) { + return errors.New("RepoName and RepoID must be mutually specified") + } + + // Upsert the repository name into a separate table, so we get a small ID we can reference + // many times from the series_points table without storing the repo name multiple times. + var repoNameID *int32 + if v.RepoName != nil { + row := txStore.QueryRow(ctx, sqlf.Sprintf(upsertRepoNameFmtStr, *v.RepoName, *v.RepoName)) + repoNameID = new(int32) + if err := row.Scan(repoNameID); err != nil { + return errors.Wrap(err, "upserting repo name ID") + } + } + + // Upsert the metadata into a separate table, so we get a small ID we can reference many times + // from the series_points table without storing the metadata multiple times. + var metadataID *int32 + if v.Metadata != nil { + jsonMetadata, err := json.Marshal(v.Metadata) + if err != nil { + return errors.Wrap(err, "upserting>encoding metadata") + } + row := txStore.QueryRow(ctx, sqlf.Sprintf(upsertMetadataFmtStr, jsonMetadata, jsonMetadata)) + metadataID = new(int32) + if err := row.Scan(metadataID); err != nil { + return errors.Wrap(err, "upserting metadata ID") + } + } + + // Insert the actual data point. 
+ return txStore.Exec(ctx, sqlf.Sprintf( + recordSeriesPointFmtstr, + v.SeriesID, // series_id + v.Point.Time, // time + v.Point.Value, // value + metadataID, // metadata_id + v.RepoID, // repo_id + repoNameID, // repo_name_id + repoNameID, // original_repo_name_id + )) +} + +const upsertRepoNameFmtStr = ` +WITH e AS( + INSERT INTO repo_names(name) + VALUES (%s) + ON CONFLICT DO NOTHING + RETURNING id +) +SELECT * FROM e +UNION + SELECT id FROM repo_names WHERE name = %s; +` + +const upsertMetadataFmtStr = ` +WITH e AS( + INSERT INTO metadata(metadata) + VALUES (%s) + ON CONFLICT DO NOTHING + RETURNING id +) +SELECT * FROM e +UNION + SELECT id FROM metadata WHERE metadata = %s; +` + +const recordSeriesPointFmtstr = ` +-- source: enterprise/internal/insights/store/store.go:RecordSeriesPoint +INSERT INTO series_points( + series_id, + time, + value, + metadata_id, + repo_id, + repo_name_id, + original_repo_name_id) +VALUES (%s, %s, %s, %s, %s, %s, %s); +` + func (s *Store) query(ctx context.Context, q *sqlf.Query, sc scanFunc) error { rows, err := s.Store.Query(ctx, q) if err != nil { diff --git a/enterprise/internal/insights/store/store_test.go b/enterprise/internal/insights/store/store_test.go index 814d7b1cad24..a4612515f0d6 100644 --- a/enterprise/internal/insights/store/store_test.go +++ b/enterprise/internal/insights/store/store_test.go @@ -8,6 +8,7 @@ import ( "github.com/hexops/autogold" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/dbtesting" + "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/timeutil" ) @@ -105,3 +106,58 @@ SELECT time, }) } + +func TestRecordSeriesPoints(t *testing.T) { + if testing.Short() { + t.Skip() + } + t.Parallel() + + ctx := context.Background() + clock := timeutil.Now + timescale, cleanup := dbtesting.TimescaleDB(t) + defer cleanup() + store := NewWithClock(timescale, clock) + + time := func(s string) time.Time { + v, err := time.Parse(time.RFC3339, s) + if err != nil 
{ + t.Fatal(err) + } + return v + } + optionalString := func(v string) *string { return &v } + optionalRepoID := func(v api.RepoID) *api.RepoID { return &v } + + // Record some data points. + for _, record := range []RecordSeriesPointArgs{ + { + SeriesID: "one", + Point: SeriesPoint{Time: time("2020-03-01T00:00:00Z"), Value: 1.1}, + RepoName: optionalString("repo1"), + RepoID: optionalRepoID(3), + Metadata: map[string]interface{}{"some": "data"}, + }, + { + SeriesID: "two", + Point: SeriesPoint{Time: time("2020-03-02T00:00:00Z"), Value: 2.2}, + Metadata: []interface{}{"some", "data", "two"}, + }, + } { + if err := store.RecordSeriesPoint(ctx, record); err != nil { + t.Fatal(err) + } + } + + // Confirm we get the expected data back. + points, err := store.SeriesPoints(ctx, SeriesPointsOpts{}) + if err != nil { + t.Fatal(err) + } + autogold.Want("len(points)", 0).Equal(t, len(points)) + autogold.Want("points[0].String()", 0).Equal(t, points[0].String()) + autogold.Want("points[1].String()", 0).Equal(t, points[1].String()) + + // Confirm the data point with repository name got recorded correctly. + // TODO(slimsag): future: once we support querying by repo ID/names, add tests to ensure that information is inserted properly here. 
+} From 07b26346f0c9dbc3993657a499c6d743121f6559 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 17:30:42 -0700 Subject: [PATCH 72/78] go generate ./enterprise/internal/insights/store/ (regenerate mocks) Signed-off-by: Stephen Gutekanst --- .../insights/store/mock_store_interface.go | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/enterprise/internal/insights/store/mock_store_interface.go b/enterprise/internal/insights/store/mock_store_interface.go index 0ef935976bd6..8af2c0e0777e 100644 --- a/enterprise/internal/insights/store/mock_store_interface.go +++ b/enterprise/internal/insights/store/mock_store_interface.go @@ -12,6 +12,9 @@ import ( // github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store) // used for unit testing. type MockInterface struct { + // RecordSeriesPointFunc is an instance of a mock function object + // controlling the behavior of the method RecordSeriesPoint. + RecordSeriesPointFunc *InterfaceRecordSeriesPointFunc // SeriesPointsFunc is an instance of a mock function object controlling // the behavior of the method SeriesPoints. SeriesPointsFunc *InterfaceSeriesPointsFunc @@ -21,6 +24,11 @@ type MockInterface struct { // methods return zero values for all results, unless overwritten. func NewMockInterface() *MockInterface { return &MockInterface{ + RecordSeriesPointFunc: &InterfaceRecordSeriesPointFunc{ + defaultHook: func(context.Context, RecordSeriesPointArgs) error { + return nil + }, + }, SeriesPointsFunc: &InterfaceSeriesPointsFunc{ defaultHook: func(context.Context, SeriesPointsOpts) ([]SeriesPoint, error) { return nil, nil @@ -33,12 +41,121 @@ func NewMockInterface() *MockInterface { // All methods delegate to the given implementation, unless overwritten. 
func NewMockInterfaceFrom(i Interface) *MockInterface { return &MockInterface{ + RecordSeriesPointFunc: &InterfaceRecordSeriesPointFunc{ + defaultHook: i.RecordSeriesPoint, + }, SeriesPointsFunc: &InterfaceSeriesPointsFunc{ defaultHook: i.SeriesPoints, }, } } +// InterfaceRecordSeriesPointFunc describes the behavior when the +// RecordSeriesPoint method of the parent MockInterface instance is invoked. +type InterfaceRecordSeriesPointFunc struct { + defaultHook func(context.Context, RecordSeriesPointArgs) error + hooks []func(context.Context, RecordSeriesPointArgs) error + history []InterfaceRecordSeriesPointFuncCall + mutex sync.Mutex +} + +// RecordSeriesPoint delegates to the next hook function in the queue and +// stores the parameter and result values of this invocation. +func (m *MockInterface) RecordSeriesPoint(v0 context.Context, v1 RecordSeriesPointArgs) error { + r0 := m.RecordSeriesPointFunc.nextHook()(v0, v1) + m.RecordSeriesPointFunc.appendCall(InterfaceRecordSeriesPointFuncCall{v0, v1, r0}) + return r0 +} + +// SetDefaultHook sets function that is called when the RecordSeriesPoint +// method of the parent MockInterface instance is invoked and the hook queue +// is empty. +func (f *InterfaceRecordSeriesPointFunc) SetDefaultHook(hook func(context.Context, RecordSeriesPointArgs) error) { + f.defaultHook = hook +} + +// PushHook adds a function to the end of hook queue. Each invocation of the +// RecordSeriesPoint method of the parent MockInterface instance invokes the +// hook at the front of the queue and discards it. After the queue is empty, +// the default hook function is invoked for any future action. +func (f *InterfaceRecordSeriesPointFunc) PushHook(hook func(context.Context, RecordSeriesPointArgs) error) { + f.mutex.Lock() + f.hooks = append(f.hooks, hook) + f.mutex.Unlock() +} + +// SetDefaultReturn calls SetDefaultDefaultHook with a function that returns +// the given values. 
+func (f *InterfaceRecordSeriesPointFunc) SetDefaultReturn(r0 error) { + f.SetDefaultHook(func(context.Context, RecordSeriesPointArgs) error { + return r0 + }) +} + +// PushReturn calls PushDefaultHook with a function that returns the given +// values. +func (f *InterfaceRecordSeriesPointFunc) PushReturn(r0 error) { + f.PushHook(func(context.Context, RecordSeriesPointArgs) error { + return r0 + }) +} + +func (f *InterfaceRecordSeriesPointFunc) nextHook() func(context.Context, RecordSeriesPointArgs) error { + f.mutex.Lock() + defer f.mutex.Unlock() + + if len(f.hooks) == 0 { + return f.defaultHook + } + + hook := f.hooks[0] + f.hooks = f.hooks[1:] + return hook +} + +func (f *InterfaceRecordSeriesPointFunc) appendCall(r0 InterfaceRecordSeriesPointFuncCall) { + f.mutex.Lock() + f.history = append(f.history, r0) + f.mutex.Unlock() +} + +// History returns a sequence of InterfaceRecordSeriesPointFuncCall objects +// describing the invocations of this function. +func (f *InterfaceRecordSeriesPointFunc) History() []InterfaceRecordSeriesPointFuncCall { + f.mutex.Lock() + history := make([]InterfaceRecordSeriesPointFuncCall, len(f.history)) + copy(history, f.history) + f.mutex.Unlock() + + return history +} + +// InterfaceRecordSeriesPointFuncCall is an object that describes an +// invocation of method RecordSeriesPoint on an instance of MockInterface. +type InterfaceRecordSeriesPointFuncCall struct { + // Arg0 is the value of the 1st argument passed to this method + // invocation. + Arg0 context.Context + // Arg1 is the value of the 2nd argument passed to this method + // invocation. + Arg1 RecordSeriesPointArgs + // Result0 is the value of the 1st result returned from this method + // invocation. + Result0 error +} + +// Args returns an interface slice containing the arguments of this +// invocation. 
+func (c InterfaceRecordSeriesPointFuncCall) Args() []interface{} { + return []interface{}{c.Arg0, c.Arg1} +} + +// Results returns an interface slice containing the results of this +// invocation. +func (c InterfaceRecordSeriesPointFuncCall) Results() []interface{} { + return []interface{}{c.Result0} +} + // InterfaceSeriesPointsFunc describes the behavior when the SeriesPoints // method of the parent MockInterface instance is invoked. type InterfaceSeriesPointsFunc struct { From c64564288bd4933cf48218c6b0d9c78eef815ca4 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 17:35:43 -0700 Subject: [PATCH 73/78] go test -update Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/enterprise/internal/insights/store/store_test.go b/enterprise/internal/insights/store/store_test.go index a4612515f0d6..8f5219f9f42e 100644 --- a/enterprise/internal/insights/store/store_test.go +++ b/enterprise/internal/insights/store/store_test.go @@ -154,9 +154,9 @@ func TestRecordSeriesPoints(t *testing.T) { if err != nil { t.Fatal(err) } - autogold.Want("len(points)", 0).Equal(t, len(points)) - autogold.Want("points[0].String()", 0).Equal(t, points[0].String()) - autogold.Want("points[1].String()", 0).Equal(t, points[1].String()) + autogold.Want("len(points)", int(2)).Equal(t, len(points)) + autogold.Want("points[0].String()", `SeriesPoint{Time: "2020-03-02 00:00:00 +0000 UTC", Value: 2.2, Metadata: ["some", "data", "two"]}`).Equal(t, points[0].String()) + autogold.Want("points[1].String()", `SeriesPoint{Time: "2020-03-01 00:00:00 +0000 UTC", Value: 1.1, Metadata: {"some": "data"}}`).Equal(t, points[1].String()) // Confirm the data point with repository name got recorded correctly. // TODO(slimsag): future: once we support querying by repo ID/names, add tests to ensure that information is inserted properly here. 
From b1afbf9ad122bc14b4d90b2d31fcc693b11dfc98 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 18:01:47 -0700 Subject: [PATCH 74/78] insights: add new discovery package for locating insights This will be used both from the GraphQL resolver layer, as well as the background workers - both of which need to scan the user/org/global settings for insights defined within. Signed-off-by: Stephen Gutekanst --- .../internal/insights/discovery/discovery.go | 41 +++++ .../insights/discovery/discovery_test.go | 155 ++++++++++++++++++ enterprise/internal/insights/discovery/gen.go | 4 + .../insights/discovery/mock_setting_store.go | 151 +++++++++++++++++ 4 files changed, 351 insertions(+) create mode 100644 enterprise/internal/insights/discovery/discovery.go create mode 100644 enterprise/internal/insights/discovery/discovery_test.go create mode 100644 enterprise/internal/insights/discovery/gen.go create mode 100644 enterprise/internal/insights/discovery/mock_setting_store.go diff --git a/enterprise/internal/insights/discovery/discovery.go b/enterprise/internal/insights/discovery/discovery.go new file mode 100644 index 000000000000..350a5cb54663 --- /dev/null +++ b/enterprise/internal/insights/discovery/discovery.go @@ -0,0 +1,41 @@ +package discovery + +import ( + "context" + + "github.com/sourcegraph/sourcegraph/internal/api" + "github.com/sourcegraph/sourcegraph/internal/jsonc" + "github.com/sourcegraph/sourcegraph/schema" +) + +// SettingStore is a subset of the API exposed by the database.Settings() store. +type SettingStore interface { + GetLatest(context.Context, api.SettingsSubject) (*api.Settings, error) +} + +// Discover uses the given settings store to look for insights in the global user settings. +// +// TODO(slimsag): future: include user/org settings and consider security implications of doing so. +// In the future, this will be expanded to also include insights from users/orgs. 
+func Discover(ctx context.Context, settingStore SettingStore) ([]*schema.Insight, error) { + // Get latest Global user settings. + subject := api.SettingsSubject{Site: true} + globalSettingsRaw, err := settingStore.GetLatest(ctx, subject) + if err != nil { + return nil, err + } + globalSettings, err := parseUserSettings(globalSettingsRaw) + return globalSettings.Insights, nil +} + +func parseUserSettings(settings *api.Settings) (*schema.Settings, error) { + if settings == nil { + // Settings have never been saved for this subject; equivalent to `{}`. + return &schema.Settings{}, nil + } + var v schema.Settings + if err := jsonc.Unmarshal(settings.Contents, &v); err != nil { + return nil, err + } + return &v, nil +} diff --git a/enterprise/internal/insights/discovery/discovery_test.go b/enterprise/internal/insights/discovery/discovery_test.go new file mode 100644 index 000000000000..e620ebdfd43d --- /dev/null +++ b/enterprise/internal/insights/discovery/discovery_test.go @@ -0,0 +1,155 @@ +package discovery + +import ( + "context" + "testing" + + "github.com/hexops/autogold" + + "github.com/sourcegraph/sourcegraph/internal/api" + "github.com/sourcegraph/sourcegraph/schema" +) + +var settingsExample = &api.Settings{ID: 1, Contents: `{ + "insights": [ + { + "title": "fmt usage", + "description": "fmt.Errorf/fmt.Printf usage", + "series": [ + { + "label": "fmt.Errorf", + "search": "errorf", + }, + { + "label": "printf", + "search": "fmt.Printf", + } + ] + }, + { + "title": "gitserver usage", + "description": "gitserver exec & close usage", + "series": [ + { + "label": "exec", + "search": "gitserver.Exec", + }, + { + "label": "close", + "search": "gitserver.Close", + } + ] + } + ] + } +`} + +func TestDiscover(t *testing.T) { + settingStore := NewMockSettingStore() + settingStore.GetLatestFunc.SetDefaultHook(func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) { + if !subject.Site { // TODO: future: site is an extremely poor name for "global 
settings", we should change this. + t.Fatal("expected only to request settings from global user settings") + } + return settingsExample, nil + }) + ctx := context.Background() + insights, err := Discover(ctx, settingStore) + if err != nil { + t.Fatal(err) + } + autogold.Want("insights", []*schema.Insight{ + &schema.Insight{ + Description: "fmt.Errorf/fmt.Printf usage", + Series: []*schema.InsightSeries{ + &schema.InsightSeries{ + Label: "fmt.Errorf", + Search: "errorf", + }, + &schema.InsightSeries{ + Label: "printf", + Search: "fmt.Printf", + }, + }, + Title: "fmt usage", + }, + &schema.Insight{ + Description: "gitserver exec & close usage", + Series: []*schema.InsightSeries{ + &schema.InsightSeries{ + Label: "exec", + Search: "gitserver.Exec", + }, + &schema.InsightSeries{ + Label: "close", + Search: "gitserver.Close", + }, + }, + Title: "gitserver usage", + }, + }).Equal(t, insights) +} + +func Test_parseUserSettings(t *testing.T) { + tests := []struct { + name string + input *api.Settings + want autogold.Value + }{ + { + name: "nil", + input: nil, + want: autogold.Want("nil", [2]interface{}{&schema.Settings{}, nil}), + }, + { + name: "empty", + input: &api.Settings{ + Contents: "{}", + }, + want: autogold.Want("empty", [2]interface{}{&schema.Settings{}, nil}), + }, + { + name: "real", + input: settingsExample, + want: autogold.Want("real", [2]interface{}{ + &schema.Settings{Insights: []*schema.Insight{ + { + Description: "fmt.Errorf/fmt.Printf usage", + Series: []*schema.InsightSeries{ + { + Label: "fmt.Errorf", + Search: "errorf", + }, + { + Label: "printf", + Search: "fmt.Printf", + }, + }, + Title: "fmt usage", + }, + { + Description: "gitserver exec & close usage", + Series: []*schema.InsightSeries{ + { + Label: "exec", + Search: "gitserver.Exec", + }, + { + Label: "close", + Search: "gitserver.Close", + }, + }, + Title: "gitserver usage", + }, + }}, + nil, + }), + }, + } + for _, tst := range tests { + t.Run(tst.name, func(t *testing.T) { + got, err := 
parseUserSettings(tst.input) + tst.want.Equal(t, [2]interface{}{got, err}) + }) + } + +} diff --git a/enterprise/internal/insights/discovery/gen.go b/enterprise/internal/insights/discovery/gen.go new file mode 100644 index 000000000000..f1fc8a65bbdf --- /dev/null +++ b/enterprise/internal/insights/discovery/gen.go @@ -0,0 +1,4 @@ +package discovery + +//go:generate env GOBIN=$PWD/.bin GO111MODULE=on go install github.com/efritz/go-mockgen +//go:generate $PWD/.bin/go-mockgen -f github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery -i SettingStore -o mock_setting_store.go diff --git a/enterprise/internal/insights/discovery/mock_setting_store.go b/enterprise/internal/insights/discovery/mock_setting_store.go new file mode 100644 index 000000000000..93ff14d05a72 --- /dev/null +++ b/enterprise/internal/insights/discovery/mock_setting_store.go @@ -0,0 +1,151 @@ +// Code generated by github.com/efritz/go-mockgen 0.1.0; DO NOT EDIT. + +package discovery + +import ( + "context" + api "github.com/sourcegraph/sourcegraph/internal/api" + "sync" +) + +// MockSettingStore is a mock implementation of the SettingStore interface +// (from the package +// github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery) +// used for unit testing. +type MockSettingStore struct { + // GetLatestFunc is an instance of a mock function object controlling + // the behavior of the method GetLatest. + GetLatestFunc *SettingStoreGetLatestFunc +} + +// NewMockSettingStore creates a new mock of the SettingStore interface. All +// methods return zero values for all results, unless overwritten. +func NewMockSettingStore() *MockSettingStore { + return &MockSettingStore{ + GetLatestFunc: &SettingStoreGetLatestFunc{ + defaultHook: func(context.Context, api.SettingsSubject) (*api.Settings, error) { + return nil, nil + }, + }, + } +} + +// NewMockSettingStoreFrom creates a new mock of the MockSettingStore +// interface. 
All methods delegate to the given implementation, unless +// overwritten. +func NewMockSettingStoreFrom(i SettingStore) *MockSettingStore { + return &MockSettingStore{ + GetLatestFunc: &SettingStoreGetLatestFunc{ + defaultHook: i.GetLatest, + }, + } +} + +// SettingStoreGetLatestFunc describes the behavior when the GetLatest +// method of the parent MockSettingStore instance is invoked. +type SettingStoreGetLatestFunc struct { + defaultHook func(context.Context, api.SettingsSubject) (*api.Settings, error) + hooks []func(context.Context, api.SettingsSubject) (*api.Settings, error) + history []SettingStoreGetLatestFuncCall + mutex sync.Mutex +} + +// GetLatest delegates to the next hook function in the queue and stores the +// parameter and result values of this invocation. +func (m *MockSettingStore) GetLatest(v0 context.Context, v1 api.SettingsSubject) (*api.Settings, error) { + r0, r1 := m.GetLatestFunc.nextHook()(v0, v1) + m.GetLatestFunc.appendCall(SettingStoreGetLatestFuncCall{v0, v1, r0, r1}) + return r0, r1 +} + +// SetDefaultHook sets function that is called when the GetLatest method of +// the parent MockSettingStore instance is invoked and the hook queue is +// empty. +func (f *SettingStoreGetLatestFunc) SetDefaultHook(hook func(context.Context, api.SettingsSubject) (*api.Settings, error)) { + f.defaultHook = hook +} + +// PushHook adds a function to the end of hook queue. Each invocation of the +// GetLatest method of the parent MockSettingStore instance invokes the hook +// at the front of the queue and discards it. After the queue is empty, the +// default hook function is invoked for any future action. +func (f *SettingStoreGetLatestFunc) PushHook(hook func(context.Context, api.SettingsSubject) (*api.Settings, error)) { + f.mutex.Lock() + f.hooks = append(f.hooks, hook) + f.mutex.Unlock() +} + +// SetDefaultReturn calls SetDefaultDefaultHook with a function that returns +// the given values. 
+func (f *SettingStoreGetLatestFunc) SetDefaultReturn(r0 *api.Settings, r1 error) { + f.SetDefaultHook(func(context.Context, api.SettingsSubject) (*api.Settings, error) { + return r0, r1 + }) +} + +// PushReturn calls PushDefaultHook with a function that returns the given +// values. +func (f *SettingStoreGetLatestFunc) PushReturn(r0 *api.Settings, r1 error) { + f.PushHook(func(context.Context, api.SettingsSubject) (*api.Settings, error) { + return r0, r1 + }) +} + +func (f *SettingStoreGetLatestFunc) nextHook() func(context.Context, api.SettingsSubject) (*api.Settings, error) { + f.mutex.Lock() + defer f.mutex.Unlock() + + if len(f.hooks) == 0 { + return f.defaultHook + } + + hook := f.hooks[0] + f.hooks = f.hooks[1:] + return hook +} + +func (f *SettingStoreGetLatestFunc) appendCall(r0 SettingStoreGetLatestFuncCall) { + f.mutex.Lock() + f.history = append(f.history, r0) + f.mutex.Unlock() +} + +// History returns a sequence of SettingStoreGetLatestFuncCall objects +// describing the invocations of this function. +func (f *SettingStoreGetLatestFunc) History() []SettingStoreGetLatestFuncCall { + f.mutex.Lock() + history := make([]SettingStoreGetLatestFuncCall, len(f.history)) + copy(history, f.history) + f.mutex.Unlock() + + return history +} + +// SettingStoreGetLatestFuncCall is an object that describes an invocation +// of method GetLatest on an instance of MockSettingStore. +type SettingStoreGetLatestFuncCall struct { + // Arg0 is the value of the 1st argument passed to this method + // invocation. + Arg0 context.Context + // Arg1 is the value of the 2nd argument passed to this method + // invocation. + Arg1 api.SettingsSubject + // Result0 is the value of the 1st result returned from this method + // invocation. + Result0 *api.Settings + // Result1 is the value of the 2nd result returned from this method + // invocation. + Result1 error +} + +// Args returns an interface slice containing the arguments of this +// invocation. 
+func (c SettingStoreGetLatestFuncCall) Args() []interface{} { + return []interface{}{c.Arg0, c.Arg1} +} + +// Results returns an interface slice containing the results of this +// invocation. +func (c SettingStoreGetLatestFuncCall) Results() []interface{} { + return []interface{}{c.Result0, c.Result1} +} From c0b23b3fb4c1414cef2547e5ee3c6d919f9188b9 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 18:07:09 -0700 Subject: [PATCH 75/78] insights: resolvers: use the new discovery package Signed-off-by: Stephen Gutekanst --- .../resolvers/insight_connection_resolver.go | 39 +--------- .../insight_connection_resolver_test.go | 78 ++----------------- .../resolvers/insight_series_resolver_test.go | 14 ++-- 3 files changed, 16 insertions(+), 115 deletions(-) diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver.go b/enterprise/internal/insights/resolvers/insight_connection_resolver.go index 95460679e83a..e1df4f488193 100644 --- a/enterprise/internal/insights/resolvers/insight_connection_resolver.go +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver.go @@ -7,10 +7,8 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" - "github.com/sourcegraph/sourcegraph/internal/api" - "github.com/sourcegraph/sourcegraph/internal/database" - "github.com/sourcegraph/sourcegraph/internal/jsonc" "github.com/sourcegraph/sourcegraph/schema" ) @@ -18,11 +16,7 @@ var _ graphqlbackend.InsightConnectionResolver = &insightConnectionResolver{} type insightConnectionResolver struct { store store.Interface - settingStore *database.SettingStore - - // We use our own mock here because database.Mocks.Settings.GetLatest is a global which means - // we could not run our tests in parallel. 
- mocksSettingsGetLatest func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) + settingStore discovery.SettingStore // cache results because they are used by multiple fields once sync.Once @@ -61,38 +55,11 @@ func (r *insightConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil. func (r *insightConnectionResolver) compute(ctx context.Context) ([]*schema.Insight, int64, error) { r.once.Do(func() { - settingsGetLatest := r.settingStore.GetLatest - if r.mocksSettingsGetLatest != nil { - settingsGetLatest = r.mocksSettingsGetLatest - } - - // Get latest Global user settings. - // - // FUTURE: include user/org settings. - subject := api.SettingsSubject{Site: true} - globalSettingsRaw, err := settingsGetLatest(ctx, subject) - if err != nil { - r.err = err - return - } - globalSettings, err := parseUserSettings(globalSettingsRaw) - r.insights = globalSettings.Insights + r.insights, r.err = discovery.Discover(ctx, r.settingStore) }) return r.insights, r.next, r.err } -func parseUserSettings(settings *api.Settings) (*schema.Settings, error) { - if settings == nil { - // Settings have never been saved for this subject; equivalent to `{}`. - return &schema.Settings{}, nil - } - var v schema.Settings - if err := jsonc.Unmarshal(settings.Contents, &v); err != nil { - return nil, err - } - return &v, nil -} - // InsightResolver is also defined here as it is covered by the same tests. 
var _ graphqlbackend.InsightResolver = &insightResolver{} diff --git a/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go b/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go index 81b06477474d..d2046aa9ba3c 100644 --- a/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go +++ b/enterprise/internal/insights/resolvers/insight_connection_resolver_test.go @@ -11,9 +11,9 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" "github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil" insightsdbtesting "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/dbtesting" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery" "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/database/dbtesting" - "github.com/sourcegraph/sourcegraph/schema" ) // Note: You can `go test ./resolvers -update` to update the expected `want` values in these tests. @@ -75,12 +75,11 @@ func TestResolver_InsightConnection(t *testing.T) { if err != nil { t.Fatal(err) } - conn.(*insightConnectionResolver).mocksSettingsGetLatest = func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) { - if !subject.Site { // TODO: future: site is an extremely poor name for "global settings", we should change this. - t.Fatal("expected only to request settings from global user settings") - } - return testRealGlobalSettings, nil - } + + // Mock the setting store to return the desired settings. 
+ settingStore := discovery.NewMockSettingStore() + conn.(*insightConnectionResolver).settingStore = settingStore + settingStore.GetLatestFunc.SetDefaultReturn(testRealGlobalSettings, nil) return ctx, conn } @@ -129,68 +128,3 @@ func TestResolver_InsightConnection(t *testing.T) { autogold.Want("second insight: series length", int(2)).Equal(t, len(nodes[1].Series())) }) } - -func Test_parseUserSettings(t *testing.T) { - tests := []struct { - name string - input *api.Settings - want autogold.Value - }{ - { - name: "nil", - input: nil, - want: autogold.Want("nil", [2]interface{}{&schema.Settings{}, nil}), - }, - { - name: "empty", - input: &api.Settings{ - Contents: "{}", - }, - want: autogold.Want("empty", [2]interface{}{&schema.Settings{}, nil}), - }, - { - name: "real", - input: testRealGlobalSettings, - want: autogold.Want("real", [2]interface{}{ - &schema.Settings{Insights: []*schema.Insight{ - { - Description: "fmt.Errorf/fmt.Printf usage", - Series: []*schema.InsightSeries{ - { - Label: "fmt.Errorf", - Search: "errorf", - }, - { - Label: "printf", - Search: "fmt.Printf", - }, - }, - Title: "fmt usage", - }, - { - Description: "gitserver exec & close usage", - Series: []*schema.InsightSeries{ - { - Label: "exec", - Search: "gitserver.Exec", - }, - { - Label: "close", - Search: "gitserver.Close", - }, - }, - Title: "gitserver usage", - }, - }}, - nil, - }), - }, - } - for _, tst := range tests { - t.Run(tst.name, func(t *testing.T) { - got, err := parseUserSettings(tst.input) - tst.want.Equal(t, [2]interface{}{got, err}) - }) - } - -} diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go index 2c143b9be85b..9831bcfa71bb 100644 --- a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go +++ b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go @@ -11,8 +11,8 @@ import ( "github.com/sourcegraph/sourcegraph/cmd/frontend/backend" 
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend" insightsdbtesting "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/dbtesting" + "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" - "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/database/dbtesting" ) @@ -43,12 +43,12 @@ func TestResolver_InsightSeries(t *testing.T) { cleanup() t.Fatal(err) } - conn.(*insightConnectionResolver).mocksSettingsGetLatest = func(ctx context.Context, subject api.SettingsSubject) (*api.Settings, error) { - if !subject.Site { // TODO: future: site is an extremely poor name for "global settings", we should change this. - t.Fatal("expected only to request settings from global user settings") - } - return testRealGlobalSettings, nil - } + + // Mock the setting store to return the desired settings. + settingStore := discovery.NewMockSettingStore() + conn.(*insightConnectionResolver).settingStore = settingStore + settingStore.GetLatestFunc.SetDefaultReturn(testRealGlobalSettings, nil) + nodes, err := conn.Nodes(ctx) if err != nil { cleanup() From bd4f45547a21a00127eec2e317f3f1ccbaebd7dc Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 18:12:29 -0700 Subject: [PATCH 76/78] gofmt Signed-off-by: Stephen Gutekanst --- .../internal/insights/discovery/discovery_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/enterprise/internal/insights/discovery/discovery_test.go b/enterprise/internal/insights/discovery/discovery_test.go index e620ebdfd43d..ecce4e69e5c9 100644 --- a/enterprise/internal/insights/discovery/discovery_test.go +++ b/enterprise/internal/insights/discovery/discovery_test.go @@ -58,28 +58,28 @@ func TestDiscover(t *testing.T) { t.Fatal(err) } autogold.Want("insights", []*schema.Insight{ - &schema.Insight{ + { Description: "fmt.Errorf/fmt.Printf usage", 
Series: []*schema.InsightSeries{ - &schema.InsightSeries{ + { Label: "fmt.Errorf", Search: "errorf", }, - &schema.InsightSeries{ + { Label: "printf", Search: "fmt.Printf", }, }, Title: "fmt usage", }, - &schema.Insight{ + { Description: "gitserver exec & close usage", Series: []*schema.InsightSeries{ - &schema.InsightSeries{ + { Label: "exec", Search: "gitserver.Exec", }, - &schema.InsightSeries{ + { Label: "close", Search: "gitserver.Close", }, From 066fbcf70f979d78693b5632cfb90c95d9233579 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 15 Feb 2021 19:54:32 -0700 Subject: [PATCH 77/78] fix merge conflicts Signed-off-by: Stephen Gutekanst --- .../insights/background/background.go | 10 - .../internal/insights/background/graphql.go | 102 ----------- .../internal/insights/background/metrics.go | 48 ----- .../insights/background/query_runner.go | 75 -------- .../internal/insights/background/workers.go | 171 ------------------ .../insights/background/workers_test.go | 3 - 6 files changed, 409 deletions(-) delete mode 100644 enterprise/internal/insights/background/graphql.go delete mode 100644 enterprise/internal/insights/background/metrics.go delete mode 100644 enterprise/internal/insights/background/query_runner.go delete mode 100644 enterprise/internal/insights/background/workers.go delete mode 100644 enterprise/internal/insights/background/workers_test.go diff --git a/enterprise/internal/insights/background/background.go b/enterprise/internal/insights/background/background.go index 0602bb9c871a..02d92c95ae60 100644 --- a/enterprise/internal/insights/background/background.go +++ b/enterprise/internal/insights/background/background.go @@ -10,15 +10,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights" -<<<<<<< HEAD - "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" - "github.com/sourcegraph/sourcegraph/internal/goroutine" - 
"github.com/sourcegraph/sourcegraph/internal/observation" - "github.com/sourcegraph/sourcegraph/internal/trace" -) - -func StartBackgroundJobs(ctx context.Context, db *sql.DB) { -======= "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/background/queryrunner" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" "github.com/sourcegraph/sourcegraph/internal/database/basestore" @@ -36,7 +27,6 @@ func StartBackgroundJobs(ctx context.Context, mainAppDB *sql.DB) { return } ->>>>>>> origin/main // Create a connection to TimescaleDB, so we can record results. timescale, err := insights.InitializeCodeInsightsDB() if err != nil { diff --git a/enterprise/internal/insights/background/graphql.go b/enterprise/internal/insights/background/graphql.go deleted file mode 100644 index 4ae31cf2e10f..000000000000 --- a/enterprise/internal/insights/background/graphql.go +++ /dev/null @@ -1,102 +0,0 @@ -package background - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/sourcegraph/sourcegraph/internal/api" - - "golang.org/x/net/context/ctxhttp" - - "github.com/pkg/errors" -) - -type graphQLQuery struct { - Query string `json:"query"` - Variables interface{} `json:"variables"` -} - -const gqlSearchQuery = `query Search( - $query: String!, -) { - search(query: $query, ) { - results { - limitHit - cloning { name } - missing { name } - timedout { name } - matchCount - alert { - title - description - } - } - } -}` - -type gqlSearchVars struct { - Query string `json:"query"` -} - -type gqlSearchResponse struct { - Data struct { - Search struct { - Results struct { - LimitHit bool - Cloning []*api.Repo - Missing []*api.Repo - Timedout []*api.Repo - MatchCount int - Alert struct { - Title string - Description string - } - } - } - } - Errors []interface{} -} - -func search(ctx context.Context, query string) (*gqlSearchResponse, error) { - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(graphQLQuery{ - Query: 
gqlSearchQuery, - Variables: gqlSearchVars{Query: query}, - }) - if err != nil { - return nil, errors.Wrap(err, "Encode") - } - - url, err := gqlURL("Search") - if err != nil { - return nil, errors.Wrap(err, "constructing frontend URL") - } - - resp, err := ctxhttp.Post(ctx, nil, url, "application/json", &buf) - if err != nil { - return nil, errors.Wrap(err, "Post") - } - defer resp.Body.Close() - - var res *gqlSearchResponse - if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { - return nil, errors.Wrap(err, "Decode") - } - if len(res.Errors) > 0 { - return res, fmt.Errorf("graphql: errors: %v", res.Errors) - } - return res, nil -} - -func gqlURL(queryName string) (string, error) { - u, err := url.Parse(api.InternalClient.URL) - if err != nil { - return "", err - } - u.Path = "/.internal/graphql" - u.RawQuery = queryName - return u.String(), nil -} diff --git a/enterprise/internal/insights/background/metrics.go b/enterprise/internal/insights/background/metrics.go deleted file mode 100644 index 5abe53feda75..000000000000 --- a/enterprise/internal/insights/background/metrics.go +++ /dev/null @@ -1,48 +0,0 @@ -package background - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/sourcegraph/sourcegraph/internal/observation" - "github.com/sourcegraph/sourcegraph/internal/workerutil" - "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" -) - -// metrics describes all Prometheus metrics to be recorded during the background execution of -// workers. -type metrics struct { - // workerMetrics records worker operations & number of jobs. - workerMetrics workerutil.WorkerMetrics - - // resetterMetrics records the number of jobs that got reset because workers timed out / took - // too long. 
- resetterMetrics dbworker.ResetterMetrics -} - -func newMetrics(observationContext *observation.Context) *metrics { - workerResets := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "src_insights_worker_resets_total", - Help: "The number of times work took too long and was reset for retry later.", - }) - observationContext.Registerer.MustRegister(workerResets) - - workerResetFailures := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "src_insights_worker_reset_failures_total", - Help: "The number of times work took too long so many times that retries will no longer happen.", - }) - observationContext.Registerer.MustRegister(workerResetFailures) - - workerErrors := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "src_insights_worker_errors_total", - Help: "The number of errors that occurred during a worker job.", - }) - - return &metrics{ - workerMetrics: workerutil.NewMetrics(observationContext, "insights", nil), - resetterMetrics: dbworker.ResetterMetrics{ - RecordResets: workerResets, - RecordResetFailures: workerResetFailures, - Errors: workerErrors, - }, - } -} diff --git a/enterprise/internal/insights/background/query_runner.go b/enterprise/internal/insights/background/query_runner.go deleted file mode 100644 index 5917244bb9e7..000000000000 --- a/enterprise/internal/insights/background/query_runner.go +++ /dev/null @@ -1,75 +0,0 @@ -package background - -import ( - "context" - - "github.com/inconshreveable/log15" - - "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" - "github.com/sourcegraph/sourcegraph/internal/workerutil" - "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" - dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" -) - -var _ dbworker.Handler = &queryRunner{} - -// queryRunner implements the dbworker.Handler interface by executing search queries and inserting -// insights about them to the insights database. 
-type queryRunner struct { - workerStore *store.Store // TODO(slimsag): should not create in TimescaleDB - insightsStore *store.Store -} - -func (r *queryRunner) Handle(ctx context.Context, workerStore dbworkerstore.Store, record workerutil.Record) (err error) { - defer func() { - if err != nil { - log15.Error("insights.queryRunner.Handle", "error", err) - } - }() - - s := r.workerStore.With(workerStore) - - // TODO(slimsag): get query from work queue similar to below: - var q = struct { - ID int - }{} - newQuery := "errorf" - /* - var q *cm.MonitorQuery - q, err = s.GetQueryByRecordID(ctx, record.RecordID()) - if err != nil { - return err - } - */ - - // Search. - var results *gqlSearchResponse - results, err = search(ctx, newQuery) - if err != nil { - return err - } - var matchCount int - if results != nil { - matchCount = results.Data.Search.Results.MatchCount - } - // TODO(slimsag): record result count to insights DB - - // TODO(slimsag): implement equivilent? - _ = s - _ = matchCount - _ = q - /* - // Log next_run and latest_result to table cm_queries. - newLatestResult := latestResultTime(q.LatestResult, results, err) - err = s.SetTriggerQueryNextRun(ctx, q.Id, s.Clock()().Add(5*time.Minute), newLatestResult.UTC()) - if err != nil { - return err - } - // Log the actual query we ran and whether we got any new results. 
- err = s.LogSearch(ctx, newQuery, numResults, record.RecordID()) - if err != nil { - return fmt.Errorf("LogSearch: %w", err) - } - */ - return nil -} diff --git a/enterprise/internal/insights/background/workers.go b/enterprise/internal/insights/background/workers.go deleted file mode 100644 index b77e2e767403..000000000000 --- a/enterprise/internal/insights/background/workers.go +++ /dev/null @@ -1,171 +0,0 @@ -package background - -import ( - "context" - "database/sql" - "time" - - "github.com/keegancsmith/sqlf" - - "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store" - "github.com/sourcegraph/sourcegraph/internal/database/basestore" - "github.com/sourcegraph/sourcegraph/internal/goroutine" - "github.com/sourcegraph/sourcegraph/internal/workerutil" - "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" - dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" -) - -// newInsightEnqueuer returns a background goroutine which will periodically find all of the search -// and webhook insights across all user settings, and enqueue work for the query runner and webhook -// runner workers to perform. -func newInsightEnqueuer(ctx context.Context, store *store.Store) goroutine.BackgroundRoutine { - // TODO: 1 minute may be too slow? hmm - return goroutine.NewPeriodicGoroutine(ctx, 1*time.Minute, goroutine.NewHandlerWithErrorMessage( - "insights_enqueuer", - func(ctx context.Context) error { - // TODO: needs metrics - // TODO: similar to EnqueueTriggerQueries, actually enqueue work - return nil - }, - )) -} - -// newQueryRunner returns a worker that will execute search queries and insert information about -// the results into the code insights database. 
-// -// TODO(slimsag): needs main app DB for settings discovery -func newQueryRunner(ctx context.Context, insightsStore *store.Store, metrics *metrics) *workerutil.Worker { - workerStore := createDBWorkerStoreForInsightsJobs(insightsStore) // TODO(slimsag): should not create in TimescaleDB - options := workerutil.WorkerOptions{ - Name: "insights_query_runner_worker", - NumHandlers: 1, - Interval: 5 * time.Second, - Metrics: metrics.workerMetrics, - } - worker := dbworker.NewWorker(ctx, workerStore, &queryRunner{ - workerStore: insightsStore, // TODO(slimsag): should not create in TimescaleDB - insightsStore: insightsStore, - }, options) - return worker -} - -// newQueryRunnerResetter returns a worker that will reset pending query runner jobs if they take -// too long to complete. -func newQueryRunnerResetter(ctx context.Context, s *store.Store, metrics *metrics) *dbworker.Resetter { - workerStore := createDBWorkerStoreForInsightsJobs(s) // TODO(slimsag): should not create in TimescaleDB - options := dbworker.ResetterOptions{ - Name: "code_insights_trigger_jobs_worker_resetter", - Interval: 1 * time.Minute, - Metrics: metrics.resetterMetrics, - } - return dbworker.NewResetter(workerStore, options) -} - -func createDBWorkerStoreForInsightsJobs(s *store.Store) dbworkerstore.Store { - return dbworkerstore.New(s.Handle(), dbworkerstore.Options{ - Name: "insights_trigger_jobs_worker_store", - TableName: "insights_trigger_jobs", - // TODO(slimsag): table names - ColumnExpressions: InsightsJobsColumns, - Scan: ScanInsightsJobs, - - // We will let a search query or webhook run for up to 60s. After that, it times out and - // retries in 10s. If 3 timeouts occur, it is not retried. - StalledMaxAge: 60 * time.Second, - RetryAfter: 10 * time.Second, - MaxNumRetries: 3, - OrderByExpression: sqlf.Sprintf("id"), - }) -} - -// TODO(slimsag): move to a insights/dbworkerstore package? - -type InsightsJobs struct { - // TODO(slimsag): all these columns are wrong. 
- Id int - Query int64 - - // The query we ran including after: filter. - QueryString *string - - // Whether we got any results. - Results *bool - NumResults *int - - // Fields demanded for any dbworker. - State string - FailureMessage *string - StartedAt *time.Time - FinishedAt *time.Time - ProcessAfter *time.Time - NumResets int32 - NumFailures int32 - LogContents *string -} - -func (r *InsightsJobs) RecordID() int { - return r.Id -} - -func ScanInsightsJobs(rows *sql.Rows, err error) (workerutil.Record, bool, error) { - records, err := scanInsightsJobs(rows, err) - if err != nil { - return &InsightsJobs{}, false, err - } - return records[0], true, nil -} - -func scanInsightsJobs(rows *sql.Rows, err error) ([]*InsightsJobs, error) { - if err != nil { - return nil, err - } - defer func() { err = basestore.CloseRows(rows, err) }() - var ms []*InsightsJobs - for rows.Next() { - m := &InsightsJobs{} - if err := rows.Scan( - // TODO(slimsag): all these columns are wrong. - &m.Id, - &m.Query, - &m.QueryString, - &m.Results, - &m.NumResults, - &m.State, - &m.FailureMessage, - &m.StartedAt, - &m.FinishedAt, - &m.ProcessAfter, - &m.NumResets, - &m.NumFailures, - &m.LogContents, - ); err != nil { - return nil, err - } - ms = append(ms, m) - } - if err != nil { - return nil, err - } - // Rows.Err will report the last error encountered by Rows.Scan. - if err := rows.Err(); err != nil { - return nil, err - } - return ms, nil -} - -var InsightsJobsColumns = []*sqlf.Query{ - // TODO(slimsag): all these columns are wrong. 
- sqlf.Sprintf("cm_trigger_jobs.id"), - sqlf.Sprintf("cm_trigger_jobs.query"), - sqlf.Sprintf("cm_trigger_jobs.query_string"), - sqlf.Sprintf("cm_trigger_jobs.results"), - sqlf.Sprintf("cm_trigger_jobs.num_results"), - sqlf.Sprintf("cm_trigger_jobs.state"), - sqlf.Sprintf("cm_trigger_jobs.failure_message"), - sqlf.Sprintf("cm_trigger_jobs.started_at"), - sqlf.Sprintf("cm_trigger_jobs.finished_at"), - sqlf.Sprintf("cm_trigger_jobs.process_after"), - sqlf.Sprintf("cm_trigger_jobs.num_resets"), - sqlf.Sprintf("cm_trigger_jobs.num_failures"), - sqlf.Sprintf("cm_trigger_jobs.log_contents"), -} diff --git a/enterprise/internal/insights/background/workers_test.go b/enterprise/internal/insights/background/workers_test.go deleted file mode 100644 index 2f7b889258cc..000000000000 --- a/enterprise/internal/insights/background/workers_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package background - -// TODO(slimsag) From b0073088c1700f14931a249607146c8c2e8b1fff Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Wed, 17 Feb 2021 17:50:07 -0700 Subject: [PATCH 78/78] Update README.codeinsights.md --- README.codeinsights.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.codeinsights.md b/README.codeinsights.md index fd31f4609d2e..441cceec8168 100644 --- a/README.codeinsights.md +++ b/README.codeinsights.md @@ -202,3 +202,17 @@ GROUP BY value, bucket; Note: This is not optimized, we can use materialized views to do continuous aggregation. See https://docs.timescale.com/latest/using-timescaledb/continuous-aggregates + +## Why aren't insights being recorded? + +Find insights background worker logs: + +``` +kubectl --namespace=prod logs repo-updater-76df6f4646-q92nx repo-updater | grep insights +``` + +## Get a psql prompt (Kubernetes) + +``` +kubectl -n prod exec -it codeinsights-db-5f5977f74d-8q9nl -- psql -U postgres +```