From e3a4e2194fea6625bf6dbfe3753e8e9676de2376 Mon Sep 17 00:00:00 2001
From: Stephen Gutekanst
Date: Fri, 12 Feb 2021 16:34:15 -0700
Subject: [PATCH 1/9] insights: store: query metadata & other minor improvements

* Query metadata for points.
* Improve formatting of test data.
* Change incorrect `SeriesID *int32` to `SeriesID *string`
* Add TODOs for improved filtering abilities in the future.

Signed-off-by: Stephen Gutekanst
---
 enterprise/internal/insights/store/store.go | 27 +++++++++++++++----
 .../internal/insights/store/store_test.go | 15 +++++------
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go
index 6662d77c0e61..fbfc395d8248 100644
--- a/enterprise/internal/insights/store/store.go
+++ b/enterprise/internal/insights/store/store.go
@@ -55,15 +55,27 @@ func (s *Store) With(other basestore.ShareableStore) *Store {
 var _ Interface = &Store{}

 // SeriesPoint describes a single insights' series data point.
+//
+// Some fields that could be queried (series ID, repo ID/names) are omitted as they are primarily
+// only useful for filtering the data you get back, and would inflate the data size considerably
+// otherwise.
 type SeriesPoint struct {
-	Time  time.Time
-	Value float64
+	Time     time.Time
+	Value    float64
+	Metadata []byte
+}
+
+func (s *SeriesPoint) String() string {
+	return fmt.Sprintf("SeriesPoint{Time: %q, Value: %v, Metadata: %s}", s.Time, s.Value, s.Metadata)
 }

 // SeriesPointsOpts describes options for querying insights' series data points.
 type SeriesPointsOpts struct {
 	// SeriesID is the unique series ID to query, if non-nil.
-	SeriesID *int32
+	SeriesID *string
+
+	// TODO(slimsag): Add ability to filter based on repo ID, name, original name.
+	// TODO(slimsag): Add ability to do limited filtering based on metadata.

 	// Time ranges to query from/to, if non-nil.
 	From, To *time.Time
@@ -80,6 +92,7 @@ func (s *Store) SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]Seri
 		err := sc.Scan(
 			&point.Time,
 			&point.Value,
+			&point.Metadata,
 		)
 		if err != nil {
 			return err
@@ -91,8 +104,12 @@ func (s *Store) SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]Seri
 }

 var seriesPointsQueryFmtstr = `
--- source: enterprise/internal/insights/store/series_points.go
-SELECT time, value FROM series_points
+-- source: enterprise/internal/insights/store/store.go:SeriesPoints
+SELECT time,
+       value,
+       m.metadata
+FROM series_points p
+INNER JOIN metadata m ON p.metadata_id = m.id
 WHERE %s
 ORDER BY time DESC
 `

diff --git a/enterprise/internal/insights/store/store_test.go b/enterprise/internal/insights/store/store_test.go
index bff028c047cb..814d7b1cad24 100644
--- a/enterprise/internal/insights/store/store_test.go
+++ b/enterprise/internal/insights/store/store_test.go
@@ -2,7 +2,6 @@ package store

 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"

@@ -73,8 +72,8 @@ SELECT time,
 			t.Fatal(err)
 		}
 		autogold.Want("SeriesPoints(2).len", int(913)).Equal(t, len(points))
-		autogold.Want("SeriesPoints(2)[len()-1]", "{Time:2020-01-01 00:00:00 +0000 UTC Value:-20.00716650672132}").Equal(t, fmt.Sprintf("%+v", points[len(points)-1]))
-		autogold.Want("SeriesPoints(2)[0]", "{Time:2020-06-01 00:00:00 +0000 UTC Value:-37.8750440811433}").Equal(t, fmt.Sprintf("%+v", points[0]))
+		autogold.Want("SeriesPoints(2)[len()-1].String()", `SeriesPoint{Time: "2020-01-01 00:00:00 +0000 UTC", Value: -20.00716650672132, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[len(points)-1].String())
+		autogold.Want("SeriesPoints(2)[0].String()", `SeriesPoint{Time: "2020-06-01 00:00:00 +0000 UTC", Value: -37.8750440811433, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[0].String())
 	})

 	t.Run("subset of data", func(t *testing.T) {
@@ -87,8 +86,8 @@ SELECT time,
 			t.Fatal(err)
 		}
 		autogold.Want("SeriesPoints(3).len", int(551)).Equal(t, len(points))
-		autogold.Want("SeriesPoints(3)[0]", "{Time:2020-05-31 20:00:00 +0000 UTC Value:-11.269436460802638}").Equal(t, fmt.Sprintf("%+v", points[0]))
-		autogold.Want("SeriesPoints(3)[len()-1]", "{Time:2020-03-01 04:00:00 +0000 UTC Value:35.85710033014749}").Equal(t, fmt.Sprintf("%+v", points[len(points)-1]))
+		autogold.Want("SeriesPoints(3)[0].String()", `SeriesPoint{Time: "2020-05-31 20:00:00 +0000 UTC", Value: -11.269436460802638, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[0].String())
+		autogold.Want("SeriesPoints(3)[len()-1].String()", `SeriesPoint{Time: "2020-03-01 04:00:00 +0000 UTC", Value: 35.85710033014749, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[len(points)-1].String())
 	})

 	t.Run("latest 3 points", func(t *testing.T) {
@@ -100,9 +99,9 @@ SELECT time,
 			t.Fatal(err)
 		}
 		autogold.Want("SeriesPoints(4).len", int(3)).Equal(t, len(points))
-		autogold.Want("SeriesPoints(4)[0]", "{Time:2020-06-01 00:00:00 +0000 UTC Value:-37.8750440811433}").Equal(t, fmt.Sprintf("%+v", points[0]))
-		autogold.Want("SeriesPoints(4)[1]", "{Time:2020-05-31 20:00:00 +0000 UTC Value:-11.269436460802638}").Equal(t, fmt.Sprintf("%+v", points[1]))
-		autogold.Want("SeriesPoints(4)[2]", "{Time:2020-05-31 16:00:00 +0000 UTC Value:17.838503552871998}").Equal(t, fmt.Sprintf("%+v", points[2]))
"world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[0].String()) + autogold.Want("SeriesPoints(4)[1].String()", `SeriesPoint{Time: "2020-05-31 20:00:00 +0000 UTC", Value: -11.269436460802638, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[1].String()) + autogold.Want("SeriesPoints(4)[2].String()", `SeriesPoint{Time: "2020-05-31 16:00:00 +0000 UTC", Value: 17.838503552871998, Metadata: {"hello": "world", "languages": ["Go", "Python", "Java"]}}`).Equal(t, points[2].String()) }) } From 139e77d30675831eb05135c7b41038459b5bedef Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 16:53:54 -0700 Subject: [PATCH 2/9] update resolver data Signed-off-by: Stephen Gutekanst --- .../internal/insights/resolvers/insight_series_resolver_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go index 2c143b9be85b..b8100f9cfafe 100644 --- a/enterprise/internal/insights/resolvers/insight_series_resolver_test.go +++ b/enterprise/internal/insights/resolvers/insight_series_resolver_test.go @@ -110,6 +110,6 @@ func TestResolver_InsightSeries(t *testing.T) { if err != nil { t.Fatal(err) } - autogold.Want("insights[0][0].Points mocked", "[{p:{Time:{wall:0 ext:63271811045 loc:} Value:1}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:2}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:3}}]").Equal(t, fmt.Sprintf("%+v", points)) + autogold.Want("insights[0][0].Points mocked", "[{p:{Time:{wall:0 ext:63271811045 loc:} Value:1 Metadata:[]}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:2 Metadata:[]}} {p:{Time:{wall:0 ext:63271811045 loc:} Value:3 Metadata:[]}}]").Equal(t, fmt.Sprintf("%+v", points)) }) } From 70c85f817a68fcda60c5e772f57f4bfd4dca1cf7 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 16:52:31 -0700 Subject: [PATCH 3/9] insights: store: add support for recording data points This PR adds support for the store to record data points. Stacked on top of #18254 Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store.go | 117 ++++++++++++++++++ .../internal/insights/store/store_test.go | 56 +++++++++ 2 files changed, 173 insertions(+) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index fbfc395d8248..8b5e7ace8b36 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -3,11 +3,14 @@ package store import ( "context" "database/sql" + "encoding/json" "fmt" "time" "github.com/keegancsmith/sqlf" + "github.com/pkg/errors" + "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/database/basestore" "github.com/sourcegraph/sourcegraph/internal/database/dbutil" "github.com/sourcegraph/sourcegraph/internal/timeutil" @@ -17,6 +20,7 @@ import ( // for actual API usage. type Interface interface { SeriesPoints(ctx context.Context, opts SeriesPointsOpts) ([]SeriesPoint, error) + RecordSeriesPoint(ctx context.Context, v RecordSeriesPointArgs) error } var _ Interface = &Store{} @@ -140,6 +144,119 @@ func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { ) } +// RecordSeriesPointArgs describes arguments for the RecordSeriesPoint method. +type RecordSeriesPointArgs struct { + // SeriesID is the unique series ID to query. It should describe the series of data uniquely, + // but is not a DB table primary key ID. 
+	SeriesID string
+
+	// Point is the actual data point recorded and at what time.
+	Point SeriesPoint
+
+	// Repository name and DB ID to associate with this data point, if any.
+	//
+	// Both must be specified if one is specified.
+	RepoName *string
+	RepoID   *api.RepoID
+
+	// Metadata contains arbitrary JSON metadata to associate with the data point, if any.
+	//
+	// See the DB schema comments for intended use cases. This should generally be small,
+	// low-cardinality data to avoid inflating the table.
+	Metadata interface{}
+}
+
+// RecordSeriesPoint records a data point for the specified series ID (which is a unique ID for the
+// series, not a DB table primary key ID).
+func (s *Store) RecordSeriesPoint(ctx context.Context, v RecordSeriesPointArgs) (err error) {
+	// Start transaction.
+	var txStore *basestore.Store
+	txStore, err = s.Transact(ctx)
+	if err != nil {
+		return err
+	}
+	defer func() { err = txStore.Done(err) }()
+
+	if (v.RepoName != nil && v.RepoID == nil) || (v.RepoID != nil && v.RepoName == nil) {
+		return errors.New("RepoName and RepoID must be mutually specified")
+	}
+
+	// Upsert the repository name into a separate table, so we get a small ID we can reference
+	// many times from the series_points table without storing the repo name multiple times.
+	var repoNameID *int32
+	if v.RepoName != nil {
+		row := txStore.QueryRow(ctx, sqlf.Sprintf(upsertRepoNameFmtStr, *v.RepoName, *v.RepoName))
+		repoNameID = new(int32)
+		if err := row.Scan(repoNameID); err != nil {
+			return errors.Wrap(err, "upserting repo name ID")
+		}
+	}
+
+	// Upsert the metadata into a separate table, so we get a small ID we can reference many times
+	// from the series_points table without storing the metadata multiple times.
+	var metadataID *int32
+	if v.Metadata != nil {
+		jsonMetadata, err := json.Marshal(v.Metadata)
+		if err != nil {
+			return errors.Wrap(err, "upserting>encoding metadata")
+		}
+		row := txStore.QueryRow(ctx, sqlf.Sprintf(upsertMetadataFmtStr, jsonMetadata, jsonMetadata))
+		metadataID = new(int32)
+		if err := row.Scan(metadataID); err != nil {
+			return errors.Wrap(err, "upserting metadata ID")
+		}
+	}
+
+	// Insert the actual data point.
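+	// Note that metadataID, v.RepoID, and repoNameID may still be nil at this point; a nil
+	// pointer is written as a SQL NULL, covering points recorded without any repository or
+	// metadata association.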
+	return txStore.Exec(ctx, sqlf.Sprintf(
+		recordSeriesPointFmtstr,
+		v.SeriesID,    // series_id
+		v.Point.Time,  // time
+		v.Point.Value, // value
+		metadataID,    // metadata_id
+		v.RepoID,      // repo_id
+		repoNameID,    // repo_name_id
+		repoNameID,    // original_repo_name_id
+	))
+}
+
+const upsertRepoNameFmtStr = `
+WITH e AS(
+    INSERT INTO repo_names(name)
+    VALUES (%s)
+    ON CONFLICT DO NOTHING
+    RETURNING id
+)
+SELECT * FROM e
+UNION
+    SELECT id FROM repo_names WHERE name = %s;
+`
+
+const upsertMetadataFmtStr = `
+WITH e AS(
+    INSERT INTO metadata(metadata)
+    VALUES (%s)
+    ON CONFLICT DO NOTHING
+    RETURNING id
+)
+SELECT * FROM e
+UNION
+    SELECT id FROM metadata WHERE metadata = %s;
+`
+
+const recordSeriesPointFmtstr = `
+-- source: enterprise/internal/insights/store/store.go:RecordSeriesPoint
+INSERT INTO series_points(
+    series_id,
+    time,
+    value,
+    metadata_id,
+    repo_id,
+    repo_name_id,
+    original_repo_name_id)
+VALUES (%s, %s, %s, %s, %s, %s, %s);
+`
+
 func (s *Store) query(ctx context.Context, q *sqlf.Query, sc scanFunc) error {
 	rows, err := s.Store.Query(ctx, q)
 	if err != nil {
diff --git a/enterprise/internal/insights/store/store_test.go b/enterprise/internal/insights/store/store_test.go
index 814d7b1cad24..a4612515f0d6 100644
--- a/enterprise/internal/insights/store/store_test.go
+++ b/enterprise/internal/insights/store/store_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/hexops/autogold"

 	"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/dbtesting"
+	"github.com/sourcegraph/sourcegraph/internal/api"
 	"github.com/sourcegraph/sourcegraph/internal/timeutil"
 )

@@ -105,3 +106,58 @@ SELECT time,
 	})
 }
+
+func TestRecordSeriesPoints(t *testing.T) {
+	if testing.Short() {
+		t.Skip()
+	}
+	t.Parallel()
+
+	ctx := context.Background()
+	clock := timeutil.Now
+	timescale, cleanup := dbtesting.TimescaleDB(t)
+	defer cleanup()
+	store := NewWithClock(timescale, clock)
+
+	time := func(s string) time.Time {
+		v, err := time.Parse(time.RFC3339, s)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return v
+	}
+	optionalString := func(v string) *string { return &v }
+	optionalRepoID := func(v api.RepoID) *api.RepoID { return &v }
+
+	// Record some data points.
+	for _, record := range []RecordSeriesPointArgs{
+		{
+			SeriesID: "one",
+			Point:    SeriesPoint{Time: time("2020-03-01T00:00:00Z"), Value: 1.1},
+			RepoName: optionalString("repo1"),
+			RepoID:   optionalRepoID(3),
+			Metadata: map[string]interface{}{"some": "data"},
+		},
+		{
+			SeriesID: "two",
+			Point:    SeriesPoint{Time: time("2020-03-02T00:00:00Z"), Value: 2.2},
+			Metadata: []interface{}{"some", "data", "two"},
+		},
+	} {
+		if err := store.RecordSeriesPoint(ctx, record); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Confirm we get the expected data back.
+	points, err := store.SeriesPoints(ctx, SeriesPointsOpts{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	autogold.Want("len(points)", 0).Equal(t, len(points))
+	autogold.Want("points[0].String()", 0).Equal(t, points[0].String())
+	autogold.Want("points[1].String()", 0).Equal(t, points[1].String())
+
+	// Confirm the data point with repository name got recorded correctly.
+	// TODO(slimsag): future: once we support querying by repo ID/names, add tests to ensure that information is inserted properly here.
+} From 07b26346f0c9dbc3993657a499c6d743121f6559 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 17:30:42 -0700 Subject: [PATCH 4/9] go generate ./enterprise/internal/insights/store/ (regenerate mocks) Signed-off-by: Stephen Gutekanst --- .../insights/store/mock_store_interface.go | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/enterprise/internal/insights/store/mock_store_interface.go b/enterprise/internal/insights/store/mock_store_interface.go index 0ef935976bd6..8af2c0e0777e 100644 --- a/enterprise/internal/insights/store/mock_store_interface.go +++ b/enterprise/internal/insights/store/mock_store_interface.go @@ -12,6 +12,9 @@ import ( // github.com/sourcegraph/sourcegraph/enterprise/internal/insights/store) // used for unit testing. type MockInterface struct { + // RecordSeriesPointFunc is an instance of a mock function object + // controlling the behavior of the method RecordSeriesPoint. + RecordSeriesPointFunc *InterfaceRecordSeriesPointFunc // SeriesPointsFunc is an instance of a mock function object controlling // the behavior of the method SeriesPoints. SeriesPointsFunc *InterfaceSeriesPointsFunc @@ -21,6 +24,11 @@ type MockInterface struct { // methods return zero values for all results, unless overwritten. func NewMockInterface() *MockInterface { return &MockInterface{ + RecordSeriesPointFunc: &InterfaceRecordSeriesPointFunc{ + defaultHook: func(context.Context, RecordSeriesPointArgs) error { + return nil + }, + }, SeriesPointsFunc: &InterfaceSeriesPointsFunc{ defaultHook: func(context.Context, SeriesPointsOpts) ([]SeriesPoint, error) { return nil, nil @@ -33,12 +41,121 @@ func NewMockInterface() *MockInterface { // All methods delegate to the given implementation, unless overwritten. func NewMockInterfaceFrom(i Interface) *MockInterface { return &MockInterface{ + RecordSeriesPointFunc: &InterfaceRecordSeriesPointFunc{ + defaultHook: i.RecordSeriesPoint, + }, SeriesPointsFunc: &InterfaceSeriesPointsFunc{ defaultHook: i.SeriesPoints, }, } } +// InterfaceRecordSeriesPointFunc describes the behavior when the +// RecordSeriesPoint method of the parent MockInterface instance is invoked. +type InterfaceRecordSeriesPointFunc struct { + defaultHook func(context.Context, RecordSeriesPointArgs) error + hooks []func(context.Context, RecordSeriesPointArgs) error + history []InterfaceRecordSeriesPointFuncCall + mutex sync.Mutex +} + +// RecordSeriesPoint delegates to the next hook function in the queue and +// stores the parameter and result values of this invocation. +func (m *MockInterface) RecordSeriesPoint(v0 context.Context, v1 RecordSeriesPointArgs) error { + r0 := m.RecordSeriesPointFunc.nextHook()(v0, v1) + m.RecordSeriesPointFunc.appendCall(InterfaceRecordSeriesPointFuncCall{v0, v1, r0}) + return r0 +} + +// SetDefaultHook sets function that is called when the RecordSeriesPoint +// method of the parent MockInterface instance is invoked and the hook queue +// is empty. +func (f *InterfaceRecordSeriesPointFunc) SetDefaultHook(hook func(context.Context, RecordSeriesPointArgs) error) { + f.defaultHook = hook +} + +// PushHook adds a function to the end of hook queue. Each invocation of the +// RecordSeriesPoint method of the parent MockInterface instance invokes the +// hook at the front of the queue and discards it. After the queue is empty, +// the default hook function is invoked for any future action. 
+func (f *InterfaceRecordSeriesPointFunc) PushHook(hook func(context.Context, RecordSeriesPointArgs) error) { + f.mutex.Lock() + f.hooks = append(f.hooks, hook) + f.mutex.Unlock() +} + +// SetDefaultReturn calls SetDefaultDefaultHook with a function that returns +// the given values. +func (f *InterfaceRecordSeriesPointFunc) SetDefaultReturn(r0 error) { + f.SetDefaultHook(func(context.Context, RecordSeriesPointArgs) error { + return r0 + }) +} + +// PushReturn calls PushDefaultHook with a function that returns the given +// values. +func (f *InterfaceRecordSeriesPointFunc) PushReturn(r0 error) { + f.PushHook(func(context.Context, RecordSeriesPointArgs) error { + return r0 + }) +} + +func (f *InterfaceRecordSeriesPointFunc) nextHook() func(context.Context, RecordSeriesPointArgs) error { + f.mutex.Lock() + defer f.mutex.Unlock() + + if len(f.hooks) == 0 { + return f.defaultHook + } + + hook := f.hooks[0] + f.hooks = f.hooks[1:] + return hook +} + +func (f *InterfaceRecordSeriesPointFunc) appendCall(r0 InterfaceRecordSeriesPointFuncCall) { + f.mutex.Lock() + f.history = append(f.history, r0) + f.mutex.Unlock() +} + +// History returns a sequence of InterfaceRecordSeriesPointFuncCall objects +// describing the invocations of this function. +func (f *InterfaceRecordSeriesPointFunc) History() []InterfaceRecordSeriesPointFuncCall { + f.mutex.Lock() + history := make([]InterfaceRecordSeriesPointFuncCall, len(f.history)) + copy(history, f.history) + f.mutex.Unlock() + + return history +} + +// InterfaceRecordSeriesPointFuncCall is an object that describes an +// invocation of method RecordSeriesPoint on an instance of MockInterface. +type InterfaceRecordSeriesPointFuncCall struct { + // Arg0 is the value of the 1st argument passed to this method + // invocation. + Arg0 context.Context + // Arg1 is the value of the 2nd argument passed to this method + // invocation. + Arg1 RecordSeriesPointArgs + // Result0 is the value of the 1st result returned from this method + // invocation. + Result0 error +} + +// Args returns an interface slice containing the arguments of this +// invocation. +func (c InterfaceRecordSeriesPointFuncCall) Args() []interface{} { + return []interface{}{c.Arg0, c.Arg1} +} + +// Results returns an interface slice containing the results of this +// invocation. +func (c InterfaceRecordSeriesPointFuncCall) Results() []interface{} { + return []interface{}{c.Result0} +} + // InterfaceSeriesPointsFunc describes the behavior when the SeriesPoints // method of the parent MockInterface instance is invoked. 
type InterfaceSeriesPointsFunc struct { From c64564288bd4933cf48218c6b0d9c78eef815ca4 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 17:35:43 -0700 Subject: [PATCH 5/9] go test -update Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/enterprise/internal/insights/store/store_test.go b/enterprise/internal/insights/store/store_test.go index a4612515f0d6..8f5219f9f42e 100644 --- a/enterprise/internal/insights/store/store_test.go +++ b/enterprise/internal/insights/store/store_test.go @@ -154,9 +154,9 @@ func TestRecordSeriesPoints(t *testing.T) { if err != nil { t.Fatal(err) } - autogold.Want("len(points)", 0).Equal(t, len(points)) - autogold.Want("points[0].String()", 0).Equal(t, points[0].String()) - autogold.Want("points[1].String()", 0).Equal(t, points[1].String()) + autogold.Want("len(points)", int(2)).Equal(t, len(points)) + autogold.Want("points[0].String()", `SeriesPoint{Time: "2020-03-02 00:00:00 +0000 UTC", Value: 2.2, Metadata: ["some", "data", "two"]}`).Equal(t, points[0].String()) + autogold.Want("points[1].String()", `SeriesPoint{Time: "2020-03-01 00:00:00 +0000 UTC", Value: 1.1, Metadata: {"some": "data"}}`).Equal(t, points[1].String()) // Confirm the data point with repository name got recorded correctly. // TODO(slimsag): future: once we support querying by repo ID/names, add tests to ensure that information is inserted properly here. From 03f8a8a92002b40393f8509cf0f632ff608436ce Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Thu, 11 Feb 2021 15:43:34 -0700 Subject: [PATCH 6/9] migrations: add insights_query_runner_jobs table This adds the DB schema needed for the query runner worker, which will execute search queries and record insights about them. 
Stacked on top of #18255 Signed-off-by: Stephen Gutekanst --- ...395783_insights_query_runner_jobs.down.sql | 6 +++ ...28395783_insights_query_runner_jobs.up.sql | 20 ++++++++ migrations/frontend/bindata.go | 46 +++++++++++++++++++ 3 files changed, 72 insertions(+) create mode 100644 migrations/frontend/1528395783_insights_query_runner_jobs.down.sql create mode 100644 migrations/frontend/1528395783_insights_query_runner_jobs.up.sql diff --git a/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql b/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql new file mode 100644 index 000000000000..27b23c73ab0e --- /dev/null +++ b/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql @@ -0,0 +1,6 @@ +BEGIN; + +DROP TABLE IF EXISTS insights_query_runner_jobs; +DROP INDEX IF EXISTS insights_query_runner_jobs_state_btree; + +COMMIT; diff --git a/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql b/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql new file mode 100644 index 000000000000..5c86af00e6c9 --- /dev/null +++ b/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql @@ -0,0 +1,20 @@ +BEGIN; + +CREATE TABLE IF NOT EXISTS insights_query_runner_jobs( + id SERIAL PRIMARY KEY, + series_id text NOT NULL, + search_query text NOT NULL, + state text default 'queued', + failure_message text, + started_at timestamptz, + finished_at timestamptz, + process_after timestamptz, + num_resets int4 NOT NULL default 0, + num_failures int4 NOT NULL default 0, + execution_logs json[] +); +CREATE INDEX insights_query_runner_jobs_state_btree ON insights_query_runner_jobs USING btree (state); + +COMMENT ON TABLE insights_query_runner_jobs IS 'See enterprise/internal/insights/background/queryrunner/worker.go:Job'; + +COMMIT; diff --git a/migrations/frontend/bindata.go b/migrations/frontend/bindata.go index 2fb9129b5280..20d585171641 100644 --- a/migrations/frontend/bindata.go +++ b/migrations/frontend/bindata.go @@ -100,6 +100,8 @@ // 1528395781_remove_user_repos_table.up.sql (57B) // 1528395782_normalize_spec_fields_on_changeset_specs.down.sql (1.922kB) // 1528395782_normalize_spec_fields_on_changeset_specs.up.sql (2.764kB) +// 1528395783_insights_query_runner_jobs.down.sql (127B) +// 1528395783_insights_query_runner_jobs.up.sql (671B) package migrations @@ -2168,6 +2170,46 @@ func _1528395782_normalize_spec_fields_on_changeset_specsUpSql() (*asset, error) return a, nil } +var __1528395783_insights_query_runner_jobsDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x72\x75\xf7\xf4\xb3\xe6\xe2\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\xc8\xcc\x2b\xce\x4c\xcf\x28\x29\x8e\x2f\x2c\x4d\x2d\xaa\x8c\x2f\x2a\xcd\xcb\x4b\x2d\x8a\xcf\xca\x4f\x2a\xb6\x86\x68\xf0\xf4\x73\x71\x8d\x20\x4a\x43\x7c\x71\x49\x62\x49\x6a\x7c\x52\x49\x51\x6a\xaa\x35\x17\x97\xb3\xbf\xaf\xaf\x67\x88\x35\x17\x20\x00\x00\xff\xff\xbd\x57\xed\x70\x7f\x00\x00\x00") + +func _1528395783_insights_query_runner_jobsDownSqlBytes() ([]byte, error) { + return bindataRead( + __1528395783_insights_query_runner_jobsDownSql, + "1528395783_insights_query_runner_jobs.down.sql", + ) +} + +func _1528395783_insights_query_runner_jobsDownSql() (*asset, error) { + bytes, err := _1528395783_insights_query_runner_jobsDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1528395783_insights_query_runner_jobs.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, 
digest: [32]uint8{0xd0, 0x36, 0x7d, 0xe4, 0x66, 0x88, 0x86, 0x15, 0xe3, 0xff, 0x22, 0x26, 0xd6, 0x25, 0x32, 0x1a, 0xff, 0xd6, 0x73, 0xae, 0xb7, 0x84, 0x7d, 0x27, 0x52, 0x7c, 0x4, 0x5a, 0x40, 0x20, 0x3a, 0x76}} + return a, nil +} + +var __1528395783_insights_query_runner_jobsUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x92\xc1\x6e\x9b\x40\x10\x86\xef\x3c\xc5\xdc\x9c\x48\x55\xe9\xa1\xa7\xfa\xe4\xb4\xdb\x68\x5b\x8c\x2b\x20\x52\xa2\xaa\x5a\x2d\x30\xc6\x9b\xc0\xae\x33\x33\xab\xa6\x7d\xfa\xca\x80\xed\x43\x23\x87\x13\xd2\x7c\xff\x37\x03\x33\x37\xea\x56\xe7\xcb\x24\xf9\x5c\xa8\x55\xa5\xa0\x5a\xdd\x64\x0a\xf4\x57\xc8\x37\x15\xa8\x7b\x5d\x56\x25\x38\xcf\xae\xdb\x09\x9b\xe7\x88\xf4\xc7\x50\xf4\x1e\xc9\x3c\x86\x9a\xaf\x12\x00\x00\xd7\x42\xa9\x0a\xbd\xca\xe0\x47\xa1\xd7\xab\xe2\x01\xbe\xab\x87\x77\x63\x89\x91\x1c\xb2\x71\x2d\x08\xbe\xc8\x28\xcd\xef\xb2\xec\x58\xb4\xd4\xec\x26\xeb\xab\x75\xb1\x82\x70\x7e\x46\xa4\xc5\xad\x8d\xbd\xc0\xe2\x39\x62\xc4\x76\x31\xa1\x5b\xeb\xfa\x48\x68\x06\x64\xb6\x1d\x8e\xe8\x49\x42\x82\xad\xb1\x32\x4b\xdc\x80\x2c\x76\xd8\xcb\xdf\x39\xea\xbc\xe3\xdd\x99\xf8\x0f\xd8\x53\x68\x90\xd9\xd8\xad\x20\xbd\x06\xf8\x38\x18\x42\x46\xe1\xa9\x85\xf3\xf2\xf1\xf4\x29\xa7\x81\x3f\x9c\xe1\x79\x5a\x7e\x13\xc6\x17\x6c\xa2\xb8\xe0\x4d\x1f\x3a\x86\x47\x0e\xfe\xe7\xaf\xe4\x7a\x79\xdc\x96\xce\xbf\xa8\xfb\x0b\xfb\x31\xe3\x2f\x34\xb5\x10\x22\x6c\xf2\x0b\x24\xdc\x95\x3a\xbf\x85\x89\xbc\x1a\x63\xd7\x87\xab\xd8\xac\xd7\x2a\xaf\x0e\xd9\xe9\x32\x2e\x18\x74\x09\x8b\x12\x11\xd0\x0b\xd2\x9e\x1c\x63\xea\x0e\xaf\xde\xf6\xe9\x31\x96\xd6\xb6\x79\xea\x28\x44\xdf\xa6\xa3\x61\x12\xa4\xbf\x03\x3d\x21\xbd\xef\xc2\xa7\x6f\xa1\x5e\xcc\x8d\x75\xb5\x4c\xfe\x05\x00\x00\xff\xff\x63\xd6\x50\xe6\x9f\x02\x00\x00") + +func _1528395783_insights_query_runner_jobsUpSqlBytes() ([]byte, error) { + return bindataRead( + __1528395783_insights_query_runner_jobsUpSql, + "1528395783_insights_query_runner_jobs.up.sql", + ) +} + +func _1528395783_insights_query_runner_jobsUpSql() (*asset, error) { + bytes, err := _1528395783_insights_query_runner_jobsUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1528395783_insights_query_runner_jobs.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x51, 0xf6, 0x24, 0x86, 0x1c, 0x13, 0x23, 0x81, 0xcb, 0x48, 0xc3, 0x34, 0xf, 0xf7, 0x74, 0x8f, 0x61, 0xde, 0xc3, 0x71, 0xa6, 0x2a, 0x9, 0xf2, 0x4f, 0xfb, 0xd1, 0xf9, 0x77, 0xb1, 0x8, 0x39}} + return a, nil +} + // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. @@ -2359,6 +2401,8 @@ var _bindata = map[string]func() (*asset, error){ "1528395781_remove_user_repos_table.up.sql": _1528395781_remove_user_repos_tableUpSql, "1528395782_normalize_spec_fields_on_changeset_specs.down.sql": _1528395782_normalize_spec_fields_on_changeset_specsDownSql, "1528395782_normalize_spec_fields_on_changeset_specs.up.sql": _1528395782_normalize_spec_fields_on_changeset_specsUpSql, + "1528395783_insights_query_runner_jobs.down.sql": _1528395783_insights_query_runner_jobsDownSql, + "1528395783_insights_query_runner_jobs.up.sql": _1528395783_insights_query_runner_jobsUpSql, } // AssetDebug is true if the assets were built with the debug flag enabled. 
@@ -2505,6 +2549,8 @@ var _bintree = &bintree{nil, map[string]*bintree{ "1528395781_remove_user_repos_table.up.sql": {_1528395781_remove_user_repos_tableUpSql, map[string]*bintree{}}, "1528395782_normalize_spec_fields_on_changeset_specs.down.sql": {_1528395782_normalize_spec_fields_on_changeset_specsDownSql, map[string]*bintree{}}, "1528395782_normalize_spec_fields_on_changeset_specs.up.sql": {_1528395782_normalize_spec_fields_on_changeset_specsUpSql, map[string]*bintree{}}, + "1528395783_insights_query_runner_jobs.down.sql": {_1528395783_insights_query_runner_jobsDownSql, map[string]*bintree{}}, + "1528395783_insights_query_runner_jobs.up.sql": {_1528395783_insights_query_runner_jobsUpSql, map[string]*bintree{}}, }} // RestoreAsset restores an asset under the given directory. From 9cf713981ce4ab12ef504c15827e5fa241490c39 Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Fri, 12 Feb 2021 18:58:09 -0700 Subject: [PATCH 7/9] go generate ./internal/database/... Signed-off-by: Stephen Gutekanst --- internal/database/schema.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/internal/database/schema.md b/internal/database/schema.md index e880ea3b3240..187fb2437b80 100644 --- a/internal/database/schema.md +++ b/internal/database/schema.md @@ -572,6 +572,29 @@ Indexes: ``` +# Table "public.insights_query_runner_jobs" +``` + Column | Type | Modifiers +-----------------+--------------------------+------------------------------------------------------------------------- + id | integer | not null default nextval('insights_query_runner_jobs_id_seq'::regclass) + series_id | text | not null + search_query | text | not null + state | text | default 'queued'::text + failure_message | text | + started_at | timestamp with time zone | + finished_at | timestamp with time zone | + process_after | timestamp with time zone | + num_resets | integer | not null default 0 + num_failures | integer | not null default 0 + execution_logs | json[] | +Indexes: + "insights_query_runner_jobs_pkey" PRIMARY KEY, btree (id) + "insights_query_runner_jobs_state_btree" btree (state) + +``` + +See enterprise/internal/insights/background/queryrunner/worker.go:Job + # Table "public.lsif_dirty_repositories" ``` Column | Type | Modifiers From ee7617520fd3731d460cdaba7391caed5b48e69d Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 15 Feb 2021 13:46:12 -0700 Subject: [PATCH 8/9] add comment link, fix ordering Signed-off-by: Stephen Gutekanst --- internal/database/schema.md | 2 +- .../1528395783_insights_query_runner_jobs.down.sql | 2 +- .../1528395783_insights_query_runner_jobs.up.sql | 2 +- migrations/frontend/bindata.go | 10 +++++----- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/database/schema.md b/internal/database/schema.md index 187fb2437b80..a85e95788373 100644 --- a/internal/database/schema.md +++ b/internal/database/schema.md @@ -593,7 +593,7 @@ Indexes: ``` -See enterprise/internal/insights/background/queryrunner/worker.go:Job +See [enterprise/internal/insights/background/queryrunner/worker.go:Job](https://sourcegraph.com/search?q=repo:%5Egithub%5C.com/sourcegraph/sourcegraph%24+file:enterprise/internal/insights/background/queryrunner/worker.go+type+Job&patternType=literal) # Table "public.lsif_dirty_repositories" ``` diff --git a/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql b/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql index 27b23c73ab0e..7d137bae1866 100644 --- 
a/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql +++ b/migrations/frontend/1528395783_insights_query_runner_jobs.down.sql @@ -1,6 +1,6 @@ BEGIN; -DROP TABLE IF EXISTS insights_query_runner_jobs; DROP INDEX IF EXISTS insights_query_runner_jobs_state_btree; +DROP TABLE IF EXISTS insights_query_runner_jobs; COMMIT; diff --git a/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql b/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql index 5c86af00e6c9..96bb9461999c 100644 --- a/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql +++ b/migrations/frontend/1528395783_insights_query_runner_jobs.up.sql @@ -15,6 +15,6 @@ CREATE TABLE IF NOT EXISTS insights_query_runner_jobs( ); CREATE INDEX insights_query_runner_jobs_state_btree ON insights_query_runner_jobs USING btree (state); -COMMENT ON TABLE insights_query_runner_jobs IS 'See enterprise/internal/insights/background/queryrunner/worker.go:Job'; +COMMENT ON TABLE insights_query_runner_jobs IS 'See [enterprise/internal/insights/background/queryrunner/worker.go:Job](https://sourcegraph.com/search?q=repo:%5Egithub%5C.com/sourcegraph/sourcegraph%24+file:enterprise/internal/insights/background/queryrunner/worker.go+type+Job&patternType=literal)'; COMMIT; diff --git a/migrations/frontend/bindata.go b/migrations/frontend/bindata.go index 20d585171641..9bf565315ce8 100644 --- a/migrations/frontend/bindata.go +++ b/migrations/frontend/bindata.go @@ -101,7 +101,7 @@ // 1528395782_normalize_spec_fields_on_changeset_specs.down.sql (1.922kB) // 1528395782_normalize_spec_fields_on_changeset_specs.up.sql (2.764kB) // 1528395783_insights_query_runner_jobs.down.sql (127B) -// 1528395783_insights_query_runner_jobs.up.sql (671B) +// 1528395783_insights_query_runner_jobs.up.sql (852B) package migrations @@ -2170,7 +2170,7 @@ func _1528395782_normalize_spec_fields_on_changeset_specsUpSql() (*asset, error) return a, nil } -var __1528395783_insights_query_runner_jobsDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x72\x75\xf7\xf4\xb3\xe6\xe2\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\xc8\xcc\x2b\xce\x4c\xcf\x28\x29\x8e\x2f\x2c\x4d\x2d\xaa\x8c\x2f\x2a\xcd\xcb\x4b\x2d\x8a\xcf\xca\x4f\x2a\xb6\x86\x68\xf0\xf4\x73\x71\x8d\x20\x4a\x43\x7c\x71\x49\x62\x49\x6a\x7c\x52\x49\x51\x6a\xaa\x35\x17\x97\xb3\xbf\xaf\xaf\x67\x88\x35\x17\x20\x00\x00\xff\xff\xbd\x57\xed\x70\x7f\x00\x00\x00") +var __1528395783_insights_query_runner_jobsDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x72\x75\xf7\xf4\xb3\xe6\xe2\x72\x09\xf2\x0f\x50\xf0\xf4\x73\x71\x8d\x50\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\xc8\xcc\x2b\xce\x4c\xcf\x28\x29\x8e\x2f\x2c\x4d\x2d\xaa\x8c\x2f\x2a\xcd\xcb\x4b\x2d\x8a\xcf\xca\x4f\x2a\x8e\x2f\x2e\x49\x2c\x49\x8d\x4f\x2a\x29\x4a\x4d\xb5\x86\x68\x0e\x71\x74\xf2\x71\x25\x4a\xb3\x35\x17\x97\xb3\xbf\xaf\xaf\x67\x88\x35\x17\x20\x00\x00\xff\xff\x7d\x1e\x6f\x28\x7f\x00\x00\x00") func _1528395783_insights_query_runner_jobsDownSqlBytes() ([]byte, error) { return bindataRead( @@ -2186,11 +2186,11 @@ func _1528395783_insights_query_runner_jobsDownSql() (*asset, error) { } info := bindataFileInfo{name: "1528395783_insights_query_runner_jobs.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0x36, 0x7d, 0xe4, 0x66, 0x88, 0x86, 0x15, 0xe3, 0xff, 0x22, 0x26, 0xd6, 0x25, 0x32, 0x1a, 0xff, 0xd6, 0x73, 0xae, 0xb7, 0x84, 0x7d, 0x27, 0x52, 0x7c, 0x4, 0x5a, 0x40, 0x20, 0x3a, 0x76}} + a := 
&asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0x3e, 0x7e, 0x7d, 0x11, 0x9e, 0xc0, 0x8f, 0x32, 0x87, 0x70, 0xc1, 0x72, 0xe5, 0x1c, 0x4, 0xa8, 0x3b, 0xa7, 0xf1, 0x6a, 0x8c, 0x17, 0x2d, 0x12, 0xaa, 0x0, 0x57, 0x1d, 0x87, 0xbb, 0xe}} return a, nil } -var __1528395783_insights_query_runner_jobsUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x92\xc1\x6e\x9b\x40\x10\x86\xef\x3c\xc5\xdc\x9c\x48\x55\xe9\xa1\xa7\xfa\xe4\xb4\xdb\x68\x5b\x8c\x2b\x20\x52\xa2\xaa\x5a\x2d\x30\xc6\x9b\xc0\xae\x33\x33\xab\xa6\x7d\xfa\xca\x80\xed\x43\x23\x87\x13\xd2\x7c\xff\x37\x03\x33\x37\xea\x56\xe7\xcb\x24\xf9\x5c\xa8\x55\xa5\xa0\x5a\xdd\x64\x0a\xf4\x57\xc8\x37\x15\xa8\x7b\x5d\x56\x25\x38\xcf\xae\xdb\x09\x9b\xe7\x88\xf4\xc7\x50\xf4\x1e\xc9\x3c\x86\x9a\xaf\x12\x00\x00\xd7\x42\xa9\x0a\xbd\xca\xe0\x47\xa1\xd7\xab\xe2\x01\xbe\xab\x87\x77\x63\x89\x91\x1c\xb2\x71\x2d\x08\xbe\xc8\x28\xcd\xef\xb2\xec\x58\xb4\xd4\xec\x26\xeb\xab\x75\xb1\x82\x70\x7e\x46\xa4\xc5\xad\x8d\xbd\xc0\xe2\x39\x62\xc4\x76\x31\xa1\x5b\xeb\xfa\x48\x68\x06\x64\xb6\x1d\x8e\xe8\x49\x42\x82\xad\xb1\x32\x4b\xdc\x80\x2c\x76\xd8\xcb\xdf\x39\xea\xbc\xe3\xdd\x99\xf8\x0f\xd8\x53\x68\x90\xd9\xd8\xad\x20\xbd\x06\xf8\x38\x18\x42\x46\xe1\xa9\x85\xf3\xf2\xf1\xf4\x29\xa7\x81\x3f\x9c\xe1\x79\x5a\x7e\x13\xc6\x17\x6c\xa2\xb8\xe0\x4d\x1f\x3a\x86\x47\x0e\xfe\xe7\xaf\xe4\x7a\x79\xdc\x96\xce\xbf\xa8\xfb\x0b\xfb\x31\xe3\x2f\x34\xb5\x10\x22\x6c\xf2\x0b\x24\xdc\x95\x3a\xbf\x85\x89\xbc\x1a\x63\xd7\x87\xab\xd8\xac\xd7\x2a\xaf\x0e\xd9\xe9\x32\x2e\x18\x74\x09\x8b\x12\x11\xd0\x0b\xd2\x9e\x1c\x63\xea\x0e\xaf\xde\xf6\xe9\x31\x96\xd6\xb6\x79\xea\x28\x44\xdf\xa6\xa3\x61\x12\xa4\xbf\x03\x3d\x21\xbd\xef\xc2\xa7\x6f\xa1\x5e\xcc\x8d\x75\xb5\x4c\xfe\x05\x00\x00\xff\xff\x63\xd6\x50\xe6\x9f\x02\x00\x00") +var __1528395783_insights_query_runner_jobsUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x92\x4f\x6b\xdb\x40\x10\xc5\xef\xfe\x14\x73\x49\x6d\xe3\x12\x95\x92\x5c\x6c\x42\x71\xd2\x6d\x50\x6a\xcb\xc5\x52\x20\x21\x84\x65\x25\x8d\xa5\x4d\xa4\x5d\x79\x76\x96\xc6\xfd\xf4\xc5\x92\xff\x14\x1a\xdc\x43\x75\x1a\x78\xbf\xf7\x34\x3b\x33\xd7\xe2\x36\x8c\x26\xbd\xde\xcd\x52\x4c\x13\x01\xc9\xf4\x7a\x26\x20\xfc\x06\xd1\x22\x01\xf1\x10\xc6\x49\x0c\xda\x38\x5d\x94\xec\xe4\xda\x23\x6d\x24\x79\x63\x90\xe4\x8b\x4d\xdd\xa0\x07\x00\xa0\x73\x88\xc5\x32\x9c\xce\xe0\xc7\x32\x9c\x4f\x97\x8f\xf0\x5d\x3c\x7e\x6c\x25\x87\xa4\xd1\x49\x9d\x03\xe3\x1b\xb7\xa1\xd1\xfd\x6c\xb6\x17\x15\x65\x65\x97\xfa\xae\xce\x8a\x11\x8e\x5f\x8b\xe4\xb8\x52\xbe\x62\xe8\xaf\x3d\x7a\xcc\xfb\x1d\xba\x52\xba\xf2\x84\xb2\x46\xe7\x54\x81\x2d\x7a\x08\x21\xc6\x5c\x2a\xde\x85\xe8\x1a\x1d\xab\xba\xe1\x5f\x3b\xab\x36\xda\x95\x47\xe2\x2f\xa0\x21\x9b\xa1\x73\x52\xad\x18\xe9\x3d\xc0\xf8\x5a\x12\x3a\x64\xd7\xfd\x42\x1b\xbe\x38\x3c\xe5\xd0\xf0\xa7\x23\xbc\xeb\xd6\xfd\x13\xc6\x37\xcc\x3c\x6b\x6b\x64\x65\x0b\x07\x2f\xce\x9a\xa7\xe7\xde\x70\xb2\xdf\x56\x18\x7d\x15\x0f\x27\xf6\x23\xdb\x11\xca\x94\x09\x11\x16\xd1\x09\x12\xee\xe3\x30\xba\x85\x8e\x1c\xb4\xb6\xe1\xf6\x2a\x16\xf3\xb9\x88\x92\xad\xb7\xbb\x8c\x13\x09\x61\x0c\xfd\x18\x11\x9e\xd0\x30\x52\x43\xda\x61\xa0\xb7\xa5\x51\x55\xb0\xf7\x05\xa9\xca\x5e\x0b\xb2\xde\xe4\x41\x1b\xd1\x25\x04\x3f\x2d\xbd\x22\x9d\x17\x76\x7c\x67\xd3\xe7\x41\xc9\xdc\xb8\x71\x10\x38\xeb\x29\xc3\x82\x54\x53\x9e\x67\xb6\x0e\xba\x93\xf9\xb2\xbe\x22\x6c\xec\xf8\xec\x52\x14\x9a\x4b\x9f\x9e\x5d\xde\x74\xf2\x11\xff\xb3\x3e\xfb\x7c\x31\x5a\xe9\x0a\xc7\xff\xd5\xd9\x88\x37\x0d\x8e\xee\x6c\xfa\xa1\x51\xbc\x35\x27\x9b\x06\xaf\x2a\xcd\x48\xaa\x1a\xf6\x77\xd3\x0a\x93\x49\xef\x77\x00\x00\x00\xff\xff\x2b\x49\x8c\x28\x54\x03\x00\x00") func _1528395783_insights_query_runner_jobsUpSqlBytes() ([]byte, error) { return bindataRead( @@ -2206,7 +2206,7 @@ func _1528395783_insights_query_runner_jobsUpSql() (*asset, error) { } info := bindataFileInfo{name: "1528395783_insights_query_runner_jobs.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x51, 0xf6, 0x24, 0x86, 0x1c, 0x13, 0x23, 0x81, 0xcb, 0x48, 0xc3, 0x34, 0xf, 0xf7, 0x74, 0x8f, 0x61, 0xde, 0xc3, 0x71, 0xa6, 0x2a, 0x9, 0xf2, 0x4f, 0xfb, 0xd1, 0xf9, 0x77, 0xb1, 0x8, 0x39}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfc, 0x94, 0x2a, 0xa0, 0x67, 0x7b, 0xd2, 0x10, 0x1b, 0xc0, 0xa3, 0x4a, 0x68, 0xfa, 0xfc, 0x24, 0x6a, 0xb2, 0x2b, 0xce, 0xe7, 0x71, 0xd0, 0x8, 0x80, 0xac, 0xaa, 0xac, 0x7c, 0xad, 0x74, 0xee}} return a, nil } From b5090c3d70e40c378988bdc3994a00bc7d9c489f Mon Sep 17 00:00:00 2001 From: Stephen Gutekanst Date: Mon, 15 Feb 2021 13:47:49 -0700 Subject: [PATCH 9/9] fix merge conflict Signed-off-by: Stephen Gutekanst --- enterprise/internal/insights/store/store.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 84be44f72102..1f05fbe2509c 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -227,10 +227,7 @@ func (s *Store) RecordSeriesPoint(ctx context.Context, v RecordSeriesPointArgs) } const upsertRepoNameFmtStr = ` -<<<<<<< HEAD -======= -- source: enterprise/internal/insights/store/store.go:RecordSeriesPoint ->>>>>>> origin/main WITH e AS( INSERT INTO repo_names(name) VALUES (%s) @@ -243,10 +240,7 @@ UNION ` const upsertMetadataFmtStr = ` -<<<<<<< HEAD -======= -- source: 
enterprise/internal/insights/store/store.go:RecordSeriesPoint ->>>>>>> origin/main WITH e AS( INSERT INTO metadata(metadata) VALUES (%s)