Merge #68434 #69294
68434: sql,jobs: create SQL Stats Compaction Job and resumer r=ajwerner,maryliag a=Azhng

Previous PR #67090
Followed by #68401

## First Commit 

sql,util: refactor checking for running jobs to its own util package

Release note: None
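
The refactored helper lands in pkg/jobs/utils.go (note the BUILD.bazel change at the bottom of this diff). As a rough sketch of what "checking for running jobs" involves, here is a minimal standalone version that asks the cluster whether any job of a given type is still active. The function name, database/sql wiring, and query shape are illustrative assumptions; the PR's actual helper goes through CockroachDB's internal executor, not a client connection.

```go
package jobutil

import (
	"context"
	"database/sql"
)

// RunningJobExists reports whether any job of the given type is still
// pending, running, or paused. Illustrative sketch only: the helper this
// PR actually adds lives in pkg/jobs/utils.go and uses the internal
// executor instead of a client connection.
func RunningJobExists(ctx context.Context, db *sql.DB, jobType string) (bool, error) {
	const q = `
SELECT count(*) > 0
  FROM [SHOW JOBS]
 WHERE job_type = $1
   AND status IN ('pending', 'running', 'paused')`
	var exists bool
	if err := db.QueryRowContext(ctx, q, jobType).Scan(&exists); err != nil {
		return false, err
	}
	return exists, nil
}
```

A caller would pass a type string as reported by SHOW JOBS, for example 'BACKUP' or 'SCHEMA CHANGE'.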


## Second Commit

sql,jobs: create SQL Stats Compaction Job and resumer

This commit introduces the SQL Stats Compaction job type
and a barebones implementation of the compaction itself.

Release justification: category 4

Release note: None
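
For orientation, here is a minimal sketch of the two pieces a new job type like this typically adds in this codebase: a jobs.Resumer implementation and its constructor registration. The package and type names, the jobspb.TypeAutoSQLStatsCompaction constant, and the method bodies are assumptions for illustration, not the PR's exact code.

```go
package persistedsqlstats

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/jobs"
	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
)

// compactionResumer sketches a resumer for the SQL Stats Compaction job.
type compactionResumer struct {
	job *jobs.Job
}

var _ jobs.Resumer = &compactionResumer{}

// Resume performs the compaction: trim persisted rows in
// system.statement_statistics and system.transaction_statistics down to
// the sql.stats.persisted_rows.max budget. Body elided in this sketch.
func (r *compactionResumer) Resume(ctx context.Context, execCtx interface{}) error {
	// ... issue DELETEs through the internal executor ...
	return nil
}

// OnFailOrCancel can be a no-op here: compaction is idempotent, so a
// failed run is simply retried by the next scheduled job.
func (r *compactionResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
	return nil
}

func init() {
	// jobspb.TypeAutoSQLStatsCompaction is an assumed constant name.
	jobs.RegisterConstructor(
		jobspb.TypeAutoSQLStatsCompaction,
		func(job *jobs.Job, _ *cluster.Settings) jobs.Resumer {
			return &compactionResumer{job: job}
		},
	)
}
```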

69294: changefeedccl: small test changes r=stevendanna a=stevendanna

Two small test changes:

- Use log.TestingClearServerIdentifiers() to avoid opening a new log
  scope.

- Don't start a schema registry when the kafka sink will already start
  one for us.

Release justification: low risk, test only changes (category 1)
Release note: None
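
The first bullet swaps a nested log scope for a one-line reset; the real edit is in helpers_test.go below. A minimal sketch of the pattern, where the helper name comes from this commit but the surrounding function is invented for illustration:

```go
package changefeedccl

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/util/log"
)

// startAnotherServer is an illustrative stand-in for startTestTenant.
func startAnotherServer(t *testing.T) {
	// StartTenant calls log.SetNodeIDs, which may only happen once per log
	// scope. Clearing the previously registered identifiers lets a second
	// server start under the same scope, so the helper no longer has to
	// open (and later close) a log scope of its own.
	log.TestingClearServerIdentifiers()
	// ... start the server and assemble its cleanup func as before ...
}
```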

Co-authored-by: Azhng <archer.xn@gmail.com>
Co-authored-by: Steven Danna <danna@cockroachlabs.com>
3 people committed Aug 24, 2021
3 parents 7c36a9d + 4469ffb + 657e88e commit 8418f43
Showing 25 changed files with 1,686 additions and 431 deletions.
1 change: 1 addition & 0 deletions docs/generated/settings/settings-for-tenants.txt
@@ -141,6 +141,7 @@ sql.stats.flush.enabled boolean true if set, SQL execution statistics are period
 sql.stats.flush.interval duration 1h0m0s the interval at which SQL execution statistics are flushed to disk
 sql.stats.histogram_collection.enabled boolean true histogram collection mode
 sql.stats.multi_column_collection.enabled boolean true multi-column statistics collection mode
+sql.stats.persisted_rows.max integer 10000 maximum number of rows of statement and transaction statistics that will be persisted in the system tables
 sql.stats.post_events.enabled boolean false if set, an event is logged for every CREATE STATISTICS job
 sql.temp_object_cleaner.cleanup_interval duration 30m0s how often to clean up orphaned temporary objects
 sql.trace.log_statement_execute boolean false set to true to enable logging of executed statements
1 change: 1 addition & 0 deletions docs/generated/settings/settings.html
@@ -145,6 +145,7 @@
 <tr><td><code>sql.stats.flush.interval</code></td><td>duration</td><td><code>1h0m0s</code></td><td>the interval at which SQL execution statistics are flushed to disk</td></tr>
 <tr><td><code>sql.stats.histogram_collection.enabled</code></td><td>boolean</td><td><code>true</code></td><td>histogram collection mode</td></tr>
 <tr><td><code>sql.stats.multi_column_collection.enabled</code></td><td>boolean</td><td><code>true</code></td><td>multi-column statistics collection mode</td></tr>
+<tr><td><code>sql.stats.persisted_rows.max</code></td><td>integer</td><td><code>10000</code></td><td>maximum number of rows of statement and transaction statistics that will be persisted in the system tables</td></tr>
 <tr><td><code>sql.stats.post_events.enabled</code></td><td>boolean</td><td><code>false</code></td><td>if set, an event is logged for every CREATE STATISTICS job</td></tr>
 <tr><td><code>sql.temp_object_cleaner.cleanup_interval</code></td><td>duration</td><td><code>30m0s</code></td><td>how often to clean up orphaned temporary objects</td></tr>
 <tr><td><code>sql.trace.log_statement_execute</code></td><td>boolean</td><td><code>false</code></td><td>set to true to enable logging of executed statements</td></tr>
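
Both generated-settings files document the same new cluster setting. As a quick usage sketch over the Postgres wire protocol with database/sql (the connection string and the value 5000 are placeholders):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // placeholder driver choice
)

func main() {
	// Placeholder connection string; point this at a running cluster.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Lower the cap on persisted statement/transaction stats rows.
	// The value is inlined; 5000 is just an example below the 10000 default.
	if _, err := db.Exec(
		`SET CLUSTER SETTING sql.stats.persisted_rows.max = 5000`); err != nil {
		log.Fatal(err)
	}

	var max int64
	if err := db.QueryRow(
		`SHOW CLUSTER SETTING sql.stats.persisted_rows.max`).Scan(&max); err != nil {
		log.Fatal(err)
	}
	fmt.Println("sql.stats.persisted_rows.max =", max)
}
```
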
12 changes: 3 additions & 9 deletions pkg/ccl/changefeedccl/changefeed_test.go
@@ -1506,14 +1506,11 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) {
 	testFnAvro := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
 		sqlDB := sqlutils.MakeSQLRunner(db)
 		sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'")
-		schemaReg := cdctest.StartTestSchemaRegistry()
-		defer schemaReg.Close()
-
 		t.Run("regional by row change works", func(t *testing.T) {
 			sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY, b INT)`)
 			defer sqlDB.Exec(t, `DROP TABLE rbr`)
 			sqlDB.Exec(t, `INSERT INTO rbr VALUES (0, NULL)`)
-			rbr := feed(t, f, fmt.Sprintf("CREATE CHANGEFEED FOR rbr WITH format=avro, confluent_schema_registry='%s'", schemaReg.URL()))
+			rbr := feed(t, f, `CREATE CHANGEFEED FOR rbr WITH format=avro`)
 			defer closeFeed(t, rbr)
 			sqlDB.Exec(t, `INSERT INTO rbr VALUES (1, 2)`)
 			assertPayloads(t, rbr, []string{
@@ -1530,7 +1527,7 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) {
 			sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY, b INT, region crdb_internal_region NOT NULL DEFAULT 'us-east-1')`)
 			defer sqlDB.Exec(t, `DROP TABLE rbr`)
 			sqlDB.Exec(t, `INSERT INTO rbr VALUES (0, NULL)`)
-			rbr := feed(t, f, fmt.Sprintf("CREATE CHANGEFEED FOR rbr WITH format=avro, confluent_schema_registry='%s'", schemaReg.URL()))
+			rbr := feed(t, f, `CREATE CHANGEFEED FOR rbr WITH format=avro`)
 			defer closeFeed(t, rbr)
 			sqlDB.Exec(t, `INSERT INTO rbr VALUES (1, 2)`)
 			assertPayloads(t, rbr, []string{
@@ -1574,15 +1571,12 @@ func TestChangefeedRBRAvroAddRegion(t *testing.T) {
 	cluster, db, cleanup := startTestCluster(t)
 	defer cleanup()
 
-	schemaReg := cdctest.StartTestSchemaRegistry()
-	defer schemaReg.Close()
-
 	f := makeKafkaFeedFactoryForCluster(cluster, db)
 	sqlDB := sqlutils.MakeSQLRunner(db)
 	sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY)`)
 	waitForSchemaChange(t, sqlDB, `ALTER TABLE rbr SET LOCALITY REGIONAL BY ROW`)
 	sqlDB.Exec(t, `INSERT INTO rbr VALUES (0)`)
-	rbr := feed(t, f, fmt.Sprintf("CREATE CHANGEFEED FOR rbr WITH format=avro, confluent_schema_registry='%s'", schemaReg.URL()))
+	rbr := feed(t, f, `CREATE CHANGEFEED FOR rbr WITH format=avro`)
 	defer closeFeed(t, rbr)
 	assertPayloads(t, rbr, []string{
 		`rbr: {"a":{"long":0},"crdb_region":{"string":"us-east1"}}->{"after":{"rbr":{"a":{"long":0},"crdb_region":{"string":"us-east1"}}}}`,
13 changes: 2 additions & 11 deletions pkg/ccl/changefeedccl/helpers_test.go
@@ -406,19 +406,10 @@ func startTestCluster(t testing.TB) (serverutils.TestClusterInterface, *gosql.DB
 func startTestTenant(
 	t testing.TB, options feedTestOptions,
 ) (serverutils.TestServerInterface, *gosql.DB, func()) {
-	// We need to open a new log scope because StartTenant
-	// calls log.SetNodeIDs which can only be called once
-	// per log scope. If we don't open a log scope here,
-	// then any test function that wants to use this twice
-	// would fail.
-	logScope := log.Scope(t)
+	log.TestingClearServerIdentifiers()
 	ctx := context.Background()
 
-	kvServer, _, kvCleanup := startTestFullServer(t, options)
-	cleanup := func() {
-		kvCleanup()
-		logScope.Close(t)
-	}
+	kvServer, _, cleanup := startTestFullServer(t, options)
 	knobs := base.TestingKnobs{
 		DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}},
 		JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
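
The deleted comment explains what the old code worked around: registering server identifiers twice in one log scope used to fail, hence the nested scope. With the identifiers cleared inside the helper, a test can call startTestTenant more than once under its own scope. A hedged sketch (the test name and zero-valued options are illustrative):

```go
func TestStartsTwoTenants(t *testing.T) {
	defer log.Scope(t).Close(t)

	// Both calls share this test's single log scope; each call clears the
	// previously registered server identifiers before starting its server.
	srv1, db1, cleanup1 := startTestTenant(t, feedTestOptions{})
	defer cleanup1()
	srv2, db2, cleanup2 := startTestTenant(t, feedTestOptions{})
	defer cleanup2()

	_, _, _, _ = srv1, db1, srv2, db2 // exercise the tenants as usual
}
```
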
1 change: 1 addition & 0 deletions pkg/jobs/BUILD.bazel
@@ -18,6 +18,7 @@ go_library(
         "test_helpers.go",
         "testing_knobs.go",
         "update.go",
+        "utils.go",
         "validate.go",
     ],
     importpath = "github.com/cockroachdb/cockroach/pkg/jobs",
