From 05290b9bec21ddf7926fbb4887d666d84c46477b Mon Sep 17 00:00:00 2001
From: Victor Chen
Date: Mon, 30 Apr 2018 16:11:00 -0400
Subject: [PATCH 1/2] cluster: remove legacy version upgrade tests for auto
 upgrade merge

The legacy tests `TestDockerReadWriteBidirectionalReferenceVersion` and
`TestDockerReadWriteForwardReferenceVersion` no longer apply, so this change
removes them. `TestClusterVersionUpgrade1_0To2_0` is outdated as well; it is
updated in the auto upgrade PR #24987.

Release note: None
---
 pkg/acceptance/reference_test.go | 148 -------------------------------
 1 file changed, 148 deletions(-)
 delete mode 100644 pkg/acceptance/reference_test.go

diff --git a/pkg/acceptance/reference_test.go b/pkg/acceptance/reference_test.go
deleted file mode 100644
index c85e1d7afd53..000000000000
--- a/pkg/acceptance/reference_test.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2016 The Cockroach Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-// implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package acceptance
-
-import (
-	"context"
-	"fmt"
-	"testing"
-
-	"github.com/cockroachdb/cockroach/pkg/acceptance/cluster"
-	"github.com/cockroachdb/cockroach/pkg/util/log"
-)
-
-func runReferenceTestWithScript(ctx context.Context, t *testing.T, script string) {
-	containerConfig := defaultContainerConfig()
-	containerConfig.Cmd = []string{"stat", cluster.CockroachBinaryInContainer}
-	if err := testDockerOneShot(ctx, t, "reference", containerConfig); err != nil {
-		t.Skipf(`TODO(dt): No binary in one-shot container, see #6086: %s`, err)
-	}
-
-	containerConfig.Cmd = []string{"/bin/bash", "-c", script}
-	if err := testDockerOneShot(ctx, t, "reference", containerConfig); err != nil {
-		t.Error(err)
-	}
-}
-
-func runReadWriteReferenceTest(
-	ctx context.Context, t *testing.T, referenceBinPath string, backwardReferenceTest string,
-) {
-	referenceTestScript := fmt.Sprintf(`
-set -xe
-mkdir old
-cd old
-
-touch oldout newout
-function finish() {
-  cat oldout newout
-}
-trap finish EXIT
-
-export PGHOST=localhost
-export PGPORT=""
-export COCKROACH_SKIP_UPDATE_CHECK=1
-
-bin=/opt/%s/cockroach
-$bin start --background --logtostderr --insecure --certs-dir=/certs &> oldout
-
-echo "Use the reference binary to write a couple rows, then render its output to a file and shut down."
-$bin sql --insecure -e "CREATE DATABASE old" -$bin sql --insecure -d old -e "CREATE TABLE testing_old (i int primary key, b bool, s string unique, d decimal, f float, t timestamp, v interval, index sb (s, b))" -$bin sql --insecure -d old -e "INSERT INTO testing_old values (1, true, 'hello', decimal '3.14159', 3.14159, NOW(), interval '1h')" -$bin sql --insecure -d old -e "INSERT INTO testing_old values (2, false, 'world', decimal '0.14159', 0.14159, NOW(), interval '234h45m2s234ms')" -$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_old" > old.everything -$bin quit --insecure && wait # wait will block until all background jobs finish. - -bin=/cockroach/cockroach -$bin start --background --logtostderr --insecure --certs-dir=/certs &> newout -echo "Read data written by reference version using new binary" -$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_old" > new.everything -# diff returns non-zero if different. With set -e above, that would exit here. -diff -u new.everything old.everything - -$bin node status --insecure - -echo "Add a row with the new binary and render the updated data before shutting down." -$bin sql --insecure -d old -e "INSERT INTO testing_old values (3, false, '!', decimal '2.14159', 2.14159, NOW(), interval '3h')" -$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_old" > new.everything -$bin sql --insecure -d old -e "CREATE TABLE testing_new (i int primary key, b bool, s string unique, d decimal, f float, t timestamp, v interval, index sb (s, b))" -$bin sql --insecure -d old -e "INSERT INTO testing_new values (4, false, '!!', decimal '1.14159', 1.14159, NOW(), interval '4h')" -$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_new" >> new.everything -$bin quit --insecure -# Let it close its listening sockets. -sleep 1 - -echo "Read the modified data using the reference binary again." -bin=/opt/%s/cockroach -%s -`, referenceBinPath, referenceBinPath, backwardReferenceTest) - runReferenceTestWithScript(ctx, t, referenceTestScript) -} - -// TestDockerReadWriteBidirectionalReferenceVersion verifies that we can -// upgrade from the bidirectional reference version (i.e. the oldest -// version that we support downgrading to, specified in the -// postgres-test dockerfile), then downgrade to it again. -func TestDockerReadWriteBidirectionalReferenceVersion(t *testing.T) { - s := log.Scope(t) - defer s.Close(t) - - ctx := context.Background() - backwardReferenceTest := ` -touch out -function finish() { - cat out -} -trap finish EXIT - -$bin start --background --logtostderr --insecure --certs-dir=/certs &> out -$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_old" > old.everything -$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_new" >> old.everything -# diff returns non-zero if different. With set -e above, that would exit here. 
-diff -u new.everything old.everything
-$bin quit --insecure && wait
-`
-	runReadWriteReferenceTest(ctx, t, `bidirectional-reference-version`, backwardReferenceTest)
-}
-
-// TestDockerReadWriteForwardReferenceVersion verifies that we can
-// upgrade from the forward reference version (i.e. the oldest version
-// that we support upgrading from, specified in the postgres-test
-// dockerfile), and that downgrading to it fails in the expected ways.
-//
-// When the forward and bidirectional reference versions are the same,
-// this test is a duplicate of the bidirectional test above and exists
-// solely to preserve the scaffolding for when they differ again.
-func TestDockerReadWriteForwardReferenceVersion(t *testing.T) {
-	s := log.Scope(t)
-	defer s.Close(t)
-
-	ctx := context.Background()
-	backwardReferenceTest := `
-touch out
-function finish() {
-  cat out
-}
-trap finish EXIT
-
-$bin start --background --logtostderr --insecure --certs-dir=/certs &> out
-$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_old" > old.everything
-$bin sql --insecure -d old --format=records -e "SELECT i, b, s, d, f, extract(epoch from (timestamp '1970-01-01 00:00:00' + v)) as v, extract(epoch FROM t) as e FROM testing_new" >> old.everything
-# diff returns non-zero if different. With set -e above, that would exit here.
-diff -u new.everything old.everything
-$bin quit --insecure && wait
-`
-	runReadWriteReferenceTest(ctx, t, `forward-reference-version`, backwardReferenceTest)
-}

From 0402f4e976234e221284ca4fab02534ebc66f720 Mon Sep 17 00:00:00 2001
From: Rebecca Taft
Date: Mon, 14 May 2018 13:51:10 -0400
Subject: [PATCH 2/2] opt/optbuilder: Fix bug with order by, aggregate alias,
 and having.

Previously, queries such as:

`SELECT SUM(a) AS a2 FROM abcd GROUP BY c HAVING SUM(a)=10 ORDER BY a2`

were incorrectly causing an error:

`error: column name "a2" not found`

This commit fixes the error by ensuring that a column alias for an
aggregate function is applied even when the same aggregate appears
multiple times in the same query (e.g., in the HAVING clause).

Release note: None
---
 pkg/sql/opt/optbuilder/groupby.go         |   5 +
 pkg/sql/opt/optbuilder/testdata/aggregate |  75 +++++++++++
 pkg/sql/opt/optbuilder/testdata/having    | 148 ++++++++++++++++++++++
 3 files changed, 228 insertions(+)

diff --git a/pkg/sql/opt/optbuilder/groupby.go b/pkg/sql/opt/optbuilder/groupby.go
index 822456a9ea4d..7aee8ce1e924 100644
--- a/pkg/sql/opt/optbuilder/groupby.go
+++ b/pkg/sql/opt/optbuilder/groupby.go
@@ -377,6 +377,11 @@ func (b *Builder) buildAggregateFunction(
 		// Undo the adding of the args.
 		// TODO(radu): is there a cleaner way to do this?
 		aggInScope.cols = aggInScope.cols[:aggInScopeColsBefore]
+
+		// Update the column name if a label is provided.
+		if label != "" {
+			col.name = tree.Name(label)
+		}
 	}
 
 	// Replace the function call with a reference to the column.
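To make the intent of the groupby.go change above concrete, here is a minimal, self-contained Go sketch of the idea rather than the real optbuilder code: `aggScope`, `aggCol`, and `buildAggregate` are hypothetical stand-ins that only model how a de-duplicated aggregate column still picks up the alias supplied with AS, so that a later ORDER BY on that alias can resolve it.

package main

import "fmt"

// aggCol models the output column synthesized for an aggregate call.
type aggCol struct {
	name string // name visible to HAVING / ORDER BY
	expr string // canonical text of the aggregate, e.g. "sum(a)"
}

// aggScope models the aggregates built so far, de-duplicated by their text.
type aggScope struct {
	cols map[string]*aggCol
}

// buildAggregate returns the column for expr, creating it on first use.
// Mirroring the patch: when a label (an AS alias) is supplied, it is
// applied even if the column already existed.
func (s *aggScope) buildAggregate(expr, label string) *aggCol {
	col, ok := s.cols[expr]
	if !ok {
		col = &aggCol{name: expr, expr: expr}
		s.cols[expr] = col
	}
	if label != "" {
		col.name = label
	}
	return col
}

func main() {
	s := &aggScope{cols: map[string]*aggCol{}}
	// The same aggregate appears twice in one query, e.g. once via
	// HAVING SUM(a)=10 (no alias) and once via SELECT SUM(a) AS a2.
	s.buildAggregate("sum(a)", "")
	col := s.buildAggregate("sum(a)", "a2")
	fmt.Println(col.name) // prints "a2", so ORDER BY a2 finds the column
}

If the rename were skipped whenever the column already exists, the column would keep its default name and the ORDER BY a2 lookup would fail, which corresponds to the `column name "a2" not found` error described in the commit message.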
diff --git a/pkg/sql/opt/optbuilder/testdata/aggregate b/pkg/sql/opt/optbuilder/testdata/aggregate index 4ea581e0872a..ff49213e7e0d 100644 --- a/pkg/sql/opt/optbuilder/testdata/aggregate +++ b/pkg/sql/opt/optbuilder/testdata/aggregate @@ -2747,3 +2747,78 @@ sort │ └── variable: kv.k [type=int] └── projections └── variable: mk [type=int] + +build +SELECT MAX(k) AS mk1, MAX(k) AS mk2 FROM kv GROUP BY v ORDER BY mk1 +---- +sort + ├── columns: mk1:5(int) mk2:5(int) + ├── ordering: +5 + └── project + ├── columns: mk1:5(int) + ├── group-by + │ ├── columns: kv.v:2(int) mk1:5(int) + │ ├── grouping columns: kv.v:2(int) + │ ├── project + │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ ├── scan kv + │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ └── projections + │ │ ├── variable: kv.v [type=int] + │ │ └── variable: kv.k [type=int] + │ └── aggregations + │ └── max [type=int] + │ └── variable: kv.k [type=int] + └── projections + └── variable: mk1 [type=int] + +build +SELECT MAX(k) AS mk1, MAX(k) AS mk2 FROM kv GROUP BY v ORDER BY mk2 +---- +sort + ├── columns: mk1:5(int) mk2:5(int) + ├── ordering: +5 + └── project + ├── columns: mk1:5(int) + ├── group-by + │ ├── columns: kv.v:2(int) mk1:5(int) + │ ├── grouping columns: kv.v:2(int) + │ ├── project + │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ ├── scan kv + │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ └── projections + │ │ ├── variable: kv.v [type=int] + │ │ └── variable: kv.k [type=int] + │ └── aggregations + │ └── max [type=int] + │ └── variable: kv.k [type=int] + └── projections + └── variable: mk1 [type=int] + +build +SELECT MAX(k) AS mk1, MAX(k)/5 AS mk2 FROM kv GROUP BY v ORDER BY mk2 +---- +sort + ├── columns: mk1:5(int) mk2:6(decimal) + ├── ordering: +6 + └── project + ├── columns: mk1:5(int) mk2:6(decimal) + ├── group-by + │ ├── columns: kv.v:2(int) mk1:5(int) + │ ├── grouping columns: kv.v:2(int) + │ ├── project + │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ ├── scan kv + │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ └── projections + │ │ ├── variable: kv.v [type=int] + │ │ └── variable: kv.k [type=int] + │ └── aggregations + │ └── max [type=int] + │ └── variable: kv.k [type=int] + └── projections + ├── variable: mk1 [type=int] + └── div [type=decimal] + ├── variable: mk1 [type=int] + └── const: 5 [type=int] diff --git a/pkg/sql/opt/optbuilder/testdata/having b/pkg/sql/opt/optbuilder/testdata/having index 09f0c65197cd..0f76bdf50690 100644 --- a/pkg/sql/opt/optbuilder/testdata/having +++ b/pkg/sql/opt/optbuilder/testdata/having @@ -314,3 +314,151 @@ select └── gt [type=bool] ├── variable: column6 [type=int] └── const: 1 [type=int] + +# Check that ordering by an alias of an aggregate works when HAVING is present. 
+build +SELECT SUM(k) AS mk FROM kv GROUP BY v HAVING SUM(k)=10 ORDER BY mk +---- +sort + ├── columns: mk:5(decimal) + ├── ordering: +5 + └── project + ├── columns: column5:5(decimal) + ├── select + │ ├── columns: kv.v:2(int) column5:5(decimal) + │ ├── group-by + │ │ ├── columns: kv.v:2(int) column5:5(decimal) + │ │ ├── grouping columns: kv.v:2(int) + │ │ ├── project + │ │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ │ ├── scan kv + │ │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ │ └── projections + │ │ │ ├── variable: kv.v [type=int] + │ │ │ └── variable: kv.k [type=int] + │ │ └── aggregations + │ │ └── sum [type=decimal] + │ │ └── variable: kv.k [type=int] + │ └── eq [type=bool] + │ ├── variable: column5 [type=decimal] + │ └── const: 10 [type=decimal] + └── projections + └── variable: column5 [type=decimal] + +build +SELECT SUM(k) AS mk FROM kv GROUP BY v HAVING MAX(k) > 10 ORDER BY mk +---- +sort + ├── columns: mk:6(decimal) + ├── ordering: +6 + └── project + ├── columns: mk:6(decimal) + ├── select + │ ├── columns: kv.v:2(int) column5:5(int) mk:6(decimal) + │ ├── group-by + │ │ ├── columns: kv.v:2(int) column5:5(int) mk:6(decimal) + │ │ ├── grouping columns: kv.v:2(int) + │ │ ├── project + │ │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ │ ├── scan kv + │ │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ │ └── projections + │ │ │ ├── variable: kv.v [type=int] + │ │ │ └── variable: kv.k [type=int] + │ │ └── aggregations + │ │ ├── max [type=int] + │ │ │ └── variable: kv.k [type=int] + │ │ └── sum [type=decimal] + │ │ └── variable: kv.k [type=int] + │ └── gt [type=bool] + │ ├── variable: column5 [type=int] + │ └── const: 10 [type=int] + └── projections + └── variable: mk [type=decimal] + +build +SELECT SUM(k) AS mk FROM kv GROUP BY v HAVING v > 10 ORDER BY mk +---- +sort + ├── columns: mk:5(decimal) + ├── ordering: +5 + └── project + ├── columns: mk:5(decimal) + ├── select + │ ├── columns: kv.v:2(int) mk:5(decimal) + │ ├── group-by + │ │ ├── columns: kv.v:2(int) mk:5(decimal) + │ │ ├── grouping columns: kv.v:2(int) + │ │ ├── project + │ │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ │ ├── scan kv + │ │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ │ └── projections + │ │ │ ├── variable: kv.v [type=int] + │ │ │ └── variable: kv.k [type=int] + │ │ └── aggregations + │ │ └── sum [type=decimal] + │ │ └── variable: kv.k [type=int] + │ └── gt [type=bool] + │ ├── variable: kv.v [type=int] + │ └── const: 10 [type=int] + └── projections + └── variable: mk [type=decimal] + +build +SELECT MAX(k) AS mk1, MAX(k) AS mk2 FROM kv GROUP BY v HAVING MAX(k) > 10 ORDER BY mk1 +---- +sort + ├── columns: mk1:5(int) mk2:5(int) + ├── ordering: +5 + └── project + ├── columns: column5:5(int) + ├── select + │ ├── columns: kv.v:2(int) column5:5(int) + │ ├── group-by + │ │ ├── columns: kv.v:2(int) column5:5(int) + │ │ ├── grouping columns: kv.v:2(int) + │ │ ├── project + │ │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ │ ├── scan kv + │ │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ │ └── projections + │ │ │ ├── variable: kv.v [type=int] + │ │ │ └── variable: kv.k [type=int] + │ │ └── aggregations + │ │ └── max [type=int] + │ │ └── variable: kv.k [type=int] + │ └── gt [type=bool] + │ ├── variable: column5 [type=int] + │ └── const: 10 [type=int] + └── projections + └── variable: column5 [type=int] + +build +SELECT MAX(k) AS mk1, MAX(k) AS mk2 FROM kv GROUP BY v HAVING 
MAX(k) > 10 ORDER BY mk2 +---- +sort + ├── columns: mk1:5(int) mk2:5(int) + ├── ordering: +5 + └── project + ├── columns: column5:5(int) + ├── select + │ ├── columns: kv.v:2(int) column5:5(int) + │ ├── group-by + │ │ ├── columns: kv.v:2(int) column5:5(int) + │ │ ├── grouping columns: kv.v:2(int) + │ │ ├── project + │ │ │ ├── columns: kv.v:2(int) kv.k:1(int!null) + │ │ │ ├── scan kv + │ │ │ │ └── columns: kv.k:1(int!null) kv.v:2(int) kv.w:3(int) kv.s:4(string) + │ │ │ └── projections + │ │ │ ├── variable: kv.v [type=int] + │ │ │ └── variable: kv.k [type=int] + │ │ └── aggregations + │ │ └── max [type=int] + │ │ └── variable: kv.k [type=int] + │ └── gt [type=bool] + │ ├── variable: column5 [type=int] + │ └── const: 10 [type=int] + └── projections + └── variable: column5 [type=int]