From ac19b148c2462e29874fdbfc204aacd23dc77434 Mon Sep 17 00:00:00 2001 From: Zoltan Haindrich Date: Thu, 13 Jun 2024 08:47:50 +0200 Subject: [PATCH] Upgrade calcite to 1.37.0 (#16504) * contains Make a full copy of the parser and apply our modifications to it #16503 * some minor api changes pair/entry * some unnecessary aggregation was removed from a set of queries in `CalciteSubqueryTest` * `AliasedOperatorConversion` was detecting `CHAR_LENGTH` as not a function ; I've removed the check * the field it was using doesn't look maintained that much * the `kind` is passed for the created `SqlFunction` so I don't think this check is actually needed * some decoupled test cases become broken - will be fixed later * some aggregate related changes: due to the fact that SUM() and COUNT() of no inputs are different * upgrade avatica to 1.25.0 * `CalciteQueryTest#testExactCountDistinctWithFilter` is now executable Close apache/druid#16503 --- dev/upgrade-calcite-parser | 72 + .../druid/msq/sql/MSQTaskQueryMaker.java | 10 +- .../druid/msq/sql/MSQTaskSqlEngine.java | 28 +- .../msq/test/CalciteSelectQueryMSQTest.java | 8 + licenses.yaml | 4 +- pom.xml | 10 +- processing/src/test/resources/log4j2.xml | 3 + sql/pom.xml | 80 +- sql/src/main/codegen/config.fmpp | 2 - sql/src/main/codegen/default_config.fmpp | 459 + sql/src/main/codegen/includes/from.ftl | 306 - sql/src/main/codegen/includes/insert.ftl | 113 - sql/src/main/codegen/templates/Parser.jj | 9235 +++++++++++++++++ .../expression/AliasedOperatorConversion.java | 6 - .../calcite/parser/DruidSqlParserUtils.java | 6 +- .../druid/sql/calcite/planner/QueryUtils.java | 6 +- .../sql/calcite/rel/logical/DruidWindow.java | 6 + .../sql/calcite/run/NativeQueryMaker.java | 10 +- .../calcite/CalciteNestedDataQueryTest.java | 121 +- .../druid/sql/calcite/CalciteQueryTest.java | 120 +- .../sql/calcite/CalciteSelectQueryTest.java | 13 +- .../sql/calcite/CalciteSubqueryTest.java | 359 +- .../druid/sql/calcite/NotYetSupported.java | 3 +- 
.../parser/DruidSqlParserUtilsTest.java | 41 +- ...ccess@all_disabled@NullHandling=default.iq | 10 +- ...ectAccess@all_disabled@NullHandling=sql.iq | 10 +- ...Access@all_enabled@NullHandling=default.iq | 10 +- ...rectAccess@all_enabled@NullHandling=sql.iq | 10 +- ...rectAccess@default@NullHandling=default.iq | 10 +- ...ftDirectAccess@default@NullHandling=sql.iq | 10 +- ...ue-column_disabled@NullHandling=default.iq | 10 +- ...-value-column_disabled@NullHandling=sql.iq | 10 +- ...-rewrites-disabled@NullHandling=default.iq | 10 +- ...lter-rewrites-disabled@NullHandling=sql.iq | 10 +- ...ss@filter-rewrites@NullHandling=default.iq | 10 +- ...Access@filter-rewrites@NullHandling=sql.iq | 10 +- ...ess@join-to-filter@NullHandling=default.iq | 10 +- ...tAccess@join-to-filter@NullHandling=sql.iq | 10 +- ...ccess@all_disabled@NullHandling=default.iq | 10 +- ...ectAccess@all_disabled@NullHandling=sql.iq | 10 +- ...Access@all_enabled@NullHandling=default.iq | 10 +- ...rectAccess@all_enabled@NullHandling=sql.iq | 10 +- ...rectAccess@default@NullHandling=default.iq | 10 +- ...ftDirectAccess@default@NullHandling=sql.iq | 10 +- ...ue-column_disabled@NullHandling=default.iq | 10 +- ...-value-column_disabled@NullHandling=sql.iq | 10 +- ...-rewrites-disabled@NullHandling=default.iq | 10 +- ...lter-rewrites-disabled@NullHandling=sql.iq | 10 +- ...ss@filter-rewrites@NullHandling=default.iq | 10 +- ...Access@filter-rewrites@NullHandling=sql.iq | 10 +- ...ess@join-to-filter@NullHandling=default.iq | 10 +- ...tAccess@join-to-filter@NullHandling=sql.iq | 10 +- ...ccess@all_disabled@NullHandling=default.iq | 10 +- ...ectAccess@all_disabled@NullHandling=sql.iq | 10 +- ...Access@all_enabled@NullHandling=default.iq | 10 +- ...rectAccess@all_enabled@NullHandling=sql.iq | 10 +- ...rectAccess@default@NullHandling=default.iq | 10 +- ...ftDirectAccess@default@NullHandling=sql.iq | 10 +- ...ue-column_disabled@NullHandling=default.iq | 10 +- ...-value-column_disabled@NullHandling=sql.iq | 10 +- 
...-rewrites-disabled@NullHandling=default.iq | 10 +- ...lter-rewrites-disabled@NullHandling=sql.iq | 10 +- ...ss@filter-rewrites@NullHandling=default.iq | 10 +- ...Access@filter-rewrites@NullHandling=sql.iq | 10 +- ...ess@join-to-filter@NullHandling=default.iq | 10 +- ...tAccess@join-to-filter@NullHandling=sql.iq | 10 +- ...ccess@all_disabled@NullHandling=default.iq | 10 +- ...ectAccess@all_disabled@NullHandling=sql.iq | 10 +- ...Access@all_enabled@NullHandling=default.iq | 10 +- ...rectAccess@all_enabled@NullHandling=sql.iq | 10 +- ...rectAccess@default@NullHandling=default.iq | 10 +- ...ftDirectAccess@default@NullHandling=sql.iq | 10 +- ...ue-column_disabled@NullHandling=default.iq | 10 +- ...-value-column_disabled@NullHandling=sql.iq | 10 +- ...-rewrites-disabled@NullHandling=default.iq | 10 +- ...lter-rewrites-disabled@NullHandling=sql.iq | 10 +- ...ss@filter-rewrites@NullHandling=default.iq | 10 +- ...Access@filter-rewrites@NullHandling=sql.iq | 10 +- ...ess@join-to-filter@NullHandling=default.iq | 10 +- ...tAccess@join-to-filter@NullHandling=sql.iq | 10 +- ...ccess@all_disabled@NullHandling=default.iq | 10 +- ...ectAccess@all_disabled@NullHandling=sql.iq | 10 +- ...Access@all_enabled@NullHandling=default.iq | 10 +- ...rectAccess@all_enabled@NullHandling=sql.iq | 10 +- ...rectAccess@default@NullHandling=default.iq | 10 +- ...ftDirectAccess@default@NullHandling=sql.iq | 10 +- ...ue-column_disabled@NullHandling=default.iq | 10 +- ...-value-column_disabled@NullHandling=sql.iq | 10 +- ...-rewrites-disabled@NullHandling=default.iq | 10 +- ...lter-rewrites-disabled@NullHandling=sql.iq | 10 +- ...ss@filter-rewrites@NullHandling=default.iq | 10 +- ...Access@filter-rewrites@NullHandling=sql.iq | 10 +- ...ess@join-to-filter@NullHandling=default.iq | 10 +- ...tAccess@join-to-filter@NullHandling=sql.iq | 10 +- ...pByTimeFloorAndDim@NullHandling=default.iq | 16 +- ...GroupByTimeFloorAndDim@NullHandling=sql.iq | 18 +- ...estGroupByWithLiteralInSubqueryGrouping.iq | 2 +- 
...ojectDoesNotRename@NullHandling=default.iq | 32 +- ...tyProjectDoesNotRename@NullHandling=sql.iq | 30 +- 99 files changed, 10532 insertions(+), 1287 deletions(-) create mode 100755 dev/upgrade-calcite-parser create mode 100644 sql/src/main/codegen/default_config.fmpp delete mode 100644 sql/src/main/codegen/includes/from.ftl delete mode 100644 sql/src/main/codegen/includes/insert.ftl create mode 100644 sql/src/main/codegen/templates/Parser.jj diff --git a/dev/upgrade-calcite-parser b/dev/upgrade-calcite-parser new file mode 100755 index 000000000000..f311d3ea7381 --- /dev/null +++ b/dev/upgrade-calcite-parser @@ -0,0 +1,72 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#-------------------------------------------------------------------- + +# Adopts base Calcite parser changes +# +# Establishes a git-friendly merge situation: +# +# Creates a commit which matches the original state of the calcite parser; +# From this point it creates two alternates: +# * one with local customizations +# * another with all the upstream updates +# merges the two branches to obtain the upgrade state +# + +[ $# -ne 2 ] && echo -e "updates base parser sources.\n usage: $0 <old-calcite-version> <new-calcite-version>" && exit 1 + +CALCITE_OLD=$1 +CALCITE_NEW=$2 + +set -e +set -x + +BRANCH=`git name-rev --name-only HEAD` + +REPO=.git/calcite-upgrade +rm -rf "$REPO" +git clone $PWD --reference $PWD --branch $BRANCH $REPO + +cd "$REPO" +git checkout -b curr-changes + +mvn -q generate-sources -pl sql -Dcalcite.version=$CALCITE_OLD -Pskip-static-checks +cp -r sql/target/calcite-base-parser/codegen/./ sql/src/main/codegen/./ +git commit -m 'current reverse' -a +git revert --no-edit HEAD +# HEAD is now at the same as before; but the parents are the base calcite changes + +git branch base-changes curr-changes^ +git checkout base-changes +git show|patch -p0 -R # undo temporarily to ensure maven runs + +mvn -q generate-sources -pl sql -Dcalcite.version=$CALCITE_NEW -Pskip-static-checks +cp -r sql/target/calcite-base-parser/codegen/./ sql/src/main/codegen/./ + +git commit --allow-empty -m base-changes -a +git checkout -b new-state +git merge --no-edit curr-changes + +echo ok +cd - + +git remote remove calcite-upgrade &>/dev/null || echo -n +git remote add -f calcite-upgrade "$REPO" + + +echo "merge branch calcite-upgrade/curr-changes if satisfied with those changes" + diff --git a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java index 4debe4d9d43e..58031558a0a7 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java +++ 
b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskQueryMaker.java @@ -24,7 +24,6 @@ import com.google.common.base.Preconditions; import org.apache.calcite.runtime.Hook; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.calcite.util.Pair; import org.apache.druid.common.guava.FutureUtils; import org.apache.druid.error.DruidException; import org.apache.druid.error.InvalidInput; @@ -78,6 +77,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Optional; import java.util.stream.Collectors; @@ -91,7 +91,7 @@ public class MSQTaskQueryMaker implements QueryMaker private final OverlordClient overlordClient; private final PlannerContext plannerContext; private final ObjectMapper jsonMapper; - private final List> fieldMapping; + private final List> fieldMapping; MSQTaskQueryMaker( @@ -99,7 +99,7 @@ public class MSQTaskQueryMaker implements QueryMaker final OverlordClient overlordClient, final PlannerContext plannerContext, final ObjectMapper jsonMapper, - final List> fieldMapping + final List> fieldMapping ) { this.targetDataSource = targetDataSource; @@ -193,7 +193,7 @@ public QueryResponse runQuery(final DruidQuery druidQuery) final List columnTypeList = new ArrayList<>(); final List columnMappings = QueryUtils.buildColumnMappings(fieldMapping, druidQuery); - for (final Pair entry : fieldMapping) { + for (final Entry entry : fieldMapping) { final String queryColumn = druidQuery.getOutputRowSignature().getColumnName(entry.getKey()); final SqlTypeName sqlTypeName; @@ -238,7 +238,7 @@ public QueryResponse runQuery(final DruidQuery druidQuery) MSQTaskQueryMakerUtils.validateSegmentSortOrder( segmentSortOrder, - fieldMapping.stream().map(f -> f.right).collect(Collectors.toList()) + fieldMapping.stream().map(f -> f.getValue()).collect(Collectors.toList()) ); final DataSourceMSQDestination dataSourceMSQDestination = new DataSourceMSQDestination( diff --git 
a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java index 99fc71f1b81a..e3baa058b41a 100644 --- a/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java +++ b/extensions-core/multi-stage-query/src/main/java/org/apache/druid/msq/sql/MSQTaskSqlEngine.java @@ -33,7 +33,6 @@ import org.apache.calcite.schema.Table; import org.apache.calcite.sql.dialect.CalciteSqlDialect; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.calcite.util.Pair; import org.apache.druid.error.DruidException; import org.apache.druid.error.InvalidInput; import org.apache.druid.error.InvalidSqlInput; @@ -65,6 +64,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; public class MSQTaskSqlEngine implements SqlEngine @@ -222,7 +222,7 @@ private static void validateSelect(final PlannerContext plannerContext) */ private static void validateInsert( final RelNode rootRel, - final List> fieldMappings, + final List> fieldMappings, @Nullable Table targetTable, final PlannerContext plannerContext ) @@ -241,13 +241,13 @@ private static void validateInsert( * SQL allows multiple output columns with the same name. However, we don't allow this for INSERT or REPLACE * queries, because we use these output names to generate columns in segments. They must be unique. 
*/ - private static void validateNoDuplicateAliases(final List> fieldMappings) + private static void validateNoDuplicateAliases(final List> fieldMappings) { final Set aliasesSeen = new HashSet<>(); - for (final Pair field : fieldMappings) { - if (!aliasesSeen.add(field.right)) { - throw InvalidSqlInput.exception("Duplicate field in SELECT: [%s]", field.right); + for (final Entry field : fieldMappings) { + if (!aliasesSeen.add(field.getValue())) { + throw InvalidSqlInput.exception("Duplicate field in SELECT: [%s]", field.getValue()); } } } @@ -353,7 +353,7 @@ private static void validateLimitAndOffset(final RelNode rootRel, final boolean */ private static void validateTypeChanges( final RelNode rootRel, - final List> fieldMappings, + final List> fieldMappings, @Nullable final Table targetTable, final PlannerContext plannerContext ) @@ -366,9 +366,9 @@ private static void validateTypeChanges( MultiStageQueryContext.getColumnsExcludedFromTypeVerification(plannerContext.queryContext()); final ArrayIngestMode arrayIngestMode = MultiStageQueryContext.getArrayIngestMode(plannerContext.queryContext()); - for (Pair fieldMapping : fieldMappings) { - final int columnIndex = fieldMapping.left; - final String columnName = fieldMapping.right; + for (Entry fieldMapping : fieldMappings) { + final int columnIndex = fieldMapping.getKey(); + final String columnName = fieldMapping.getValue(); final RelDataTypeField oldSqlTypeField = targetTable.getRowType(DruidTypeSystem.TYPE_FACTORY).getField(columnName, true, false); @@ -427,11 +427,11 @@ private static void validateTypeChanges( * * Returns -1 if the list does not contain a time column. 
*/ - private static int getTimeColumnIndex(final List> fieldMappings) + private static int getTimeColumnIndex(final List> fieldMappings) { - for (final Pair field : fieldMappings) { - if (field.right.equals(ColumnHolder.TIME_COLUMN_NAME)) { - return field.left; + for (final Entry field : fieldMappings) { + if (field.getValue().equals(ColumnHolder.TIME_COLUMN_NAME)) { + return field.getKey(); } } diff --git a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/CalciteSelectQueryMSQTest.java b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/CalciteSelectQueryMSQTest.java index 87a794f8f863..3008f9d43b47 100644 --- a/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/CalciteSelectQueryMSQTest.java +++ b/extensions-core/multi-stage-query/src/test/java/org/apache/druid/msq/test/CalciteSelectQueryMSQTest.java @@ -150,6 +150,14 @@ public void testExactCountDistinctWithFilter() } + @Disabled + @Override + @Test + public void testExactCountDistinctWithFilter2() + { + + } + @Disabled @Override @Test diff --git a/licenses.yaml b/licenses.yaml index 752e82081643..3c1f7b0b2d46 100644 --- a/licenses.yaml +++ b/licenses.yaml @@ -1616,7 +1616,7 @@ name: Apache Calcite license_category: binary module: java-core license_name: Apache License version 2.0 -version: 1.35.0 +version: 1.37.0 libraries: - org.apache.calcite: calcite-core - org.apache.calcite: calcite-linq4j @@ -1634,7 +1634,7 @@ name: Apache Calcite Avatica license_category: binary module: java-core license_name: Apache License version 2.0 -version: 1.23.0 +version: 1.25.0 libraries: - org.apache.calcite.avatica: avatica-core - org.apache.calcite.avatica: avatica-metrics diff --git a/pom.xml b/pom.xml index f4dd5f0d4512..0ab733968574 100644 --- a/pom.xml +++ b/pom.xml @@ -82,13 +82,12 @@ 2.4.0 2.10.1 2.13.14 - 1.23.0 + 1.25.0 1.11.3 - - 1.35.0 + 1.37.0 6.2.12 4.2.0 2.2.0 @@ -2189,6 +2188,7 @@ **/.classpath **/.project **/*.iq + **/*.iq.out diff --git 
a/processing/src/test/resources/log4j2.xml b/processing/src/test/resources/log4j2.xml index d335523d0863..ed8a48d02c11 100644 --- a/processing/src/test/resources/log4j2.xml +++ b/processing/src/test/resources/log4j2.xml @@ -31,5 +31,8 @@ + + + diff --git a/sql/pom.xml b/sql/pom.xml index a90d9e5f2987..b3ccc6ad2586 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -333,8 +333,8 @@ - - + + org.apache.maven.plugins maven-dependency-plugin @@ -353,17 +353,8 @@ ${calcite.version} jar true - ${project.build.directory}/ - **/Parser.jj - - - org.apache.calcite - calcite-core - ${calcite.version} - jar - true - ${project.build.directory}/ - **/default_config.fmpp + ${project.build.directory}/calcite-base-parser + codegen/** @@ -371,32 +362,7 @@ - - - maven-resources-plugin - - - copy-fmpp-resources - generate-sources - - copy-resources - - - ${project.build.directory}/codegen - - - src/main/codegen - false - - - - - - - - + com.googlecode.fmpp-maven-plugin fmpp-maven-plugin @@ -408,9 +374,9 @@ generate - ${project.build.directory}/codegen/config.fmpp + src/main/codegen/config.fmpp ${project.build.directory}/generated-sources - ${project.build.directory}/codegen/templates + src/main/codegen/templates @@ -440,38 +406,6 @@ - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - generate-sources - - replace - - - - - ${project.build.directory}/generated-sources/org/apache/druid/sql/calcite/parser - - **/DruidSqlParserImpl.java - - - - fromClause = FromClause - fromClause = DruidFromClause - - - - - org.codehaus.mojo diff --git a/sql/src/main/codegen/config.fmpp b/sql/src/main/codegen/config.fmpp index 12bd51351d27..837c04f42a63 100644 --- a/sql/src/main/codegen/config.fmpp +++ b/sql/src/main/codegen/config.fmpp @@ -100,10 +100,8 @@ data: { # "dataTypeParserMethods". 
implementationFiles: [ "common.ftl" - "insert.ftl" "explain.ftl" "replace.ftl" - "from.ftl" ] } } diff --git a/sql/src/main/codegen/default_config.fmpp b/sql/src/main/codegen/default_config.fmpp new file mode 100644 index 000000000000..5f48306c6526 --- /dev/null +++ b/sql/src/main/codegen/default_config.fmpp @@ -0,0 +1,459 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default data declarations for parsers. +# Each of these may be overridden in a parser's config.fmpp file. +# In addition, each parser must define "package" and "class". +parser: { + # List of additional classes and packages to import. + # Example: "org.apache.calcite.sql.*", "java.util.List". + imports: [ + ] + + # List of new keywords. Example: "DATABASES", "TABLES". If the keyword is + # not a reserved keyword, add it to the 'nonReservedKeywords' section. + keywords: [ + ] + + # List of keywords from "keywords" section that are not reserved. 
+ nonReservedKeywords: [ + "A" + "ABSENT" + "ABSOLUTE" + "ACTION" + "ADA" + "ADD" + "ADMIN" + "AFTER" + "ALWAYS" + "APPLY" + "ARRAY_AGG" + "ARRAY_CONCAT_AGG" + "ASC" + "ASSERTION" + "ASSIGNMENT" + "ATTRIBUTE" + "ATTRIBUTES" + "BEFORE" + "BERNOULLI" + "BREADTH" + "C" + "CASCADE" + "CATALOG" + "CATALOG_NAME" + "CENTURY" + "CHAIN" + "CHARACTERISTICS" + "CHARACTERS" + "CHARACTER_SET_CATALOG" + "CHARACTER_SET_NAME" + "CHARACTER_SET_SCHEMA" + "CLASS_ORIGIN" + "COBOL" + "COLLATION" + "COLLATION_CATALOG" + "COLLATION_NAME" + "COLLATION_SCHEMA" + "COLUMN_NAME" + "COMMAND_FUNCTION" + "COMMAND_FUNCTION_CODE" + "COMMITTED" + "CONDITIONAL" + "CONDITION_NUMBER" + "CONNECTION" + "CONNECTION_NAME" + "CONSTRAINT_CATALOG" + "CONSTRAINT_NAME" + "CONSTRAINTS" + "CONSTRAINT_SCHEMA" + "CONSTRUCTOR" + "CONTAINS_SUBSTR" + "CONTINUE" + "CURSOR_NAME" + "DATA" + "DATABASE" + "DATE_DIFF" + "DATE_TRUNC" + "DATETIME_DIFF" + "DATETIME_INTERVAL_CODE" + "DATETIME_INTERVAL_PRECISION" + "DATETIME_TRUNC" + "DAYOFWEEK" + "DAYOFYEAR" + "DAYS" + "DECADE" + "DEFAULTS" + "DEFERRABLE" + "DEFERRED" + "DEFINED" + "DEFINER" + "DEGREE" + "DEPTH" + "DERIVED" + "DESC" + "DESCRIPTION" + "DESCRIPTOR" + "DIAGNOSTICS" + "DISPATCH" + "DOMAIN" + "DOW" + "DOY" + "DOT_FORMAT" + "DYNAMIC_FUNCTION" + "DYNAMIC_FUNCTION_CODE" + "ENCODING" + "EPOCH" + "ERROR" + "EXCEPTION" + "EXCLUDE" + "EXCLUDING" + "FINAL" + "FIRST" + "FOLLOWING" + "FORMAT" + "FORTRAN" + "FOUND" + "FRAC_SECOND" + "G" + "GENERAL" + "GENERATED" + "GEOMETRY" + "GO" + "GOTO" + "GRANTED" + "GROUP_CONCAT" + "HIERARCHY" + "HOP" + "HOURS" + "IGNORE" + "ILIKE" + "IMMEDIATE" + "IMMEDIATELY" + "IMPLEMENTATION" + "INCLUDE" + "INCLUDING" + "INCREMENT" + "INITIALLY" + "INPUT" + "INSTANCE" + "INSTANTIABLE" + "INVOKER" + "ISODOW" + "ISOLATION" + "ISOYEAR" + "JAVA" + "JSON" + "K" + "KEY" + "KEY_MEMBER" + "KEY_TYPE" + "LABEL" + "LAST" + "LENGTH" + "LEVEL" + "LIBRARY" + "LOCATOR" + "M" + "MAP" + "MATCHED" + "MAXVALUE" + "MESSAGE_LENGTH" + "MESSAGE_OCTET_LENGTH" + 
"MESSAGE_TEXT" + "MICROSECOND" + "MILLENNIUM" + "MILLISECOND" + "MINUTES" + "MINVALUE" + "MONTHS" + "MORE_" + "MUMPS" + "NAME" + "NAMES" + "NANOSECOND" + "NESTING" + "NORMALIZED" + "NULLABLE" + "NULLS" + "NUMBER" + "OBJECT" + "OCTETS" + "OPTION" + "OPTIONS" + "ORDERING" + "ORDINALITY" + "OTHERS" + "OUTPUT" + "OVERRIDING" + "PAD" + "PARAMETER_MODE" + "PARAMETER_NAME" + "PARAMETER_ORDINAL_POSITION" + "PARAMETER_SPECIFIC_CATALOG" + "PARAMETER_SPECIFIC_NAME" + "PARAMETER_SPECIFIC_SCHEMA" + "PARTIAL" + "PASCAL" + "PASSING" + "PASSTHROUGH" + "PAST" + "PATH" + "PIVOT" + "PLACING" + "PLAN" + "PLI" + "PRECEDING" + "PRESERVE" + "PRIOR" + "PRIVILEGES" + "PUBLIC" + "QUARTER" + "QUARTERS" + "READ" + "RELATIVE" + "REPEATABLE" + "REPLACE" + "RESPECT" + "RESTART" + "RESTRICT" + "RETURNED_CARDINALITY" + "RETURNED_LENGTH" + "RETURNED_OCTET_LENGTH" + "RETURNED_SQLSTATE" + "RETURNING" + "RLIKE" + "ROLE" + "ROUTINE" + "ROUTINE_CATALOG" + "ROUTINE_NAME" + "ROUTINE_SCHEMA" + "ROW_COUNT" + "SCALAR" + "SCALE" + "SCHEMA" + "SCHEMA_NAME" + "SCOPE_CATALOGS" + "SCOPE_NAME" + "SCOPE_SCHEMA" + "SECONDS" + "SECTION" + "SECURITY" + "SELF" + "SEPARATOR" + "SEQUENCE" + "SERIALIZABLE" + "SERVER" + "SERVER_NAME" + "SESSION" + "SETS" + "SIMPLE" + "SIZE" + "SOURCE" + "SPACE" + "SPECIFIC_NAME" + "SQL_BIGINT" + "SQL_BINARY" + "SQL_BIT" + "SQL_BLOB" + "SQL_BOOLEAN" + "SQL_CHAR" + "SQL_CLOB" + "SQL_DATE" + "SQL_DECIMAL" + "SQL_DOUBLE" + "SQL_FLOAT" + "SQL_INTEGER" + "SQL_INTERVAL_DAY" + "SQL_INTERVAL_DAY_TO_HOUR" + "SQL_INTERVAL_DAY_TO_MINUTE" + "SQL_INTERVAL_DAY_TO_SECOND" + "SQL_INTERVAL_HOUR" + "SQL_INTERVAL_HOUR_TO_MINUTE" + "SQL_INTERVAL_HOUR_TO_SECOND" + "SQL_INTERVAL_MINUTE" + "SQL_INTERVAL_MINUTE_TO_SECOND" + "SQL_INTERVAL_MONTH" + "SQL_INTERVAL_SECOND" + "SQL_INTERVAL_YEAR" + "SQL_INTERVAL_YEAR_TO_MONTH" + "SQL_LONGVARBINARY" + "SQL_LONGVARCHAR" + "SQL_LONGVARNCHAR" + "SQL_NCHAR" + "SQL_NCLOB" + "SQL_NUMERIC" + "SQL_NVARCHAR" + "SQL_REAL" + "SQL_SMALLINT" + "SQL_TIME" + "SQL_TIMESTAMP" + 
"SQL_TINYINT" + "SQL_TSI_DAY" + "SQL_TSI_FRAC_SECOND" + "SQL_TSI_HOUR" + "SQL_TSI_MICROSECOND" + "SQL_TSI_MINUTE" + "SQL_TSI_MONTH" + "SQL_TSI_QUARTER" + "SQL_TSI_SECOND" + "SQL_TSI_WEEK" + "SQL_TSI_YEAR" + "SQL_VARBINARY" + "SQL_VARCHAR" + "STATE" + "STATEMENT" + "STRING_AGG" + "STRUCTURE" + "STYLE" + "SUBCLASS_ORIGIN" + "SUBSTITUTE" + "TABLE_NAME" + "TEMPORARY" + "TIES" + "TIME_DIFF" + "TIME_TRUNC" + "TIMESTAMPADD" + "TIMESTAMPDIFF" + "TIMESTAMP_DIFF" + "TIMESTAMP_TRUNC" + "TOP_LEVEL_COUNT" + "TRANSACTION" + "TRANSACTIONS_ACTIVE" + "TRANSACTIONS_COMMITTED" + "TRANSACTIONS_ROLLED_BACK" + "TRANSFORM" + "TRANSFORMS" + "TRIGGER_CATALOG" + "TRIGGER_NAME" + "TRIGGER_SCHEMA" + "TUMBLE" + "TYPE" + "UNBOUNDED" + "UNCOMMITTED" + "UNCONDITIONAL" + "UNDER" + "UNPIVOT" + "UNNAMED" + "USAGE" + "USER_DEFINED_TYPE_CATALOG" + "USER_DEFINED_TYPE_CODE" + "USER_DEFINED_TYPE_NAME" + "USER_DEFINED_TYPE_SCHEMA" + "UTF16" + "UTF32" + "UTF8" + "VERSION" + "VIEW" + "WEEK" + "WEEKS" + "WORK" + "WRAPPER" + "WRITE" + "XML" + "YEARS" + "ZONE" + ] + + # List of non-reserved keywords to add; + # items in this list become non-reserved. + nonReservedKeywordsToAdd: [ + ] + + # List of non-reserved keywords to remove; + # items in this list become reserved. + nonReservedKeywordsToRemove: [ + ] + + # List of additional join types. Each is a method with no arguments. + # Example: "LeftSemiJoin". + joinTypes: [ + ] + + # List of methods for parsing custom SQL statements. + # Return type of method implementation should be 'SqlNode'. + # Example: "SqlShowDatabases()", "SqlShowTables()". + statementParserMethods: [ + ] + + # List of methods for parsing custom literals. + # Return type of method implementation should be "SqlNode". + # Example: ParseJsonLiteral(). + literalParserMethods: [ + ] + + # List of methods for parsing custom data types. + # Return type of method implementation should be "SqlTypeNameSpec". + # Example: SqlParseTimeStampZ(). 
+ dataTypeParserMethods: [ + ] + + # List of methods for parsing builtin function calls. + # Return type of method implementation should be "SqlNode". + # Example: "DateTimeConstructorCall()". + builtinFunctionCallMethods: [ + ] + + # List of methods for parsing extensions to "ALTER " calls. + # Each must accept arguments "(SqlParserPos pos, String scope)". + # Example: "SqlAlterTable". + alterStatementParserMethods: [ + ] + + # List of methods for parsing extensions to "CREATE [OR REPLACE]" calls. + # Each must accept arguments "(SqlParserPos pos, boolean replace)". + # Example: "SqlCreateForeignSchema". + createStatementParserMethods: [ + ] + + # List of methods for parsing extensions to "DROP" calls. + # Each must accept arguments "(SqlParserPos pos)". + # Example: "SqlDropSchema". + dropStatementParserMethods: [ + ] + + # List of methods for parsing extensions to "TRUNCATE" calls. + # Each must accept arguments "(SqlParserPos pos)". + # Example: "SqlTruncate". + truncateStatementParserMethods: [ + ] + + # Binary operators tokens. + # Example: "< INFIX_CAST: \"::\" >". + binaryOperatorsTokens: [ + ] + + # Binary operators initialization. + # Example: "InfixCast". + extraBinaryExpressions: [ + ] + + # List of files in @includes directory that have parser method + # implementations for parsing custom SQL statements, literals or types + # given as part of "statementParserMethods", "literalParserMethods" or + # "dataTypeParserMethods". + # Example: "parserImpls.ftl". + implementationFiles: [ + ] + + # Custom identifier token. + # Example: "< IDENTIFIER: (|)+ >". 
+ customIdentifierToken: "" + + includePosixOperators: false + includeCompoundIdentifier: true + includeBraces: true + includeAdditionalDeclarations: false + includeParsingStringLiteralAsArrayLiteral: false +} diff --git a/sql/src/main/codegen/includes/from.ftl b/sql/src/main/codegen/includes/from.ftl deleted file mode 100644 index ae6d03b841cb..000000000000 --- a/sql/src/main/codegen/includes/from.ftl +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Druid note: this file is copied from core/src/main/codegen/templates/Parser.jj in Calcite 1.35.0, with changes to - * to add two elements of Druid syntax to the FROM clause: - * - * id [ () ] - * - * And - * - * TABLE(()) () - * - * These changes were originally in https://github.com/apache/druid/pull/13360 as a patch script (sql/edit-parser.py), - * then later moved to this copied-and-edited file in https://github.com/apache/druid/pull/13553.* - * - * This file prefixes the required production rules with 'Druid' so that the whole FROM production rule can be - * derived from this file itself. The production clause is injected in the grammar using the maven replace plugin in - * sql module's pom. 
- */ - -/** - * Parses the FROM clause for a SELECT. - * - *

FROM is mandatory in standard SQL, optional in dialects such as MySQL, - * PostgreSQL. The parser allows SELECT without FROM, but the validator fails - * if conformance is, say, STRICT_2003. - */ -SqlNode DruidFromClause() : -{ - SqlNode e, e2; - SqlLiteral joinType; -} -{ - e = DruidJoin() - ( - // Comma joins should only occur at top-level in the FROM clause. - // Valid: - // * FROM a, b - // * FROM (a CROSS JOIN b), c - // Not valid: - // * FROM a CROSS JOIN (b, c) - LOOKAHEAD(1) - { joinType = JoinType.COMMA.symbol(getPos()); } - e2 = DruidJoin() { - e = new SqlJoin(joinType.getParserPosition(), - e, - SqlLiteral.createBoolean(false, joinType.getParserPosition()), - joinType, - e2, - JoinConditionType.NONE.symbol(SqlParserPos.ZERO), - null); - } - )* - { return e; } -} - -SqlNode DruidJoin() : -{ - SqlNode e; -} -{ - e = DruidTableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) - ( - LOOKAHEAD(2) - e = DruidJoinTable(e) - )* - { - return e; - } -} - -/** Matches "LEFT JOIN t ON ...", "RIGHT JOIN t USING ...", "JOIN t". */ -SqlNode DruidJoinTable(SqlNode e) : -{ - SqlNode e2, condition; - final SqlLiteral natural, joinType, on, using; - SqlNodeList list; -} -{ - // LOOKAHEAD(3) is needed here rather than a LOOKAHEAD(2) because JavaCC - // calculates minimum lookahead count incorrectly for choice that contains - // zero size child. For instance, with the generated code, - // "LOOKAHEAD(2, Natural(), JoinType())" - // returns true immediately if it sees a single "" token. Where we - // expect the lookahead succeeds after " ". - // - // For more information about the issue, - // see https://github.com/javacc/javacc/issues/86 - // - // We allow CROSS JOIN (joinType = CROSS_JOIN) to have a join condition, - // even though that is not valid SQL; the validator will catch it. 
- LOOKAHEAD(3) - natural = Natural() - joinType = JoinType() - e2 = DruidTableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) - ( - { on = JoinConditionType.ON.symbol(getPos()); } - condition = Expression(ExprContext.ACCEPT_SUB_QUERY) { - return new SqlJoin(joinType.getParserPosition(), - e, - natural, - joinType, - e2, - on, - condition); - } - | - { using = JoinConditionType.USING.symbol(getPos()); } - list = ParenthesizedSimpleIdentifierList() { - return new SqlJoin(joinType.getParserPosition(), - e, - natural, - joinType, - e2, - using, - new SqlNodeList(list, Span.of(using).end(this))); - } - | - { - return new SqlJoin(joinType.getParserPosition(), - e, - natural, - joinType, - e2, - JoinConditionType.NONE.symbol(joinType.getParserPosition()), - null); - } - ) -| - { joinType = JoinType.CROSS.symbol(getPos()); } - e2 = DruidTableRef2(true) { - if (!this.conformance.isApplyAllowed()) { - throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed()); - } - return new SqlJoin(joinType.getParserPosition(), - e, - SqlLiteral.createBoolean(false, joinType.getParserPosition()), - joinType, - e2, - JoinConditionType.NONE.symbol(SqlParserPos.ZERO), - null); - } -| - { joinType = JoinType.LEFT.symbol(getPos()); } - e2 = DruidTableRef2(true) { - if (!this.conformance.isApplyAllowed()) { - throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed()); - } - return new SqlJoin(joinType.getParserPosition(), - e, - SqlLiteral.createBoolean(false, joinType.getParserPosition()), - joinType, - e2, - JoinConditionType.ON.symbol(SqlParserPos.ZERO), - SqlLiteral.createBoolean(true, joinType.getParserPosition())); - } -} - -/** - * Parses a table reference in a FROM clause, not lateral unless LATERAL - * is explicitly specified. 
- */ -SqlNode DruidTableRef() : -{ - final SqlNode e; -} -{ - e = DruidTableRef3(ExprContext.ACCEPT_QUERY, false) { return e; } -} - -SqlNode DruidTableRef1(ExprContext exprContext) : -{ - final SqlNode e; -} -{ - e = DruidTableRef3(exprContext, false) { return e; } -} - -/** - * Parses a table reference in a FROM clause. - */ -SqlNode DruidTableRef2(boolean lateral) : -{ - final SqlNode e; -} -{ - e = DruidTableRef3(ExprContext.ACCEPT_QUERY, lateral) { return e; } -} - -SqlNode DruidTableRef3(ExprContext exprContext, boolean lateral) : -{ - final SqlIdentifier tableName; - SqlNode tableRef; - List paramList; - final SqlIdentifier alias; - final Span s; - SqlNodeList args; - final SqlNodeList columnAliasList; - SqlUnnestOperator unnestOp = SqlStdOperatorTable.UNNEST; - SqlNodeList extendList = null; -} -{ - ( - LOOKAHEAD(2) - tableName = CompoundTableIdentifier() - ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) - // BEGIN: Druid-specific code - [ - paramList = FunctionParameterList(ExprContext.ACCEPT_NONCURSOR) - { - tableRef = ParameterizeOperator.PARAM.createCall(tableRef, paramList); - } - ] - // END: Druid-specific code - tableRef = Over(tableRef) - [ tableRef = Snapshot(tableRef) ] - [ tableRef = MatchRecognize(tableRef) ] - | - LOOKAHEAD(2) - [ { lateral = true; } ] - tableRef = ParenthesizedExpression(exprContext) - tableRef = Over(tableRef) - tableRef = addLateral(tableRef, lateral) - [ tableRef = MatchRecognize(tableRef) ] - | - { s = span(); } - args = ParenthesizedQueryOrCommaList(ExprContext.ACCEPT_SUB_QUERY) - [ - { - unnestOp = SqlStdOperatorTable.UNNEST_WITH_ORDINALITY; - } - ] - { - tableRef = unnestOp.createCall(s.end(this), (List) args); - } - | - [ { lateral = true; } ] - tableRef = TableFunctionCall() - // BEGIN: Druid-specific code - [ - [ ] - extendList = ExtendList() - { - tableRef = ExtendOperator.EXTEND.createCall( - Span.of(tableRef, extendList).pos(), tableRef, extendList); - } - ] - // END: Druid-specific code - 
tableRef = addLateral(tableRef, lateral) - | - tableRef = ExtendedTableRef() - ) - [ - LOOKAHEAD(2) - tableRef = Pivot(tableRef) - ] - [ - LOOKAHEAD(2) - tableRef = Unpivot(tableRef) - ] - [ - [ ] alias = SimpleIdentifier() - ( - columnAliasList = ParenthesizedSimpleIdentifierList() - | { columnAliasList = null; } - ) - { - // Standard SQL (and Postgres) allow applying "AS alias" to a JOIN, - // e.g. "FROM (a CROSS JOIN b) AS c". The new alias obscures the - // internal aliases, and columns cannot be referenced if they are - // not unique. TODO: Support this behavior; see - // [CALCITE-5168] Allow AS after parenthesized JOIN - checkNotJoin(tableRef); - if (columnAliasList == null) { - tableRef = SqlStdOperatorTable.AS.createCall( - Span.of(tableRef).end(this), tableRef, alias); - } else { - List idList = new ArrayList(); - idList.add(tableRef); - idList.add(alias); - idList.addAll(columnAliasList.getList()); - tableRef = SqlStdOperatorTable.AS.createCall( - Span.of(tableRef).end(this), idList); - } - } - ] - [ tableRef = Tablesample(tableRef) ] - { return tableRef; } -} diff --git a/sql/src/main/codegen/includes/insert.ftl b/sql/src/main/codegen/includes/insert.ftl deleted file mode 100644 index 0f053a4f655a..000000000000 --- a/sql/src/main/codegen/includes/insert.ftl +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Parses an INSERT statement. This function is copied from SqlInsert in core/src/main/codegen/templates/Parser.jj, - * with some changes to allow a custom error message if an OVERWRITE clause is present. - */ -// Using fully qualified name for Pair class, since Calcite also has a same class name being used in the Parser.jj -SqlNode DruidSqlInsertEof() : -{ - SqlNode insertNode; - final List keywords = new ArrayList(); - final SqlNodeList keywordList; - final SqlIdentifier destination; - SqlNode tableRef = null; - SqlNode source; - final SqlNodeList columnList; - final Span s; - final Pair p; - SqlGranularityLiteral partitionedBy = null; - SqlNodeList clusteredBy = null; - SqlIdentifier exportFileFormat = null; -} -{ - ( - - | - { keywords.add(SqlInsertKeyword.UPSERT.symbol(getPos())); } - ) - { s = span(); } - SqlInsertKeywords(keywords) { - keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos()); - } - - ( - LOOKAHEAD(2) - destination = ExternalDestination() - | - destination = CompoundTableIdentifier() - ( tableRef = TableHints(destination) | { tableRef = destination; } ) - [ LOOKAHEAD(5) tableRef = ExtendTable(tableRef) ] - ) - ( - LOOKAHEAD(2) - p = ParenthesizedCompoundIdentifierList() { - if (p.right.size() > 0) { - tableRef = extend(tableRef, p.right); - } - if (p.left.size() > 0) { - columnList = p.left; - } else { - columnList = null; - } - } - | { columnList = null; } - ) - [ - exportFileFormat = FileFormat() - ] - ( - - { - throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing( - "An 
OVERWRITE clause is not allowed with INSERT statements. Use REPLACE statements if overwriting existing segments is required or remove the OVERWRITE clause." - ); - } - | - source = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) - ) - // PARTITIONED BY is necessary, but is kept optional in the grammar. It is asserted that it is not missing in the - // IngestHandler#validate() so that we can return a custom error message. - [ - - partitionedBy = PartitionGranularity() - ] - [ - clusteredBy = ClusteredBy() - ] - { - - } - // EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times. - // The reason for adding EOF here is to ensure that we create a DruidSqlInsert node after the syntax has been - // validated and throw SQL syntax errors before performing validations in the DruidSqlInsert which can overshadow the - // actual error message. - - { - insertNode = new SqlInsert(s.end(source), keywordList, destination, source, columnList); - if (!(insertNode instanceof SqlInsert)) { - // This shouldn't be encountered, but done as a defensive practice. SqlInsert() always returns a node of type - // SqlInsert - return insertNode; - } - SqlInsert sqlInsert = (SqlInsert) insertNode; - return DruidSqlInsert.create(sqlInsert, partitionedBy, clusteredBy, exportFileFormat); - } -} diff --git a/sql/src/main/codegen/templates/Parser.jj b/sql/src/main/codegen/templates/Parser.jj new file mode 100644 index 000000000000..0f8ae7e5f6d4 --- /dev/null +++ b/sql/src/main/codegen/templates/Parser.jj @@ -0,0 +1,9235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +<@pp.dropOutputFile /> + +<@pp.changeOutputFile name="javacc/Parser.jj" /> + +options { + STATIC = false; + IGNORE_CASE = true; + UNICODE_INPUT = true; +} + + +PARSER_BEGIN(${parser.class}) + +package ${parser.package}; + +<#list (parser.imports!default.parser.imports) as importStr> +import ${importStr}; + + +import org.apache.calcite.avatica.util.Casing; +import org.apache.calcite.avatica.util.TimeUnit; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.runtime.CalciteContextException; +import org.apache.calcite.sql.JoinConditionType; +import org.apache.calcite.sql.JoinType; +import org.apache.calcite.sql.SqlAlter; +import org.apache.calcite.sql.SqlBasicTypeNameSpec; +import org.apache.calcite.sql.SqlBinaryOperator; +import org.apache.calcite.sql.SqlCall; +import org.apache.calcite.sql.SqlCharStringLiteral; +import org.apache.calcite.sql.SqlCollation; +import org.apache.calcite.sql.SqlCollectionTypeNameSpec; +import org.apache.calcite.sql.SqlDataTypeSpec; +import org.apache.calcite.sql.SqlDelete; +import org.apache.calcite.sql.SqlDescribeSchema; +import org.apache.calcite.sql.SqlDescribeTable; +import org.apache.calcite.sql.SqlDynamicParam; +import org.apache.calcite.sql.SqlExplain; +import org.apache.calcite.sql.SqlExplainFormat; +import org.apache.calcite.sql.SqlExplainLevel; +import org.apache.calcite.sql.SqlFunction; +import org.apache.calcite.sql.SqlFunctionCategory; +import org.apache.calcite.sql.SqlHint; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlInsert; +import 
org.apache.calcite.sql.SqlInsertKeyword; +import org.apache.calcite.sql.SqlIntervalQualifier; +import org.apache.calcite.sql.SqlJdbcDataTypeName; +import org.apache.calcite.sql.SqlJdbcFunctionCall; +import org.apache.calcite.sql.SqlJoin; +import org.apache.calcite.sql.SqlJsonConstructorNullClause; +import org.apache.calcite.sql.SqlJsonEncoding; +import org.apache.calcite.sql.SqlJsonExistsErrorBehavior; +import org.apache.calcite.sql.SqlJsonEmptyOrError; +import org.apache.calcite.sql.SqlJsonQueryEmptyOrErrorBehavior; +import org.apache.calcite.sql.SqlJsonQueryWrapperBehavior; +import org.apache.calcite.sql.SqlJsonValueEmptyOrErrorBehavior; +import org.apache.calcite.sql.SqlJsonValueReturning; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlLambda; +import org.apache.calcite.sql.SqlLiteral; +import org.apache.calcite.sql.SqlMatchRecognize; +import org.apache.calcite.sql.SqlMerge; +import org.apache.calcite.sql.SqlMapTypeNameSpec; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlNodeList; +import org.apache.calcite.sql.SqlNumericLiteral; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.SqlOrderBy; +import org.apache.calcite.sql.SqlPivot; +import org.apache.calcite.sql.SqlPostfixOperator; +import org.apache.calcite.sql.SqlPrefixOperator; +import org.apache.calcite.sql.SqlRowTypeNameSpec; +import org.apache.calcite.sql.SqlSampleSpec; +import org.apache.calcite.sql.SqlSelect; +import org.apache.calcite.sql.SqlSelectKeyword; +import org.apache.calcite.sql.SqlSetOption; +import org.apache.calcite.sql.SqlSnapshot; +import org.apache.calcite.sql.SqlTableRef; +import org.apache.calcite.sql.SqlTypeNameSpec; +import org.apache.calcite.sql.SqlUnnestOperator; +import org.apache.calcite.sql.SqlUnpivot; +import org.apache.calcite.sql.SqlUpdate; +import org.apache.calcite.sql.SqlUserDefinedTypeNameSpec; +import org.apache.calcite.sql.SqlUtil; +import org.apache.calcite.sql.SqlWindow; +import 
org.apache.calcite.sql.SqlWith; +import org.apache.calcite.sql.SqlWithItem; +import org.apache.calcite.sql.fun.SqlCase; +import org.apache.calcite.sql.fun.SqlInternalOperators; +import org.apache.calcite.sql.fun.SqlLibraryOperators; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.fun.SqlTrimFunction; +import org.apache.calcite.sql.parser.Span; +import org.apache.calcite.sql.parser.SqlAbstractParserImpl; +import org.apache.calcite.sql.parser.SqlParseException; +import org.apache.calcite.sql.parser.SqlParser; +import org.apache.calcite.sql.parser.SqlParserImplFactory; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.calcite.sql.parser.SqlParserUtil; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.sql.validate.SqlConformance; +import org.apache.calcite.sql.validate.SqlConformanceEnum; +import org.apache.calcite.util.Glossary; +import org.apache.calcite.util.Pair; +import org.apache.calcite.util.SourceStringReader; +import org.apache.calcite.util.Util; +import org.apache.calcite.util.trace.CalciteTrace; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.slf4j.Logger; + +import java.io.Reader; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.apache.calcite.util.Static.RESOURCE; + +/** + * SQL parser, generated from Parser.jj by JavaCC. + * + *

The public wrapper for this parser is {@link SqlParser}. + */ +public class ${parser.class} extends SqlAbstractParserImpl +{ + private static final Logger LOGGER = CalciteTrace.getParserTracer(); + + // Can't use quoted literal because of a bug in how JavaCC translates + // backslash-backslash. + private static final char BACKSLASH = 0x5c; + private static final char DOUBLE_QUOTE = 0x22; + private static final String DQ = DOUBLE_QUOTE + ""; + private static final String DQDQ = DQ + DQ; + private static final SqlLiteral LITERAL_ZERO = + SqlLiteral.createExactNumeric("0", SqlParserPos.ZERO); + private static final SqlLiteral LITERAL_ONE = + SqlLiteral.createExactNumeric("1", SqlParserPos.ZERO); + private static final SqlLiteral LITERAL_MINUS_ONE = + SqlLiteral.createExactNumeric("-1", SqlParserPos.ZERO); + private static final BigDecimal ONE_HUNDRED = BigDecimal.valueOf(100L); + + private static Metadata metadata; + + private Casing unquotedCasing; + private Casing quotedCasing; + private int identifierMaxLength; + private SqlConformance conformance; + + /** + * {@link SqlParserImplFactory} implementation for creating parser. 
+ */ + public static final SqlParserImplFactory FACTORY = new SqlParserImplFactory() { + public SqlAbstractParserImpl getParser(Reader reader) { + final ${parser.class} parser = new ${parser.class}(reader); + if (reader instanceof SourceStringReader) { + final String sql = + ((SourceStringReader) reader).getSourceString(); + parser.setOriginalSql(sql); + } + return parser; + } + }; + + public SqlParseException normalizeException(Throwable ex) { + try { + if (ex instanceof ParseException) { + ex = cleanupParseException((ParseException) ex); + } + return convertException(ex); + } catch (ParseException e) { + throw new AssertionError(e); + } + } + + public Metadata getMetadata() { + synchronized (${parser.class}.class) { + if (metadata == null) { + metadata = new MetadataImpl( + new ${parser.class}(new java.io.StringReader(""))); + } + return metadata; + } + } + + public void setTabSize(int tabSize) { + jj_input_stream.setTabSize(tabSize); + } + + public void switchTo(SqlAbstractParserImpl.LexicalState state) { + final int stateOrdinal = + Arrays.asList(${parser.class}TokenManager.lexStateNames) + .indexOf(state.name()); + token_source.SwitchTo(stateOrdinal); + } + + public void setQuotedCasing(Casing quotedCasing) { + this.quotedCasing = quotedCasing; + } + + public void setUnquotedCasing(Casing unquotedCasing) { + this.unquotedCasing = unquotedCasing; + } + + public void setIdentifierMaxLength(int identifierMaxLength) { + this.identifierMaxLength = identifierMaxLength; + } + + public void setConformance(SqlConformance conformance) { + this.conformance = conformance; + } + + public SqlNode parseSqlExpressionEof() throws Exception { + return SqlExpressionEof(); + } + + public SqlNode parseSqlStmtEof() throws Exception { + return SqlStmtEof(); + } + + public SqlNodeList parseSqlStmtList() throws Exception { + return SqlStmtList(); + } + + public SqlNode parseArray() throws SqlParseException { + switchTo(LexicalState.BQID); + try { + return ArrayLiteral(); + } catch 
(ParseException ex) { + throw normalizeException(ex); + } catch (TokenMgrError ex) { + throw normalizeException(ex); + } + } + + private SqlNode extend(SqlNode table, SqlNodeList extendList) { + return SqlStdOperatorTable.EXTEND.createCall( + Span.of(table, extendList).pos(), table, extendList); + } + + /** Adds a warning that a token such as "HOURS" was used, + * whereas the SQL standard only allows "HOUR". + * + *

Currently, we silently add an exception to a list of warnings. In + * future, we may have better compliance checking, for example a strict + * compliance mode that throws if any non-standard features are used. */ + private TimeUnit warn(TimeUnit timeUnit) throws ParseException { + final String token = getToken(0).image.toUpperCase(Locale.ROOT); + warnings.add( + SqlUtil.newContextException(getPos(), + RESOURCE.nonStandardFeatureUsed(token))); + return timeUnit; + } +} + +PARSER_END(${parser.class}) + + +/*************************************** + * Utility Codes for Semantic Analysis * + ***************************************/ + +/* For Debug */ +JAVACODE +void debug_message1() { + LOGGER.info("{} , {}", getToken(0).image, getToken(1).image); +} + +JAVACODE String unquotedIdentifier() { + return SqlParserUtil.toCase(getToken(0).image, unquotedCasing); +} + +/** + * Allows parser to be extended with new types of table references. The + * default implementation of this production is empty. + */ +SqlNode ExtendedTableRef() : +{ +} +{ + UnusedExtension() + { + return null; + } +} + +/** + * Allows an OVER clause following a table expression as an extension to + * standard SQL syntax. The default implementation of this production is empty. + */ +SqlNode TableOverOpt() : +{ +} +{ + { + return null; + } +} + +/* + * Parses dialect-specific keywords immediately following the SELECT keyword. + */ +void SqlSelectKeywords(List keywords) : +{} +{ + E() +} + +/* + * Parses dialect-specific keywords immediately following the INSERT keyword. + */ +void SqlInsertKeywords(List keywords) : +{} +{ + E() +} + +/* +* Parse Floor/Ceil function parameters +*/ +SqlNode FloorCeilOptions(Span s, boolean floorFlag) : +{ + SqlNode node; +} +{ + node = StandardFloorCeilOptions(s, floorFlag) { + return node; + } +} + +/* +// This file contains the heart of a parser for SQL SELECT statements. 
+// code can be shared between various parsers (for example, a DDL parser and a +// DML parser) but is not a standalone JavaCC file. You need to prepend a +// parser declaration (such as that in Parser.jj). +*/ + +/* Epsilon */ +JAVACODE +void E() {} + +/** @Deprecated */ +JAVACODE List startList(Object o) +{ + List list = new ArrayList(); + list.add(o); + return list; +} + +/* + * NOTE jvs 6-Feb-2004: The straightforward way to implement the SQL grammar is + * to keep query expressions (SELECT, UNION, etc) separate from row expressions + * (+, LIKE, etc). However, this is not possible with an LL(k) parser, because + * both kinds of expressions allow parenthesization, so no fixed amount of left + * context is ever good enough. A sub-query can be a leaf in a row expression, + * and can include operators like UNION, so it's not even possible to use a + * syntactic lookahead rule like "look past an indefinite number of parentheses + * until you see SELECT, VALUES, or TABLE" (since at that point we still + * don't know whether we're parsing a sub-query like ((select ...) + x) + * vs. (select ... union select ...). + * + * The somewhat messy solution is to unify the two kinds of expression, + * and to enforce syntax rules using parameterized context. This + * is the purpose of the ExprContext parameter. It is passed to + * most expression productions, which check the expressions encountered + * against the context for correctness. When a query + * element like SELECT is encountered, the production calls + * checkQueryExpression, which will throw an exception if + * a row expression was expected instead. When a row expression like + * IN is encountered, the production calls checkNonQueryExpression + * instead. It is very important to understand how this works + * when modifying the grammar. + * + * The commingling of expressions results in some bogus ambiguities which are + * resolved with LOOKAHEAD hints. The worst example is comma. 
SQL allows both + * (WHERE x IN (1,2)) and (WHERE x IN (select ...)). This means when we parse + * the right-hand-side of an IN, we have to allow any kind of expression inside + * the parentheses. Now consider the expression "WHERE x IN(SELECT a FROM b + * GROUP BY c,d)". When the parser gets to "c,d" it doesn't know whether the + * comma indicates the end of the GROUP BY or the end of one item in an IN + * list. Luckily, we know that select and comma-list are mutually exclusive + * within IN, so we use maximal munch for the GROUP BY comma. However, this + * usage of hints could easily mask unintended ambiguities resulting from + * future changes to the grammar, making it very brittle. + */ + +JAVACODE protected SqlParserPos getPos() +{ + return new SqlParserPos( + token.beginLine, + token.beginColumn, + token.endLine, + token.endColumn); +} + +/** Starts a span at the current position. */ +JAVACODE Span span() +{ + return Span.of(getPos()); +} + +JAVACODE void checkQueryExpression(ExprContext exprContext) +{ + switch (exprContext) { + case ACCEPT_NON_QUERY: + case ACCEPT_SUB_QUERY: + case ACCEPT_CURSOR: + throw SqlUtil.newContextException(getPos(), + RESOURCE.illegalQueryExpression()); + } +} + +JAVACODE void checkNonQueryExpression(ExprContext exprContext) +{ + switch (exprContext) { + case ACCEPT_QUERY: + throw SqlUtil.newContextException(getPos(), + RESOURCE.illegalNonQueryExpression()); + } +} + +JAVACODE SqlNode checkNotJoin(SqlNode e) +{ + if (e instanceof SqlJoin) { + throw SqlUtil.newContextException(e.getParserPosition(), + RESOURCE.illegalJoinExpression()); + } + return e; +} + +/** + * Converts a ParseException (local to this particular instantiation + * of the parser) into a SqlParseException (common to all parsers). 
+ */ +JAVACODE SqlParseException convertException(Throwable ex) +{ + if (ex instanceof SqlParseException) { + return (SqlParseException) ex; + } + SqlParserPos pos = null; + int[][] expectedTokenSequences = null; + String[] tokenImage = null; + if (ex instanceof ParseException) { + ParseException pex = (ParseException) ex; + expectedTokenSequences = pex.expectedTokenSequences; + tokenImage = pex.tokenImage; + if (pex.currentToken != null) { + final Token token = pex.currentToken.next; + // Checks token.image.equals("1") to avoid recursive call. + // The SqlAbstractParserImpl#MetadataImpl constructor uses constant "1" to + // throw intentionally to collect the expected tokens. + if (!token.image.equals("1") + && getMetadata().isKeyword(token.image) + && SqlParserUtil.allowsIdentifier(tokenImage, expectedTokenSequences)) { + // If the next token is a keyword, reformat the error message as: + + // Incorrect syntax near the keyword '{keyword}' at line {line_number}, + // column {column_number}. + final String expecting = ex.getMessage() + .substring(ex.getMessage().indexOf("Was expecting")); + final String errorMsg = String.format("Incorrect syntax near the keyword '%s' " + + "at line %d, column %d.\n%s", + token.image, + token.beginLine, + token.beginColumn, + expecting); + // Replace the ParseException with explicit error message. + ex = new ParseException(errorMsg); + } + pos = new SqlParserPos( + token.beginLine, + token.beginColumn, + token.endLine, + token.endColumn); + } + } else if (ex instanceof TokenMgrError) { + expectedTokenSequences = null; + tokenImage = null; + // Example: + // Lexical error at line 3, column 24. Encountered "#" after "a". 
+ final java.util.regex.Pattern pattern = java.util.regex.Pattern.compile( + "(?s)Lexical error at line ([0-9]+), column ([0-9]+).*"); + java.util.regex.Matcher matcher = pattern.matcher(ex.getMessage()); + if (matcher.matches()) { + int line = Integer.parseInt(matcher.group(1)); + int column = Integer.parseInt(matcher.group(2)); + pos = new SqlParserPos(line, column, line, column); + } + } else if (ex instanceof CalciteContextException) { + // CalciteContextException is the standard wrapper for exceptions + // produced by the validator, but in the parser, the standard is + // SqlParseException; so, strip it away. In case you were wondering, + // the CalciteContextException appears because the parser + // occasionally calls into validator-style code such as + // SqlSpecialOperator.reduceExpr. + CalciteContextException ece = + (CalciteContextException) ex; + pos = new SqlParserPos( + ece.getPosLine(), + ece.getPosColumn(), + ece.getEndPosLine(), + ece.getEndPosColumn()); + ex = ece.getCause(); + } + + return new SqlParseException( + ex.getMessage(), pos, expectedTokenSequences, tokenImage, ex); +} + +/** + * Removes or transforms misleading information from a parse exception. + * + * @param e dirty excn + * + * @return clean excn + */ +JAVACODE ParseException cleanupParseException(ParseException ex) +{ + if (ex.expectedTokenSequences == null) { + return ex; + } + int iIdentifier = Arrays.asList(ex.tokenImage).indexOf(""); + + // Find all sequences in the error which contain identifier. 
For + // example, + // {} + // {A} + // {B, C} + // {D, } + // {D, A} + // {D, B} + // + // would yield + // {} + // {D} + final List prefixList = new ArrayList(); + for (int i = 0; i < ex.expectedTokenSequences.length; ++i) { + int[] seq = ex.expectedTokenSequences[i]; + int j = seq.length - 1; + int i1 = seq[j]; + if (i1 == iIdentifier) { + int[] prefix = new int[j]; + System.arraycopy(seq, 0, prefix, 0, j); + prefixList.add(prefix); + } + } + + if (prefixList.isEmpty()) { + return ex; + } + + int[][] prefixes = (int[][]) + prefixList.toArray(new int[prefixList.size()][]); + + // Since was one of the possible productions, + // we know that the parser will also have included all + // of the non-reserved keywords (which are treated as + // identifiers in non-keyword contexts). So, now we need + // to clean those out, since they're totally irrelevant. + + final List list = new ArrayList(); + Metadata metadata = getMetadata(); + for (int i = 0; i < ex.expectedTokenSequences.length; ++i) { + int [] seq = ex.expectedTokenSequences[i]; + String tokenImage = ex.tokenImage[seq[seq.length - 1]]; + String token = SqlParserUtil.getTokenVal(tokenImage); + if (token == null || !metadata.isNonReservedKeyword(token)) { + list.add(seq); + continue; + } + boolean match = matchesPrefix(seq, prefixes); + if (!match) { + list.add(seq); + } + } + + ex.expectedTokenSequences = + (int [][]) list.toArray(new int [list.size()][]); + return ex; +} + +JAVACODE boolean matchesPrefix(int[] seq, int[][] prefixes) +{ + nextPrefix: + for (int[] prefix : prefixes) { + if (seq.length == prefix.length + 1) { + for (int k = 0; k < prefix.length; k++) { + if (prefix[k] != seq[k]) { + continue nextPrefix; + } + } + return true; + } + } + return false; +} + +/***************************************** + * Syntactical Descriptions * + *****************************************/ + +SqlNode ExprOrJoinOrOrderedQuery(ExprContext exprContext) : +{ + SqlNode e; + final List list = new ArrayList(); +} +{ + // 
Lookhead to distinguish between "TABLE emp" (which will be + // matched by ExplicitTable() via Query()) + // and "TABLE fun(args)" (which will be matched by TableRef()) + ( + LOOKAHEAD(2) + e = Query(exprContext) + e = OrderByLimitOpt(e) + { return e; } + | + e = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) + ( e = JoinTable(e) )* + { list.add(e); } + ( AddSetOpQuery(list, exprContext) )* + { return SqlParserUtil.toTree(list); } + ) +} + +/** + * Parses either a row expression or a query expression with an optional + * ORDER BY. + * + *

Postgres syntax for limit: + * + *

+ *    [ LIMIT { count | ALL } ]
+ *    [ OFFSET start ]
+ *
+ * + *

Trino syntax for limit: + * + *

+ *    [ OFFSET start ]
+ *    [ LIMIT { count | ALL } ]
+ *
+ * + *

MySQL syntax for limit: + * + *

+ *    [ LIMIT { count | start, count } ]
+ *
+ * + *

SQL:2008 syntax for limit: + * + *

+ *    [ OFFSET start { ROW | ROWS } ]
+ *    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
+ *
+ */ +SqlNode OrderedQueryOrExpr(ExprContext exprContext) : +{ + SqlNode e; +} +{ + e = QueryOrExpr(exprContext) + e = OrderByLimitOpt(e) + { return e; } +} + +/** Reads optional "ORDER BY", "LIMIT", "OFFSET", "FETCH" following a query, + * {@code e}. If any of them are present, adds them to the query; + * otherwise returns the query unchanged. + * Throws if they are present and {@code e} is not a query. */ +SqlNode OrderByLimitOpt(SqlNode e) : +{ + final SqlNodeList orderBy; + final Span s = Span.of(); + SqlNode[] offsetFetch = {null, null}; +} +{ + ( + // use the syntactic type of the expression we just parsed + // to decide whether ORDER BY makes sense + orderBy = OrderBy(e.isA(SqlKind.QUERY)) + | { orderBy = null; } + ) + [ + LimitClause(s, offsetFetch) + [ OffsetClause(s, offsetFetch) ] + | + OffsetClause(s, offsetFetch) + [ + LimitClause(s, offsetFetch) { + if (!this.conformance.isOffsetLimitAllowed()) { + throw SqlUtil.newContextException(s.end(this), + RESOURCE.offsetLimitNotAllowed()); + } + } + | + FetchClause(offsetFetch) + ] + | + FetchClause(offsetFetch) + ] + { + if (orderBy != null || offsetFetch[0] != null || offsetFetch[1] != null) { + return new SqlOrderBy(getPos(), e, + Util.first(orderBy, SqlNodeList.EMPTY), + offsetFetch[0], offsetFetch[1]); + } + return e; + } +} + +/** + * Parses an OFFSET clause in an ORDER BY expression. + */ +void OffsetClause(Span s, SqlNode[] offsetFetch) : +{ +} +{ + // ROW or ROWS is required in SQL:2008 but we make it optional + // because it is not present in Postgres-style syntax. + { s.add(this); } + offsetFetch[0] = UnsignedNumericLiteralOrParam() + [ | ] +} + +/** + * Parses a FETCH clause in an ORDER BY expression. + */ +void FetchClause(SqlNode[] offsetFetch) : +{ +} +{ + // SQL:2008-style syntax. "OFFSET ... FETCH ...". + // If you specify both LIMIT and FETCH, FETCH wins. + ( | ) offsetFetch[1] = UnsignedNumericLiteralOrParam() + ( | ) +} + +/** + * Parses a LIMIT clause in an ORDER BY expression. 
+ */ +void LimitClause(Span s, SqlNode[] offsetFetch) : +{ + final String error; +} +{ + // Postgres-style syntax. "LIMIT ... OFFSET ..." + { s.add(this); } + ( + // MySQL-style syntax. "LIMIT start, count" or "LIMIT start, ALL" + LOOKAHEAD(2) + offsetFetch[0] = UnsignedNumericLiteralOrParam() + + ( + offsetFetch[1] = UnsignedNumericLiteralOrParam() { + error = "count"; + } + | + { + error = "ALL"; + } + ) { + if (!this.conformance.isLimitStartCountAllowed()) { + throw SqlUtil.newContextException(s.end(this), + RESOURCE.limitStartCountOrAllNotAllowed(error)); + } + } + | + offsetFetch[1] = UnsignedNumericLiteralOrParam() + | + + ) +} + +/** + * Parses a leaf in a query expression (SELECT, VALUES or TABLE). + */ +SqlNode LeafQuery(ExprContext exprContext) : +{ + SqlNode e; +} +{ + { + // ensure a query is legal in this context + checkQueryExpression(exprContext); + } + e = SqlSelect() { return e; } +| + e = TableConstructor() { return e; } +| + e = ExplicitTable(getPos()) { return e; } +} + +/** + * Parses a parenthesized query or single row expression. + * Depending on {@code exprContext}, may also accept a join. + */ +SqlNode ParenthesizedExpression(ExprContext exprContext) : +{ + SqlNode e; +} +{ + + { + // we've now seen left paren, so queries inside should + // be allowed as sub-queries + switch (exprContext) { + case ACCEPT_SUB_QUERY: + exprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + exprContext = ExprContext.ACCEPT_ALL; + break; + } + } + e = ExprOrJoinOrOrderedQuery(exprContext) + + { + exprContext.throwIfNotCompatible(e); + return e; + } +} + +/** + * Parses a parenthesized query or comma-list of row expressions. + * + *

REVIEW jvs 8-Feb-2004: There's a small hole in this production. It can be + * used to construct something like + * + *

+ * WHERE x IN (select count(*) from t where c=d,5)
+ *
+ * + *

which should be illegal. The above is interpreted as equivalent to + * + *

+ * WHERE x IN ((select count(*) from t where c=d),5)
+ *
+ * + *

which is a legal use of a sub-query. The only way to fix the hole is to + * be able to remember whether a subexpression was parenthesized or not, which + * means preserving parentheses in the SqlNode tree. This is probably + * desirable anyway for use in purely syntactic parsing applications (e.g. SQL + * pretty-printer). However, if this is done, it's important to also make + * isA() on the paren node call down to its operand so that we can + * always correctly discriminate a query from a row expression. + */ +SqlNodeList ParenthesizedQueryOrCommaList( + ExprContext exprContext) : +{ + SqlNode e; + final List list = new ArrayList(); + ExprContext firstExprContext = exprContext; + final Span s; +} +{ + + { + // we've now seen left paren, so a query by itself should + // be interpreted as a sub-query + s = span(); + switch (exprContext) { + case ACCEPT_SUB_QUERY: + firstExprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + firstExprContext = ExprContext.ACCEPT_ALL; + break; + } + } + e = OrderedQueryOrExpr(firstExprContext) { list.add(e); } + ( + + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + AddExpression(list, exprContext) + )* + + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** As ParenthesizedQueryOrCommaList, but allows DEFAULT + * in place of any of the expressions. For example, + * {@code (x, DEFAULT, null, DEFAULT)}. 
*/ +SqlNodeList ParenthesizedQueryOrCommaListWithDefault( + ExprContext exprContext) : +{ + SqlNode e; + final List list = new ArrayList(); + ExprContext firstExprContext = exprContext; + final Span s; +} +{ + + { + // we've now seen left paren, so a query by itself should + // be interpreted as a sub-query + s = span(); + switch (exprContext) { + case ACCEPT_SUB_QUERY: + firstExprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + firstExprContext = ExprContext.ACCEPT_ALL; + break; + } + } + ( + e = OrderedQueryOrExpr(firstExprContext) { list.add(e); } + | + e = Default() { list.add(e); } + ) + ( + + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + ( + e = Expression(exprContext) { list.add(e); } + | + e = Default() { list.add(e); } + ) + )* + + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** + * Parses function parameter lists. + * If the list starts with DISTINCT or ALL, it is discarded. + */ +List UnquantifiedFunctionParameterList(ExprContext exprContext) : +{ + final List args; +} +{ + args = FunctionParameterList(exprContext) { + args.remove(0); // remove DISTINCT or ALL, if present + return args; + } +} + +/** + * Parses function parameter lists including DISTINCT keyword recognition, + * DEFAULT, and named argument assignment. 
+ */ +List FunctionParameterList(ExprContext exprContext) : +{ + final SqlLiteral qualifier; + final List list = new ArrayList(); +} +{ + + ( + qualifier = AllOrDistinct() { list.add(qualifier); } + | + { list.add(null); } + ) + AddArg0(list, exprContext) + ( + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + AddArg(list, exprContext) + )* + + { + return list; + } +} + +SqlLiteral AllOrDistinct() : +{ +} +{ + { return SqlSelectKeyword.DISTINCT.symbol(getPos()); } +| + { return SqlSelectKeyword.ALL.symbol(getPos()); } +} + +void AddArg0(List list, ExprContext exprContext) : +{ + final SqlIdentifier name; + SqlNode e; + final ExprContext firstExprContext; + { + // we've now seen left paren, so queries inside should + // be allowed as sub-queries + switch (exprContext) { + case ACCEPT_SUB_QUERY: + firstExprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + firstExprContext = ExprContext.ACCEPT_ALL; + break; + default: + firstExprContext = exprContext; + break; + } + } +} +{ + ( + LOOKAHEAD(2) name = SimpleIdentifier() + | { name = null; } + ) + ( + e = Default() + | + LOOKAHEAD((SimpleIdentifierOrList() | ) ) + e = LambdaExpression() + | + LOOKAHEAD(3) + e = TableParam() + | + e = PartitionedQueryOrQueryOrExpr(firstExprContext) + ) + { + if (name != null) { + e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall( + Span.of(name, e).pos(), e, name); + } + list.add(e); + } +} + +void AddArg(List list, ExprContext exprContext) : +{ + final SqlIdentifier name; + SqlNode e; +} +{ + ( + LOOKAHEAD(2) name = SimpleIdentifier() + | { name = null; } + ) + ( + e = Default() + | + LOOKAHEAD((SimpleIdentifierOrList() | ) ) + e = LambdaExpression() + | + e = Expression(exprContext) + | + e = TableParam() + ) + { + if (name != null) { + e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall( + Span.of(name, e).pos(), e, name); + } + list.add(e); + } +} + +SqlNode Default() : {} +{ + { + return 
SqlStdOperatorTable.DEFAULT.createCall(getPos()); + } +} + +/** + * Parses a query (SELECT, UNION, INTERSECT, EXCEPT, VALUES, TABLE) followed by + * the end-of-file symbol. + */ +SqlNode SqlQueryEof() : +{ + SqlNode query; +} +{ + query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) + + { return query; } +} + +/** + * Parses a list of SQL statements separated by semicolon. + * The semicolon is required between statements, but is + * optional at the end. + */ +SqlNodeList SqlStmtList() : +{ + final List stmtList = new ArrayList(); + SqlNode stmt; +} +{ + stmt = SqlStmt() { + stmtList.add(stmt); + } + ( + + [ + stmt = SqlStmt() { + stmtList.add(stmt); + } + ] + )* + + { + return new SqlNodeList(stmtList, Span.of(stmtList).pos()); + } +} + +/** + * Parses an SQL statement. + */ +SqlNode SqlStmt() : +{ + SqlNode stmt; +} +{ + ( +<#-- Add methods to parse additional statements here --> +<#list (parser.statementParserMethods!default.parser.statementParserMethods) as method> + LOOKAHEAD(2) stmt = ${method} + | + + stmt = SqlSetOption(Span.of(), null) + | + stmt = SqlAlter() + | +<#if (parser.createStatementParserMethods!default.parser.createStatementParserMethods)?size != 0> + stmt = SqlCreate() + | + +<#if (parser.dropStatementParserMethods!default.parser.dropStatementParserMethods)?size != 0> + stmt = SqlDrop() + | + +<#if (parser.truncateStatementParserMethods!default.parser.truncateStatementParserMethods)?size != 0> + LOOKAHEAD(2) + stmt = SqlTruncate() + | + + stmt = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) + | + stmt = SqlExplain() + | + stmt = SqlDescribe() + | + stmt = SqlInsert() + | + stmt = SqlDelete() + | + stmt = SqlUpdate() + | + stmt = SqlMerge() + | + stmt = SqlProcedureCall() + ) + { + return stmt; + } +} + +/** + * Parses an SQL statement followed by the end-of-file symbol. 
+ */ +SqlNode SqlStmtEof() : +{ + SqlNode stmt; +} +{ + stmt = SqlStmt() + { + return stmt; + } +} + +<#-- Add implementations of additional parser statement calls here --> +<#list (parser.implementationFiles!default.parser.implementationFiles) as file> + <#include "/@includes/"+file /> + + +SqlNodeList ParenthesizedKeyValueOptionCommaList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + + AddKeyValueOption(list) + ( + + AddKeyValueOption(list) + )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** +* Parses an option with format key=val whose key is a simple identifier or string literal +* and value is a string literal. +*/ +void AddKeyValueOption(List list) : +{ + final SqlNode key; + final SqlNode value; +} +{ + ( + key = SimpleIdentifier() + | + key = StringLiteral() + ) + + value = StringLiteral() { + list.add(key); + list.add(value); + } +} + +/** Parses an option value (either a string or a numeric) and adds to a list. */ +void AddOptionValue(List list) : +{ + final SqlNode value; +} +{ + ( + value = NumericLiteral() { list.add(value); } + | + value = StringLiteral() { list.add(value); } + ) +} + +/** + * Parses a literal list separated by comma. The literal is either a string or a numeric. 
+ */ +SqlNodeList ParenthesizedLiteralOptionCommaList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + + AddOptionValue(list) ( AddOptionValue(list) )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +void AddHint(List hints) : +{ + final SqlIdentifier hintName; + final SqlNodeList hintOptions; + final SqlHint.HintOptionFormat optionFormat; +} +{ + hintName = SimpleIdentifier() + ( + LOOKAHEAD(5) + hintOptions = ParenthesizedKeyValueOptionCommaList() { + optionFormat = SqlHint.HintOptionFormat.KV_LIST; + } + | + LOOKAHEAD(3) + hintOptions = ParenthesizedSimpleIdentifierList() { + optionFormat = SqlHint.HintOptionFormat.ID_LIST; + } + | + LOOKAHEAD(3) + hintOptions = ParenthesizedLiteralOptionCommaList() { + optionFormat = SqlHint.HintOptionFormat.LITERAL_LIST; + } + | + LOOKAHEAD(2) + [ ] + { + hintOptions = SqlNodeList.EMPTY; + optionFormat = SqlHint.HintOptionFormat.EMPTY; + } + ) + { + hints.add( + new SqlHint(Span.of(hintOptions).end(this), hintName, hintOptions, + optionFormat)); + } +} + +/** Parses hints following a table reference, + * and returns the wrapped table reference. */ +SqlNode TableHints(SqlIdentifier tableName) : +{ + final List hints = new ArrayList(); +} +{ + AddHint(hints) ( AddHint(hints) )* { + final SqlParserPos pos = Span.of(tableName).addAll(hints).end(this); + final SqlNodeList hintList = new SqlNodeList(hints, pos); + return new SqlTableRef(pos, tableName, hintList); + } +} + +/** + * Parses a leaf SELECT expression without ORDER BY. + */ +SqlSelect SqlSelect() : +{ + final List keywords = new ArrayList(); + final SqlLiteral keyword; + final SqlNodeList keywordList; + final List selectList = new ArrayList(); + final SqlNode fromClause; + final SqlNode where; + final SqlNodeList groupBy; + final SqlNode having; + final SqlNodeList windowDecls; + final SqlNode qualify; + final List hints = new ArrayList(); + final Span s; +} +{ +