DRILL-7200: Update Calcite to 1.19.0 / 1.20.0
KazydubB authored and vvysotskyi committed Jul 5, 2019
1 parent 74b277d commit 2efa51e
Showing 47 changed files with 351 additions and 164 deletions.
5 changes: 0 additions & 5 deletions common/pom.xml
@@ -45,11 +45,6 @@
<version>${dep.junit.version}</version>
</dependency>

<dependency>
<groupId>com.github.vvysotskyi.drill-calcite</groupId>
<artifactId>calcite-core</artifactId>
</dependency>

<dependency>
<groupId>com.typesafe</groupId>
<artifactId>config</artifactId>
@@ -20,6 +20,7 @@
import org.apache.drill.common.expression.BooleanOperator;
import org.apache.drill.common.expression.FunctionCall;
import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.common.expression.SchemaPath;
import org.apache.drill.common.expression.visitors.AbstractExprVisitor;
import org.apache.drill.exec.store.hbase.DrillHBaseConstants;
import org.ojai.Value;
@@ -56,6 +57,15 @@ public boolean isAllExpressionsConverted() {
return allExpressionsConverted;
}

@Override
public JsonScanSpec visitSchemaPath(SchemaPath path, Void value) throws RuntimeException {
String fieldPath = FieldPathHelper.schemaPath2FieldPath(path).asPathString();
QueryCondition cond = MapRDBImpl.newCondition().is(fieldPath, Op.EQUAL, true);
return new JsonScanSpec(groupScan.getTableName(),
groupScan.getIndexDesc(),
cond.build());
}

@Override
public JsonScanSpec visitUnknown(LogicalExpression e, Void value) throws RuntimeException {
allExpressionsConverted = false;
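The new visitSchemaPath override above covers filters that are nothing more than a bare boolean column reference (for example WHERE isActive): for filtering purposes such a reference is equivalent to isActive = true, so the builder emits an EQUAL-to-true condition instead of giving up on pushdown. Below is a minimal, library-free Java sketch of that mapping; ScanCondition and the field name are illustrative stand-ins, not the OJAI/MapR-DB API used in the actual change.

import java.util.Objects;

public class BareColumnFilterSketch {

  // Illustrative stand-in for a pushed-down scan condition of the form `fieldPath <op> literal`.
  static final class ScanCondition {
    final String fieldPath;
    final String op;
    final Object literal;

    ScanCondition(String fieldPath, String op, Object literal) {
      this.fieldPath = Objects.requireNonNull(fieldPath);
      this.op = op;
      this.literal = literal;
    }

    @Override
    public String toString() {
      return fieldPath + " " + op + " " + literal;
    }
  }

  // Mirrors the idea of visitSchemaPath: a lone boolean column reference is
  // converted into an equality-with-true condition rather than being dropped.
  static ScanCondition fromBareBooleanColumn(String fieldPath) {
    return new ScanCondition(fieldPath, "EQUAL", Boolean.TRUE);
  }

  public static void main(String[] args) {
    // e.g. SELECT ... FROM t WHERE isActive
    System.out.println(fromBareBooleanColumn("isActive")); // prints: isActive EQUAL true
  }
}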
@@ -81,15 +81,15 @@ public void testMultiCFDifferentCase() throws Exception {
.baselineValues(
"a1".getBytes(),
mapOf("c3", "23".getBytes()),
mapOf("c3", "23".getBytes()))
mapOf("c1", "21".getBytes(), "c2", "22".getBytes()))
.baselineValues(
"a2".getBytes(),
mapOf("c3", "13".getBytes()),
mapOf("c3", "13".getBytes()))
mapOf("c1", "11".getBytes(), "c2", "12".getBytes()))
.baselineValues(
"a3".getBytes(),
mapOf("c3", "33".getBytes()),
mapOf("c3", "33".getBytes()))
mapOf("c1", "31".getBytes(), "c2", "32".getBytes()))
.go();
}

@@ -177,7 +177,7 @@ public static void generateHBaseDatasetMultiCF(Connection conn, Admin admin, Tab
}

HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor("f"));
desc.addFamily(new HColumnDescriptor("f0"));
desc.addFamily(new HColumnDescriptor("F"));
if (numberRegions > 1) {
admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
@@ -188,20 +188,20 @@
BufferedMutator table = conn.getBufferedMutator(tableName);

Put p = new Put("a1".getBytes());
p.addColumn("f".getBytes(), "c1".getBytes(), "21".getBytes());
p.addColumn("f".getBytes(), "c2".getBytes(), "22".getBytes());
p.addColumn("f0".getBytes(), "c1".getBytes(), "21".getBytes());
p.addColumn("f0".getBytes(), "c2".getBytes(), "22".getBytes());
p.addColumn("F".getBytes(), "c3".getBytes(), "23".getBytes());
table.mutate(p);

p = new Put("a2".getBytes());
p.addColumn("f".getBytes(), "c1".getBytes(), "11".getBytes());
p.addColumn("f".getBytes(), "c2".getBytes(), "12".getBytes());
p.addColumn("f0".getBytes(), "c1".getBytes(), "11".getBytes());
p.addColumn("f0".getBytes(), "c2".getBytes(), "12".getBytes());
p.addColumn("F".getBytes(), "c3".getBytes(), "13".getBytes());
table.mutate(p);

p = new Put("a3".getBytes());
p.addColumn("f".getBytes(), "c1".getBytes(), "31".getBytes());
p.addColumn("f".getBytes(), "c2".getBytes(), "32".getBytes());
p.addColumn("f0".getBytes(), "c1".getBytes(), "31".getBytes());
p.addColumn("f0".getBytes(), "c2".getBytes(), "32".getBytes());
p.addColumn("F".getBytes(), "c3".getBytes(), "33".getBytes());
table.mutate(p);

2 changes: 1 addition & 1 deletion contrib/storage-hive/core/pom.xml
@@ -96,7 +96,7 @@
</exclusions>
</dependency>
<dependency>
<groupId>com.github.vvysotskyi.drill-calcite</groupId>
<groupId>${calcite.groupId}</groupId>
<artifactId>calcite-core</artifactId>
</dependency>
<dependency>
@@ -17,6 +17,7 @@
*/
package org.apache.drill.exec.fn.hive;

import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.Arrays;
import java.util.List;
@@ -149,7 +150,7 @@ public void testLastDay() throws Exception {
.sqlQuery("select last_day(to_date('1994-02-01','yyyy-MM-dd')) as `LAST_DAY` from (VALUES(1))")
.unOrdered()
.baselineColumns("LAST_DAY")
.baselineValues("1994-02-28")
.baselineValues(LocalDate.parse("1994-02-28"))
.go();
}

3 changes: 2 additions & 1 deletion docs/dev/Calcite.md
@@ -1,7 +1,7 @@
# Drill-specific commits in Apache Calcite

Currently, Drill uses Apache Calcite with additional changes, required for Drill. All the commits were left after
update from Calcite `1.4.0` to Calcite `1.15.0` and weren't merged to the Calcite's master yet since there is no consensus on them in Calcite community.
update from Calcite `1.4.0` to Calcite `1.15.0` (3 commits) and from Calcite `1.18.0` to Calcite `1.20.0` (1 commit) and haven't been merged into Calcite's master yet, since there is no consensus on them in the Calcite community.

List of Jiras with Drill-specific commits:

@@ -10,6 +10,7 @@ List of Jiras with Drill-specific commits:
|[CALCITE-2018](https://issues.apache.org/jira/browse/CALCITE-2018)|Queries failed with AssertionError: rel has lower cost than best cost of subset|Pull request with the fix was created ([PR-552](https://github.com/apache/calcite/pull/552)), but [CALCITE-2166](https://issues.apache.org/jira/browse/CALCITE-2166) which blocks it was found and is not resolved yet.|
|[CALCITE-2087](https://issues.apache.org/jira/browse/CALCITE-2087)|Add new method to ViewExpander interface to allow passing SchemaPlus.|Pull request into Apache Calcite was created, but it was declined. See conversation in Jira.|
|[CALCITE-1178](https://issues.apache.org/jira/browse/CALCITE-1178)|Allow SqlBetweenOperator to compare DATE and TIMESTAMP|SQL spec does not allow to compare datetime types if they have different `<primary datetime field>`s. Therefore Calcite community won’t accept these changes. Similar issues were reported in [CALCITE-2829](https://issues.apache.org/jira/browse/CALCITE-2829) and in [CALCITE-2745](https://issues.apache.org/jira/browse/CALCITE-2745).|
|[CALCITE-3121](https://issues.apache.org/jira/browse/CALCITE-3121)|VolcanoPlanner hangs due to removing ORDER BY from sub-query|Pull request was opened to revert the changes ([PR-1264](https://github.com/apache/calcite/pull/1264)) which remove the ORDER BY clause; it wasn't merged, because the aforementioned changes only unveiled the issue and no proper solution is available yet.|

# Drill-Calcite repository

4 changes: 2 additions & 2 deletions exec/java-exec/pom.xml
@@ -207,7 +207,7 @@
</exclusions>
</dependency>
<dependency>
<groupId>com.github.vvysotskyi.drill-calcite</groupId>
<groupId>${calcite.groupId}</groupId>
<artifactId>calcite-core</artifactId>
</dependency>
<dependency>
@@ -673,7 +673,7 @@
<configuration>
<artifactItems>
<artifactItem>
<groupId>com.github.vvysotskyi.drill-calcite</groupId>
<groupId>${calcite.groupId}</groupId>
<artifactId>calcite-core</artifactId>
<type>jar</type>
<overWrite>true</overWrite>
8 changes: 7 additions & 1 deletion exec/java-exec/src/main/codegen/data/Parser.tdd
@@ -866,7 +866,13 @@
# Example: LeftSemiJoin()
joinTypes: [
]


# List of methods for parsing builtin function calls.
# Return type of method implementation should be "SqlNode".
# Example: DateFunctionCall().
builtinFunctionCallMethods: [
]

includeCompoundIdentifier: false,
includeBraces: true,
includeAdditionalDeclarations: false,
@@ -246,7 +246,7 @@ public LogicalExpression visitFunctionHolderExpression(FunctionHolderExpression
return handleCompareFunction(funcHolderExpr, value);
}

if (isIsFunction(funcName)) {
if (isIsFunction(funcName) || isNot(funcHolderExpr, funcName)) {
return handleIsFunction(funcHolderExpr, value);
}

@@ -262,6 +262,13 @@
}
}

// Shows whether the function is a simplified IS FALSE.
private boolean isNot(FunctionHolderExpression holderExpression, String funcName) {
return !holderExpression.args.isEmpty()
&& !(holderExpression.args.get(0) instanceof DrillFuncHolderExpr)
&& FunctionGenerationHelper.NOT.equals(funcName);
}

private List<LogicalExpression> generateNewExpressions(List<LogicalExpression> expressions, Set<LogicalExpression> value) {
List<LogicalExpression> newExpressions = new ArrayList<>();
for (LogicalExpression arg : expressions) {
@@ -230,6 +230,7 @@ public static <C extends Comparable<C>> LogicalExpression createIsPredicate(Stri
case FunctionGenerationHelper.IS_NOT_TRUE:
return createIsNotTruePredicate(expr);
case FunctionGenerationHelper.IS_FALSE:
case FunctionGenerationHelper.NOT:
return createIsFalsePredicate(expr);
case FunctionGenerationHelper.IS_NOT_FALSE:
return createIsNotFalsePredicate(expr);
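Per the comment on the isNot helper a few hunks up, the upgraded Calcite may hand Drill a bare NOT(col) where the query originally said col IS FALSE, so the predicate builder now maps the "not" function to the same IS FALSE predicate. That mapping is safe because, under SQL's three-valued logic, NOT(col) and col IS FALSE admit exactly the same rows through a WHERE clause: a NULL predicate result filters the row out just like FALSE does. The standalone sketch below (plain Java, no Drill or Calcite types) demonstrates that equivalence.

import java.util.Arrays;

public class NotVersusIsFalse {

  // SQL NOT under three-valued logic: NOT NULL stays NULL.
  static Boolean sqlNot(Boolean value) {
    return value == null ? null : !value;
  }

  // SQL IS FALSE is never NULL: it is TRUE only when the value is FALSE.
  static boolean isFalse(Boolean value) {
    return Boolean.FALSE.equals(value);
  }

  // A WHERE clause keeps a row only when the predicate evaluates to TRUE.
  static boolean whereKeeps(Boolean predicateResult) {
    return Boolean.TRUE.equals(predicateResult);
  }

  public static void main(String[] args) {
    for (Boolean value : Arrays.asList(Boolean.TRUE, Boolean.FALSE, null)) {
      boolean keptByNot = whereKeeps(sqlNot(value));
      boolean keptByIsFalse = whereKeeps(isFalse(value));
      // Both predicates keep the row only when the column value is FALSE.
      System.out.println("col=" + value + "  NOT(col) keeps: " + keptByNot
          + "  col IS FALSE keeps: " + keptByIsFalse);
    }
  }
}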
@@ -53,6 +53,7 @@ public class FunctionGenerationHelper {
public static final String IS_NOT_TRUE = "isnottrue";
public static final String IS_FALSE = "isfalse";
public static final String IS_NOT_FALSE = "isnotfalse";
public static final String NOT = "not";

/**
* Finds ordering comparator ("compare_to...") FunctionHolderExpression with
@@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.expr.fn.impl;

import org.apache.drill.exec.expr.DrillSimpleFunc;
import org.apache.drill.exec.expr.annotations.FunctionTemplate;
import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
import org.apache.drill.exec.expr.annotations.Output;
import org.apache.drill.exec.expr.annotations.Param;
import org.apache.drill.exec.expr.holders.DateHolder;
import org.apache.drill.exec.expr.holders.TimeStampHolder;

@SuppressWarnings("unused")
@FunctionTemplate(names = "last_day", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
public class LastDayFunction implements DrillSimpleFunc {

@Param
TimeStampHolder in;
@Output
DateHolder out;

@Override
public void setup() {
}

@Override
public void eval() {
java.time.LocalDate date =
java.time.Instant.ofEpochMilli(in.value).atZone(java.time.ZoneOffset.UTC).toLocalDate();
java.time.LocalDate end = date.withDayOfMonth(date.lengthOfMonth());
out.value = end.atStartOfDay(java.time.ZoneOffset.UTC).toInstant().toEpochMilli();
}
}
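For reference, here is a standalone sketch of the same last-day-of-month computation the new last_day() function performs on an epoch-millisecond timestamp. The 1994-02-01 input mirrors the updated testLastDay() baseline above; the class and method names are illustrative only.

import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;

public class LastDayOfMonthSketch {

  // Epoch millis in, epoch millis of midnight UTC on the month's last day out.
  static long lastDayMillis(long epochMillis) {
    LocalDate date = Instant.ofEpochMilli(epochMillis).atZone(ZoneOffset.UTC).toLocalDate();
    LocalDate lastDay = date.withDayOfMonth(date.lengthOfMonth());
    return lastDay.atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli();
  }

  public static void main(String[] args) {
    long input = LocalDate.parse("1994-02-01").atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli();
    long output = lastDayMillis(input);
    // Prints 1994-02-28, matching the updated baseline in testLastDay().
    System.out.println(Instant.ofEpochMilli(output).atZone(ZoneOffset.UTC).toLocalDate());
  }
}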
@@ -34,15 +34,14 @@

import java.util.List;


/**
* Base class for logical and physical Aggregations implemented in Drill
*/
public abstract class DrillAggregateRelBase extends Aggregate implements DrillRelNode {

public DrillAggregateRelBase(RelOptCluster cluster, RelTraitSet traits, RelNode child, boolean indicator,
ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
super(cluster, traits, child, indicator, groupSet, groupSets, aggCalls);
public DrillAggregateRelBase(RelOptCluster cluster, RelTraitSet traits, RelNode child,
ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
super(cluster, traits, child, groupSet, groupSets, aggCalls);
}

/**
@@ -17,6 +17,7 @@
*/
package org.apache.drill.exec.planner.common;

import org.apache.calcite.rel.core.JoinRelType;
import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptCluster;
@@ -29,7 +30,6 @@
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.sql.SemiJoinType;
import org.apache.calcite.sql.validate.SqlValidatorUtil;
import org.apache.calcite.util.ImmutableBitSet;
import org.apache.drill.exec.ExecConstants;
@@ -47,7 +47,7 @@ public abstract class DrillLateralJoinRelBase extends Correlate implements Drill
final private static double CORRELATE_MEM_COPY_COST = DrillCostBase.MEMORY_TO_CPU_RATIO * DrillCostBase.BASE_CPU_COST;
final public boolean excludeCorrelateColumn;
public DrillLateralJoinRelBase(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right, boolean excludeCorrelateCol,
CorrelationId correlationId, ImmutableBitSet requiredColumns, SemiJoinType semiJoinType) {
CorrelationId correlationId, ImmutableBitSet requiredColumns, JoinRelType semiJoinType) {
super(cluster, traits, left, right, correlationId, requiredColumns, semiJoinType);
this.excludeCorrelateColumn = excludeCorrelateCol;
}
@@ -73,7 +73,7 @@ protected RelDataType deriveRowType() {
case LEFT:
case INNER:
return constructRowType(SqlValidatorUtil.deriveJoinRowType(left.getRowType(),
removeImplicitField(right.getRowType()), joinType.toJoinType(),
removeImplicitField(right.getRowType()), joinType,
getCluster().getTypeFactory(), null,
ImmutableList.of()));
case ANTI:
@@ -30,5 +30,6 @@ private DrillDefaultRelMetadataProvider() {
.of(DrillRelMdRowCount.SOURCE,
DrillRelMdDistinctRowCount.SOURCE,
DrillRelMdSelectivity.SOURCE,
DrillRelMdMaxRowCount.SOURCE,
DefaultRelMetadataProvider.INSTANCE));
}
@@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.planner.cost;

import org.apache.calcite.rel.core.Aggregate;
import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMdMaxRowCount;
import org.apache.calcite.rel.metadata.RelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.util.BuiltInMethod;

public class DrillRelMdMaxRowCount extends RelMdMaxRowCount {

private static final DrillRelMdMaxRowCount INSTANCE = new DrillRelMdMaxRowCount();

public static final RelMetadataProvider SOURCE =
ReflectiveRelMetadataProvider.reflectiveSource(BuiltInMethod.MAX_ROW_COUNT.method, INSTANCE);

// The method is overridden because of changes done in CALCITE-2991 and
// TODO: should be discarded when CALCITE-1048 is fixed.
@Override
public Double getMaxRowCount(Aggregate rel, RelMetadataQuery mq) {
if (rel.getGroupSet().isEmpty()) {
// Aggregate with no GROUP BY always returns 1 row (even on empty table).
return 1D;
}
final Double rowCount = mq.getMaxRowCount(rel.getInput());
if (rowCount == null) {
return null;
}
return rowCount * rel.getGroupSets().size();
}
}
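A library-free sketch of the bound DrillRelMdMaxRowCount computes for an Aggregate: with no GROUP BY the aggregate always produces exactly one row (even over an empty input); otherwise the bound is the input's maximum row count multiplied by the number of grouping sets, since each grouping set can contribute at most one output row per input row. The helper below uses plain Java values rather than Calcite's Aggregate and RelMetadataQuery, so all names are illustrative.

public class AggregateMaxRowCountSketch {

  // Returns null when the input bound is unknown, mirroring the metadata contract.
  static Double maxRowCount(boolean hasGroupBy, int groupSetCount, Double inputMaxRowCount) {
    if (!hasGroupBy) {
      return 1D;                       // e.g. SELECT COUNT(*) FROM t -> always one row
    }
    if (inputMaxRowCount == null) {
      return null;                     // unknown input bound -> unknown output bound
    }
    return inputMaxRowCount * groupSetCount;
  }

  public static void main(String[] args) {
    System.out.println(maxRowCount(false, 1, 0D));     // 1.0
    System.out.println(maxRowCount(true, 1, 1000D));   // 1000.0
    System.out.println(maxRowCount(true, 3, 1000D));   // 3000.0 (GROUPING SETS)
    System.out.println(maxRowCount(true, 2, null));    // null
  }
}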
@@ -47,15 +47,15 @@
* Aggregation implemented in Drill.
*/
public class DrillAggregateRel extends DrillAggregateRelBase implements DrillRel {
/** Creates a DrillAggregateRel. */
public DrillAggregateRel(RelOptCluster cluster, RelTraitSet traits, RelNode child, boolean indicator, ImmutableBitSet groupSet,
List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
super(cluster, traits, child, indicator, groupSet, groupSets, aggCalls);

public DrillAggregateRel(RelOptCluster cluster, RelTraitSet traits, RelNode child, ImmutableBitSet groupSet,
List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
super(cluster, traits, child, groupSet, groupSets, aggCalls);
}

@Override
public Aggregate copy(RelTraitSet traitSet, RelNode input, boolean indicator, ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
return new DrillAggregateRel(getCluster(), traitSet, input, indicator, groupSet, groupSets, aggCalls);
public Aggregate copy(RelTraitSet traitSet, RelNode input, ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
return new DrillAggregateRel(getCluster(), traitSet, input, groupSet, groupSets, aggCalls);
}

@Override
@@ -51,7 +51,7 @@ public void onMatch(RelOptRuleCall call) {

final RelTraitSet traits = aggregate.getTraitSet().plus(DrillRel.DRILL_LOGICAL);
final RelNode convertedInput = convert(input, input.getTraitSet().plus(DrillRel.DRILL_LOGICAL).simplify());
call.transformTo(new DrillAggregateRel(aggregate.getCluster(), traits, convertedInput, aggregate.indicator,
call.transformTo(new DrillAggregateRel(aggregate.getCluster(), traits, convertedInput,
aggregate.getGroupSet(), aggregate.getGroupSets(), aggregate.getAggCallList()));
}
}
