HIVE-17672: Upgrade Calcite version to 1.14 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
jcamachor committed Oct 16, 2017
1 parent 45b9b8d commit 9975131
Showing 66 changed files with 1,113 additions and 838 deletions.
druid-handler/src/main/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
@@ -149,13 +149,21 @@ public FileSinkOperator.RecordWriter getHiveRecordWriter(
         af = new DoubleSumAggregatorFactory(columnNames.get(i), columnNames.get(i));
         break;
       case TIMESTAMP:
+        // Granularity column
         String tColumnName = columnNames.get(i);
-        if (!tColumnName.equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN) && !tColumnName
-            .equals(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME)) {
+        if (!tColumnName.equals(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME)) {
           throw new IOException("Dimension " + tColumnName + " does not have STRING type: " +
                   primitiveCategory);
         }
         continue;
+      case TIMESTAMPLOCALTZ:
+        // Druid timestamp column
+        String tLocalTZColumnName = columnNames.get(i);
+        if (!tLocalTZColumnName.equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
+          throw new IOException("Dimension " + tLocalTZColumnName + " does not have STRING type: " +
+                  primitiveCategory);
+        }
+        continue;
       default:
         // Dimension
         String dColumnName = columnNames.get(i);
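
The hunk above splits the old TIMESTAMP branch in two: plain TIMESTAMP is now accepted only for the synthetic granularity column, while Druid's own time column must arrive as TIMESTAMP WITH LOCAL TIME ZONE. A minimal standalone sketch of that rule follows; the class, method, and literal column names ("__time", "__time_granularity") are illustrative stand-ins for DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN and Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME.

import java.io.IOException;

// Illustrative sketch of the per-column rule in getHiveRecordWriter() above.
public class ColumnRoleCheck {
  static final String DRUID_TIME_COLUMN = "__time";              // assumed value of DEFAULT_TIMESTAMP_COLUMN
  static final String GRANULARITY_COLUMN = "__time_granularity"; // assumed value of DRUID_TIMESTAMP_GRANULARITY_COL_NAME

  static void check(String columnName, String primitiveCategory) throws IOException {
    switch (primitiveCategory) {
      case "TIMESTAMP":
        // Only the granularity marker column may be a plain timestamp.
        if (!columnName.equals(GRANULARITY_COLUMN)) {
          throw new IOException("Dimension " + columnName + " does not have STRING type");
        }
        break;
      case "TIMESTAMPLOCALTZ":
        // Only Druid's time column may be TIMESTAMP WITH LOCAL TIME ZONE.
        if (!columnName.equals(DRUID_TIME_COLUMN)) {
          throw new IOException("Dimension " + columnName + " does not have STRING type");
        }
        break;
      default:
        // Anything else is treated as a regular dimension.
        break;
    }
  }

  public static void main(String[] args) throws IOException {
    check("__time", "TIMESTAMPLOCALTZ");      // ok
    check("__time_granularity", "TIMESTAMP"); // ok
    check("event_time", "TIMESTAMPLOCALTZ");  // throws IOException
  }
}
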
druid-handler/src/main/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -19,7 +19,8 @@
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.sql.Timestamp;
+import java.time.Instant;
+import java.time.ZonedDateTime;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -31,6 +32,7 @@
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.druid.DruidStorageHandler;
@@ -47,7 +49,7 @@
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
-import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
@@ -64,9 +66,11 @@
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampLocalTZObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.io.BooleanWritable;
@@ -175,7 +179,7 @@ public void initialize(Configuration configuration, Properties properties) throws SerDeException {
       if (columnInfo.getKey().equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
         // Special handling for timestamp column
         columnNames.add(columnInfo.getKey()); // field name
-        PrimitiveTypeInfo type = TypeInfoFactory.timestampTypeInfo; // field type
+        PrimitiveTypeInfo type = TypeInfoFactory.timestampLocalTZTypeInfo; // field type
         columnTypes.add(type);
         inspectors
             .add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(type));
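
With this change, a Druid-backed table's time column surfaces in Hive as timestamp with local time zone instead of timestamp. A small sketch of the type/inspector pairing the hunk sets up, using only the factories this file already imports:

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class TimeColumnType {
  public static void main(String[] args) {
    // The same lookup the initialize() hunk performs for the Druid time column.
    PrimitiveTypeInfo type = TypeInfoFactory.timestampLocalTZTypeInfo;
    ObjectInspector oi =
        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(type);
    System.out.println(type.getTypeName()); // timestamp with local time zone
    System.out.println(oi.getCategory());   // PRIMITIVE
  }
}
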
@@ -300,7 +304,7 @@ private void inferSchema(TimeseriesQuery query,
       Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
     // Timestamp column
     columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
-    columnTypes.add(TypeInfoFactory.timestampTypeInfo);
+    columnTypes.add(TypeInfoFactory.timestampLocalTZTypeInfo);
     // Aggregator columns
     for (AggregatorFactory af : query.getAggregatorSpecs()) {
       columnNames.add(af.getName());
@@ -328,7 +332,7 @@ private void inferSchema(TopNQuery query,
       Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
     // Timestamp column
     columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
-    columnTypes.add(TypeInfoFactory.timestampTypeInfo);
+    columnTypes.add(TypeInfoFactory.timestampLocalTZTypeInfo);
     // Dimension column
     columnNames.add(query.getDimensionSpec().getOutputName());
     columnTypes.add(TypeInfoFactory.stringTypeInfo);
@@ -360,7 +364,7 @@ private void inferSchema(SelectQuery query,
       throws SerDeException {
     // Timestamp column
     columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
-    columnTypes.add(TypeInfoFactory.timestampTypeInfo);
+    columnTypes.add(TypeInfoFactory.timestampLocalTZTypeInfo);
     // Dimension columns
     for (DimensionSpec ds : query.getDimensions()) {
       columnNames.add(ds.getOutputName());
@@ -402,7 +406,7 @@ private void inferSchema(GroupByQuery query,
       Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
     // Timestamp column
     columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
-    columnTypes.add(TypeInfoFactory.timestampTypeInfo);
+    columnTypes.add(TypeInfoFactory.timestampLocalTZTypeInfo);
     // Dimension columns
     for (DimensionSpec ds : query.getDimensions()) {
       columnNames.add(ds.getOutputName());
@@ -456,10 +460,9 @@ public Writable serialize(Object o, ObjectInspector objectInspector) throws SerDeException {
       }
       final Object res;
       switch (types[i].getPrimitiveCategory()) {
-        case TIMESTAMP:
-          res = ((TimestampObjectInspector) fields.get(i).getFieldObjectInspector())
-              .getPrimitiveJavaObject(
-              values.get(i)).getTime();
+        case TIMESTAMPLOCALTZ:
+          res = ((TimestampLocalTZObjectInspector) fields.get(i).getFieldObjectInspector())
+              .getPrimitiveJavaObject(values.get(i)).getZonedDateTime().toInstant().toEpochMilli();
           break;
         case BYTE:
           res = ((ByteObjectInspector) fields.get(i).getFieldObjectInspector()).get(values.get(i));
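
The serialize() change replaces java.sql.Timestamp.getTime() with a java.time pipeline: the TimestampTZ value exposes a ZonedDateTime, which is flattened to UTC epoch millis for Druid. A dependency-free sketch of that conversion (the date and zone are illustrative):

import java.time.ZoneId;
import java.time.ZonedDateTime;

public class ToEpochMillis {
  public static void main(String[] args) {
    // Same steps as the TIMESTAMPLOCALTZ branch above: ZonedDateTime -> Instant -> epoch millis.
    ZonedDateTime zdt = ZonedDateTime.of(2017, 10, 16, 0, 0, 0, 0, ZoneId.of("US/Pacific"));
    long epochMillis = zdt.toInstant().toEpochMilli();
    System.out.println(epochMillis); // 1508137200000 (2017-10-16T07:00:00Z)
  }
}
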
@@ -529,8 +532,13 @@ public Object deserialize(Writable writable) throws SerDeException {
         continue;
       }
       switch (types[i].getPrimitiveCategory()) {
-        case TIMESTAMP:
-          output.add(new TimestampWritable(new Timestamp((Long) value)));
+        case TIMESTAMPLOCALTZ:
+          output.add(
+              new TimestampLocalTZWritable(
+                  new TimestampTZ(
+                      ZonedDateTime.ofInstant(
+                          Instant.ofEpochMilli((Long) value),
+                          ((TimestampLocalTZTypeInfo) types[i]).timeZone()))));
           break;
         case BYTE:
           output.add(new ByteWritable(((Number) value).byteValue()));
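
deserialize() reverses the conversion, reattaching the zone carried by the column's TimestampLocalTZTypeInfo before wrapping the value for Hive. A round-trip sketch using the same Hive classes this file imports (the zone is illustrative; at runtime it comes from TimestampLocalTZTypeInfo.timeZone()):

import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable;

public class FromEpochMillis {
  public static void main(String[] args) {
    long epochMillis = 1508137200000L;      // value handed back by Druid
    ZoneId zone = ZoneId.of("US/Pacific");  // stand-in for TimestampLocalTZTypeInfo.timeZone()
    TimestampTZ tz = new TimestampTZ(
        ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMillis), zone));
    TimestampLocalTZWritable writable = new TimestampLocalTZWritable(tz);
    // Round trip: flattening the zoned value again yields the original millis.
    System.out.println(tz.getZonedDateTime().toInstant().toEpochMilli()); // 1508137200000
  }
}
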
druid-handler/src/main/java/org/apache/hadoop/hive/druid/serde/DruidSerDeUtils.java
@@ -73,7 +73,7 @@ public static PrimitiveTypeInfo extractTypeFromDimension(DimensionSpec ds) {
     TimeFormatExtractionFn tfe = (TimeFormatExtractionFn) eds.getExtractionFn();
     if (tfe.getFormat() == null || tfe.getFormat().equals(ISO_TIME_FORMAT)) {
       // Timestamp (null or default used by FLOOR)
-      return TypeInfoFactory.timestampTypeInfo;
+      return TypeInfoFactory.timestampLocalTZTypeInfo;
     } else {
       // EXTRACT from timestamp
       return TypeInfoFactory.intTypeInfo;
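
The same migration applies to inferred dimension types: a timeFormat extraction with a null or ISO format is the FLOOR-style pass-through and now infers timestamp with local time zone, while any other format is an EXTRACT and still infers int. A dependency-free sketch of the mapping (the ISO_TIME_FORMAT literal is an assumption about DruidSerDeUtils):

import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class TimeExtractionType {
  // Assumed to match the ISO_TIME_FORMAT constant referenced above.
  static final String ISO_TIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";

  static PrimitiveTypeInfo inferred(String format) {
    if (format == null || format.equals(ISO_TIME_FORMAT)) {
      return TypeInfoFactory.timestampLocalTZTypeInfo; // FLOOR: still a timestamp
    }
    return TypeInfoFactory.intTypeInfo;                // EXTRACT: an integer
  }

  public static void main(String[] args) {
    System.out.println(inferred(null).getTypeName()); // timestamp with local time zone
    System.out.println(inferred("w").getTypeName());  // int
  }
}
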