From b5e3bc762361e2960d3e5e787719b20a6786e32c Mon Sep 17 00:00:00 2001
From: Arina Ielchiieva
Date: Fri, 2 Mar 2018 13:38:00 +0200
Subject: [PATCH] DRILL-6204: Pass tables columns without partition columns to
 empty Hive reader

---
 .../store/hive/HiveDrillNativeScanBatchCreator.java  | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
index 3861aa02378..43318d17cc5 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeScanBatchCreator.java
@@ -76,11 +76,11 @@ public ScanBatch getBatch(ExecutorFragmentContext context, HiveDrillNativeParque
 
     final List<String[]> partitionColumns = Lists.newArrayList();
     final List<Integer> selectedPartitionColumns = Lists.newArrayList();
-    List<SchemaPath> newColumns = columns;
+    List<SchemaPath> tableColumns = columns;
     if (!selectAllQuery) {
       // Separate out the partition and non-partition columns. Non-partition columns are passed directly to the
       // ParquetRecordReader. Partition columns are passed to ScanBatch.
-      newColumns = Lists.newArrayList();
+      tableColumns = Lists.newArrayList();
       Pattern pattern = Pattern.compile(String.format("%s[0-9]+", partitionDesignator));
       for (SchemaPath column : columns) {
         Matcher m = pattern.matcher(column.getRootSegmentPath());
@@ -88,7 +88,7 @@ public ScanBatch getBatch(ExecutorFragmentContext context, HiveDrillNativeParque
           selectedPartitionColumns.add(
               Integer.parseInt(column.getRootSegmentPath().substring(partitionDesignator.length())));
         } else {
-          newColumns.add(column);
+          tableColumns.add(column);
         }
       }
     }
@@ -139,7 +139,7 @@ public ScanBatch getBatch(ExecutorFragmentContext context, HiveDrillNativeParque
               CodecFactory.createDirectCodecFactory(fs.getConf(),
                   new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
               parquetMetadata,
-              newColumns,
+              tableColumns,
               containsCorruptDates)
           );
           Map<String, String> implicitValues = Maps.newLinkedHashMap();
@@ -174,7 +174,7 @@ public ScanBatch getBatch(ExecutorFragmentContext context, HiveDrillNativeParque
     // If there are no readers created (which is possible when the table is empty or no row groups are matched),
     // create an empty RecordReader to output the schema
     if (readers.size() == 0) {
-      readers.add(new HiveDefaultReader(table, null, null, columns, context, conf,
+      readers.add(new HiveDefaultReader(table, null, null, tableColumns, context, conf,
           ImpersonationUtil.createProxyUgi(config.getUserName(), context.getQueryUserName())));
     }
 
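Reviewer note: the first three hunks are a rename only; the behavioral fix is the last hunk, which hands the fallback HiveDefaultReader the table columns rather than the full projection list. For illustration, below is a minimal standalone sketch of the column-splitting logic the patch relies on. It uses plain Strings in place of Drill's SchemaPath (so column.getRootSegmentPath() reduces to the string itself), assumes "dir" as the partition designator (Drill's default partition column label), and the class name PartitionColumnSplit is made up for the example.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PartitionColumnSplit {
  public static void main(String[] args) {
    // "dir" stands in for partitionDesignator in the patch.
    String partitionDesignator = "dir";
    List<String> columns = Arrays.asList("name", "dir0", "salary", "dir1");

    List<String> tableColumns = new ArrayList<>();
    List<Integer> selectedPartitionColumns = new ArrayList<>();

    // Same pattern the patch compiles: the designator followed by digits
    // (dir0, dir1, ...) marks directory-based partition columns.
    Pattern pattern = Pattern.compile(String.format("%s[0-9]+", partitionDesignator));
    for (String column : columns) {
      Matcher m = pattern.matcher(column);
      if (m.matches()) {
        // Partition columns are kept as indexes and resolved later by ScanBatch.
        selectedPartitionColumns.add(
            Integer.parseInt(column.substring(partitionDesignator.length())));
      } else {
        // Everything else is a real table column and goes to the record reader.
        tableColumns.add(column);
      }
    }

    System.out.println(tableColumns);             // prints: [name, salary]
    System.out.println(selectedPartitionColumns); // prints: [0, 1]
  }
}

Before the patch, the empty-table branch received the untouched columns list, so dirN entries reached HiveDefaultReader even though no matching table column exists; passing tableColumns makes the empty-table path consistent with the regular path, where readers also see only non-partition columns and ScanBatch supplies the partition values.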