VectorizedOrcInputFormat.java

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.io.orc;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.BucketIdentifier;
import org.apache.hadoop.hive.ql.io.InputFormatChecker;
import org.apache.hadoop.hive.ql.io.RowPositionAwareVectorizedRecordReader;
import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.TypeDescription;

/**
 * A vectorized MapReduce/Hive input format for ORC files. Rather than
 * returning one row at a time, it hands rows to the operator pipeline a
 * {@link VectorizedRowBatch} at a time.
 */
public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, VectorizedRowBatch>
    implements InputFormatChecker, VectorizedInputFormatInterface,
    SelfDescribingInputFormatInterface {
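
  /**
   * Reads an ORC file split as a stream of {@link VectorizedRowBatch}es keyed
   * by {@link NullWritable}.
   */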
  static class VectorizedOrcRecordReader
      implements RecordReader<NullWritable, VectorizedRowBatch>, RowPositionAwareVectorizedRecordReader {
    private final org.apache.hadoop.hive.ql.io.orc.RecordReader reader;
    private final long offset;
    private final long length;
    private float progress = 0.0f;
    private VectorizedRowBatchCtx rbCtx;
    private final Object[] partitionValues;
    private boolean addPartitionCols = true;
    private final BucketIdentifier bucketIdentifier;
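
    /**
     * Sets up the ORC row reader for one file split: resolves the read schema
     * (an explicit schema-on-read override, the desired schema from the
     * configuration, or the file's own schema), applies column projection and
     * any search argument, and captures the split's partition values.
     */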
    VectorizedOrcRecordReader(Reader file, Configuration conf,
        FileSplit fileSplit) throws IOException {
      boolean isAcidRead = AcidUtils.isFullAcidScan(conf);
      if (isAcidRead) {
        OrcInputFormat.raiseAcidTablesMustBeReadWithAcidReaderException(conf);
      }

      rbCtx = Utilities.getVectorizedRowBatchCtx(conf);

      /**
       * Do we have schema on read in the configuration variables?
       */
      int dataColumns = rbCtx.getDataColumnCount();
      String orcSchemaOverrideString = conf.get(ColumnProjectionUtils.ORC_SCHEMA_STRING);
      TypeDescription schema = orcSchemaOverrideString == null ?
          OrcInputFormat.getDesiredRowTypeDescr(conf, false, dataColumns) :
          TypeDescription.fromString(orcSchemaOverrideString);
      if (schema == null) {
        schema = file.getSchema();
        // Even if the user isn't doing schema evolution, cut the schema
        // to the desired size.
        if (schema.getCategory() == TypeDescription.Category.STRUCT &&
            schema.getChildren().size() > dataColumns) {
          schema = schema.clone();
          List<TypeDescription> children = schema.getChildren();
          for (int c = children.size() - 1; c >= dataColumns; --c) {
            children.remove(c);
          }
        }
      }
      List<OrcProto.Type> types = OrcUtils.getOrcTypes(schema);

      Reader.Options options = new Reader.Options(conf).schema(schema);

      this.offset = fileSplit.getStart();
      this.length = fileSplit.getLength();
      options.range(offset, length);
      options.include(OrcInputFormat.genIncludedColumns(schema, conf));
      OrcInputFormat.setSearchArgument(options, types, conf, true);

      this.reader = file.rowsOptions(options, conf);

      int partitionColumnCount = rbCtx.getPartitionColumnCount();
      if (partitionColumnCount > 0) {
        partitionValues = new Object[partitionColumnCount];
        rbCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
      } else {
        partitionValues = null;
      }

      this.bucketIdentifier = BucketIdentifier.from(conf, fileSplit.getPath());
    }
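
    /**
     * Fills {@code value} with the next batch of rows; returns false once the
     * split is exhausted.
     */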
    @Override
    public boolean next(NullWritable key, VectorizedRowBatch value) throws IOException {
      try {
        // Check and update partition cols if necessary. Ideally this would be
        // done in createValue(), since the partition values are constant per
        // split. But Hive uses CombineHiveRecordReader, which does not call
        // createValue() for each new RecordReader it creates, so the check has
        // to live in next().
        if (addPartitionCols) {
          if (partitionValues != null) {
            rbCtx.addPartitionColsToBatch(value, partitionValues);
          }
          addPartitionCols = false;
        }
        if (!reader.nextBatch(value)) {
          return false;
        }
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
      progress = reader.getProgress();

      if (bucketIdentifier != null) {
        rbCtx.setBucketAndWriteIdOf(value, bucketIdentifier);
      }
      return true;
    }

    @Override
    public NullWritable createKey() {
      return NullWritable.get();
    }

    @Override
    public VectorizedRowBatch createValue() {
      return rbCtx.createVectorizedRowBatch();
    }
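
    // The underlying reader exposes progress rather than a byte offset, so
    // approximate the position from progress within the split.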
    @Override
    public long getPos() throws IOException {
      return offset + (long) (progress * length);
    }

    @Override
    public void close() throws IOException {
      reader.close();
    }

    @Override
    public float getProgress() throws IOException {
      return progress;
    }

    @Override
    public long getRowNumber() throws IOException {
      return reader.getRowNumber();
    }
  }

  public VectorizedOrcInputFormat() {
    // just set a really small lower bound
    setMinSplitSize(16 * 1024);
  }
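
  /**
   * Opens the ORC file behind the split and wraps it in a
   * {@link VectorizedOrcRecordReader}, reusing the file tail cached in an
   * {@link OrcSplit} when one is available.
   */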
  @Override
  public RecordReader<NullWritable, VectorizedRowBatch>
      getRecordReader(InputSplit inputSplit, JobConf conf,
          Reporter reporter) throws IOException {
    FileSplit fSplit = (FileSplit) inputSplit;
    reporter.setStatus(fSplit.toString());

    Path path = fSplit.getPath();

    OrcFile.ReaderOptions opts = OrcFile.readerOptions(conf);
    if (fSplit instanceof OrcSplit) {
      OrcSplit orcSplit = (OrcSplit) fSplit;
      if (orcSplit.hasFooter()) {
        opts.orcTail(orcSplit.getOrcTail());
      }
      opts.maxLength(orcSplit.getFileLength());
    }
    Reader reader = OrcFile.createReader(path, opts);
    return new VectorizedOrcRecordReader(reader, conf, fSplit);
  }
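
  /**
   * Input is valid only if every candidate file can be opened as an ORC file.
   */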
  @Override
  public boolean validateInput(FileSystem fs, HiveConf conf,
      List<FileStatus> files
      ) throws IOException {
    if (files.size() <= 0) {
      return false;
    }
    for (FileStatus file : files) {
      try (Reader notUsed = OrcFile.createReader(file.getPath(),
          OrcFile.readerOptions(conf).filesystem(fs))) {
        // We do not use the reader itself. We just check if we can open the file.
      } catch (IOException e) {
        return false;
      }
    }
    return true;
  }
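
  /**
   * Advertises DECIMAL_64 support so the vectorizer can use long-backed
   * decimal column vectors where the schema allows it.
   */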
  @Override
  public VectorizedSupport.Support[] getSupportedFeatures() {
    return new VectorizedSupport.Support[] {VectorizedSupport.Support.DECIMAL_64};
  }
}
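
/*
 * A minimal usage sketch, not part of the original file. It assumes a JobConf
 * that already carries the vectorization context Hive normally sets up for a
 * map task (Utilities.getVectorizedRowBatchCtx needs the serialized plan in
 * the configuration), so it is illustrative rather than standalone-runnable.
 * The class name and the input path below are hypothetical.
 */
class VectorizedOrcScanSketch {
  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf();
    // Hypothetical ORC file; replace with a real path.
    FileInputFormat.setInputPaths(conf, new Path("/tmp/example.orc"));

    VectorizedOrcInputFormat format = new VectorizedOrcInputFormat();
    for (InputSplit split : format.getSplits(conf, 1)) {
      RecordReader<NullWritable, VectorizedRowBatch> reader =
          format.getRecordReader(split, conf, Reporter.NULL);
      NullWritable key = reader.createKey();
      VectorizedRowBatch batch = reader.createValue();
      long rows = 0;
      // Each next() call fills a whole batch rather than a single row.
      while (reader.next(key, batch)) {
        rows += batch.size;
      }
      reader.close();
      System.out.println(split + ": " + rows + " rows");
    }
  }
}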