// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "arrow/csv/reader.h"
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "arrow/buffer.h"
#include "arrow/csv/chunker.h"
#include "arrow/csv/column-builder.h"
#include "arrow/csv/options.h"
#include "arrow/csv/parser.h"
#include "arrow/io/readahead.h"
#include "arrow/status.h"
#include "arrow/table.h"
#include "arrow/type.h"
#include "arrow/util/logging.h"
#include "arrow/util/macros.h"
#include "arrow/util/task-group.h"
#include "arrow/util/thread-pool.h"
namespace arrow {
class MemoryPool;
namespace io {
class InputStream;
} // namespace io
namespace csv {
using internal::GetCpuThreadPool;
using internal::ThreadPool;
using io::internal::ReadaheadBuffer;
using io::internal::ReadaheadSpooler;
static constexpr int64_t kDefaultLeftPadding = 2048; // 2 kB
static constexpr int64_t kDefaultRightPadding = 16;
/////////////////////////////////////////////////////////////////////////
// Base class for common functionality
// Shared implementation for serial and threaded CSV table readers.
// Owns the readahead spooler, the current data block, the parsed column
// names and the per-column builders; subclasses drive the read loop.
class BaseTableReader : public csv::TableReader {
 public:
  BaseTableReader(MemoryPool* pool, const ReadOptions& read_options,
                  const ParseOptions& parse_options,
                  const ConvertOptions& convert_options)
      : pool_(pool),
        read_options_(read_options),
        parse_options_(parse_options),
        convert_options_(convert_options) {}

 protected:
  // Read a next data block, stitch it to trailing data
  //
  // On success, cur_block_/cur_data_/cur_size_ describe a contiguous byte
  // range holding any leftover (unparsed) bytes from the previous block
  // followed by the freshly read data.  Sets eof_ (leaving current data
  // untouched) when the underlying stream is exhausted.
  Status ReadNextBlock() {
    bool trailing_data = cur_size_ > 0;
    ReadaheadBuffer rh;
    if (trailing_data) {
      if (readahead_->GetLeftPadding() < cur_size_) {
        // Growth heuristic to try and ensure sufficient left padding
        // in subsequent reads
        readahead_->SetLeftPadding(cur_size_ * 3 / 2);
      }
    }
    RETURN_NOT_OK(readahead_->Read(&rh));
    if (!rh.buffer) {
      // EOF, let caller finish with existing data
      eof_ = true;
      return Status::OK();
    }
    std::shared_ptr<Buffer> new_block = rh.buffer;
    // Usable payload sits between the left and right padding areas.
    uint8_t* new_data = rh.buffer->mutable_data() + rh.left_padding;
    int64_t new_size = rh.buffer->size() - rh.left_padding - rh.right_padding;
    DCHECK_GT(new_size, 0);  // ensured by ReadaheadSpooler
    if (trailing_cr_ && new_data[0] == '\n') {
      // Skip '\r\n' line separator that started at the end of previous block
      ++new_data;
      --new_size;
    }
    // Remember whether this block ends in the middle of a potential '\r\n'
    // so the next call can skip the dangling '\n'.
    // NOTE(review): if the skip above consumed the entire block
    // (new_size == 0) this indexes one byte before new_data; that byte lies
    // in the left padding area, but the result is meaningless — confirm the
    // spooler cannot deliver a 1-byte block in that situation.
    trailing_cr_ = (new_data[new_size - 1] == '\r');
    if (trailing_data) {
      // Try to copy trailing data at the beginning of new block
      if (cur_size_ <= rh.left_padding) {
        // Can left-extend new block inside padding area
        new_data -= cur_size_;
        new_size += cur_size_;
        std::memcpy(new_data, cur_data_, cur_size_);
      } else {
        // Need to allocate bigger block and concatenate trailing + present data
        RETURN_NOT_OK(
            AllocateBuffer(pool_, cur_size_ + new_size + rh.right_padding, &new_block));
        std::memcpy(new_block->mutable_data(), cur_data_, cur_size_);
        std::memcpy(new_block->mutable_data() + cur_size_, new_data, new_size);
        // Zero the right-padding area so downstream parsing never reads
        // uninitialized memory.
        std::memset(new_block->mutable_data() + cur_size_ + new_size, 0,
                    rh.right_padding);
        new_data = new_block->mutable_data();
        new_size = cur_size_ + new_size;
      }
    }
    // cur_block_ keeps the backing buffer alive while cur_data_/cur_size_
    // point into it (the old cur_block_ is released here).
    cur_block_ = new_block;
    cur_data_ = new_data;
    cur_size_ = new_size;
    return Status::OK();
  }

  // Read header and column names from current block, create column builders
  //
  // Advances cur_data_/cur_size_ past the parsed header rows and fills
  // num_cols_, names_ and column_builders_.  Requires the whole header to
  // fit in the current block.
  Status ProcessHeader() {
    DCHECK_GT(cur_size_, 0);
    if (parse_options_.header_rows == 0) {
      // TODO allow passing names and/or generate column numbers?
      return Status::Invalid("header_rows == 0 needs explicit column names");
    }
    BlockParser parser(pool_, parse_options_, num_cols_, parse_options_.header_rows);
    uint32_t parsed_size = 0;
    RETURN_NOT_OK(parser.Parse(reinterpret_cast<const char*>(cur_data_),
                               static_cast<uint32_t>(cur_size_), &parsed_size));
    if (parser.num_rows() != parse_options_.header_rows) {
      return Status::Invalid(
          "Could not read header rows from CSV file, either "
          "file is too short or header is larger than block size");
    }
    if (parser.num_cols() == 0) {
      return Status::Invalid("No columns in CSV file");
    }
    num_cols_ = parser.num_cols();
    DCHECK_GT(num_cols_, 0);
    for (int32_t col_index = 0; col_index < num_cols_; ++col_index) {
      // Record a name for this column the first time the visitor fires;
      // with multiple header rows, only the first visited value is kept.
      auto visit = [&](const uint8_t* data, uint32_t size, bool quoted) -> Status {
        if (names_.size() <= static_cast<uint32_t>(col_index)) {
          names_.emplace_back(reinterpret_cast<const char*>(data), size);
        }
        return Status::OK();
      };
      RETURN_NOT_OK(parser.VisitColumn(col_index, visit));
      std::shared_ptr<ColumnBuilder> builder;
      RETURN_NOT_OK(
          ColumnBuilder::Make(col_index, convert_options_, task_group_, &builder));
      column_builders_.push_back(builder);
    }
    // Skip parsed header rows
    cur_data_ += parsed_size;
    cur_size_ -= parsed_size;
    return Status::OK();
  }

  // Trigger conversion of parsed block data
  //
  // Hands the parsed block to every column builder; actual conversion work
  // is scheduled on task_group_ by the builders.
  Status ProcessData(const std::shared_ptr<BlockParser>& parser, int64_t block_index) {
    for (auto& builder : column_builders_) {
      builder->Insert(block_index, parser);
    }
    return Status::OK();
  }

  // Assemble the final Table from the finished column builders.
  // Must be called after task_group_->Finish() succeeded.
  Status MakeTable(std::shared_ptr<Table>* out) {
    DCHECK_GT(num_cols_, 0);
    DCHECK_EQ(names_.size(), static_cast<uint32_t>(num_cols_));
    DCHECK_EQ(column_builders_.size(), static_cast<uint32_t>(num_cols_));
    std::vector<std::shared_ptr<Field>> fields;
    std::vector<std::shared_ptr<Column>> columns;
    for (int32_t i = 0; i < num_cols_; ++i) {
      std::shared_ptr<ChunkedArray> array;
      RETURN_NOT_OK(column_builders_[i]->Finish(&array));
      columns.push_back(std::make_shared<Column>(names_[i], array));
      // Field metadata (type) is inferred by the Column from the array.
      fields.push_back(columns.back()->field());
    }
    *out = Table::Make(schema(fields), columns);
    return Status::OK();
  }

  MemoryPool* pool_;
  ReadOptions read_options_;
  ParseOptions parse_options_;
  ConvertOptions convert_options_;
  // Number of columns; -1 until the header has been processed
  int32_t num_cols_ = -1;
  std::shared_ptr<ReadaheadSpooler> readahead_;
  // Column names read from the header
  std::vector<std::string> names_;
  std::shared_ptr<internal::TaskGroup> task_group_;
  std::vector<std::shared_ptr<ColumnBuilder>> column_builders_;
  // Current block and data pointer
  std::shared_ptr<Buffer> cur_block_;
  const uint8_t* cur_data_ = nullptr;
  int64_t cur_size_ = 0;
  // Index of current block inside data stream
  int64_t cur_block_index_ = 0;
  // Whether there was a trailing CR at the end of last parsed line
  bool trailing_cr_ = false;
  // Whether we reached input stream EOF. There may still be data left to
  // process in current block.
  bool eof_ = false;
};
/////////////////////////////////////////////////////////////////////////
// Serial TableReader implementation
// TableReader that reads, parses and converts everything on the calling
// thread.
class SerialTableReader : public BaseTableReader {
 public:
  SerialTableReader(MemoryPool* pool, std::shared_ptr<io::InputStream> input,
                    const ReadOptions& read_options, const ParseOptions& parse_options,
                    const ConvertOptions& convert_options)
      : BaseTableReader(pool, read_options, parse_options, convert_options) {
    // Since we're converting serially, no need to readahead more than one block
    int32_t block_queue_size = 1;
    readahead_ = std::make_shared<ReadaheadSpooler>(
        pool_, input, read_options_.block_size, block_queue_size, kDefaultLeftPadding,
        kDefaultRightPadding);
  }

  // Read the entire CSV stream and build the output Table.
  //
  // Loop invariant: cur_data_/cur_size_ hold unparsed bytes; each iteration
  // either parses at least one row from them or fetches more data.
  Status Read(std::shared_ptr<Table>* out) {
    task_group_ = internal::TaskGroup::MakeSerial();
    // First block
    RETURN_NOT_OK(ReadNextBlock());
    if (eof_) {
      return Status::Invalid("Empty CSV file");
    }
    RETURN_NOT_OK(ProcessHeader());
    // Single parser reused for all blocks; row count is effectively unbounded
    static constexpr int32_t max_num_rows = std::numeric_limits<int32_t>::max();
    auto parser =
        std::make_shared<BlockParser>(pool_, parse_options_, num_cols_, max_num_rows);
    while (!eof_) {
      // Consume current block
      uint32_t parsed_size = 0;
      RETURN_NOT_OK(parser->Parse(reinterpret_cast<const char*>(cur_data_),
                                  static_cast<uint32_t>(cur_size_), &parsed_size));
      if (parser->num_rows() > 0) {
        // Got some data
        RETURN_NOT_OK(ProcessData(parser, cur_block_index_++));
        cur_data_ += parsed_size;
        cur_size_ -= parsed_size;
        if (!task_group_->ok()) {
          // Conversion error => early exit
          break;
        }
      } else {
        // Need to fetch more data to get at least one row
        // (ReadNextBlock() stitches the leftover bytes to the new block)
        RETURN_NOT_OK(ReadNextBlock());
      }
    }
    if (eof_ && cur_size_ > 0) {
      // Parse remaining data
      // (ParseFinal accepts a last line without trailing newline)
      uint32_t parsed_size = 0;
      RETURN_NOT_OK(parser->ParseFinal(reinterpret_cast<const char*>(cur_data_),
                                       static_cast<uint32_t>(cur_size_), &parsed_size));
      if (parser->num_rows() > 0) {
        RETURN_NOT_OK(ProcessData(parser, cur_block_index_++));
      }
    }
    // Finish conversion, create schema and table
    RETURN_NOT_OK(task_group_->Finish());
    return MakeTable(out);
  }
};
/////////////////////////////////////////////////////////////////////////
// Parallel TableReader implementation
// TableReader that chunks the input serially but parses and converts each
// chunk on a worker thread pool.
class ThreadedTableReader : public BaseTableReader {
 public:
  ThreadedTableReader(MemoryPool* pool, std::shared_ptr<io::InputStream> input,
                      ThreadPool* thread_pool, const ReadOptions& read_options,
                      const ParseOptions& parse_options,
                      const ConvertOptions& convert_options)
      : BaseTableReader(pool, read_options, parse_options, convert_options),
        thread_pool_(thread_pool) {
    // Readahead one block per worker thread
    int32_t block_queue_size = thread_pool->GetCapacity();
    readahead_ = std::make_shared<ReadaheadSpooler>(
        pool_, input, read_options_.block_size, block_queue_size, kDefaultLeftPadding,
        kDefaultRightPadding);
  }

  ~ThreadedTableReader() {
    if (task_group_) {
      // In case of error, make sure all pending tasks are finished before
      // we start destroying BaseTableReader members
      ARROW_UNUSED(task_group_->Finish());
    }
  }

  // Read the entire CSV stream and build the output Table.
  //
  // The main thread runs the Chunker to find row boundaries; each complete
  // chunk is parsed + converted by a task appended to the threaded task
  // group.  The tail of the file (after EOF) is handled serially.
  Status Read(std::shared_ptr<Table>* out) {
    task_group_ = internal::TaskGroup::MakeThreaded(thread_pool_);
    static constexpr int32_t max_num_rows = std::numeric_limits<int32_t>::max();
    Chunker chunker(parse_options_);
    // Get first block and process header serially
    RETURN_NOT_OK(ReadNextBlock());
    if (eof_) {
      return Status::Invalid("Empty CSV file");
    }
    RETURN_NOT_OK(ProcessHeader());
    while (!eof_ && task_group_->ok()) {
      // Consume current chunk
      uint32_t chunk_size = 0;
      RETURN_NOT_OK(chunker.Process(reinterpret_cast<const char*>(cur_data_),
                                    static_cast<uint32_t>(cur_size_), &chunk_size));
      if (chunk_size > 0) {
        // Got a chunk of rows
        // Capture the data pointer, owning buffer and block index by value:
        // cur_data_/cur_block_ may change before the task runs.
        const uint8_t* chunk_data = cur_data_;
        std::shared_ptr<Buffer> chunk_buffer = cur_block_;
        int64_t chunk_index = cur_block_index_;
        // "mutable" allows to modify captured by-copy chunk_buffer
        task_group_->Append([=]() mutable -> Status {
          auto parser = std::make_shared<BlockParser>(pool_, parse_options_, num_cols_,
                                                      max_num_rows);
          uint32_t parsed_size = 0;
          RETURN_NOT_OK(parser->Parse(reinterpret_cast<const char*>(chunk_data),
                                      chunk_size, &parsed_size));
          if (parsed_size != chunk_size) {
            DCHECK_EQ(parsed_size, chunk_size);
            std::stringstream ss;
            ss << "Chunker and parser disagree on block size: " << chunk_size << " vs "
               << parsed_size;
            return Status::Invalid(ss.str());
          }
          RETURN_NOT_OK(ProcessData(parser, chunk_index));
          // Keep chunk buffer alive within closure and release it at the end
          chunk_buffer.reset();
          return Status::OK();
        });
        cur_data_ += chunk_size;
        cur_size_ -= chunk_size;
        cur_block_index_++;
      } else {
        // Need to fetch more data to get at least one row
        RETURN_NOT_OK(ReadNextBlock());
      }
    }
    // Finish all pending parallel tasks
    RETURN_NOT_OK(task_group_->Finish());
    if (eof_ && cur_size_ > 0) {
      // Parse remaining data (serial)
      // Rebind the column builders to a serial task group for the tail.
      task_group_ = internal::TaskGroup::MakeSerial();
      for (auto& builder : column_builders_) {
        builder->SetTaskGroup(task_group_);
      }
      auto parser =
          std::make_shared<BlockParser>(pool_, parse_options_, num_cols_, max_num_rows);
      uint32_t parsed_size = 0;
      RETURN_NOT_OK(parser->ParseFinal(reinterpret_cast<const char*>(cur_data_),
                                       static_cast<uint32_t>(cur_size_), &parsed_size));
      if (parser->num_rows() > 0) {
        RETURN_NOT_OK(ProcessData(parser, cur_block_index_++));
      }
      RETURN_NOT_OK(task_group_->Finish());
    }
    // Create schema and table
    return MakeTable(out);
  }

 protected:
  // Not owned (global CPU pool by default)
  ThreadPool* thread_pool_;
};
/////////////////////////////////////////////////////////////////////////
// TableReader factory function
// Factory for TableReader instances.
//
// Chooses a threaded implementation (using the global CPU thread pool) when
// ReadOptions::use_threads is set, a fully serial one otherwise.  Always
// returns Status::OK(); construction itself cannot fail.
Status TableReader::Make(MemoryPool* pool, std::shared_ptr<io::InputStream> input,
                         const ReadOptions& read_options,
                         const ParseOptions& parse_options,
                         const ConvertOptions& convert_options,
                         std::shared_ptr<TableReader>* out) {
  if (read_options.use_threads) {
    *out = std::make_shared<ThreadedTableReader>(pool, input, GetCpuThreadPool(),
                                                 read_options, parse_options,
                                                 convert_options);
  } else {
    *out = std::make_shared<SerialTableReader>(pool, input, read_options, parse_options,
                                               convert_options);
  }
  return Status::OK();
}
} // namespace csv
} // namespace arrow