Core: Ignore split offsets array when split offset is past file length
amogh-jahagirdar committed Oct 27, 2023
1 parent aa891ac · commit 34eee1b
Showing 3 changed files with 80 additions and 15 deletions.
core/src/main/java/org/apache/iceberg/BaseFile.java (15 additions, 9 deletions)

@@ -460,21 +460,27 @@ public ByteBuffer keyMetadata() {
 
   @Override
   public List<Long> splitOffsets() {
-    if (splitOffsets == null || splitOffsets.length == 0) {
-      return null;
+    if (hasWellDefinedOffsets()) {
+      return ArrayUtil.toUnmodifiableLongList(splitOffsets);
     }
 
-    // If the last split offset is past the file size this means the split offsets are corrupted and
-    // should not be used
-    if (splitOffsets[splitOffsets.length - 1] >= fileSizeInBytes) {
-      return null;
+    return null;
   }
 
+  long[] splitOffsetArray() {
+    if (hasWellDefinedOffsets()) {
+      return splitOffsets;
+    }
+
-    return ArrayUtil.toUnmodifiableLongList(splitOffsets);
+    return null;
   }
 
-  long[] splitOffsetArray() {
-    return splitOffsets;
+  private boolean hasWellDefinedOffsets() {
+    // If the last split offset is past the file size this means the split offsets are corrupted and
+    // should not be used
+    return splitOffsets != null
+        && splitOffsets.length != 0
+        && splitOffsets[splitOffsets.length - 1] < fileSizeInBytes;
   }
 
   @Override
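The refactor folds the null, empty, and out-of-range cases into a single hasWellDefinedOffsets() predicate, and splitOffsetArray() now applies the same guard instead of returning the raw array. A minimal standalone sketch of the rule (the class name and demo values are illustrative, not Iceberg API; it assumes offsets are recorded in ascending order, so checking only the last one suffices):

public class SplitOffsetsCheck {

  // Mirrors the guard above: offsets must be present, non-empty, and the last
  // offset must fall inside the file.
  static boolean hasWellDefinedOffsets(long[] splitOffsets, long fileSizeInBytes) {
    return splitOffsets != null
        && splitOffsets.length != 0
        && splitOffsets[splitOffsets.length - 1] < fileSizeInBytes;
  }

  public static void main(String[] args) {
    long fileSize = 10L;
    System.out.println(hasWellDefinedOffsets(new long[] {2L}, fileSize)); // true
    System.out.println(hasWellDefinedOffsets(new long[] {2L, 12L}, fileSize)); // false: 12 >= 10
    System.out.println(hasWellDefinedOffsets(new long[] {}, fileSize)); // false: empty
    System.out.println(hasWellDefinedOffsets(null, fileSize)); // false: missing
  }
}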
core/src/test/java/org/apache/iceberg/TestSplitPlanning.java (19 additions, 6 deletions)

@@ -291,27 +291,36 @@ private void appendFiles(Iterable<DataFile> files) {
   }
 
   private List<DataFile> newFiles(int numFiles, long sizeInBytes) {
-    return newFiles(numFiles, sizeInBytes, FileFormat.PARQUET, 1);
+    return newFiles(numFiles, sizeInBytes, FileFormat.PARQUET, 1, false);
   }
 
   private List<DataFile> newFiles(int numFiles, long sizeInBytes, int numOffset) {
-    return newFiles(numFiles, sizeInBytes, FileFormat.PARQUET, numOffset);
+    return newFiles(numFiles, sizeInBytes, FileFormat.PARQUET, numOffset, false);
   }
 
+  private List<DataFile> newFilesWithInvalidOffset(int numFiles, long sizeInBytes, int numOffset) {
+    return newFiles(numFiles, sizeInBytes, FileFormat.PARQUET, numOffset, true);
+  }
+
   private List<DataFile> newFiles(int numFiles, long sizeInBytes, FileFormat fileFormat) {
-    return newFiles(numFiles, sizeInBytes, fileFormat, 1);
+    return newFiles(numFiles, sizeInBytes, fileFormat, 1, false);
   }
 
   private List<DataFile> newFiles(
-      int numFiles, long sizeInBytes, FileFormat fileFormat, int numOffset) {
+      int numFiles,
+      long sizeInBytes,
+      FileFormat fileFormat,
+      int numOffset,
+      boolean produceInvalidOffset) {
     List<DataFile> files = Lists.newArrayList();
     for (int fileNum = 0; fileNum < numFiles; fileNum++) {
-      files.add(newFile(sizeInBytes, fileFormat, numOffset));
+      files.add(newFile(sizeInBytes, fileFormat, numOffset, produceInvalidOffset));
     }
     return files;
   }
 
-  private DataFile newFile(long sizeInBytes, FileFormat fileFormat, int numOffsets) {
+  private DataFile newFile(
+      long sizeInBytes, FileFormat fileFormat, int numOffsets, boolean produceInvalidOffset) {
     String fileName = UUID.randomUUID().toString();
     Builder builder =
         DataFiles.builder(PartitionSpec.unpartitioned())

@@ -326,6 +335,10 @@ private DataFile newFile(long sizeInBytes, FileFormat fileFormat, int numOffsets
               .map(i -> i * stepSize)
               .boxed()
               .collect(Collectors.toList());
+      if (produceInvalidOffset) {
+        offsets.add(sizeInBytes + 1);
+      }
+
       builder.withSplitOffsets(offsets);
     }
 
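The test helpers gain a produceInvalidOffset flag: offsets are generated at even stepSize intervals across the file, and the flag appends one extra offset one byte past the end, which is corrupt by construction. A condensed, self-contained sketch of that generation (the class and method names are hypothetical, simplified from the helper above):

import java.util.ArrayList;
import java.util.List;

public class OffsetFixtures {

  // Evenly spaced offsets across the file; optionally append an offset one
  // byte past the end to simulate corrupted split metadata.
  static List<Long> offsets(long sizeInBytes, int numOffsets, boolean produceInvalidOffset) {
    long stepSize = sizeInBytes / numOffsets;
    List<Long> offsets = new ArrayList<>();
    for (int i = 0; i < numOffsets; i++) {
      offsets.add(i * stepSize);
    }
    if (produceInvalidOffset) {
      offsets.add(sizeInBytes + 1); // e.g. 11 for a 10-byte file
    }
    return offsets;
  }

  public static void main(String[] args) {
    System.out.println(offsets(10L, 2, false)); // [0, 5]
    System.out.println(offsets(10L, 2, true)); // [0, 5, 11]
  }
}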
core/src/test/java/org/apache/iceberg/util/TestTableScanUtil.java (46 additions, 0 deletions)

@@ -25,19 +25,26 @@
 import java.util.List;
 import java.util.stream.Collectors;
 import org.apache.iceberg.BaseCombinedScanTask;
+import org.apache.iceberg.BaseFileScanTask;
 import org.apache.iceberg.CombinedScanTask;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
 import org.apache.iceberg.DeleteFile;
 import org.apache.iceberg.FileScanTask;
 import org.apache.iceberg.MergeableScanTask;
 import org.apache.iceberg.MockFileScanTask;
 import org.apache.iceberg.PartitionScanTask;
 import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.PartitionSpecParser;
 import org.apache.iceberg.ScanTask;
 import org.apache.iceberg.ScanTaskGroup;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.SchemaParser;
 import org.apache.iceberg.SplittableScanTask;
 import org.apache.iceberg.StructLike;
+import org.apache.iceberg.TableTestBase;
 import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.expressions.ResidualEvaluator;
 import org.apache.iceberg.io.CloseableIterable;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
@@ -126,6 +133,45 @@ public void testTaskGroupPlanning() {
     assertThat(taskGroups).as("Must have 3 task groups").hasSize(3);
   }
 
+  @Test
+  public void testTaskGroupPlanningCorruptedOffset() {
+    DataFile dataFile =
+        DataFiles.builder(TableTestBase.SPEC)
+            .withPath("/path/to/data-a.parquet")
+            .withFileSizeInBytes(10)
+            .withPartitionPath("data_bucket=0")
+            .withRecordCount(1)
+            .withSplitOffsets(
+                ImmutableList.of(2L, 12L)) // the last offset is beyond the end of the file
+            .build();
+
+    ResidualEvaluator residualEvaluator =
+        ResidualEvaluator.of(TableTestBase.SPEC, Expressions.equal("id", 1), false);
+
+    BaseFileScanTask baseFileScanTask =
+        new BaseFileScanTask(
+            dataFile,
+            null,
+            SchemaParser.toJson(TableTestBase.SCHEMA),
+            PartitionSpecParser.toJson(TableTestBase.SPEC),
+            residualEvaluator);
+
+    List<BaseFileScanTask> baseFileScanTasks = ImmutableList.of(baseFileScanTask);
+
+    int taskCount = 0;
+    for (ScanTaskGroup<BaseFileScanTask> task :
+        TableScanUtil.planTaskGroups(CloseableIterable.withNoopClose(baseFileScanTasks), 1, 1, 0)) {
+      for (FileScanTask fileScanTask : task.tasks()) {
+        DataFile taskDataFile = fileScanTask.file();
+        Assertions.assertThat(taskDataFile.splitOffsets()).isNull();
+        taskCount++;
+      }
+    }
+
+    // 10 tasks since the split offsets are ignored and there are 1 byte splits for a 10 byte file
+    Assertions.assertThat(taskCount).isEqualTo(10);
+  }
+
   @Test
   public void testTaskMerging() {
     List<ParentTask> tasks =
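The asserted count follows from the fallback behavior: once the offsets are rejected, the planner splits the file by the target split size alone, so a 10-byte file with 1-byte splits yields 10 tasks. A sketch of that arithmetic (the class and helper names are hypothetical; the real splitting logic lives in TableScanUtil):

public class ExpectedSplits {

  // With split offsets ignored, the file is cut into fixed-size chunks, so the
  // expected task count is the ceiling of fileSize / splitSize.
  static long expectedTasks(long fileSizeInBytes, long splitSizeInBytes) {
    return (fileSizeInBytes + splitSizeInBytes - 1) / splitSizeInBytes;
  }

  public static void main(String[] args) {
    System.out.println(expectedTasks(10L, 1L)); // 10, matching the assertion above
  }
}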
