[HUDI-960] Implementation of the HFile base and log file format. #1804
HoodieCreateHandle.java
@@ -45,6 +45,7 @@
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.Map;

 public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWriteHandle<T> {
@@ -55,7 +56,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
   private long recordsWritten = 0;
   private long insertRecordsWritten = 0;
   private long recordsDeleted = 0;
-  private Iterator<HoodieRecord<T>> recordIterator;
+  private Map<String, HoodieRecord<T>> recordMap;
   private boolean useWriterSchema = false;

   public HoodieCreateHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable,
@@ -90,9 +91,10 @@ public HoodieCreateHandle(HoodieWriteConfig config, String instantTime, HoodieTa
    * Called by the compactor code path.
    */
   public HoodieCreateHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable,
-      String partitionPath, String fileId, Iterator<HoodieRecord<T>> recordIterator, SparkTaskContextSupplier sparkTaskContextSupplier) {
+      String partitionPath, String fileId, Map<String, HoodieRecord<T>> recordMap,
+      SparkTaskContextSupplier sparkTaskContextSupplier) {
     this(config, instantTime, hoodieTable, partitionPath, fileId, sparkTaskContextSupplier);
-    this.recordIterator = recordIterator;
+    this.recordMap = recordMap;
     this.useWriterSchema = true;
   }

Review comment: Not sure we can leak the type of base file into the compactor. But did you consider two overloaded constructors here? For the parquet compaction path an iterator would be passed in, whereas for the hfile compaction path a record map would be passed in.

Reply: Ideally not. The more the compactor can function without knowing base-file specifics, the better.
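To make the two-overload suggestion concrete, a rough sketch with simplified stand-in types (this is not the PR's code; the real HoodieCreateHandle constructors take several more parameters):

```java
import java.util.Iterator;
import java.util.Map;

/**
 * Hypothetical sketch of the two-overload idea with stand-in types:
 * one entry point per compaction path.
 */
class CreateHandleSketch<T> {

  private Iterator<T> recordIterator; // parquet path: records can be streamed as-is
  private Map<String, T> recordMap;   // hfile path: materialized so keys can be sorted

  CreateHandleSketch(Iterator<T> recordIterator) {
    this.recordIterator = recordIterator;
  }

  CreateHandleSketch(Map<String, T> recordMap) {
    this.recordMap = recordMap;
  }
}
```

The trade-off the reply raises: overloads like these push base-file knowledge up to whichever caller chooses the constructor, whereas a single map-based entry point keeps the compactor format-agnostic.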
@@ -138,9 +140,17 @@ public void write(HoodieRecord record, Option<IndexedRecord> avroRecord) {
    * Writes all records passed.
    */
   public void write() {
+    Iterator<String> keyIterator;
+    if (hoodieTable.requireSortedRecords()) {
+      // Sorting the keys limits the amount of extra memory required for writing sorted records
+      keyIterator = recordMap.keySet().stream().sorted().iterator();
+    } else {
+      keyIterator = recordMap.keySet().stream().iterator();
+    }
     try {
-      while (recordIterator.hasNext()) {
-        HoodieRecord<T> record = recordIterator.next();
+      while (keyIterator.hasNext()) {
+        final String key = keyIterator.next();
+        HoodieRecord<T> record = recordMap.get(key);
         if (useWriterSchema) {
           write(record, record.getData().getInsertValue(writerSchemaWithMetafields));
         } else {
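For context, HFile requires keys to be appended in ascending order, which is what the requireSortedRecords() branch above serves. A standalone illustration of the sorted-key pattern in plain JDK types (not PR code): sorting just the key set duplicates only the keys in memory, while the record values stay in the map.

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

// Standalone illustration of the pattern above, using plain JDK types only.
public class SortedKeyIteration {
  public static void main(String[] args) {
    Map<String, String> recordMap = new HashMap<>();
    recordMap.put("key2", "b");
    recordMap.put("key1", "a");
    recordMap.put("key3", "c");

    // stream().sorted() buffers only the keys, not the records
    Iterator<String> keyIterator = recordMap.keySet().stream().sorted().iterator();
    while (keyIterator.hasNext()) {
      String key = keyIterator.next();
      System.out.println(key + " -> " + recordMap.get(key)); // key1, key2, key3
    }
  }
}
```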
HoodieSortedMergeHandle.java (new file)
@@ -0,0 +1,126 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hudi.io;

import org.apache.hudi.client.SparkTaskContextSupplier;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.model.HoodieBaseFile;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieUpsertException;
import org.apache.hudi.table.HoodieTable;

import org.apache.avro.generic.GenericRecord;

import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
/**
 * Hoodie merge handle which writes records (new inserts or updates) sorted by their key.
 *
 * The implementation performs a merge-sort by comparing the key of the record being written to the
 * keys in newRecordKeysSorted (sorted in-memory).
 */
public class HoodieSortedMergeHandle<T extends HoodieRecordPayload> extends HoodieMergeHandle<T> {

  private Queue<String> newRecordKeysSorted = new PriorityQueue<>();

  public HoodieSortedMergeHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable,
      Iterator<HoodieRecord<T>> recordItr, String partitionPath, String fileId, SparkTaskContextSupplier sparkTaskContextSupplier) {
    super(config, instantTime, hoodieTable, recordItr, partitionPath, fileId, sparkTaskContextSupplier);
    newRecordKeysSorted.addAll(keyToNewRecords.keySet());
  }
  /**
   * Called by compactor code path.
   */
  public HoodieSortedMergeHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable,
      Map<String, HoodieRecord<T>> keyToNewRecordsOrig, String partitionPath, String fileId,
      HoodieBaseFile dataFileToBeMerged, SparkTaskContextSupplier sparkTaskContextSupplier) {
    super(config, instantTime, hoodieTable, keyToNewRecordsOrig, partitionPath, fileId, dataFileToBeMerged,
        sparkTaskContextSupplier);

    newRecordKeysSorted.addAll(keyToNewRecords.keySet());
  }
  /**
   * Go through an old record. Here if we detect a newer version shows up, we write the new one to the file.
   */
  @Override
  public void write(GenericRecord oldRecord) {
    String key = oldRecord.get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString();

    // To maintain overall sorted order across updates and inserts, write any new inserts whose keys are less than
    // the oldRecord's key.
    while (!newRecordKeysSorted.isEmpty() && newRecordKeysSorted.peek().compareTo(key) <= 0) {
      String keyToPreWrite = newRecordKeysSorted.remove();
Review comment: Instead, we can just do a streaming sort-merge?

Follow-up: cc @prashantwason can you please chime in. I feel we can avoid the queue altogether and just sort-merge.

Reply: This is a streaming sort-merge. The logic is as follows: […] Do you have some other algorithm in mind?

Reply: I am thinking we don't need the map in HoodieMergeHandle or the PriorityQueue. The records which have changed, i.e. the input iterator, are already sorted; let's call it inputItr. So we can just compare the recordBeingWritten with inputItr.next() and write out the smallest one; if equal, we call the payload to merge. This will avoid any kind of memory overhead. (A sketch of this two-iterator merge appears after the file listing below.)

Reply: If the inputItr is sorted, then yes, all this overhead can be removed.
      if (keyToPreWrite.equals(key)) {
        // will be handled as an update later
        break;
      }

      // This is a new insert
      HoodieRecord<T> hoodieRecord = new HoodieRecord<>(keyToNewRecords.get(keyToPreWrite));
      if (writtenRecordKeys.contains(keyToPreWrite)) {
        throw new HoodieUpsertException("Insert/Update not in sorted order");
      }
      try {
        if (useWriterSchema) {
          writeRecord(hoodieRecord, hoodieRecord.getData().getInsertValue(writerSchemaWithMetafields));
        } else {
          writeRecord(hoodieRecord, hoodieRecord.getData().getInsertValue(writerSchema));
        }
        insertRecordsWritten++;
        writtenRecordKeys.add(keyToPreWrite);
      } catch (IOException e) {
        throw new HoodieUpsertException("Failed to write records", e);
      }
    }

    super.write(oldRecord);
  }
  @Override
  public WriteStatus close() {
    // write out any pending records (this can happen when inserts are turned into updates)
    newRecordKeysSorted.stream().forEach(key -> {
      try {
        HoodieRecord<T> hoodieRecord = keyToNewRecords.get(key);
        if (!writtenRecordKeys.contains(hoodieRecord.getRecordKey())) {
          if (useWriterSchema) {
            writeRecord(hoodieRecord, hoodieRecord.getData().getInsertValue(writerSchemaWithMetafields));
          } else {
            writeRecord(hoodieRecord, hoodieRecord.getData().getInsertValue(writerSchema));
          }
          insertRecordsWritten++;
        }
      } catch (IOException e) {
        throw new HoodieUpsertException("Failed to close UpdateHandle", e);
      }
    });
    newRecordKeysSorted.clear();
    keyToNewRecords.clear();

    return super.close();
  }
}
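As referenced in the review thread above, a rough sketch of the proposed queue-free streaming sort-merge over two key-sorted iterators. Record, merge, and writeRecord are illustrative stand-ins, not Hudi APIs:

```java
import java.util.Iterator;

/**
 * Sketch of a streaming sort-merge: both iterators must yield records
 * in ascending key order; on equal keys the new record (the update) wins.
 */
class SortMergeSketch {

  static class Record {
    final String key;
    Record(String key) {
      this.key = key;
    }
  }

  void merge(Iterator<Record> oldItr, Iterator<Record> newItr) {
    Record oldRec = oldItr.hasNext() ? oldItr.next() : null;
    Record newRec = newItr.hasNext() ? newItr.next() : null;
    while (oldRec != null || newRec != null) {
      if (oldRec == null) {
        // only new inserts remain
        writeRecord(newRec);
        newRec = newItr.hasNext() ? newItr.next() : null;
      } else if (newRec == null) {
        // only untouched old records remain
        writeRecord(oldRec);
        oldRec = oldItr.hasNext() ? oldItr.next() : null;
      } else {
        int cmp = newRec.key.compareTo(oldRec.key);
        if (cmp < 0) {
          // the new insert sorts first
          writeRecord(newRec);
          newRec = newItr.hasNext() ? newItr.next() : null;
        } else if (cmp > 0) {
          // the old record is untouched
          writeRecord(oldRec);
          oldRec = oldItr.hasNext() ? oldItr.next() : null;
        } else {
          // same key: an update; the payload would decide the merged value
          writeRecord(newRec);
          oldRec = oldItr.hasNext() ? oldItr.next() : null;
          newRec = newItr.hasNext() ? newItr.next() : null;
        }
      }
    }
  }

  void writeRecord(Record record) {
    // stand-in for writing to the base file
  }
}
```

The appeal is O(1) extra memory: no recordMap and no PriorityQueue, provided the input iterator is itself key-sorted, which is the precondition the last reply calls out.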
Review comment: @vinothchandar: wrt your comment on having two different configs, I see similar configs in other places too. For bloom index parallelism, for example, we have one config per index type. Initially I thought we would have a single config used by whichever index type is being initialized, but I saw that every index has its own set of configs and they don't share any.

Reply: Not following, sorry. Are you suggesting having a single config or two? We need a config per usage of HFile, so we can control the base file size for data, metadata, and record index separately. We cannot have a generic base.file.size or hfile.size config here, at this level IMO. cc @prashantwason

Reply: @prashantwason I think we eventually need a config "per use" of base file - data, metadata, index - since people may want to control them differently. So, in that sense, this has to change somewhat. Yes, the change is backwards compatible for RDD clients (which I thought was okay, since it's just Uber; if you prefer not to have that, let me know). IMO it's about time we cleaned these up, given we are moving to having many more base files/tables in the mix.
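For concreteness, "one config per use" could take roughly this shape - a separate size knob for each consumer of the HFile format. All names below are invented for illustration; none exist in this PR:

```java
/**
 * Hypothetical illustration of per-use HFile sizing. The key names are
 * made up to show the shape of the idea, not actual Hudi configs.
 */
public class HFileSizeConfigSketch {
  public static final String DATA_HFILE_MAX_SIZE = "hoodie.hfile.max.file.size";
  public static final String METADATA_HFILE_MAX_SIZE = "hoodie.metadata.hfile.max.file.size";
  public static final String INDEX_HFILE_MAX_SIZE = "hoodie.index.hfile.max.file.size";
}
```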