
Commit

Remove unnecessary zeroing of row conversion buffer
JoshRosen committed Jul 8, 2015
1 parent c56ec18 commit 845bea3
Showing 1 changed file with 0 additions and 8 deletions.
@@ -18,9 +18,7 @@
 package org.apache.spark.sql.execution;
 
 import java.io.IOException;
-import java.util.Arrays;
 
-import org.apache.spark.sql.Row;
 import scala.collection.Iterator;
 import scala.math.Ordering;
 
@@ -100,12 +98,6 @@ void insertRow(InternalRow row) throws IOException {
     final int sizeRequirement = rowConverter.getSizeRequirement(row);
     if (sizeRequirement > rowConversionBuffer.length) {
       rowConversionBuffer = new byte[sizeRequirement];
-    } else {
-      // Zero out the buffer that's used to hold the current row. This is necessary in order
-      // to ensure that rows hash properly, since garbage data from the previous row could
-      // otherwise end up as padding in this row. As a performance optimization, we only zero
-      // out the portion of the buffer that we'll actually write to.
-      Arrays.fill(rowConversionBuffer, 0, sizeRequirement, (byte) 0);
     }
     final int bytesWritten = rowConverter.writeRow(
       row, rowConversionBuffer, PlatformDependent.BYTE_ARRAY_OFFSET, objPool);
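
For context, the insertRow hunk above follows a grow-on-demand scratch-buffer pattern: one byte[] is reused across rows, reallocated only when a row needs more space, and (before this commit) zeroed up to the row's size requirement so that stale bytes from the previous row could not leak into padding that later takes part in hashing or equality checks. The sketch below is a minimal, standalone illustration of that pattern under assumptions of my own: RowWriter and ReusableConversionBuffer are hypothetical stand-ins, not the actual Spark classes behind rowConverter or PlatformDependent. Dropping the Arrays.fill step, as this commit does, is safe exactly when the writer overwrites every byte it accounts for in its reported size requirement.

import java.util.Arrays;

/** Hypothetical stand-in for a row-to-bytes encoder; not Spark's actual converter API. */
interface RowWriter<T> {
  /** Upper bound on the number of bytes needed to encode the given row. */
  int sizeRequirement(T row);
  /** Encodes the row into target starting at offset 0 and returns the number of bytes written. */
  int write(T row, byte[] target);
}

/** Reuses one scratch buffer across rows, growing it on demand and never shrinking it. */
final class ReusableConversionBuffer<T> {
  private final RowWriter<T> writer;
  private byte[] buffer = new byte[64];

  ReusableConversionBuffer(RowWriter<T> writer) {
    this.writer = writer;
  }

  /** Converts one row; the first returned-count bytes of buffer() are then valid. */
  int convert(T row, boolean zeroBeforeWrite) {
    final int sizeRequirement = writer.sizeRequirement(row);
    if (sizeRequirement > buffer.length) {
      // A freshly allocated Java array is already zero-filled, so no clearing is needed here.
      buffer = new byte[sizeRequirement];
    } else if (zeroBeforeWrite) {
      // Only required if the writer can skip bytes (for example alignment padding) inside
      // the region that is later hashed or compared; otherwise stale data from the previous
      // row would survive in those gaps. Passing false mirrors the code after this commit.
      Arrays.fill(buffer, 0, sizeRequirement, (byte) 0);
    }
    return writer.write(row, buffer);
  }

  byte[] buffer() {
    return buffer;
  }
}

A caller would invoke convert(row, false) for each incoming row and then read only the first returned-count bytes of buffer(). Whether false is acceptable depends on the writer guaranteeing to fill every byte up to its reported size requirement, which is presumably the property that makes the zeroing removed by this commit unnecessary.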
