
Commit e1855e5
Fix a handful of misc. IntelliJ inspections
JoshRosen committed May 11, 2015
1 parent 39434f9 commit e1855e5
Showing 3 changed files with 3 additions and 6 deletions.
@@ -139,7 +139,7 @@ private SpillInfo writeSpillFile() throws IOException {

// Currently, we need to open a new DiskBlockObjectWriter for each partition; we can avoid this
// after SPARK-5581 is fixed.
-BlockObjectWriter writer = null;
+BlockObjectWriter writer;

// Small writes to DiskBlockObjectWriter will be fairly inefficient. Since there doesn't seem to
// be an API to directly transfer bytes from managed memory to the disk writer, we buffer
@@ -202,7 +202,6 @@ private SpillInfo writeSpillFile() throws IOException {
writeBuffer,
PlatformDependent.BYTE_ARRAY_OFFSET,
toTransfer);
-assert (writer != null); // To suppress an IntelliJ warning
writer.write(writeBuffer, 0, toTransfer);
recordReadPosition += toTransfer;
dataRemaining -= toTransfer;
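
Both hunks above come from the same file and address the same inspection: the writer local no longer needs a null initializer, and the assert that suppressed the resulting IntelliJ warning goes with it, presumably because the surrounding code assigns writer before any use, so definite-assignment analysis is satisfied. A minimal, hypothetical Java sketch of that rule (illustrative names, not the Spark source):

final class DefiniteAssignmentSketch {
  // Illustrative only: a local may be declared without an initializer as long
  // as every path assigns it before it is read, so neither "= null" nor a
  // null-check assert is needed to silence a nullability warning.
  static String openWriter(boolean compressed) {
    String writer;                   // no "= null"
    if (compressed) {
      writer = "lz4";                // assigned on this branch
    } else {
      writer = "plain";              // ...and on this one
    }
    return writer.toUpperCase();     // definitely assigned by the time it is read
  }
}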
@@ -17,10 +17,8 @@

package org.apache.spark.shuffle.unsafe;

-import java.io.IOException;
import java.util.Comparator;

-import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.collection.Sorter;

final class UnsafeShuffleSorter {
@@ -71,7 +69,7 @@ public long getMemoryUsage() {
* @param partitionId the partition id, which must be less than or equal to
* {@link PackedRecordPointer#MAXIMUM_PARTITION_ID}.
*/
-public void insertRecord(long recordPointer, int partitionId) throws IOException {
+public void insertRecord(long recordPointer, int partitionId) {
if (!hasSpaceForAnotherRecord()) {
expandSortBuffer();
}
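
The UnsafeShuffleSorter changes above are likewise pure inspection fixes: two unused imports are removed, and insertRecord stops declaring a checked exception it never throws. A small hypothetical sketch (illustrative names, not the Spark source) of what dropping an unneeded throws clause means for callers:

final class ThrowsClauseSketch {
  // Illustrative only: the body does no I/O, so declaring "throws IOException"
  // would only force every caller to handle an exception that cannot occur.
  static void insertRecord(long recordPointer, int partitionId) {
    // purely in-memory bookkeeping; nothing here can throw IOException
  }

  public static void main(String[] args) {
    insertRecord(42L, 0);  // no try/catch or propagated throws clause needed
  }
}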
@@ -135,7 +135,7 @@ public void write(scala.collection.Iterator<Product2<K, V>> records) throws IOException {
closeAndWriteOutput();
} catch (Exception e) {
// Unfortunately, we have to catch Exception here in order to ensure proper cleanup after
-// errors becuase Spark's Scala code, or users' custom Serializers, might throw arbitrary
+// errors because Spark's Scala code, or users' custom Serializers, might throw arbitrary
// unchecked exceptions.
try {
sorter.cleanupAfterError();
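
The final change is only a comment typo fix ("becuase" to "because"), but the surrounding catch block shows the cleanup-on-error pattern the comment describes: catch Exception broadly, attempt cleanup, and keep the original failure as the primary one. A hypothetical sketch of that pattern (illustrative names, not the Spark source):

final class CleanupOnErrorSketch {
  // Illustrative only: run cleanup even when arbitrary unchecked exceptions
  // escape user code, attach any secondary cleanup failure as a suppressed
  // exception, and rethrow the original.
  static void write(Runnable writeOutput, Runnable cleanupAfterError) {
    try {
      writeOutput.run();
    } catch (Exception e) {
      try {
        cleanupAfterError.run();
      } catch (Exception cleanupFailure) {
        e.addSuppressed(cleanupFailure);
      }
      throw e;  // Java 7+ precise rethrow: only unchecked exceptions can reach here
    }
  }
}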
