From e1855e556321b1d119fd361ff436fe1e91ddedb7 Mon Sep 17 00:00:00 2001
From: Josh Rosen
Date: Mon, 11 May 2015 15:39:29 -0700
Subject: [PATCH] Fix a handful of misc. IntelliJ inspections

---
 .../spark/shuffle/unsafe/UnsafeShuffleExternalSorter.java    | 3 +--
 .../org/apache/spark/shuffle/unsafe/UnsafeShuffleSorter.java | 4 +---
 .../org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java | 2 +-
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleExternalSorter.java
index 44a37fcd43951..1d1382c104fea 100644
--- a/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleExternalSorter.java
@@ -139,7 +139,7 @@ private SpillInfo writeSpillFile() throws IOException {
 
     // Currently, we need to open a new DiskBlockObjectWriter for each partition; we can avoid this
     // after SPARK-5581 is fixed.
-    BlockObjectWriter writer = null;
+    BlockObjectWriter writer;
 
     // Small writes to DiskBlockObjectWriter will be fairly inefficient. Since there doesn't seem to
     // be an API to directly transfer bytes from managed memory to the disk writer, we buffer
@@ -202,7 +202,6 @@ private SpillInfo writeSpillFile() throws IOException {
           writeBuffer,
           PlatformDependent.BYTE_ARRAY_OFFSET,
           toTransfer);
-        assert (writer != null); // To suppress an IntelliJ warning
         writer.write(writeBuffer, 0, toTransfer);
         recordReadPosition += toTransfer;
         dataRemaining -= toTransfer;
diff --git a/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleSorter.java b/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleSorter.java
index 8e66fbaf4c645..f2b90617793e5 100644
--- a/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleSorter.java
@@ -17,10 +17,8 @@
 
 package org.apache.spark.shuffle.unsafe;
 
-import java.io.IOException;
 import java.util.Comparator;
 
-import org.apache.spark.unsafe.memory.MemoryBlock;
 import org.apache.spark.util.collection.Sorter;
 
 final class UnsafeShuffleSorter {
@@ -71,7 +69,7 @@ public long getMemoryUsage() {
    * @param partitionId the partition id, which must be less than or equal to
    *                    {@link PackedRecordPointer#MAXIMUM_PARTITION_ID}.
    */
-  public void insertRecord(long recordPointer, int partitionId) throws IOException {
+  public void insertRecord(long recordPointer, int partitionId) {
     if (!hasSpaceForAnotherRecord()) {
       expandSortBuffer();
     }
diff --git a/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java
index 438852cd1408c..8977517c0bcbe 100644
--- a/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriter.java
@@ -135,7 +135,7 @@ public void write(scala.collection.Iterator<Product2<K, V>> records) throws IOEx
       closeAndWriteOutput();
     } catch (Exception e) {
       // Unfortunately, we have to catch Exception here in order to ensure proper cleanup after
-      // errors becuase Spark's Scala code, or users' custom Serializers, might throw arbitrary
+      // errors because Spark's Scala code, or users' custom Serializers, might throw arbitrary
       // unchecked exceptions.
       try {
         sorter.cleanupAfterError();
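
The first two hunks work because of Java's definite-assignment analysis: once every
path through writeSpillFile() assigns `writer` before its first use, neither the
dummy `= null` initializer nor the `assert (writer != null)` IntelliJ suppression
is needed. A minimal standalone sketch of that rule (hypothetical names, not Spark
code; java.io.OutputStream stands in for BlockObjectWriter):

// DefiniteAssignmentSketch.java -- illustrative only, assumes nothing from Spark.
// 'writer' has no dummy initializer, yet the use below compiles cleanly because
// javac proves every branch assigns it first; this mirrors why the patch can
// drop "= null" and the assert without reintroducing any warning.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class DefiniteAssignmentSketch {
  public static void main(String[] args) throws IOException {
    OutputStream writer;  // declared without "= null"
    if (args.length > 0) {
      writer = new ByteArrayOutputStream();  // assigned on this path...
    } else {
      writer = new ByteArrayOutputStream();  // ...and on this one
    }
    writer.write(new byte[] { 1, 2, 3 });    // provably initialized here
    writer.close();
  }
}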