From 832b8014df9b0f23c5a52ff559a9dd249cb33cfa Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Thu, 7 Apr 2016 15:45:57 -0700
Subject: [PATCH 1/3] [SPARK-14465][BUILD] Checkstyle should check all Java files

---
 .../org/apache/spark/io/LZ4BlockInputStream.java  | 14 ++++++--------
 .../org/apache/spark/streaming/Java8APISuite.java |  6 ++++--
 pom.xml                                           |  2 +-
 .../spark/sql/execution/BufferedRowIterator.java  |  2 +-
 .../apache/spark/sql/expressions/java/typed.java  | 11 +++++------
 5 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java b/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java
index 27b6f0d4a3885..8783b5f56ebae 100644
--- a/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java
+++ b/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java
@@ -20,20 +20,17 @@
 import java.io.InputStream;
 import java.util.zip.Checksum;
 
-import net.jpountz.lz4.LZ4BlockOutputStream;
 import net.jpountz.lz4.LZ4Exception;
 import net.jpountz.lz4.LZ4Factory;
 import net.jpountz.lz4.LZ4FastDecompressor;
 import net.jpountz.util.SafeUtils;
-import net.jpountz.xxhash.StreamingXXHash32;
-import net.jpountz.xxhash.XXHash32;
 import net.jpountz.xxhash.XXHashFactory;
 
 /**
  * {@link InputStream} implementation to decode data written with
- * {@link LZ4BlockOutputStream}. This class is not thread-safe and does not
+ * {@link net.jpountz.lz4.LZ4BlockOutputStream}. This class is not thread-safe and does not
  * support {@link #mark(int)}/{@link #reset()}.
- * @see LZ4BlockOutputStream
+ * @see net.jpountz.lz4.LZ4BlockOutputStream
  *
  * This is based on net.jpountz.lz4.LZ4BlockInputStream
  *
@@ -90,12 +87,13 @@ public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor, Che
   }
 
   /**
-   * Create a new instance using {@link XXHash32} for checksuming.
+   * Create a new instance using {@link net.jpountz.xxhash.XXHash32} for checksuming.
    * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum)
-   * @see StreamingXXHash32#asChecksum()
+   * @see net.jpountz.xxhash.StreamingXXHash32#asChecksum()
    */
   public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor) {
-    this(in, decompressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum());
+    this(in, decompressor,
+      XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum());
   }
 
   /**
diff --git a/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java b/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java
index 67bc64a44466c..d0fed303e659c 100644
--- a/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java
+++ b/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java
@@ -377,7 +377,9 @@ public void testForeachRDD() {
     });
 
     // This is a test to make sure foreachRDD(VoidFunction2) can be called from Java
-    stream.foreachRDD((rdd, time) -> { return; });
+    stream.foreachRDD((rdd, time) -> {
+      return;
+    });
 
     JavaTestUtils.runStreams(ssc, 2, 2);
 
@@ -873,7 +875,7 @@ public void testMapWithStateAPI() {
 
     JavaMapWithStateDStream stateDstream =
         wordsDstream.mapWithState(
-            StateSpec. function((time, key, value, state) -> {
+            StateSpec.function((time, key, value, state) -> {
               // Use all State's methods here
               state.exists();
               state.get();
diff --git a/pom.xml b/pom.xml
index 66a34e4bdf8b0..00d95c4860b29 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2272,7 +2272,7 @@
           false
           true
           false
-          ${basedir}/src/main/java
+          ${basedir}/src/main/java,${basedir}/src/main/scala
          ${basedir}/src/test/java
          dev/checkstyle.xml
          ${basedir}/target/checkstyle-output.xml
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java b/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java
index c2633a9f8cd48..086547c793e3b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java
@@ -60,7 +60,7 @@ public long durationMs() {
   /**
    * Initializes from array of iterators of InternalRow.
    */
-  public abstract void init(int index, Iterator iters[]);
+  public abstract void init(int index, Iterator[] iters);
 
   /**
    * Append a row to currentRows.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java b/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java
index 8ff7b6549b5f4..c7c6e3868f9bb 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java
+++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java
@@ -19,7 +19,6 @@
 
 import org.apache.spark.annotation.Experimental;
 import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.TypedColumn;
 import org.apache.spark.sql.execution.aggregate.TypedAverage;
 import org.apache.spark.sql.execution.aggregate.TypedCount;
@@ -28,7 +27,7 @@
 
 /**
  * :: Experimental ::
- * Type-safe functions available for {@link Dataset} operations in Java.
+ * Type-safe functions available for {@link org.apache.spark.sql.Dataset} operations in Java.
  *
  * Scala users should use {@link org.apache.spark.sql.expressions.scala.typed}.
  *
@@ -43,7 +42,7 @@ public class typed {
    *
    * @since 2.0.0
    */
-  public static TypedColumn avg(MapFunction f) {
+  public static TypedColumn avg(MapFunction f) {
     return new TypedAverage(f).toColumnJava();
   }
 
@@ -52,7 +51,7 @@ public static TypedColumn avg(MapFunction f) {
    *
    * @since 2.0.0
    */
-  public static TypedColumn count(MapFunction f) {
+  public static TypedColumn count(MapFunction f) {
     return new TypedCount(f).toColumnJava();
   }
 
@@ -61,7 +60,7 @@ public static TypedColumn count(MapFunction f) {
    *
    * @since 2.0.0
    */
-  public static TypedColumn sum(MapFunction f) {
+  public static TypedColumn sum(MapFunction f) {
     return new TypedSumDouble(f).toColumnJava();
   }
 
@@ -70,7 +69,7 @@ public static TypedColumn sum(MapFunction f) {
    *
    * @since 2.0.0
    */
-  public static TypedColumn sumLong(MapFunction f) {
+  public static TypedColumn sumLong(MapFunction f) {
     return new TypedSumLong(f).toColumnJava();
   }
 }

From dbd9d5ade694921a1636ad5aa326b3a01809029a Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Fri, 8 Apr 2016 11:13:05 -0700
Subject: [PATCH 2/3] Move java files

---
 .../{scala => java}/org/apache/spark/io/LZ4BlockInputStream.java | 0
 .../org/apache/spark/sql/expressions/java/typed.java             | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename core/src/main/{scala => java}/org/apache/spark/io/LZ4BlockInputStream.java (100%)
 rename sql/core/src/main/{scala => java}/org/apache/spark/sql/expressions/java/typed.java (100%)

diff --git a/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java b/core/src/main/java/org/apache/spark/io/LZ4BlockInputStream.java
similarity index 100%
rename from core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java
rename to core/src/main/java/org/apache/spark/io/LZ4BlockInputStream.java
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java b/sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java
similarity index 100%
rename from sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java
rename to sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java

From 38b9bada8f8811054953012458365094395ef610 Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Fri, 8 Apr 2016 11:28:38 -0700
Subject: [PATCH 3/3] Move BufferedRowIterator.java, too

---
 .../org/apache/spark/sql/execution/BufferedRowIterator.java | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename sql/core/src/main/{scala => java}/org/apache/spark/sql/execution/BufferedRowIterator.java (100%)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java b/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
similarity index 100%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java
rename to sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
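
Note: the Checkstyle fixes in PATCH 1/3 follow a few recurring patterns: javadoc {@link}/@see targets are fully qualified so their now-unused imports can be dropped, C-style array declarations become Java-style, one-line block lambdas are expanded, and over-long statements are wrapped under the 100-character limit. The sketch below is illustrative only; the CheckstyleExamples class, its methods, and its values are made up for this note and do not appear anywhere in the patch.

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.BiConsumer;

    /**
     * Illustrative only: the javadoc can reference
     * {@link java.util.concurrent.TimeUnit} by its fully-qualified name,
     * so no import is needed solely for documentation, mirroring the
     * LZ4BlockInputStream and typed.java changes.
     */
    public class CheckstyleExamples {

      // Java-style array declaration ("String[] args"), matching the
      // BufferedRowIterator change away from "Iterator iters[]".
      public static int count(String[] paths) {
        return paths.length;
      }

      public static void main(String[] args) {
        // A lambda with a block body is split over several lines instead of
        // "(k, v) -> { return; }" on one line, as in the Java8APISuite change.
        BiConsumer<String, Integer> printer = (key, value) -> {
          System.out.println(key + "=" + value);
        };
        printer.accept("answer", 42);

        // Long expressions are wrapped before reaching the 100-character limit,
        // mirroring the LZ4BlockInputStream constructor change.
        List<String> roots =
            Arrays.asList("core/src/main/java", "core/src/main/scala", "sql/core/src/main/java");
        System.out.println(count(roots.toArray(new String[0])) + " source roots");
      }
    }

The pom.xml hunk is what pulls these files into the check in the first place: ${basedir}/src/main/scala is added to the configured source directories, so Java files that still live under a scala/ source root (until PATCH 2/3 and PATCH 3/3 move them) are also validated against dev/checkstyle.xml.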