Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion core/src/test/scala/org/apache/spark/SparkFunSuite.scala
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ import org.apache.spark.util.AccumulatorContext
/**
* Base abstract class for all unit tests in Spark for handling common functionality.
*/
private[spark] abstract class SparkFunSuite
abstract class SparkFunSuite
extends FunSuite
with BeforeAndAfterAll
with Logging {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* limitations under the License.
*/

package org.apache.spark;
package test.org.apache.spark.java8;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, core tests use test.org.apache.spark specifically to test from outside the same package. Is .java8 needed? I don't particularly care much myself. Some Scala tests use org.apache.sparktest, unfortunately.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The java8 part is there to make it easier to run all Java 8 tests. It's not that big of a deal, though; the more important change is making the beginning of the package name consistent.


import java.io.File;
import java.io.Serializable;
Expand All @@ -33,6 +33,8 @@
import org.junit.Before;
import org.junit.Test;

import org.apache.spark.Accumulator;
import org.apache.spark.AccumulatorParam;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
Expand All @@ -45,8 +47,8 @@
* Most of these tests replicate org.apache.spark.JavaAPISuite using java 8
* lambda syntax.
*/
public class Java8APISuite implements Serializable {
static int foreachCalls = 0;
public class Java8RDDAPISuite implements Serializable {
private static int foreachCalls = 0;
private transient JavaSparkContext sc;

@Before
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* limitations under the License.
*/

package org.apache.spark.streaming;
package test.org.apache.spark.java8.dstream;

import java.io.Serializable;
import java.util.*;
Expand All @@ -33,6 +33,7 @@
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.*;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaMapWithStateDStream;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* limitations under the License.
*/

package test.org.apache.spark.sql.sources;
package test.org.apache.spark.java8.sql;

import java.util.Arrays;

Expand All @@ -26,6 +26,7 @@
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.KeyValueGroupedDataset;
import org.apache.spark.sql.expressions.javalang.typed;
import test.org.apache.spark.sql.JavaDatasetAggregatorSuiteBase;

/**
* Suite that replicates tests in JavaDatasetAggregatorSuite using lambda syntax.
Expand All @@ -42,7 +43,7 @@ public void testTypedAggregationAverage() {
public void testTypedAggregationCount() {
KeyValueGroupedDataset<String, Tuple2<String, Integer>> grouped = generateGroupedDataset();
Dataset<Tuple2<String, Long>> agged = grouped.agg(typed.count(v -> v));
Assert.assertEquals(Arrays.asList(tuple2("a", 2), tuple2("b", 1)), agged.collectAsList());
Assert.assertEquals(Arrays.asList(tuple2("a", 2L), tuple2("b", 1L)), agged.collectAsList());
}

@Test
Expand All @@ -56,6 +57,6 @@ public void testTypedAggregationSumDouble() {
public void testTypedAggregationSumLong() {
KeyValueGroupedDataset<String, Tuple2<String, Integer>> grouped = generateGroupedDataset();
Dataset<Tuple2<String, Long>> agged = grouped.agg(typed.sumLong(v -> (long)v._2()));
Assert.assertEquals(Arrays.asList(tuple2("a", 3), tuple2("b", 3)), agged.collectAsList());
Assert.assertEquals(Arrays.asList(tuple2("a", 3L), tuple2("b", 3L)), agged.collectAsList());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,10 @@
* limitations under the License.
*/

package org.apache.spark
package test.org.apache.spark.java8

import org.apache.spark.SharedSparkContext
import org.apache.spark.SparkFunSuite

/**
* Test cases where JDK8-compiled Scala user code is used with Spark.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* limitations under the License.
*/

package test.org.apache.spark.sql.sources;
package test.org.apache.spark.sql;

import java.util.Arrays;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* limitations under the License.
*/

package test.org.apache.spark.sql.sources;
package test.org.apache.spark.sql;

import java.io.Serializable;
import java.util.Arrays;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
* limitations under the License.
*/

package test.org.apache.spark.sql.sources;
package test.org.apache.spark.sql;

import java.io.File;
import java.io.IOException;
Expand Down