Optimized imports
Optimized imports and arranged them according to the Scala style guide at
https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide#SparkCodeStyleGuide-Imports

Author: NirmalReddy <nirmal.reddy@imaginea.com>
Author: NirmalReddy <nirmal_reddy2000@yahoo.com>

Closes #613 from NirmalReddy/opt-imports and squashes the following commits:

578b4f5 [NirmalReddy] imported java.lang.Double as JDouble
a2cbcc5 [NirmalReddy] addressed the comments
776d664 [NirmalReddy] Optimized imports in core
NirmalReddy authored and aarondav committed Feb 18, 2014
1 parent f74ae0e commit ccb327a
Showing 246 changed files with 446 additions and 552 deletions.
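
The style guide referenced in the commit message groups imports in a fixed order: java and javax first, then the Scala standard library, then third-party libraries, then Spark's own packages, alphabetized within each group and kept on single lines rather than wrapped, as the SparkContext hunk below shows. The sketch that follows illustrates that ordering with a hypothetical file; the package, object, and logger names are invented for illustration and are not part of this commit.

package org.apache.spark.examples

// Group 1: java and javax imports, with java.lang classes renamed so they do
// not shadow Scala's built-in Double/Integer/Boolean types.
import java.lang.{Double => JDouble, Integer => JInteger}
import java.util.concurrent.TimeUnit

// Group 2: the Scala standard library.
import scala.collection.mutable.ArrayBuffer

// Group 3: third-party libraries.
import org.slf4j.LoggerFactory

// Group 4: Spark's own packages, listed last and alphabetized.
import org.apache.spark.SparkConf

object ImportOrderExample {
  private val log = LoggerFactory.getLogger(getClass)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("import-order-example")
    val appName = conf.get("spark.app.name")
    val boxed = new ArrayBuffer[JDouble]()
    boxed += JDouble.valueOf(1.5)
    log.info(s"App $appName holds ${boxed.size} boxed value(s)")
    log.info(s"JInteger.MAX_VALUE = ${JInteger.MAX_VALUE}; one minute = ${TimeUnit.MINUTES.toSeconds(1)}s")
  }
}

Each group is separated by a single blank line, which is the convention the hunks below apply across the codebase.
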
@@ -17,6 +17,8 @@

package org.apache.spark.network.netty;

import java.util.concurrent.TimeUnit;

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelOption;
@@ -27,8 +29,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.TimeUnit;

class FileClient {

private static final Logger LOG = LoggerFactory.getLogger(FileClient.class.getName());
@@ -23,11 +23,11 @@
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.DefaultFileRegion;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.spark.storage.BlockId;
import org.apache.spark.storage.FileSegment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class FileServerHandler extends SimpleChannelInboundHandler<String> {

@@ -17,7 +17,8 @@

package org.apache.hadoop.mapreduce

import java.lang.{Integer => JInteger, Boolean => JBoolean}
import java.lang.{Boolean => JBoolean, Integer => JInteger}

import org.apache.hadoop.conf.Configuration

private[apache]
3 changes: 2 additions & 1 deletion core/src/main/scala/org/apache/spark/Accumulators.scala
@@ -19,8 +19,9 @@ package org.apache.spark

import java.io.{ObjectInputStream, Serializable}

import scala.collection.mutable.Map
import scala.collection.generic.Growable
import scala.collection.mutable.Map

import org.apache.spark.serializer.JavaSerializer

/**
@@ -20,12 +20,11 @@ package org.apache.spark
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap

import org.apache.spark.executor.{ShuffleReadMetrics, TaskMetrics}
import org.apache.spark.executor.ShuffleReadMetrics
import org.apache.spark.serializer.Serializer
import org.apache.spark.storage.{BlockId, BlockManagerId, ShuffleBlockId}
import org.apache.spark.util.CompletionIterator


private[spark] class BlockStoreShuffleFetcher extends ShuffleFetcher with Logging {

override def fetch[T](
4 changes: 2 additions & 2 deletions core/src/main/scala/org/apache/spark/CacheManager.scala
@@ -18,9 +18,9 @@
package org.apache.spark

import scala.collection.mutable.{ArrayBuffer, HashSet}
import org.apache.spark.storage.{BlockId, BlockManager, StorageLevel, RDDBlockId}
import org.apache.spark.rdd.RDD

import org.apache.spark.rdd.RDD
import org.apache.spark.storage.{BlockManager, RDDBlockId, StorageLevel}

/** Spark class responsible for passing RDDs split contents to the BlockManager and making
sure a node doesn't load two copies of an RDD at once.
4 changes: 1 addition & 3 deletions core/src/main/scala/org/apache/spark/FutureAction.scala
@@ -21,10 +21,8 @@ import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.util.Try

import org.apache.spark.scheduler.{JobSucceeded, JobWaiter}
import org.apache.spark.scheduler.JobFailed
import org.apache.spark.rdd.RDD

import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}

/**
* A future for the result of an action to support cancellation. This is an extension of the
4 changes: 3 additions & 1 deletion core/src/main/scala/org/apache/spark/HttpFileServer.scala
@@ -17,8 +17,10 @@

package org.apache.spark

import java.io.{File}
import java.io.File

import com.google.common.io.Files

import org.apache.spark.util.Utils

private[spark] class HttpFileServer extends Logging {
2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/HttpServer.scala
@@ -18,14 +18,14 @@
package org.apache.spark

import java.io.File
import java.net.InetAddress

import org.eclipse.jetty.server.Server
import org.eclipse.jetty.server.bio.SocketConnector
import org.eclipse.jetty.server.handler.DefaultHandler
import org.eclipse.jetty.server.handler.HandlerList
import org.eclipse.jetty.server.handler.ResourceHandler
import org.eclipse.jetty.util.thread.QueuedThreadPool

import org.apache.spark.util.Utils

/**
@@ -22,7 +22,6 @@ import java.util.zip.{GZIPInputStream, GZIPOutputStream}

import scala.collection.mutable.HashSet
import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor._
import akka.pattern.ask
@@ -19,9 +19,9 @@ package org.apache.spark

import java.io._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.ObjectWritable
import org.apache.hadoop.io.Writable
import org.apache.hadoop.conf.Configuration

class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
def value = t
2 changes: 0 additions & 2 deletions core/src/main/scala/org/apache/spark/ShuffleFetcher.scala
@@ -17,10 +17,8 @@

package org.apache.spark

import org.apache.spark.executor.TaskMetrics
import org.apache.spark.serializer.Serializer


private[spark] abstract class ShuffleFetcher {

/**
2 changes: 0 additions & 2 deletions core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -20,8 +20,6 @@ package org.apache.spark
import scala.collection.JavaConverters._
import scala.collection.mutable.HashMap

import java.io.{ObjectInputStream, ObjectOutputStream, IOException}

/**
* Configuration for a Spark application. Used to set various Spark parameters as key-value pairs.
*
15 changes: 5 additions & 10 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -19,21 +19,18 @@ package org.apache.spark

import java.io._
import java.net.URI
import java.util.{UUID, Properties}
import java.util.{Properties, UUID}
import java.util.concurrent.atomic.AtomicInteger

import scala.collection.{Map, Set}
import scala.collection.generic.Growable

import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.reflect.{ClassTag, classTag}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable,
FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat,
TextInputFormat}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.mesos.MesosNativeLibrary
@@ -42,14 +39,12 @@ import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend,
SparkDeploySchedulerBackend, SimrSchedulerBackend}
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SparkDeploySchedulerBackend, SimrSchedulerBackend}
import org.apache.spark.scheduler.cluster.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend}
import org.apache.spark.scheduler.local.LocalBackend
import org.apache.spark.storage.{BlockManagerSource, RDDInfo, StorageStatus, StorageUtils}
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Utils, TimeStampedHashMap, MetadataCleaner, MetadataCleanerType,
ClosureCleaner}
import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}

/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
9 changes: 4 additions & 5 deletions core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -21,16 +21,15 @@ import scala.collection.mutable
import scala.concurrent.Await

import akka.actor._
import com.google.common.collect.MapMaker

import org.apache.spark.api.python.PythonWorkerFactory
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.storage.{BlockManagerMasterActor, BlockManager, BlockManagerMaster}
import org.apache.spark.storage.{BlockManager, BlockManagerMaster, BlockManagerMasterActor}
import org.apache.spark.network.ConnectionManager
import org.apache.spark.serializer.{Serializer, SerializerManager}
import org.apache.spark.util.{Utils, AkkaUtils}
import org.apache.spark.api.python.PythonWorkerFactory

import com.google.common.collect.MapMaker
import org.apache.spark.util.{AkkaUtils, Utils}

/**
* Holds all the runtime environment objects for a running Spark instance (either master or worker),
@@ -18,8 +18,8 @@
package org.apache.hadoop.mapred

import java.io.IOException
import java.text.SimpleDateFormat
import java.text.NumberFormat
import java.text.SimpleDateFormat
import java.util.Date

import org.apache.hadoop.fs.FileSystem
44 changes: 21 additions & 23 deletions core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
@@ -17,27 +17,25 @@

package org.apache.spark.api.java

import java.lang.{Double => JDouble}

import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.Partitioner
import org.apache.spark.SparkContext.doubleRDDToDoubleRDDFunctions
import org.apache.spark.api.java.function.{Function => JFunction}
import org.apache.spark.util.StatCounter
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.StatCounter

import java.lang.Double
import org.apache.spark.Partitioner

import scala.collection.JavaConverters._

class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, JavaDoubleRDD] {
class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[JDouble, JavaDoubleRDD] {

override val classTag: ClassTag[Double] = implicitly[ClassTag[Double]]
override val classTag: ClassTag[JDouble] = implicitly[ClassTag[JDouble]]

override val rdd: RDD[Double] = srdd.map(x => Double.valueOf(x))
override val rdd: RDD[JDouble] = srdd.map(x => JDouble.valueOf(x))

override def wrapRDD(rdd: RDD[Double]): JavaDoubleRDD =
override def wrapRDD(rdd: RDD[JDouble]): JavaDoubleRDD =
new JavaDoubleRDD(rdd.map(_.doubleValue))

// Common RDD functions
@@ -67,7 +65,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, Jav
def unpersist(blocking: Boolean): JavaDoubleRDD = fromRDD(srdd.unpersist(blocking))

// first() has to be overriden here in order for its return type to be Double instead of Object.
override def first(): Double = srdd.first()
override def first(): JDouble = srdd.first()

// Transformations (return a new RDD)

@@ -84,7 +82,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, Jav
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: JFunction[Double, java.lang.Boolean]): JavaDoubleRDD =
def filter(f: JFunction[JDouble, java.lang.Boolean]): JavaDoubleRDD =
fromRDD(srdd.filter(x => f(x).booleanValue()))

/**
@@ -133,7 +131,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, Jav
/**
* Return a sampled subset of this RDD.
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Int): JavaDoubleRDD =
def sample(withReplacement: Boolean, fraction: JDouble, seed: Int): JavaDoubleRDD =
fromRDD(srdd.sample(withReplacement, fraction, seed))

/**
@@ -145,7 +143,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, Jav
// Double RDD functions

/** Add up the elements in this RDD. */
def sum(): Double = srdd.sum()
def sum(): JDouble = srdd.sum()

/**
* Return a [[org.apache.spark.util.StatCounter]] object that captures the mean, variance and
@@ -154,35 +152,35 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, Jav
def stats(): StatCounter = srdd.stats()

/** Compute the mean of this RDD's elements. */
def mean(): Double = srdd.mean()
def mean(): JDouble = srdd.mean()

/** Compute the variance of this RDD's elements. */
def variance(): Double = srdd.variance()
def variance(): JDouble = srdd.variance()

/** Compute the standard deviation of this RDD's elements. */
def stdev(): Double = srdd.stdev()
def stdev(): JDouble = srdd.stdev()

/**
* Compute the sample standard deviation of this RDD's elements (which corrects for bias in
* estimating the standard deviation by dividing by N-1 instead of N).
*/
def sampleStdev(): Double = srdd.sampleStdev()
def sampleStdev(): JDouble = srdd.sampleStdev()

/**
* Compute the sample variance of this RDD's elements (which corrects for bias in
* estimating the standard variance by dividing by N-1 instead of N).
*/
def sampleVariance(): Double = srdd.sampleVariance()
def sampleVariance(): JDouble = srdd.sampleVariance()

/** Return the approximate mean of the elements in this RDD. */
def meanApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble] =
def meanApprox(timeout: Long, confidence: JDouble): PartialResult[BoundedDouble] =
srdd.meanApprox(timeout, confidence)

/** (Experimental) Approximate operation to return the mean within a timeout. */
def meanApprox(timeout: Long): PartialResult[BoundedDouble] = srdd.meanApprox(timeout)

/** (Experimental) Approximate operation to return the sum within a timeout. */
def sumApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble] =
def sumApprox(timeout: Long, confidence: JDouble): PartialResult[BoundedDouble] =
srdd.sumApprox(timeout, confidence)

/** (Experimental) Approximate operation to return the sum within a timeout. */
@@ -222,7 +220,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[Double, Jav
srdd.histogram(buckets, false)
}

def histogram(buckets: Array[Double], evenBuckets: Boolean): Array[Long] = {
def histogram(buckets: Array[JDouble], evenBuckets: Boolean): Array[Long] = {
srdd.histogram(buckets.map(_.toDouble), evenBuckets)
}

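One detail from the JavaDoubleRDD hunks above, matching commit 578b4f5: in a Scala file an unqualified Double means scala.Double, so importing java.lang.Double directly would shadow it. Renaming it to JDouble on import keeps the boxed Java type and the Scala primitive both available. A minimal sketch of the pattern (a hypothetical object, not code from this commit):

import java.lang.{Double => JDouble}

object JDoubleAliasExample {
  // scala.Double stays the unboxed primitive; JDouble is the boxed
  // java.lang.Double that Java-facing APIs such as JavaDoubleRDD expose.
  def box(xs: Seq[Double]): Seq[JDouble] = xs.map(x => JDouble.valueOf(x))

  def main(args: Array[String]): Unit = {
    val boxed: Seq[JDouble] = box(Seq(1.0, 2.5))
    println(boxed.map(_.doubleValue).sum) // prints 3.5
  }
}
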
@@ -20,8 +20,8 @@ package org.apache.spark.api.java
import scala.reflect.ClassTag

import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.api.java.function.{Function => JFunction}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
@@ -17,7 +17,6 @@

package org.apache.spark.api.java.function;


import java.io.Serializable;

/**
@@ -17,7 +17,6 @@

package org.apache.spark.api.java.function;


import java.io.Serializable;

/**
@@ -17,11 +17,10 @@

package org.apache.spark.api.java.function;

import scala.reflect.ClassTag;
import scala.reflect.ClassTag$;

import java.io.Serializable;

import scala.reflect.ClassTag;
import scala.reflect.ClassTag$;

/**
* Base class for functions whose return types do not create special RDDs. PairFunction and
