DeveloperAPI -> DeveloperApi
andrewor14 committed Apr 8, 2014
1 parent 0d48908 commit c1bcb41
Showing 37 changed files with 170 additions and 170 deletions.
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -17,18 +17,18 @@

package org.apache.spark

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi
import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* A set of functions used to aggregate data.
*
* @param createCombiner function to create the initial value of the aggregation.
* @param mergeValue function to merge a new value into the aggregation result.
 * @param mergeCombiners function to merge outputs from multiple mergeValue functions.
*/
@DeveloperAPI
@DeveloperApi
case class Aggregator[K, V, C] (
createCombiner: V => C,
mergeValue: (C, V) => C,
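For context, a hedged sketch of how the three functions above fit together (not part of this commit; the data, names, and per-key-average use case are invented for illustration). The same triple that Aggregator wraps can also be passed to RDD.combineByKey:

    import org.apache.spark.{Aggregator, SparkConf, SparkContext}
    import org.apache.spark.SparkContext._   // pair-RDD implicits in this era of the API

    object AggregatorSketch {
      def main(args: Array[String]) {
        val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("aggregator-sketch"))
        val scores = sc.parallelize(Seq(("a", 1.0), ("b", 3.0), ("a", 5.0)))

        // The three functions an Aggregator[K, V, C] is built from, with C = (sum, count).
        val createCombiner = (v: Double) => (v, 1)
        val mergeValue = (c: (Double, Int), v: Double) => (c._1 + v, c._2 + 1)
        val mergeCombiners = (c1: (Double, Int), c2: (Double, Int)) => (c1._1 + c2._1, c1._2 + c2._2)
        val aggregator = Aggregator[String, Double, (Double, Int)](createCombiner, mergeValue, mergeCombiners)

        // combineByKey takes the same triple; the per-key average falls out of the (sum, count) combiner.
        val averages = scores
          .combineByKey(createCombiner, mergeValue, mergeCombiners)
          .mapValues { case (sum, count) => sum / count }
        averages.collect().foreach(println)
        sc.stop()
      }
    }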
22 changes: 11 additions & 11 deletions core/src/main/scala/org/apache/spark/Dependency.scala
@@ -17,24 +17,24 @@

package org.apache.spark

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Base class for dependencies.
*/
@DeveloperAPI
@DeveloperApi
abstract class Dependency[T](val rdd: RDD[T]) extends Serializable


/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Base class for dependencies where each partition of the parent RDD is used by at most one
* partition of the child RDD. Narrow dependencies allow for pipelined execution.
*/
@DeveloperAPI
@DeveloperApi
abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
/**
* Get the parent partitions for a child partition.
@@ -46,15 +46,15 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {


/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Represents a dependency on the output of a shuffle stage.
* @param rdd the parent RDD
* @param partitioner partitioner used to partition the shuffle output
* @param serializer [[org.apache.spark.serializer.Serializer Serializer]] to use. If set to null,
* the default serializer, as specified by `spark.serializer` config option, will
* be used.
*/
@DeveloperAPI
@DeveloperApi
class ShuffleDependency[K, V](
@transient rdd: RDD[_ <: Product2[K, V]],
val partitioner: Partitioner,
@@ -66,24 +66,24 @@ class ShuffleDependency[K, V](


/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Represents a one-to-one dependency between partitions of the parent and child RDDs.
*/
@DeveloperAPI
@DeveloperApi
class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
override def getParents(partitionId: Int) = List(partitionId)
}


/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
* @param rdd the parent RDD
* @param inStart the start of the range in the parent RDD
* @param outStart the start of the range in the child RDD
* @param length the length of the range
*/
@DeveloperAPI
@DeveloperApi
class RangeDependency[T](rdd: RDD[T], inStart: Int, outStart: Int, length: Int)
extends NarrowDependency[T](rdd) {

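To make the NarrowDependency contract concrete, here is an illustrative sketch (not part of this change). It constructs the two narrow dependencies above directly and queries getParents; the union-style partition numbers are assumptions chosen for the example:

    import org.apache.spark.{OneToOneDependency, RangeDependency, SparkConf, SparkContext}

    object DependencySketch {
      def main(args: Array[String]) {
        val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("dependency-sketch"))
        val a = sc.parallelize(1 to 100, 4)   // 4 partitions
        val b = sc.parallelize(1 to 100, 3)   // 3 partitions

        // One-to-one: child partition i depends on parent partition i.
        val oneToOne = new OneToOneDependency(a)
        println(oneToOne.getParents(2))       // List(2)

        // Range: as in a union where b's partitions come after a's, child
        // partitions [4, 7) map back to b's partitions [0, 3).
        val range = new RangeDependency(b, 0, 4, 3)  // inStart = 0, outStart = 4, length = 3
        println(range.getParents(5))          // List(1): partitionId - outStart + inStart
        println(range.getParents(2))          // Nil: outside the range owned by b
        sc.stop()
      }
    }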
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/Logging.scala
@@ -21,18 +21,18 @@ import org.apache.log4j.{LogManager, PropertyConfigurator}
import org.slf4j.{Logger, LoggerFactory}
import org.slf4j.impl.StaticLoggerBinder

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
* logging messages at different levels using methods that only evaluate parameters lazily if the
* log level is enabled.
*
* NOTE: DO NOT USE this class outside of Spark. It is intended as an internal utility.
* This will likely be changed or removed in future releases.
*/
@DeveloperAPI
@DeveloperApi
trait Logging {
// Make the log field transient so that objects with Logging can
// be serialized and used on another machine
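As an illustration of the trait's intended use inside Spark (the class and method names below are hypothetical, and the trait's own note warns against using it from user code):

    import org.apache.spark.Logging

    // Mirrors how Spark-internal classes mix the trait in; illustrative only.
    class BlockFetcher extends Logging {
      def fetch(id: String) {
        logInfo("Fetching block " + id)
        // The message is only built if DEBUG is enabled: the parameter is
        // passed by-name and evaluated lazily.
        logDebug("Detailed state: " + expensiveDebugString())
      }
      private def expensiveDebugString(): String = "..." // placeholder
    }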
core/src/main/scala/org/apache/spark/SerializableWritable.scala
@@ -23,9 +23,9 @@ import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.ObjectWritable
import org.apache.hadoop.io.Writable

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi

@DeveloperAPI
@DeveloperApi
class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
def value = t
override def toString = t.toString
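A brief hedged sketch of why the wrapper exists: Hadoop Writable types are not java.io.Serializable, so wrapping them lets them travel through Java serialization (for example inside a closure). The Text example is illustrative:

    import org.apache.hadoop.io.Text
    import org.apache.spark.SerializableWritable

    object SerializableWritableSketch {
      def main(args: Array[String]) {
        // Writables cannot be shipped in closures or task results directly;
        // the wrapper makes the value serializable.
        val wrapped = new SerializableWritable(new Text("hello"))
        val restored: Text = wrapped.value   // the wrapped Writable back out
        println(restored)
      }
    }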
18 changes: 9 additions & 9 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -35,7 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHad
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.mesos.MesosNativeLibrary

import org.apache.spark.annotations.{DeveloperAPI, Experimental}
import org.apache.spark.annotations.{DeveloperApi, Experimental}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
@@ -49,15 +49,15 @@ import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerType, TimeStampedHashMap, Utils}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* @param config a Spark Config object describing the application configuration. Any settings in
 * this config override the default configs as well as system properties.
*/

@DeveloperAPI
@DeveloperApi
class SparkContext(config: SparkConf) extends Logging {

// This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
@@ -66,14 +66,14 @@ class SparkContext(config: SparkConf) extends Logging {
private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Alternative constructor for setting preferred locations where Spark will create executors.
*
 * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Can
* be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
* from a list of input files or InputFormats for the application.
*/
@DeveloperAPI
@DeveloperApi
def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
this(config)
this.preferredNodeLocationData = preferredNodeLocationData
@@ -718,10 +718,10 @@
}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Register a listener to receive up-calls from events that happen during execution.
*/
@DeveloperAPI
@DeveloperApi
def addSparkListener(listener: SparkListener) {
listenerBus.addListener(listener)
}
@@ -1031,10 +1031,10 @@
}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Run a job that can return approximate results.
*/
@DeveloperAPI
@DeveloperApi
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
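A hedged example of the addSparkListener call shown above. The SparkListener trait and the SparkListenerStageCompleted event with its stageInfo field reflect my reading of the contemporaneous org.apache.spark.scheduler API, so treat those names as assumptions:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.scheduler.{SparkListener, SparkListenerStageCompleted}

    object ListenerSketch {
      def main(args: Array[String]) {
        val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("listener-sketch"))

        // Register a listener for up-calls from the scheduler's event bus.
        sc.addSparkListener(new SparkListener {
          override def onStageCompleted(stageCompleted: SparkListenerStageCompleted) {
            println("Stage completed: " + stageCompleted.stageInfo.stageId)
          }
        })

        sc.parallelize(1 to 1000).count()
        sc.stop()
      }
    }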
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -25,7 +25,7 @@ import scala.util.Properties
import akka.actor._
import com.google.common.collect.MapMaker

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi
import org.apache.spark.api.python.PythonWorkerFactory
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.metrics.MetricsSystem
@@ -36,14 +36,14 @@ import org.apache.spark.storage._
import org.apache.spark.util.{AkkaUtils, Utils}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Holds all the runtime environment objects for a running Spark instance (either master or worker),
* including the serializer, Akka actor system, block manager, map output tracker, etc. Currently
* Spark code finds the SparkEnv through a thread-local variable, so each thread that accesses these
* objects needs to have the right SparkEnv set. You can get the current environment with
* SparkEnv.get (e.g. after creating a SparkContext) and set it with SparkEnv.set.
*/
@DeveloperAPI
@DeveloperApi
class SparkEnv (
val executorId: String,
val actorSystem: ActorSystem,
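A small sketch of the thread-local access pattern described in the scaladoc above (illustrative; it assumes a driver-side thread that has already created a SparkContext):

    import org.apache.spark.{SparkConf, SparkContext, SparkEnv}

    object SparkEnvSketch {
      def main(args: Array[String]) {
        val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("env-sketch"))

        // Once a SparkContext exists, this thread's environment is reachable
        // through the thread-local accessor described above.
        val env = SparkEnv.get
        println("executor id: " + env.executorId)
        println("serializer:  " + env.serializer.getClass.getName)
        sc.stop()
      }
    }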
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/TaskContext.scala
@@ -19,14 +19,14 @@ package org.apache.spark

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi
import org.apache.spark.executor.TaskMetrics

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Contextual information about a task which can be read or mutated during execution.
*/
@DeveloperAPI
@DeveloperApi
class TaskContext(
val stageId: Int,
val partitionId: Int,
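For a sense of how task code sees this object, a hedged sketch using mapPartitionsWithContext, which in this era of the RDD API hands the TaskContext to the user function (treat that method name as an assumption):

    import org.apache.spark.{SparkConf, SparkContext, TaskContext}

    object TaskContextSketch {
      def main(args: Array[String]) {
        val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("taskcontext-sketch"))

        // Tag each record with the stage and partition it was processed in.
        val tagged = sc.parallelize(1 to 10, 2).mapPartitionsWithContext {
          (context: TaskContext, iter: Iterator[Int]) =>
            iter.map(x => (context.stageId, context.partitionId, x))
        }
        tagged.collect().foreach(println)
        sc.stop()
      }
    }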
28 changes: 14 additions & 14 deletions core/src/main/scala/org/apache/spark/TaskEndReason.scala
@@ -17,34 +17,34 @@

package org.apache.spark

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.BlockManagerId

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
* tasks several times for "ephemeral" failures, and only report back failures that require some
* old stages to be resubmitted, such as shuffle map fetch failures.
*/
@DeveloperAPI
@DeveloperApi
sealed trait TaskEndReason

@DeveloperAPI
@DeveloperApi
case object Success extends TaskEndReason

@DeveloperAPI
@DeveloperApi
case object Resubmitted extends TaskEndReason // Task was finished earlier but we've now lost it

@DeveloperAPI
@DeveloperApi
case class FetchFailed(
bmAddress: BlockManagerId,
shuffleId: Int,
mapId: Int,
reduceId: Int)
extends TaskEndReason

@DeveloperAPI
@DeveloperApi
case class ExceptionFailure(
className: String,
description: String,
@@ -53,28 +53,28 @@ case class ExceptionFailure(
extends TaskEndReason

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* The task finished successfully, but the result was lost from the executor's block manager before
* it was fetched.
*/
@DeveloperAPI
@DeveloperApi
case object TaskResultLost extends TaskEndReason

@DeveloperAPI
@DeveloperApi
case object TaskKilled extends TaskEndReason

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* The task failed because the executor that it was running on was lost. This may happen because
* the task crashed the JVM.
*/
@DeveloperAPI
@DeveloperApi
case object ExecutorLostFailure extends TaskEndReason

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* We don't know why the task ended -- for example, because of a ClassNotFound exception when
* deserializing the task result.
*/
@DeveloperAPI
@DeveloperApi
case object UnknownReason extends TaskEndReason
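Since TaskEndReason is sealed, downstream code can match on it exhaustively. An illustrative sketch (the describe helper is hypothetical):

    import org.apache.spark._

    object TaskEndReasonSketch {
      // Every case below appears in the sealed hierarchy above.
      def describe(reason: TaskEndReason): String = reason match {
        case Success => "completed normally"
        case Resubmitted => "finished earlier but was lost and will be re-run"
        case FetchFailed(bmAddress, shuffleId, mapId, reduceId) =>
          "could not fetch map output " + mapId + " of shuffle " + shuffleId + " from " + bmAddress
        case e: ExceptionFailure => "exception: " + e.className + ": " + e.description
        case TaskResultLost => "result lost from the executor's block manager"
        case TaskKilled => "killed"
        case ExecutorLostFailure => "executor running the task was lost"
        case UnknownReason => "unknown"
      }
    }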
@@ -22,4 +22,4 @@
@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD,
ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface DeveloperAPI {}
public @interface DeveloperApi {}
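Because the annotation has source retention, the :: DeveloperApi :: marker in the first Scaladoc line is what doc tooling keys on, while the annotation records intent in the source. A hypothetical new class would be tagged the same way as the Scala files in this commit:

    import org.apache.spark.annotations.DeveloperApi

    /**
     * :: DeveloperApi ::
     * A hypothetical low-level extension point, shown only to illustrate the
     * pairing of the Scaladoc marker with the annotation.
     */
    @DeveloperApi
    class CustomShuffleHook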
core/src/main/scala/org/apache/spark/broadcast/BroadcastFactory.scala
@@ -19,16 +19,16 @@ package org.apache.spark.broadcast

import org.apache.spark.SecurityManager
import org.apache.spark.SparkConf
import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* An interface for all the broadcast implementations in Spark (to allow
* multiple broadcast implementations). SparkContext uses a user-specified
* BroadcastFactory implementation to instantiate a particular broadcast for the
* entire Spark job.
*/
@DeveloperAPI
@DeveloperApi
trait BroadcastFactory {
def initialize(isDriver: Boolean, conf: SparkConf, securityMgr: SecurityManager): Unit
def newBroadcast[T](value: T, isLocal: Boolean, id: Long): Broadcast[T]
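A hedged sketch of how an implementation gets selected: my recollection is that this era of Spark read the factory class name from the spark.broadcast.factory setting, so treat that key (and the HttpBroadcastFactory default named below) as assumptions:

    import org.apache.spark.{SparkConf, SparkContext}

    object BroadcastFactorySketch {
      def main(args: Array[String]) {
        // Assumption: the factory is chosen via "spark.broadcast.factory",
        // whose value is the class name of a BroadcastFactory implementation.
        val conf = new SparkConf()
          .setMaster("local")
          .setAppName("broadcast-factory-sketch")
          .set("spark.broadcast.factory", "org.apache.spark.broadcast.HttpBroadcastFactory")
        val sc = new SparkContext(conf)

        val lookup = sc.broadcast(Map(1 -> "a", 2 -> "b"))
        println(sc.parallelize(Seq(1, 2)).map(lookup.value).collect().toSeq)
        sc.stop()
      }
    }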
14 changes: 7 additions & 7 deletions core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
@@ -17,14 +17,14 @@

package org.apache.spark.executor

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.annotations.DeveloperApi
import org.apache.spark.storage.{BlockId, BlockStatus}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Metrics tracked during the execution of a task.
*/
@DeveloperAPI
@DeveloperApi
class TaskMetrics extends Serializable {
/**
* Host's name the task runs on
@@ -89,10 +89,10 @@ private[spark] object TaskMetrics {


/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Metrics pertaining to shuffle data read in a given task.
*/
@DeveloperAPI
@DeveloperApi
class ShuffleReadMetrics extends Serializable {
/**
* Absolute time when this task finished reading shuffle data
@@ -128,10 +128,10 @@ class ShuffleReadMetrics extends Serializable {
}

/**
* :: DeveloperAPI ::
* :: DeveloperApi ::
* Metrics pertaining to shuffle data written in a given task.
*/
@DeveloperAPI
@DeveloperApi
class ShuffleWriteMetrics extends Serializable {
/**
* Number of bytes written for the shuffle by this task
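A hedged sketch of consuming these metrics from a listener; the SparkListenerTaskEnd.taskMetrics field and the hostname/executorRunTime members reflect my reading of the 1.0-era API and should be treated as assumptions. Register it with sc.addSparkListener as in the SparkContext example above:

    import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}

    // Assumption: SparkListenerTaskEnd carries the finished task's TaskMetrics
    // under a taskMetrics field.
    class TaskMetricsLogger extends SparkListener {
      override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
        val metrics = taskEnd.taskMetrics
        if (metrics != null) {
          println("task ran on " + metrics.hostname +
            " for " + metrics.executorRunTime + " ms")
        }
      }
    }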