
Commit

Dynamically add badges based on annotations
This has known problems due to bugs in scaladoc. In particular, on the
packages page, annotations appear only if there are no comments for the
element. As soon as this is fixed (or there is a workaround) this should
basically be ready.
andrewor14 committed Apr 8, 2014
1 parent 824011b commit 99192ef
Showing 42 changed files with 192 additions and 101 deletions.
11 changes: 6 additions & 5 deletions core/src/main/scala/org/apache/spark/Dependency.scala
@@ -17,21 +17,22 @@

package org.apache.spark

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer

/**
* <span class="developer badge">Developer API</span>
* Base class for dependencies.
*/
@DeveloperAPI
abstract class Dependency[T](val rdd: RDD[T]) extends Serializable


/**
* <span class="developer badge">Developer API</span>
* Base class for dependencies where each partition of the parent RDD is used by at most one
* partition of the child RDD. Narrow dependencies allow for pipelined execution.
*/
@DeveloperAPI
abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {
/**
* Get the parent partitions for a child partition.
@@ -43,14 +44,14 @@ abstract class NarrowDependency[T](rdd: RDD[T]) extends Dependency(rdd) {


/**
* <span class="developer badge">Developer API</span>
* Represents a dependency on the output of a shuffle stage.
* @param rdd the parent RDD
* @param partitioner partitioner used to partition the shuffle output
* @param serializer [[org.apache.spark.serializer.Serializer Serializer]] to use. If set to null,
* the default serializer, as specified by `spark.serializer` config option, will
* be used.
*/
@DeveloperAPI
class ShuffleDependency[K, V](
@transient rdd: RDD[_ <: Product2[K, V]],
val partitioner: Partitioner,
@@ -62,22 +63,22 @@ class ShuffleDependency[K, V](


/**
* <span class="developer badge">Developer API</span>
* Represents a one-to-one dependency between partitions of the parent and child RDDs.
*/
@DeveloperAPI
class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
override def getParents(partitionId: Int) = List(partitionId)
}


/**
* <span class="developer badge">Developer API</span>
* Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
* @param rdd the parent RDD
* @param inStart the start of the range in the parent RDD
* @param outStart the start of the range in the child RDD
* @param length the length of the range
*/
@DeveloperAPI
class RangeDependency[T](rdd: RDD[T], inStart: Int, outStart: Int, length: Int)
extends NarrowDependency[T](rdd) {

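As a quick illustration of the Developer API surface being annotated in this file, a custom narrow dependency only has to describe which parent partitions feed each child partition. A minimal sketch (the class name and the pairwise-coalescing scheme are hypothetical):

import org.apache.spark.NarrowDependency
import org.apache.spark.rdd.RDD

// Hypothetical dependency where child partition i reads parent partitions 2*i and 2*i + 1,
// i.e. pairs of parent partitions are coalesced without a shuffle.
class PairwiseCoalesceDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
  override def getParents(partitionId: Int): Seq[Int] =
    List(2 * partitionId, 2 * partitionId + 1)
}
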
7 changes: 4 additions & 3 deletions core/src/main/scala/org/apache/spark/FutureAction.scala
@@ -21,14 +21,15 @@ import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.util.Try

import org.apache.spark.annotations.Experimental
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}

/**
* <span class="experimental badge">Experimental</span>
* A future for the result of an action to support cancellation. This is an extension of the
* Scala Future interface to support cancellation.
*/
@Experimental
trait FutureAction[T] extends Future[T] {
// Note that we redefine methods of the Future trait here explicitly so we can specify a different
// documentation (with reference to the word "action").
@@ -85,10 +86,10 @@ trait FutureAction[T] {


/**
* <span class="experimental badge">Experimental</span>
* A [[FutureAction]] holding the result of an action that triggers a single job. Examples include
* count, collect, reduce.
*/
@Experimental
class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc: => T)
extends FutureAction[T] {

@@ -150,11 +151,11 @@ class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc:


/**
* <span class="experimental badge">Experimental</span>
* A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
* takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
* action thread if it is being blocked by a job.
*/
@Experimental
class ComplexFutureAction[T] extends FutureAction[T] {

// Pointer to the thread that is executing the action. It is set when the action is run.
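
For context on the Experimental FutureAction API above, the asynchronous RDD actions return one of these cancellable futures. A rough usage sketch, assuming the rddToAsyncRDDActions implicit from the SparkContext companion is available as in Spark of this era:

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._  // assumed: brings countAsync and friends into scope

object FutureActionDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("future-action-demo").setMaster("local[2]"))
    val future = sc.parallelize(1 to 100000, 4).countAsync()  // returns a FutureAction[Long]
    // A FutureAction is a plain Scala Future that can additionally be cancelled via future.cancel().
    val count = Await.result(future, Duration.Inf)
    println(s"counted $count elements")
    sc.stop()
  }
}
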
4 changes: 3 additions & 1 deletion core/src/main/scala/org/apache/spark/Logging.scala
@@ -21,15 +21,17 @@ import org.apache.log4j.{LogManager, PropertyConfigurator}
import org.slf4j.{Logger, LoggerFactory}
import org.slf4j.impl.StaticLoggerBinder

import org.apache.spark.annotations.DeveloperAPI

/**
* <span class="developer badge">Developer API</span>
* Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
* logging messages at different levels using methods that only evaluate parameters lazily if the
* log level is enabled.
*
* NOTE: DO NOT USE this class outside of Spark. It is intended as an internal utility.
* This will likely be changed or removed in future releases.
*/
@DeveloperAPI
trait Logging {
// Make the log field transient so that objects with Logging can
// be serialized and used on another machine
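
A short sketch of how the Logging trait above is typically mixed in (the component class is hypothetical; Spark itself warns against using this trait outside the project):

import org.apache.spark.Logging

class BlockFetcher extends Logging {
  def fetch(id: String): Unit = {
    // The message argument is passed by name, so the string is only built
    // if the INFO level is actually enabled.
    logInfo(s"Fetching block $id")
  }
}
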
core/src/main/scala/org/apache/spark/SerializableWritable.scala
@@ -23,7 +23,9 @@ import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.ObjectWritable
import org.apache.hadoop.io.Writable

/** <span class="developer badge">Developer API</span> */
import org.apache.spark.annotations.DeveloperAPI

@DeveloperAPI
class SerializableWritable[T <: Writable](@transient var t: T) extends Serializable {
def value = t
override def toString = t.toString
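
For reference, SerializableWritable exists because Hadoop Writables are not java.io.Serializable; wrapping one lets it be shipped inside a closure or broadcast. A minimal sketch using Hadoop's Text:

import org.apache.hadoop.io.Text
import org.apache.spark.SerializableWritable

val wrapped = new SerializableWritable(new Text("hello"))
// The wrapper itself is Serializable; the original Writable is recovered via .value.
val text: Text = wrapped.value
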
11 changes: 7 additions & 4 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -35,6 +35,7 @@ import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHad
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.mesos.MesosNativeLibrary

import org.apache.spark.annotations.{DeveloperAPI, Experimental}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
@@ -54,6 +55,8 @@ import org.apache.spark.util.{ClosureCleaner, MetadataCleaner, MetadataCleanerTy
* @param config a Spark Config object describing the application configuration. Any settings in
* this config overrides the default configs as well as system properties.
*/

@DeveloperAPI
class SparkContext(config: SparkConf) extends Logging {

// This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
@@ -62,13 +65,13 @@ class SparkContext(config: SparkConf) extends Logging {
private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()

/**
* <span class="developer badge">Developer API</span>
* Alternative constructor for setting preferred locations where Spark will create executors.
*
* @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on. Can
* be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
* from a list of input files or InputFormats for the application.
*/
@DeveloperAPI
def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
this(config)
this.preferredNodeLocationData = preferredNodeLocationData
@@ -713,9 +716,9 @@
}

/**
* <span class="developer badge">Developer API</span>
* Register a listener to receive up-calls from events that happen during execution.
*/
@DeveloperAPI
def addSparkListener(listener: SparkListener) {
listenerBus.addListener(listener)
}
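
A hedged sketch of a listener that could be registered through addSparkListener above (the counter class is hypothetical; onTaskEnd is one of the SparkListener callbacks):

import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}

class TaskCountListener extends SparkListener {
  @volatile var tasksEnded = 0
  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
    tasksEnded += 1  // called once per completed task
  }
}

// sc.addSparkListener(new TaskCountListener())
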
@@ -1025,9 +1028,9 @@
}

/**
* <span class="developer badge">Developer API</span>
* Run a job that can return approximate results.
*/
@DeveloperAPI
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
@@ -1043,9 +1046,9 @@
}

/**
* <span class="experimental badge">Experimental</span>
* Submit a job for execution and return a FutureJob holding the result.
*/
@Experimental
def submitJob[T, U, R](
rdd: RDD[T],
processPartition: Iterator[T] => U,
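
A rough sketch of calling the Experimental submitJob API shown above, assuming its remaining parameters follow the upstream signature (the partitions to run, a per-result handler invoked on the driver, and a by-name resultFunc evaluated once the job finishes); sc is an existing SparkContext:

val data = sc.parallelize(1 to 100, 4)
val partialSums = new Array[Int](data.partitions.length)
val future = sc.submitJob[Int, Int, Seq[Int]](
  data,
  (it: Iterator[Int]) => it.sum,             // work performed on each partition
  0 until data.partitions.length,            // which partitions to run
  (index, sum) => partialSums(index) = sum,  // result handler, called on the driver
  partialSums.toSeq                          // overall result once every partition is done
)
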
3 changes: 2 additions & 1 deletion core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -25,6 +25,7 @@ import scala.util.Properties
import akka.actor._
import com.google.common.collect.MapMaker

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.api.python.PythonWorkerFactory
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.metrics.MetricsSystem
@@ -35,13 +36,13 @@ import org.apache.spark.storage._
import org.apache.spark.util.{AkkaUtils, Utils}

/**
* <span class="developer badge">Developer API</span>
* Holds all the runtime environment objects for a running Spark instance (either master or worker),
* including the serializer, Akka actor system, block manager, map output tracker, etc. Currently
* Spark code finds the SparkEnv through a thread-local variable, so each thread that accesses these
* objects needs to have the right SparkEnv set. You can get the current environment with
* SparkEnv.get (e.g. after creating a SparkContext) and set it with SparkEnv.set.
*/
@DeveloperAPI
class SparkEnv (
val executorId: String,
val actorSystem: ActorSystem,
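
As the SparkEnv scaladoc above notes, the current environment is reachable through a thread-local accessor once a SparkContext exists. A brief sketch (the printed fields are just examples of what SparkEnv exposes):

import org.apache.spark.{SparkConf, SparkContext, SparkEnv}

val sc = new SparkContext(new SparkConf().setAppName("env-demo").setMaster("local"))
val env = SparkEnv.get                    // thread-local lookup described in the docs above
println(env.executorId)                   // identifies the driver or executor owning this env
println(env.serializer.getClass.getName)  // the configured Serializer implementation
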
3 changes: 2 additions & 1 deletion core/src/main/scala/org/apache/spark/TaskContext.scala
@@ -19,12 +19,13 @@ package org.apache.spark

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.executor.TaskMetrics

/**
* <span class="developer badge">Developer API</span>
* Contextual information about a task which can be read or mutated during execution.
*/
@DeveloperAPI
class TaskContext(
val stageId: Int,
val partitionId: Int,
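
A hedged sketch of reading the TaskContext fields above from inside a task, using mapPartitionsWithContext (itself a developer-level API in Spark of this era); sc is an existing SparkContext:

val data = sc.parallelize(1 to 10, 2)
val tagged = data.mapPartitionsWithContext { (context, iter) =>
  // context is the TaskContext for the running task
  iter.map(x => s"stage=${context.stageId} partition=${context.partitionId} value=$x")
}
tagged.collect().foreach(println)
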
16 changes: 8 additions & 8 deletions core/src/main/scala/org/apache/spark/TaskEndReason.scala
@@ -29,21 +29,21 @@ import org.apache.spark.storage.BlockManagerId
@DeveloperAPI
sealed trait TaskEndReason

/** <span class="developer badge">Developer API</span> */
@DeveloperAPI
case object Success extends TaskEndReason

/** <span class="developer badge">Developer API</span> */
@DeveloperAPI
case object Resubmitted extends TaskEndReason // Task was finished earlier but we've now lost it

/** <span class="developer badge">Developer API</span> */
@DeveloperAPI
case class FetchFailed(
bmAddress: BlockManagerId,
shuffleId: Int,
mapId: Int,
reduceId: Int)
extends TaskEndReason

/** <span class="developer badge">Developer API</span> */
@DeveloperAPI
case class ExceptionFailure(
className: String,
description: String,
@@ -52,25 +52,25 @@ case class ExceptionFailure(
extends TaskEndReason

/**
* <span class="developer badge">Developer API</span>
* The task finished successfully, but the result was lost from the executor's block manager before
* it was fetched.
*/
@DeveloperAPI
case object TaskResultLost extends TaskEndReason

/** <span class="developer badge">Developer API</span> */
@DeveloperAPI
case object TaskKilled extends TaskEndReason

/**
* <span class="developer badge">Developer API</span>
* The task failed because the executor that it was running on was lost. This may happen because
* the task crashed the JVM.
*/
@DeveloperAPI
case object ExecutorLostFailure extends TaskEndReason

/**
* <span class="developer badge">Developer API</span>
* We don't know why the task ended -- for example, because of a ClassNotFound exception when
* deserializing the task result.
*/
@DeveloperAPI
case object UnknownReason extends TaskEndReason
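
These reasons typically surface in scheduler events; a brief sketch of pattern matching on them (the helper is hypothetical, and only fields shown above are used):

import org.apache.spark.{ExceptionFailure, Success, TaskEndReason, TaskKilled}

def describe(reason: TaskEndReason): String = reason match {
  case Success             => "task finished successfully"
  case TaskKilled          => "task was intentionally killed"
  case e: ExceptionFailure => s"task threw ${e.className}: ${e.description}"
  case other               => s"task failed: $other"
}
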
core/src/main/scala/org/apache/spark/annotations/AlphaComponent.java (new file)
@@ -0,0 +1,25 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.annotations;

import java.lang.annotation.*;

@Retention(RetentionPolicy.SOURCE)
@Target({ElementType.TYPE, ElementType.FIELD, ElementType.METHOD,
ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE})
public @interface AlphaComponent {}
core/src/main/scala/org/apache/spark/broadcast/BroadcastFactory.scala
@@ -16,17 +16,18 @@
*/

package org.apache.spark.broadcast
import org.apache.spark.SecurityManager

import org.apache.spark.SecurityManager
import org.apache.spark.SparkConf
import org.apache.spark.annotations.DeveloperAPI

/**
* <span class="developer badge">Developer API</span>
* An interface for all the broadcast implementations in Spark (to allow
* multiple broadcast implementations). SparkContext uses a user-specified
* BroadcastFactory implementation to instantiate a particular broadcast for the
* entire Spark job.
*/
@DeveloperAPI
trait BroadcastFactory {
def initialize(isDriver: Boolean, conf: SparkConf, securityMgr: SecurityManager): Unit
def newBroadcast[T](value: T, isLocal: Boolean, id: Long): Broadcast[T]
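
For context, which BroadcastFactory implementation gets instantiated is chosen by configuration; a hedged sketch (the spark.broadcast.factory key and TorrentBroadcastFactory class name are assumed from the Spark of this era):

import org.apache.spark.{SparkConf, SparkContext}

val conf = new SparkConf()
  .setAppName("broadcast-demo")
  .setMaster("local[2]")
  .set("spark.broadcast.factory", "org.apache.spark.broadcast.TorrentBroadcastFactory")
val sc = new SparkContext(conf)
val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))  // uses the configured factory under the hood
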
core/src/main/scala/org/apache/spark/executor/TaskMetrics.scala
@@ -17,12 +17,13 @@

package org.apache.spark.executor

import org.apache.spark.annotations.DeveloperAPI
import org.apache.spark.storage.{BlockId, BlockStatus}

/**
* <span class="developer badge">Developer API</span>
* Metrics tracked during the execution of a task.
*/
@DeveloperAPI
class TaskMetrics extends Serializable {
/**
* Host's name the task runs on
@@ -87,9 +88,9 @@ private[spark] object TaskMetrics {


/**
* <span class="developer badge">Developer API</span>
* Metrics pertaining to shuffle data read in a given task.
*/
@DeveloperAPI
class ShuffleReadMetrics extends Serializable {
/**
* Absolute time when this task finished reading shuffle data
@@ -125,9 +126,9 @@ class ShuffleReadMetrics extends Serializable {
}

/**
* <span class="developer badge">Developer API</span>
* Metrics pertaining to shuffle data written in a given task.
*/
@DeveloperAPI
class ShuffleWriteMetrics extends Serializable {
/**
* Number of bytes written for the shuffle by this task
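
A hedged sketch of consuming these metrics from a listener, assuming SparkListenerTaskEnd carries the finished task's TaskMetrics and that the shuffle read metrics are exposed as an Option, as in Spark of this era:

import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}

class ShuffleBytesLogger extends SparkListener {
  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
    Option(taskEnd.taskMetrics).foreach { metrics =>
      metrics.shuffleReadMetrics.foreach { read =>
        println(s"remote shuffle bytes read: ${read.remoteBytesRead}")
      }
    }
  }
}
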

