[MINOR][BUILD] Update genjavadoc to 0.13 #24443

@@ -24,7 +24,7 @@ package org.apache.spark.rpc
private[spark] trait RpcCallContext {

/**
* Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
* Reply a message to the sender. If the sender is [[RpcEndpoint]], its `RpcEndpoint.receive`
* will be called.
*/
def reply(response: Any): Unit
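For illustration, a minimal sketch of how this internal API is used, assuming the usual `RpcEndpoint.receiveAndReply` signature (the endpoint class and messages here are hypothetical):

```scala
import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEnv}

// Hypothetical endpoint: answers each String request via the supplied context.
private[spark] class EchoEndpoint(override val rpcEnv: RpcEnv) extends RpcEndpoint {
  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    case msg: String => context.reply(s"echo: $msg") // routed to the sender's receive
    case other => context.sendFailure(new IllegalArgumentException(s"unexpected: $other"))
  }
}
```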
@@ -77,16 +77,16 @@ private[spark] trait UIRoot {
/**
* Runs some code with the current SparkUI instance for the app / attempt.
*
* @throws NoSuchElementException If the app / attempt pair does not exist.
* @throws java.util.NoSuchElementException If the app / attempt pair does not exist.
*/
def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T

def getApplicationInfoList: Iterator[ApplicationInfo]
def getApplicationInfo(appId: String): Option[ApplicationInfo]

/**
* Write the event logs for the given app to the [[ZipOutputStream]] instance. If attemptId is
* [[None]], event logs for all attempts of this application will be written out.
* Write the event logs for the given app to the `ZipOutputStream` instance. If attemptId is
* `None`, event logs for all attempts of this application will be written out.
*/
def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit = {
Response.serverError()
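As a usage sketch (the `uiRoot` value is hypothetical, and `webUrl` is assumed to be available on the `SparkUI` instance passed to `fn`):

```scala
// Reads one value off the live UI for a given app/attempt; throws
// java.util.NoSuchElementException if the pair is unknown, per the doc above.
val url: String = uiRoot.withSparkUI("app-20190423", None) { ui => ui.webUrl }
```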
@@ -34,7 +34,7 @@ import org.apache.spark.util.collection.OpenHashSet
/**
* A trait that allows a class to give [[SizeEstimator]] more accurate size estimation.
* When a class extends it, [[SizeEstimator]] will query the `estimatedSize` first.
* If `estimatedSize` does not return [[None]], [[SizeEstimator]] will use the returned size
* If `estimatedSize` does not return `None`, [[SizeEstimator]] will use the returned size
* as the size of the object. Otherwise, [[SizeEstimator]] will do the estimation work.
* The difference between a [[KnownSizeEstimation]] and
* [[org.apache.spark.util.collection.SizeTracker]] is that, a
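A minimal sketch of the contract described above (the wrapper class and its overhead constant are assumptions for illustration; the trait itself is `private[spark]`):

```scala
import org.apache.spark.util.KnownSizeEstimation

// Size is known up front, so SizeEstimator skips reflective traversal.
private[spark] class PreSizedBuffer(bytes: Array[Byte]) extends KnownSizeEstimation {
  override def estimatedSize: Long = 16L + bytes.length // assumed fixed header + payload
}
```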
@@ -101,8 +101,8 @@ object SparkAWSCredentials {
*
* @note The given AWS keypair will be saved in DStream checkpoints if checkpointing is
* enabled. Make sure that your checkpoint directory is secure. Prefer using the
* [[http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default default provider chain]]
* instead if possible.
* default provider chain instead if possible
* (http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default).
*
* @param accessKeyId AWS access key ID
* @param secretKey AWS secret key
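For reference, a hedged sketch of the builder this note applies to (method names per the spark-streaming-kinesis-asl builder API; the key values are placeholders, and the default provider chain remains preferable where possible, as the note says):

```scala
import org.apache.spark.streaming.kinesis.SparkAWSCredentials

// Explicit keypair: ends up in DStream checkpoints, so secure the checkpoint dir.
val creds = SparkAWSCredentials.builder
  .basicCredentials("ACCESS_KEY_ID", "SECRET_KEY")
  .build()
```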
6 changes: 3 additions & 3 deletions mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
@@ -371,15 +371,15 @@ private[ann] trait TopologyModel extends Serializable {
def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]]

/**
* Prediction of the model. See {@link ProbabilisticClassificationModel}
* Prediction of the model. See `ProbabilisticClassificationModel`
*
* @param features input features
* @return prediction
*/
def predict(features: Vector): Vector

/**
* Raw prediction of the model. See {@link ProbabilisticClassificationModel}
* Raw prediction of the model. See `ProbabilisticClassificationModel`
*
* @param features input features
* @return raw prediction
@@ -389,7 +389,7 @@ private[ann] trait TopologyModel extends Serializable {
def predictRaw(features: Vector): Vector

/**
* Probability of the model. See {@link ProbabilisticClassificationModel}
* Probability of the model. See `ProbabilisticClassificationModel`
*
* @param rawPrediction raw prediction vector
* @return probability
@@ -121,7 +121,7 @@ sealed abstract class Attribute extends Serializable {
private[attribute] trait AttributeFactory {

/**
* Creates an [[Attribute]] from a [[Metadata]] instance.
* Creates an [[Attribute]] from a `Metadata` instance.
*/
private[attribute] def fromMetadata(metadata: Metadata): Attribute

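A short sketch of where this factory surfaces publicly, assuming a DataFrame `df` with an ML-attribute-annotated column (`Attribute.fromStructField` delegates to `fromMetadata` internally):

```scala
import org.apache.spark.ml.attribute.Attribute

// Rebuilds the typed Attribute (numeric/nominal/binary) from column metadata.
val labelAttr = Attribute.fromStructField(df.schema("label"))
```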
@@ -49,7 +49,7 @@ object Correlation {
* Supported: `pearson` (default), `spearman`
* @return A dataframe that contains the correlation matrix of the column of vectors. This
* dataframe contains a single row and a single column of name
* '$METHODNAME($COLUMN)'.
* `$METHODNAME($COLUMN)`.
* @throws IllegalArgumentException if the column is not a valid column in the dataset, or if
* the content of this column is not of type Vector.
*
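Usage sketch of this method (assumes a DataFrame `df` with a `Vector` column named "features"):

```scala
import org.apache.spark.ml.linalg.Matrix
import org.apache.spark.ml.stat.Correlation
import org.apache.spark.sql.Row

// Single row, single column named "spearman(features)" per the doc above.
val Row(corrMatrix: Matrix) = Correlation.corr(df, "features", "spearman").head
```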
25 changes: 13 additions & 12 deletions mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
@@ -40,39 +40,39 @@ private[ml] trait DecisionTreeParams extends PredictorParams
with HasCheckpointInterval with HasSeed with HasWeightCol {

/**
* Maximum depth of the tree (>= 0).
* Maximum depth of the tree (nonnegative).
* E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
* (default = 5)
* @group param
*/
final val maxDepth: IntParam =
new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" +
new IntParam(this, "maxDepth", "Maximum depth of the tree. (Nonnegative)" +
" E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.",
ParamValidators.gtEq(0))

/**
* Maximum number of bins used for discretizing continuous features and for choosing how to split
* on features at each node. More bins give higher granularity.
* Must be >= 2 and >= number of categories in any categorical feature.
* Must be at least 2 and at least the number of categories in any categorical feature.
* (default = 32)
* @group param
*/
final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" +
" discretizing continuous features. Must be >=2 and >= number of categories for any" +
" categorical feature.", ParamValidators.gtEq(2))
" discretizing continuous features. Must be at least 2 and at least number of categories" +
" for any categorical feature.", ParamValidators.gtEq(2))

/**
* Minimum number of instances each child must have after split.
* If a split causes the left or right child to have fewer than minInstancesPerNode,
* the split will be discarded as invalid.
* Should be >= 1.
* Must be at least 1.
* (default = 1)
* @group param
*/
final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" +
" number of instances each child must have after split. If a split causes the left or right" +
" child to have fewer than minInstancesPerNode, the split will be discarded as invalid." +
" Should be >= 1.", ParamValidators.gtEq(1))
" Must be at least 1.", ParamValidators.gtEq(1))

/**
* Minimum fraction of the weighted sample count that each child must have after split.
Expand All @@ -91,7 +91,7 @@ private[ml] trait DecisionTreeParams extends PredictorParams

/**
* Minimum information gain for a split to be considered at a tree node.
* Should be >= 0.0.
* Should be at least 0.0.
* (default = 0.0)
* @group param
*/
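These params are exposed through the concrete estimators; a hedged sketch with `DecisionTreeClassifier`, mirroring the documented defaults:

```scala
import org.apache.spark.ml.classification.DecisionTreeClassifier

val dt = new DecisionTreeClassifier()
  .setMaxDepth(5)            // nonnegative; depth 0 is a single leaf
  .setMaxBins(32)            // at least 2 and at least the largest category count
  .setMinInstancesPerNode(1) // at least 1
  .setMinInfoGain(0.0)       // at least 0.0
```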
@@ -316,7 +316,7 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
* Supported options:
* - "auto": Choose automatically for task:
If numTrees == 1, set to "all".
* If numTrees > 1 (forest), set to "sqrt" for classification and
If numTrees is greater than 1 (forest), set to "sqrt" for classification and
* to "onethird" for regression.
* - "all": use all features
* - "onethird": use 1/3 of the features
@@ -361,8 +361,8 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
private[ml] trait RandomForestParams extends TreeEnsembleParams {

/**
* Number of trees to train (>= 1).
* If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
* Number of trees to train (at least 1).
* If 1, then no bootstrapping is used. If greater than 1, then bootstrapping is done.
* TODO: Change to always do bootstrapping (simpler). SPARK-7130
* (default = 20)
*
@@ -371,7 +371,8 @@ private[ml] trait RandomForestParams extends TreeEnsembleParams {
* are a bit different.
* @group param
*/
final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)",
final val numTrees: IntParam =
new IntParam(this, "numTrees", "Number of trees to train (at least 1)",
ParamValidators.gtEq(1))

setDefault(numTrees -> 20)
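A short sketch combining the two ensemble params above on a concrete estimator (`RandomForestClassifier`):

```scala
import org.apache.spark.ml.classification.RandomForestClassifier

val rf = new RandomForestClassifier()
  .setNumTrees(20)                  // at least 1; values above 1 enable bootstrapping
  .setFeatureSubsetStrategy("auto") // "sqrt" for classification when numTrees > 1
```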
@@ -33,7 +33,7 @@ import org.apache.spark.util.StatCounter
/**
* Significance testing methods for [[StreamingTest]]. New 2-sample statistical significance tests
* should extend [[StreamingTestMethod]] and introduce a new entry in
* [[StreamingTestMethod.TEST_NAME_TO_OBJECT]]
* `StreamingTestMethod.TEST_NAME_TO_OBJECT`
*/
private[stat] sealed trait StreamingTestMethod extends Serializable {

2 changes: 1 addition & 1 deletion project/SparkBuild.scala
@@ -219,7 +219,7 @@ object SparkBuild extends PomBuild {
.map(file),
incOptions := incOptions.value.withNameHashing(true),
publishMavenStyle := true,
unidocGenjavadocVersion := "0.11",
unidocGenjavadocVersion := "0.13",

// Override SBT's default resolvers:
resolvers := Seq(
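For context, a hedged sketch of how a genjavadoc bump like this is typically wired in an sbt build: `unidocGenjavadocVersion` is the sbt-unidoc key used above, while the `addCompilerPlugin` form shows the plugin's standalone mechanism (the output directory is an assumption, not Spark's exact build):

```scala
// With sbt-unidoc: the plugin resolves the matching genjavadoc compiler plugin.
enablePlugins(GenJavadocPlugin)
unidocGenjavadocVersion := "0.13"

// Standalone equivalent: attach the compiler plugin directly and point its
// output at a directory of generated Java sources for javadoc.
addCompilerPlugin(
  "com.typesafe.genjavadoc" %% "genjavadoc-plugin" % "0.13" cross CrossVersion.full)
scalacOptions += "-P:genjavadoc:out=target/java"
```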
@@ -41,7 +41,7 @@ private[hive] trait HiveClient {

/**
* Return the associated Hive SessionState of this [[HiveClientImpl]]
* @return [[Any]] not SessionState to avoid linkage error
* @return `Any` not SessionState to avoid linkage error
*/
def getState: Any

@@ -76,7 +76,7 @@ private[hive] trait HiveClient {
/** Return whether a table/view with the specified name exists. */
def tableExists(dbName: String, tableName: String): Boolean

/** Returns the specified table, or throws [[NoSuchTableException]]. */
/** Returns the specified table, or throws `NoSuchTableException`. */
final def getTable(dbName: String, tableName: String): CatalogTable = {
getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName))
}
@@ -166,7 +166,7 @@ private[hive] trait HiveClient {
table: String,
newParts: Seq[CatalogTablePartition]): Unit

/** Returns the specified partition, or throws [[NoSuchPartitionException]]. */
/** Returns the specified partition, or throws `NoSuchPartitionException`. */
final def getPartition(
dbName: String,
tableName: String,