[SPARK-1357 (fix)] remove empty line after :: DeveloperApi/Experimental ::

Remove the empty line after :: DeveloperApi/Experimental :: in comments to make the original doc show up in the preview of the generated HTML docs (a short illustration of the corrected comment style follows the file summary below). Thanks @andrewor14!

Author: Xiangrui Meng <meng@databricks.com>

Closes #373 from mengxr/api and squashes the following commits:

9c35bdc [Xiangrui Meng] remove the empty line after :: DeveloperApi/Experimental ::
mengxr authored and pwendell committed Apr 10, 2014
1 parent eb5f2b6 commit 0adc932
Showing 33 changed files with 21 additions and 71 deletions.
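The hunks below all make the same change: the blank ` * ` line that followed a `:: DeveloperApi ::` or `:: Experimental ::` marker is deleted so the description renders in the Scaladoc preview. As a minimal, hypothetical sketch of the corrected comment style (the class and method here are illustrative only, not part of this commit, and assume the Spark annotation classes are on the classpath):

import org.apache.spark.annotation.Experimental

/**
 * :: Experimental ::
 * Computes the arithmetic mean of a sequence of doubles. Because no blank
 * comment line separates this description from the marker above, the
 * description shows up in the preview of the generated HTML docs.
 */
@Experimental
class ExampleMeanCalculator {
  /** Returns the mean, or 0.0 for an empty input (hypothetical helper). */
  def mean(xs: Seq[Double]): Double = if (xs.isEmpty) 0.0 else xs.sum / xs.length
}

Before this commit, each of the comments below had an extra ` * ` line directly after the marker, which caused the generated docs' preview to show only the marker instead of the description.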

@@ -30,7 +30,6 @@ import org.apache.spark.rdd.RDD

/**
* :: DeveloperApi ::
*
* The Java stubs necessary for the Python mllib bindings.
*/
@DeveloperApi

@@ -29,7 +29,6 @@ import org.apache.spark.rdd.RDD

/**
* :: Experimental ::
*
* Model for Naive Bayes Classifiers.
*
* @param labels list of labels

@@ -78,7 +78,6 @@ class KMeans private (

/**
* :: Experimental ::
*
* Set the number of runs of the algorithm to execute in parallel. We initialize the algorithm
* this many times with random starting conditions (configured by the initialization mode), then
* return the best clustering found over any run. Default: 1.
@@ -398,9 +397,6 @@ object KMeans {
MLUtils.fastSquaredDistance(v1.vector, v1.norm, v2.vector, v2.norm)
}

/**
* :: Experimental ::
*/
@Experimental
def main(args: Array[String]) {
if (args.length < 4) {

@@ -34,7 +34,6 @@ case class MatrixEntry(i: Long, j: Long, value: Double)

/**
* :: Experimental ::
*
* Represents a matrix in coordinate format.
*
* @param entries matrix entries

@@ -26,15 +26,13 @@ import org.apache.spark.mllib.linalg.SingularValueDecomposition

/**
* :: Experimental ::
*
* Represents a row of [[org.apache.spark.mllib.linalg.distributed.IndexedRowMatrix]].
*/
@Experimental
case class IndexedRow(index: Long, vector: Vector)

/**
* :: Experimental ::
*
* Represents a row-oriented [[org.apache.spark.mllib.linalg.distributed.DistributedMatrix]] with
* indexed rows.
*

@@ -30,7 +30,6 @@ import org.apache.spark.Logging

/**
* :: Experimental ::
*
* Represents a row-oriented distributed Matrix with no meaningful row indices.
*
* @param rows rows stored as an RDD[Vector]

@@ -24,7 +24,6 @@ import org.apache.spark.mllib.linalg.{Vectors, Vector}

/**
* :: DeveloperApi ::
*
* Class used to compute the gradient for a loss function, given a single data point.
*/
@DeveloperApi
@@ -56,7 +55,6 @@ abstract class Gradient extends Serializable {

/**
* :: DeveloperApi ::
*
* Compute gradient and loss for a logistic loss function, as used in binary classification.
* See also the documentation for the precise formulation.
*/
@@ -100,7 +98,6 @@ class LogisticGradient extends Gradient {

/**
* :: DeveloperApi ::
*
* Compute gradient and loss for a Least-squared loss function, as used in linear regression.
* This is correct for the averaged least squares loss function (mean squared error)
* L = 1/n ||A weights-y||^2
@@ -135,7 +132,6 @@ class LeastSquaresGradient extends Gradient {

/**
* :: DeveloperApi ::
*
* Compute gradient and loss for a Hinge loss function, as used in SVM binary classification.
* See also the documentation for the precise formulation.
* NOTE: This assumes that the labels are {0,1}

@@ -28,7 +28,6 @@ import org.apache.spark.mllib.linalg.{Vectors, Vector}

/**
* :: DeveloperApi ::
*
* Class used to solve an optimization problem using Gradient Descent.
* @param gradient Gradient function to be used.
* @param updater Updater to be used to update weights after every iteration.
@@ -113,7 +112,6 @@ class GradientDescent(private var gradient: Gradient, private var updater: Updat

/**
* :: DeveloperApi ::
*
* Top-level method to run gradient descent.
*/
@DeveloperApi

@@ -24,7 +24,6 @@ import org.apache.spark.mllib.linalg.Vector

/**
* :: DeveloperApi ::
*
* Trait for optimization problem solvers.
*/
@DeveloperApi

@@ -26,7 +26,6 @@ import org.apache.spark.mllib.linalg.{Vectors, Vector}

/**
* :: DeveloperApi ::
*
* Class used to perform steps (weight update) using Gradient Descent methods.
*
* For general minimization problems, or for regularized problems of the form
@@ -64,7 +63,6 @@ abstract class Updater extends Serializable {

/**
* :: DeveloperApi ::
*
* A simple updater for gradient descent *without* any regularization.
* Uses a step-size decreasing with the square root of the number of iterations.
*/
@@ -86,7 +84,6 @@ class SimpleUpdater extends Updater {

/**
* :: DeveloperApi ::
*
* Updater for L1 regularized problems.
* R(w) = ||w||_1
* Uses a step-size decreasing with the square root of the number of iterations.
@@ -131,7 +128,6 @@ class L1Updater extends Updater {

/**
* :: DeveloperApi ::
*
* Updater for L2 regularized problems.
* R(w) = 1/2 ||w||^2
* Uses a step-size decreasing with the square root of the number of iterations.

@@ -140,7 +140,6 @@ class ALS private (

/**
* :: Experimental ::
*
* Sets the constant used in computing confidence in implicit ALS. Default: 1.0.
*/
@Experimental

@@ -70,7 +70,6 @@ class MatrixFactorizationModel(

/**
* :: DeveloperApi ::
*
* Predict the rating of many users for many products.
* This is a Java stub for python predictAll()
*

@@ -103,7 +103,6 @@ abstract class GeneralizedLinearAlgorithm[M <: GeneralizedLinearModel]

/**
* :: Experimental ::
*
* Set if the algorithm should validate data before training. Default true.
*/
@Experimental

@@ -35,7 +35,6 @@ import org.apache.spark.mllib.linalg.{Vector, Vectors}

/**
* :: Experimental ::
*
* A class that implements a decision tree algorithm for classification and regression. It
* supports both continuous and categorical features.
* @param strategy The configuration parameters for the tree algorithm which specify the type

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.Experimental

/**
* :: Experimental ::
*
* Enum to select the algorithm for the decision tree
*/
@Experimental

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.Experimental

/**
* :: Experimental ::
*
* Enum to describe whether a feature is "continuous" or "categorical"
*/
@Experimental

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.Experimental

/**
* :: Experimental ::
*
* Enum for selecting the quantile calculation strategy
*/
@Experimental

@@ -24,7 +24,6 @@ import org.apache.spark.mllib.tree.configuration.QuantileStrategy._

/**
* :: Experimental ::
*
* Stores all the configuration options for tree construction
* @param algo classification or regression
* @param impurity criterion used for information gain calculation

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.{DeveloperApi, Experimental}

/**
* :: Experimental ::
*
* Class for calculating [[http://en.wikipedia.org/wiki/Binary_entropy_function entropy]] during
* binary classification.
*/
@@ -32,7 +31,6 @@ object Entropy extends Impurity {

/**
* :: DeveloperApi ::
*
* entropy calculation
* @param c0 count of instances with label 0
* @param c1 count of instances with label 1

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.{DeveloperApi, Experimental}

/**
* :: Experimental ::
*
* Class for calculating the
* [[http://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity Gini impurity]]
* during binary classification.
@@ -31,7 +30,6 @@ object Gini extends Impurity {

/**
* :: DeveloperApi ::
*
* Gini coefficient calculation
* @param c0 count of instances with label 0
* @param c1 count of instances with label 1

@@ -21,15 +21,13 @@ import org.apache.spark.annotation.{DeveloperApi, Experimental}

/**
* :: Experimental ::
*
* Trait for calculating information gain.
*/
@Experimental
trait Impurity extends Serializable {

/**
* :: DeveloperApi ::
*
* information calculation for binary classification
* @param c0 count of instances with label 0
* @param c1 count of instances with label 1
@@ -40,7 +38,6 @@ trait Impurity extends Serializable {

/**
* :: DeveloperApi ::
*
* information calculation for regression
* @param count number of instances
* @param sum sum of labels

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.{DeveloperApi, Experimental}

/**
* :: Experimental ::
*
* Class for calculating variance during regression
*/
@Experimental
@@ -31,7 +30,6 @@ object Variance extends Impurity {

/**
* :: DeveloperApi ::
*
* variance calculation
* @param count number of instances
* @param sum sum of labels

@@ -24,7 +24,6 @@ import org.apache.spark.mllib.linalg.Vector

/**
* :: Experimental ::
*
* Model to store the decision tree parameters
* @param topNode root node
* @param algo algorithm type -- classification or regression

@@ -21,7 +21,6 @@ import org.apache.spark.annotation.DeveloperApi

/**
* :: DeveloperApi ::
*
* Information gain statistics for each split
* @param gain information gain value
* @param impurity current node impurity

@@ -24,7 +24,6 @@ import org.apache.spark.mllib.linalg.Vector

/**
* :: DeveloperApi ::
*
* Node in a decision tree
* @param id integer node id
* @param predict predicted value at the node

@@ -22,7 +22,6 @@ import org.apache.spark.mllib.tree.configuration.FeatureType.FeatureType

/**
* :: DeveloperApi ::
*
* Split applied to a feature
* @param feature feature index
* @param threshold threshold for continuous feature

@@ -24,7 +24,6 @@ import org.apache.spark.mllib.regression.LabeledPoint

/**
* :: DeveloperApi ::
*
* A collection of methods used to validate data before applying ML algorithms.
*/
@DeveloperApi

@@ -25,7 +25,6 @@ import org.apache.spark.rdd.RDD

/**
* :: DeveloperApi ::
*
* Generate test data for KMeans. This class first chooses k cluster centers
* from a d-dimensional Gaussian distribution scaled by factor r and then creates a Gaussian
* cluster with scale 1 around each center.

@@ -30,7 +30,6 @@ import org.apache.spark.mllib.regression.LabeledPoint

/**
* :: DeveloperApi ::
*
* Generate sample data used for Linear Data. This class generates
* uniformly random values for every feature and adds Gaussian noise with mean `eps` to the
* response variable `Y`.

@@ -27,7 +27,6 @@ import org.apache.spark.mllib.linalg.Vectors

/**
* :: DeveloperApi ::
*
* Generate test data for LogisticRegression. This class chooses positive labels
* with probability `probOne` and scales features for positive examples by `eps`.
*/