[SPARK-4822] Use sphinx tags for Python doc annotations
Modify Python annotations for Sphinx. There is no change to the build process described in
https://github.com/apache/spark/blob/master/docs/README.md

Author: lewuathe <lewuathe@me.com>

Closes #3685 from Lewuathe/sphinx-tag-for-pydoc and squashes the following commits:

88a0fd9 [lewuathe] [SPARK-4822] Fix DevelopApi and WARN tags
3d7a398 [lewuathe] [SPARK-4822] Use sphinx tags for Python doc annotations
Lewuathe authored and mengxr committed Dec 18, 2014
1 parent ca12608 commit 3cd5161
Showing 6 changed files with 17 additions and 17 deletions.
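
For context, the patch replaces a plain-text marker that Sphinx passes through as ordinary body text with the reStructuredText ".. note::" admonition directive, which Sphinx renders as a highlighted note box in the generated API docs. A minimal before/after sketch of the pattern follows; the wrapper classes are illustrative, while the docstring text is taken from python/pyspark/rdd.py in this patch.

# Minimal before/after sketch (illustrative wrapper classes, not part of the patch).

class RDDBefore(object):
    def countApprox(self, timeout, confidence=0.95):
        """
        :: Experimental ::

        Approximate version of count() that returns a potentially incomplete
        result within a timeout, even if not all tasks have finished.
        """

class RDDAfter(object):
    def countApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate version of count() that returns a potentially incomplete
        result within a timeout, even if not all tasks have finished.
        """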
4 changes: 2 additions & 2 deletions python/pyspark/context.py
@@ -407,7 +407,7 @@ def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):

def binaryFiles(self, path, minPartitions=None):
"""
-:: Experimental ::
+.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
@@ -424,7 +424,7 @@ def binaryFiles(self, path, minPartitions=None):

def binaryRecords(self, path, recordLength):
"""
-:: Experimental ::
+.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
4 changes: 2 additions & 2 deletions python/pyspark/mllib/classification.py
@@ -41,7 +41,7 @@ def __init__(self, weights, intercept):

def setThreshold(self, value):
"""
-:: Experimental ::
+.. note:: Experimental
Sets the threshold that separates positive predictions from negative
predictions. An example with prediction score greater than or equal
@@ -51,7 +51,7 @@ def setThreshold(self, value):

def clearThreshold(self):
"""
-:: Experimental ::
+.. note:: Experimental
Clears the threshold so that `predict` will output raw prediction scores.
"""
12 changes: 6 additions & 6 deletions python/pyspark/mllib/feature.py
@@ -36,7 +36,7 @@

class VectorTransformer(object):
"""
-:: DeveloperApi ::
+.. note:: DeveloperApi
Base class for transformation of a vector or RDD of vector
"""
@@ -51,7 +51,7 @@ def transform(self, vector):

class Normalizer(VectorTransformer):
"""
-:: Experimental ::
+.. note:: Experimental
Normalizes samples individually to unit L\ :sup:`p`\ norm
@@ -112,7 +112,7 @@ def transform(self, vector):

class StandardScalerModel(JavaVectorTransformer):
"""
-:: Experimental ::
+.. note:: Experimental
Represents a StandardScaler model that can transform vectors.
"""
@@ -129,7 +129,7 @@ def transform(self, vector):

class StandardScaler(object):
"""
-:: Experimental ::
+.. note:: Experimental
Standardizes features by removing the mean and scaling to unit
variance using column summary statistics on the samples in the
@@ -172,7 +172,7 @@ def fit(self, dataset):

class HashingTF(object):
"""
-:: Experimental ::
+.. note:: Experimental
Maps a sequence of terms to their term frequencies using the hashing trick.
@@ -232,7 +232,7 @@ def transform(self, x):

class IDF(object):
"""
-:: Experimental ::
+.. note:: Experimental
Inverse document frequency (IDF).
4 changes: 2 additions & 2 deletions python/pyspark/mllib/stat.py
@@ -55,7 +55,7 @@ def min(self):

class ChiSqTestResult(JavaModelWrapper):
"""
-:: Experimental ::
+.. note:: Experimental
Object containing the test results for the chi-squared hypothesis test.
"""
@@ -200,7 +200,7 @@ def corr(x, y=None, method=None):
@staticmethod
def chiSqTest(observed, expected=None):
"""
-:: Experimental ::
+.. note:: Experimental
If `observed` is Vector, conduct Pearson's chi-squared goodness
of fit test of the observed data against the expected distribution,
8 changes: 4 additions & 4 deletions python/pyspark/rdd.py
@@ -1964,7 +1964,7 @@ def _to_java_object_rdd(self):

def countApprox(self, timeout, confidence=0.95):
"""
-:: Experimental ::
+.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
@@ -1977,7 +1977,7 @@ def countApprox(self, timeout, confidence=0.95):

def sumApprox(self, timeout, confidence=0.95):
"""
-:: Experimental ::
+.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
@@ -1993,7 +1993,7 @@ def sumApprox(self, timeout, confidence=0.95):

def meanApprox(self, timeout, confidence=0.95):
"""
-:: Experimental ::
+.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
@@ -2009,7 +2009,7 @@ def meanApprox(self, timeout, confidence=0.95):

def countApproxDistinct(self, relativeSD=0.05):
"""
-:: Experimental ::
+.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
2 changes: 1 addition & 1 deletion python/pyspark/sql.py
@@ -420,7 +420,7 @@ def fromJson(cls, json):

class UserDefinedType(DataType):
"""
-:: WARN: Spark Internal Use Only ::
+.. note:: WARN: Spark Internal Use Only
SQL User-Defined Type (UDT).
"""

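
As a quick way to confirm that the migration caught every occurrence, one could scan the PySpark tree for leftover old-style markers. The helper below is a hypothetical sketch under that assumption, not part of the patch itself.

# Hypothetical helper, not part of this patch: scan the PySpark sources for any
# docstring annotations still written in the old ":: Tag ::" style.
import os
import re

OLD_STYLE = re.compile(r"^\s*:: .+ ::\s*$")

def find_old_style_tags(root):
    """Yield (path, line_number, line) for each old-style annotation under root."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith(".py"):
                continue
            path = os.path.join(dirpath, name)
            with open(path) as f:
                for lineno, line in enumerate(f, start=1):
                    if OLD_STYLE.match(line):
                        yield path, lineno, line.strip()

if __name__ == "__main__":
    for path, lineno, line in find_old_style_tags("python/pyspark"):
        print("%s:%d: %s" % (path, lineno, line))

Run from the repository root, it should print nothing once this patch is applied, assuming these six files held the only old-style markers.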
