[MRG + 1] DOC clean up assorted type specifications #10441

Merged
merged 2 commits into from Jan 10, 2018
Changes from 1 commit
+75 −104
@@ -55,7 +55,7 @@ def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
- random_state : numpy.RandomState
+ random_state : RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
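For reference, the ``RandomState`` named here is the ``numpy.random.RandomState`` instance that scikit-learn's ``check_random_state`` helper produces; a minimal sketch using only the public ``sklearn.utils`` API:

    import numpy as np
    from sklearn.utils import check_random_state

    # check_random_state normalizes None, an int seed, or an existing
    # generator into a numpy.random.RandomState -- the type named above.
    rs = check_random_state(42)
    assert isinstance(rs, np.random.RandomState)
    print(rs.randint(10, size=3))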
@@ -1495,7 +1495,7 @@ def _labels_inertia_minibatch(self, X):
Returns
-------
- labels : array, shap (n_samples,)
+ labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
@@ -55,7 +55,7 @@ def empirical_covariance(X, assume_centered=False):
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
- assume_centered : Boolean
+ assume_centered : boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
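A quick usage sketch of the ``assume_centered`` flag documented here (toy data invented for illustration):

    import numpy as np
    from sklearn.covariance import empirical_covariance

    # Data whose mean is far from zero.
    X = np.random.RandomState(0).rand(50, 3) + 5.0

    cov_centered = empirical_covariance(X)                   # mean subtracted first
    cov_raw = empirical_covariance(X, assume_centered=True)  # data used as-is

    print(np.allclose(cov_centered, cov_raw))  # False: the estimates differ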
@@ -142,7 +142,7 @@ def get_precision(self):
Returns
-------
- precision_ : array-like,
+ precision_ : array-like
The precision matrix associated to the current covariance object.
"""
@@ -162,12 +162,12 @@ def fit(self, X, y=None):
Training data, where n_samples is the number of samples and
n_features is the number of features.
- y : not used, present for API consistence purpose.
+ y
+     not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
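For reference, the convention applied to ``y`` in these hunks looks like this in a full docstring (a hypothetical estimator, not code from this PR):

    def fit(self, X, y=None):
        """Fit the model to X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y
            not used, present for API consistence purpose.

        Returns
        -------
        self : object
        """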
@@ -193,7 +193,8 @@ def score(self, X_test, y=None):
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
- y : not used, present for API consistence purpose.
+ y
+     not used, present for API consistence purpose.
Returns
-------
@@ -519,7 +519,7 @@ class GraphLassoCV(GraphLasso):
If verbose is True, the objective function and duality gap are
printed at each iteration.
- assume_centered : Boolean
+ assume_centered : boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
@@ -523,7 +523,7 @@ class MinCovDet(EmpiricalCovariance):
store_precision : bool
Specify if the estimated precision is stored.
- assume_centered : Boolean
+ assume_centered : bool
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
@@ -606,12 +606,12 @@ def fit(self, X, y=None):
Training data, where n_samples is the number of samples
and n_features is the number of features.
- y : not used, present for API consistence purpose.
+ y
+     not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
@@ -122,12 +122,12 @@ def fit(self, X, y=None):
Training data, where n_samples is the number of samples
and n_features is the number of features.
- y : not used, present for API consistence purpose.
+ y
+     not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
@@ -157,7 +157,7 @@ def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
X : array-like, shape (n_samples, n_features)
Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
- assume_centered : Boolean
+ assume_centered : bool
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
@@ -380,12 +380,12 @@ def fit(self, X, y=None):
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
- y : not used, present for API consistence purpose.
+ y
+     not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
@@ -537,12 +537,12 @@ def fit(self, X, y=None):
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
- y : not used, present for API consistence purpose.
+ y
+     not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
@@ -153,7 +153,8 @@ class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
- max_iter : an integer, the maximum number of iterations (default 500)
+ max_iter : int (default 500)
+     The maximum number of iterations
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
@@ -212,8 +212,9 @@ def load_data(module_path, data_file_name):
Parameters
----------
- data_file_name : String. Name of csv file to be loaded from
-     module_path/data/data_file_name. For example 'wine_data.csv'.
+ data_file_name : String

glemaitre (Contributor) commented on Jan 10, 2018:
    Are we using String or string or str?

jnothman (Author) replied:
    Any of the above?

jnothman (Author):
    Lowercase is much more common though

+     Name of csv file to be loaded from
+     module_path/data/data_file_name. For example 'wine_data.csv'.
Returns
-------
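With the lowercase spelling jnothman suggests above, the parameter entry would read as follows (a hypothetical rendering, not the text merged here):

    def load_data(module_path, data_file_name):
        """Illustrative stub only.

        Parameters
        ----------
        data_file_name : str
            Name of csv file to be loaded from
            module_path/data/data_file_name. For example 'wine_data.csv'.
        """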
@@ -29,14 +29,17 @@ def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
Parameters
----------
- name_or_id : the integer id or the string name metadata of the MLComp
-     dataset to load
+ name_or_id : int or str
+     The integer id or the string name metadata of the MLComp
+     dataset to load
- set_ : select the portion to load: 'train', 'test' or 'raw'
+ set_ : str, default='raw'
+     Select the portion to load: 'train', 'test' or 'raw'
- mlcomp_root : the filesystem path to the root folder where MLComp datasets
-     are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
-     environment variable is looked up instead.
+ mlcomp_root : str, optional
+     The filesystem path to the root folder where MLComp datasets
+     are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
+     environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
@@ -104,7 +104,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant", "prior"):
@@ -386,7 +385,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
if self.strategy not in ("mean", "median", "quantile", "constant"):
raise ValueError("Unknown strategy type: %s, expected "
@@ -376,7 +376,7 @@ def _random_sample_mask(np.npy_intp n_total_samples,
n_total_in_bag : int
The number of elements in the sample mask which are set to 1.
- random_state : np.RandomState
+ random_state : RandomState
A numpy ``RandomState`` object.
Returns
@@ -242,7 +242,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
return self._fit(X, y, self.max_samples, sample_weight=sample_weight)

@@ -275,7 +274,6 @@ def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
random_state = check_random_state(self.random_state)

@@ -241,7 +241,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
@@ -1895,7 +1894,6 @@ def fit(self, X, y=None, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
@@ -987,7 +987,6 @@ def fit(self, X, y, sample_weight=None, monitor=None):
Returns
-------
self : object
- Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
@@ -1412,7 +1411,7 @@ class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
.. versionadded:: 0.19
- init : BaseEstimator, None, optional (default=None)
+ init : estimator, optional
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
@@ -1493,7 +1492,7 @@ class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
loss_ : LossFunction
The concrete ``LossFunction`` object.
- init_ : BaseEstimator
+ init_ : estimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
@@ -1870,7 +1869,7 @@ class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
- init : BaseEstimator, None, optional (default=None)
+ init : estimator, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
@@ -1945,7 +1944,7 @@ class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
loss_ : LossFunction
The concrete ``LossFunction`` object.
- init_ : BaseEstimator
+ init_ : estimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
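A short sketch of the ``init_`` attribute retyped above (toy data invented for illustration; with ``init=None`` the attribute holds the default ``loss.init_estimator``):

    import numpy as np
    from sklearn.ensemble import GradientBoostingRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = 2.0 * X[:, 0] + rng.rand(100)

    est = GradientBoostingRegressor(n_estimators=20, random_state=0).fit(X, y)
    # init_ is the estimator that produced the initial predictions.
    print(est.init_)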
@@ -157,7 +157,6 @@ def fit(self, X, y=None, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
@@ -316,7 +316,7 @@ def set_params(self, **params):
Parameters
----------
- params: keyword arguments
+ params : keyword arguments
Specific parameters using e.g. set_params(parameter_name=new_value)
In addition, to setting the parameters of the ``VotingClassifier``,
the individual classifiers of the ``VotingClassifier`` can also be
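The double-underscore convention this docstring describes, sketched with two arbitrary sub-estimators:

    from sklearn.ensemble import VotingClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    eclf = VotingClassifier(estimators=[('lr', LogisticRegression()),
                                        ('dt', DecisionTreeClassifier())])

    eclf.set_params(voting='soft')     # a parameter of the ensemble itself
    eclf.set_params(lr__C=10.0)        # a parameter of the named sub-estimator 'lr'
    print(eclf.get_params()['lr__C'])  # 10.0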
@@ -93,7 +93,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
@@ -188,7 +187,7 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
sample_weight : array-like of shape = [n_samples]
The current sample weights.
- random_state : numpy.RandomState
+ random_state : RandomState
The current random number generator
Returns
@@ -403,7 +402,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
@@ -452,7 +450,7 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
sample_weight : array-like of shape = [n_samples]
The current sample weights.
- random_state : numpy.RandomState
+ random_state : RandomState
The current random number generator
Returns
@@ -949,7 +947,6 @@ def fit(self, X, y, sample_weight=None):
Returns
-------
self : object
- Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
@@ -986,7 +983,7 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
sample_weight : array-like of shape = [n_samples]
The current sample weights.
- random_state : numpy.RandomState
+ random_state : RandomState
The current random number generator
Returns
@@ -160,7 +160,6 @@ def fit(self, X, y=None, **fit_params):
Returns
-------
self : object
- Returns self.
"""
if self.prefit:
raise NotFittedError(
@@ -192,7 +191,6 @@ def partial_fit(self, X, y=None, **fit_params):
Returns
-------
self : object
- Returns self.
"""
if self.prefit:
raise NotFittedError(
@@ -336,7 +336,6 @@ def fit(self, X, y):
Returns
-------
self : object
- Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
