
COSMIT fixing doc building errors.

1 parent 18c7cf1 · commit c039766bfae08245048710dd3524a8d812a90844 · @amueller committed Jun 3, 2012
Showing with 43 additions and 44 deletions.
  1. +6 −5 sklearn/gaussian_process/gaussian_process.py
  2. +13 −15 sklearn/hmm.py
  3. +24 −24 sklearn/manifold/mds.py
11 sklearn/gaussian_process/gaussian_process.py
@@ -173,7 +173,7 @@ class GaussianProcess(BaseEstimator, RegressorMixin):
Attributes
----------
`theta_`: array
- Specified theta OR The best set of autocorrelation parameters (the
+ Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
`reduced_likelihood_function_value_`: array
@@ -535,7 +535,7 @@ def reduced_likelihood_function(self, theta=None):
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
- (ie theta = self.theta_).
+ (ie ``theta = self.theta_``).
Returns
-------
@@ -648,7 +648,7 @@ def reduced_likelihood_function(self, theta=None):
return reduced_likelihood_function_value, par
@deprecated("to be removed;"
- " access self.theta_ etc. directly after fit")
+ " access ``self.theta_`` etc. directly after fit")
def arg_max_reduced_likelihood_function(self):
return self._arg_max_reduced_likelihood_function()
@@ -659,8 +659,9 @@ def theta(self):
return self.theta_
@property
- @deprecated('``reduced_likelihood_function_value`` is deprecated and will be removed'
- 'please use ``reduced_likelihood_function_value_`` instead.')
+ @deprecated("``reduced_likelihood_function_value`` is deprecated and will "
+ "be removed; please use ``reduced_likelihood_function_value_`` "
+ "instead.")
def reduced_likelihood_function_value(self):
return self.reduced_likelihood_function_value_
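
These gaussian_process hunks mostly reword ``@deprecated`` messages for attributes that gained a trailing underscore after fit. As a minimal, self-contained sketch of that deprecation pattern (not sklearn's own helper; ``deprecated_alias`` below is a hypothetical stand-in), an old property name can warn and then forward to the new attribute:

    import warnings

    def deprecated_alias(message):
        """Build a read-only property that warns before delegating."""
        def decorator(getter):
            def wrapper(self):
                warnings.warn(message, DeprecationWarning, stacklevel=2)
                return getter(self)
            return property(wrapper)
        return decorator

    class Model(object):
        def fit(self):
            self.theta_ = [1.0, 2.0]  # new trailing-underscore attribute
            return self

        @deprecated_alias("``theta`` is deprecated; use ``theta_`` instead.")
        def theta(self):
            return self.theta_

    m = Model().fit()
    m.theta  # emits a DeprecationWarning, then returns [1.0, 2.0]
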
28 sklearn/hmm.py
@@ -600,7 +600,7 @@ class GaussianHMM(_BaseHMM):
n_components : int
Number of states.
- _covariance_type : string
+ ``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
@@ -628,7 +628,7 @@ class GaussianHMM(_BaseHMM):
covars : array
Covariance parameters for each state. The shape depends on
- `_covariance_type`::
+ ``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
@@ -999,6 +999,16 @@ class GMMHMM(_BaseHMM):
Attributes
----------
+ init_params : string, optional
+ Controls which parameters are initialized prior to training. Can \
+ contain any combination of 's' for startprob, 't' for transmat, 'm' \
+ for means, and 'c' for covars, etc. Defaults to all parameters.
+
+ params : string, optional
+ Controls which parameters are updated in the training process. Can
+ contain any combination of 's' for startprob, 't' for transmat, 'm' for
+ means, and 'c' for covars, etc. Defaults to all parameters.
+
n_components : int
Number of states in the model.
@@ -1011,7 +1021,7 @@ class GMMHMM(_BaseHMM):
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
- random_state: RandomState or an int seed (0 by default)
+ random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
@@ -1020,18 +1030,6 @@ class GMMHMM(_BaseHMM):
thresh : float, optional
Convergence threshold.
- params : string, optional
- Controls which parameters are updated in the training
- process. Can contain any combination of 's' for startprob,
- 't' for transmat, 'm' for means, and 'c' for covars, etc.
- Defaults to all parameters.
-
- init_params : string, optional
- Controls which parameters are initialized prior to
- training. Can contain any combination of 's' for
- startprob, 't' for transmat, 'm' for means, and 'c' for
- covars, etc. Defaults to all parameters.
-
Examples
--------
>>> from sklearn.hmm import GMMHMM
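
The hmm.py change only moves the ``params`` and ``init_params`` descriptions up in the GMMHMM docstring. A hedged usage sketch of those two options, assuming the 0.11-era API in which both are constructor keywords and ``fit`` takes a list of observation sequences (``sklearn.hmm`` was later split out of scikit-learn):

    import numpy as np
    from sklearn.hmm import GMMHMM

    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)  # one toy observation sequence

    # Initialize startprob, transmat, means and covars ('stmc'), but only
    # re-estimate the transition matrix and the means during EM ('tm').
    model = GMMHMM(n_components=2, n_mix=2, init_params='stmc', params='tm')
    model.fit([X])  # old API assumption: fit takes a list of sequences
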
48 sklearn/manifold/mds.py
@@ -138,8 +138,8 @@ def _smacof_single(similarities, metric=True, n_components=2, init=None,
if similarities.shape[0] != similarities.shape[1]:
raise ValueError("similarities must be a square array (shape=%d)" % \
n_samples)
-
- if np.any((similarities - similarities.T) > 100 * np.finfo(np.float).resolution):
+ eps = 100 * np.finfo(np.float).resolution
+ if np.any((similarities - similarities.T) > eps):
raise ValueError("similarities must be symmetric")
sim_flat = ((1 - np.tri(n_samples)) * similarities).flatten()
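
The first mds.py hunk just pulls the symmetry tolerance into a local ``eps`` so the line fits the documented width. A standalone sketch of that check (``np.float64`` stands in here for the ``np.float`` alias, which newer NumPy releases have removed):

    import numpy as np

    similarities = np.array([[0.0, 1.0, 2.0],
                             [1.0, 0.0, 3.0],
                             [2.0, 3.0, 0.0]])

    eps = 100 * np.finfo(np.float64).resolution
    if np.any((similarities - similarities.T) > eps):
        raise ValueError("similarities must be symmetric")
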
@@ -226,26 +226,26 @@ def smacof(similarities, metric=True, n_components=2, init=None, n_init=8,
Parameters
----------
- similarities: symmetric ndarray, shape (n_samples, n_samples)
+ similarities : symmetric ndarray, shape (n_samples, n_samples)
similarities between the points
- metric: boolean, optional, default: True
+ metric : boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
- n_components: int, optional, default: 2
+ n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
- init: {None or ndarray of shape (n_samples, n_components)}
+ init : {None or ndarray of shape (n_samples, n_components)}
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
- n_init: int, optional, default: 8
+ n_init : int, optional, default: 8
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
- n_jobs: int, optional, default: 1
+ n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
@@ -256,26 +256,26 @@ def smacof(similarities, metric=True, n_components=2, init=None, n_init=8,
(n_cpus + 1 - n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
- max_iter: int, optional, default: 300
+ max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
- verbose: int, optional, default: 0
+ verbose : int, optional, default: 0
level of verbosity
- eps: float, optional, default: 1e-6
+ eps : float, optional, default: 1e-6
relative tolerance w.r.t stress to declare converge
- random_state: integer or numpy.RandomState, optional
+ random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
- X: ndarray (n_samples,n_components)
+ X : ndarray (n_samples,n_components)
Coordinates of the n_samples points in a n_components-space
- stress: float
+ stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
@@ -336,29 +336,29 @@ class MDS(BaseEstimator):
Parameters
----------
- metric: boolean, optional, default: True
+ metric : boolean, optional, default: True
compute metric or nonmetric SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
- n_components: int, optional, default: 2
+ n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
- n_init: int, optional, default: 4
+ n_init : int, optional, default: 4
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
- max_iter: int, optional, default: 300
+ max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
- verbose: int, optional, default: 0
+ verbose : int, optional, default: 0
level of verbosity
- eps: float, optional, default: 1e-6
+ eps : float, optional, default: 1e-6
relative tolerance w.r.t stress to declare converge
- n_jobs: int, optional, default: 1
+ n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
@@ -368,18 +368,18 @@ class MDS(BaseEstimator):
(n_cpus + 1 - n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
- random_state: integer or numpy.RandomState, optional
+ random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
- embedding_: array-like, shape [n_components, n_samples]
+ ``embedding_`` : array-like, shape [n_components, n_samples]
Stores the position of the dataset in the embedding space
- stress_: float
+ ``stress_`` : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
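
The remaining mds.py hunks are numpydoc spacing fixes (``name : type``) in the ``smacof`` and ``MDS`` docstrings. A brief usage sketch of the documented ``smacof`` return values, using a toy Euclidean distance matrix as the similarities input:

    import numpy as np
    from sklearn.manifold import smacof
    from sklearn.metrics import euclidean_distances

    rng = np.random.RandomState(42)
    points = rng.rand(20, 5)                    # toy high-dimensional data
    similarities = euclidean_distances(points)  # symmetric, zero diagonal

    X, stress = smacof(similarities, n_components=2, n_init=8,
                       max_iter=300, eps=1e-6, random_state=0)
    print(X.shape)  # (20, 2): coordinates in the embedding space
    print(stress)   # final value of the stress

The ``MDS`` estimator documented in the same file exposes equivalent results through its ``embedding_`` and ``stress_`` attributes after ``fit``.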
