@@ -210,6 +210,11 @@ class MinMaxScaler(BaseEstimator, TransformerMixin):
 
     where min, max = feature_range.
 
+    The transformation is calculated as::
+
+        X_scaled = scale * X + min - X.min(axis=0) * scale
+        where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))
+
     This transformation is often used as an alternative to zero mean,
     unit variance scaling.
 
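
As a quick sanity check of the one-step form added above (not part of the
commit; a minimal sketch assuming NumPy and a scikit-learn version that
ships ``MinMaxScaler``)::

    import numpy as np
    from sklearn.preprocessing import MinMaxScaler

    X = np.array([[1.0, -2.0],
                  [3.0, 0.0],
                  [5.0, 4.0]])
    f_min, f_max = 0, 1  # feature_range

    scaler = MinMaxScaler(feature_range=(f_min, f_max)).fit(X)

    # Spell out the documented formula by hand ...
    scale = (f_max - f_min) / (X.max(axis=0) - X.min(axis=0))
    X_by_hand = scale * X + f_min - X.min(axis=0) * scale

    # ... and check it matches the estimator's own output.
    np.testing.assert_allclose(scaler.transform(X), X_by_hand)
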
@@ -227,10 +232,12 @@ class MinMaxScaler(BaseEstimator, TransformerMixin):
     Attributes
     ----------
     min_ : ndarray, shape (n_features,)
-        Per feature adjustment for minimum.
+        Per feature adjustment for minimum. Equivalent to
+        ``min - X.min(axis=0) * self.scale_``
 
     scale_ : ndarray, shape (n_features,)
-        Per feature relative scaling of the data.
+        Per feature relative scaling of the data. Equivalent to
+        ``(max - min) / (X.max(axis=0) - X.min(axis=0))``
 
         .. versionadded:: 0.17
            *scale_* attribute.
 
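
To illustrate the two ``Equivalent to`` notes above (again a sketch, not part
of the commit; ``scale_`` requires scikit-learn >= 0.17 per the
``versionadded`` tag)::

    import numpy as np
    from sklearn.preprocessing import MinMaxScaler

    X = np.array([[1.0, -2.0],
                  [3.0, 0.0],
                  [5.0, 4.0]])
    f_min, f_max = 0, 1  # feature_range

    scaler = MinMaxScaler(feature_range=(f_min, f_max)).fit(X)

    # scale_ == (max - min) / (X.max(axis=0) - X.min(axis=0))
    np.testing.assert_allclose(
        scaler.scale_, (f_max - f_min) / (X.max(axis=0) - X.min(axis=0)))

    # min_ == min - X.min(axis=0) * scale_
    np.testing.assert_allclose(
        scaler.min_, f_min - X.min(axis=0) * scaler.scale_)
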
@@ -409,12 +416,17 @@ def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
     that it is in the given range on the training set, i.e. between
     zero and one.
 
-    The transformation is given by::
+    The transformation is given by (when ``axis=0``)::
 
         X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
         X_scaled = X_std * (max - min) + min
 
     where min, max = feature_range.
 
+    The transformation is calculated as (when ``axis=0``)::
+
+        X_scaled = scale * X + min - X.min(axis=0) * scale
+        where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))
+
     This transformation is often used as an alternative to zero mean,
     unit variance scaling.
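
The same check works for the function form documented in this hunk (a sketch,
not part of the commit; assumes ``minmax_scale`` is available, i.e.
scikit-learn >= 0.17)::

    import numpy as np
    from sklearn.preprocessing import minmax_scale

    X = np.array([[1.0, -2.0],
                  [3.0, 0.0],
                  [5.0, 4.0]])
    f_min, f_max = 0, 1  # feature_range

    # The documented two-step form, applied per column (axis=0).
    X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    X_scaled = X_std * (f_max - f_min) + f_min

    np.testing.assert_allclose(
        minmax_scale(X, feature_range=(f_min, f_max), axis=0), X_scaled)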