diff --git a/docs/sources/CHANGELOG.md b/docs/sources/CHANGELOG.md
index 6030f59b6..427e33d15 100755
--- a/docs/sources/CHANGELOG.md
+++ b/docs/sources/CHANGELOG.md
@@ -28,6 +28,7 @@ The CHANGELOG for the current development version is available at
 - the "S" vector from SVD in `PrincipalComponentAnalysis` are now scaled so that the eigenvalues via `solver='eigen'` and `solver='svd'` now store eigenvalues that have the same magnitudes. [#251](https://github.com/rasbt/mlxtend/pull/251)
 - The parameters for `StackingClassifier`, `StackingCVClassifier`, `StackingRegressor`, `StackingCVRegressor`, and `EnsembleVoteClassifier` can now be tuned using scikit-learn's `GridSearchCV` ([#254](https://github.com/rasbt/mlxtend/pull/254) via [James Bourbeau](https://github.com/jrbourbeau))
+- Fix issues with the `self._init_time` attribute in `_IterativeModel` subclasses. [#256](https://github.com/rasbt/mlxtend/pull/256)
 
 ### Version 0.8.0 (2017-09-09)
diff --git a/mlxtend/_base/_base_model.py b/mlxtend/_base/_base_model.py
index 0a123f718..63abbd254 100644
--- a/mlxtend/_base/_base_model.py
+++ b/mlxtend/_base/_base_model.py
@@ -6,11 +6,13 @@
 #
 # License: BSD 3 clause
 
+from time import time
+
 
 class _BaseModel(object):
 
     def __init__(self):
-        pass
+        self._init_time = time()
 
     def _check_arrays(self, X, y=None):
         if isinstance(X, list):
diff --git a/mlxtend/_base/_classifier.py b/mlxtend/_base/_classifier.py
index c28a3668f..89e24e479 100644
--- a/mlxtend/_base/_classifier.py
+++ b/mlxtend/_base/_classifier.py
@@ -7,6 +7,7 @@
 # License: BSD 3 clause
 
 import numpy as np
+from time import time
 
 
 class _Classifier(object):
@@ -75,6 +76,7 @@ def fit(self, X, y, init_params=True):
         self._check_target_array(y)
         if hasattr(self, 'self.random_seed') and self.random_seed:
             self._rgen = np.random.RandomState(self.random_seed)
+        self._init_time = time()
         self._fit(X=X, y=y, init_params=init_params)
         self._is_fitted = True
         return self
diff --git a/mlxtend/_base/_cluster.py b/mlxtend/_base/_cluster.py
index 12b49bb2b..cec2e850a 100644
--- a/mlxtend/_base/_cluster.py
+++ b/mlxtend/_base/_cluster.py
@@ -7,6 +7,7 @@
 # License: BSD 3 clause
 
 import numpy as np
+from time import time
 
 
 class _Cluster(object):
@@ -36,6 +37,7 @@ def fit(self, X, init_params=True):
         self._check_arrays(X=X)
         if hasattr(self, 'self.random_seed') and self.random_seed:
             self._rgen = np.random.RandomState(self.random_seed)
+        self._init_time = time()
         self._fit(X=X, init_params=init_params)
         self._is_fitted = True
         return self
diff --git a/mlxtend/_base/_iterative_model.py b/mlxtend/_base/_iterative_model.py
index 94941a598..0596271ae 100644
--- a/mlxtend/_base/_iterative_model.py
+++ b/mlxtend/_base/_iterative_model.py
@@ -31,7 +31,7 @@ def _print_progress(self, iteration, n_iter,
         if not hasattr(self, 'ela_str_'):
             self.ela_str_ = '00:00:00'
         if not iteration % time_interval:
-            ela_sec = time() - self.init_time_
+            ela_sec = time() - self._init_time
             self.ela_str_ = self._to_hhmmss(ela_sec)
         s += ' | Elapsed: %s' % self.ela_str_
         if self.print_progress > 2:
diff --git a/mlxtend/_base/_regressor.py b/mlxtend/_base/_regressor.py
index 1fe44d171..6bdf412d2 100644
--- a/mlxtend/_base/_regressor.py
+++ b/mlxtend/_base/_regressor.py
@@ -7,6 +7,7 @@
 # License: BSD 3 clause
 
 import numpy as np
+from time import time
 
 
 class _Regressor(object):
@@ -44,6 +45,7 @@ def fit(self, X, y, init_params=True):
         self._check_target_array(y)
         if hasattr(self, 'self.random_seed') and self.random_seed:
             self._rgen = np.random.RandomState(self.random_seed)
+        self._init_time = time()
         self._fit(X=X, y=y, init_params=init_params)
         self._is_fitted = True
         return self
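Taken together, the `_base` changes route all elapsed-time bookkeeping through a single attribute: `_BaseModel.__init__` seeds `self._init_time`, each public `fit()` refreshes it, and `_IterativeModel._print_progress` reads it instead of the old `self.init_time_`. A minimal sketch of that flow, using stand-in classes rather than the real mixins:

```python
# Minimal sketch of the timing flow above; BaseModel/Estimator are
# stand-ins for the mlxtend mixins, not the real classes.
from time import time, sleep


class BaseModel(object):
    def __init__(self):
        self._init_time = time()   # seeded at construction


class Estimator(BaseModel):
    def fit(self, n_iter=3):
        self._init_time = time()   # refreshed on every fit() call
        for i in range(1, n_iter + 1):
            sleep(0.01)
            self._print_progress(i, n_iter)
        return self

    def _print_progress(self, iteration, n_iter):
        # The attribute is guaranteed to exist no matter whether the
        # constructor or fit() ran last.
        ela_sec = time() - self._init_time
        print('Iteration: %d/%d | Elapsed: %.2f sec'
              % (iteration, n_iter, ela_sec))


Estimator().fit()
```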
diff --git a/mlxtend/classifier/adaline.py b/mlxtend/classifier/adaline.py
index 7a7e1be79..d3c3bf9d4 100644
--- a/mlxtend/classifier/adaline.py
+++ b/mlxtend/classifier/adaline.py
@@ -57,6 +57,10 @@ def __init__(self, eta=0.01,
                  epochs=50,
                  minibatches=None, random_seed=None,
                  print_progress=0):
+        _BaseModel.__init__(self)
+        _IterativeModel.__init__(self)
+        _Classifier.__init__(self)
+
         self.eta = eta
         self.minibatches = minibatches
         self.epochs = epochs
diff --git a/mlxtend/classifier/logistic_regression.py b/mlxtend/classifier/logistic_regression.py
index 5e4e4e94b..d93c16504 100644
--- a/mlxtend/classifier/logistic_regression.py
+++ b/mlxtend/classifier/logistic_regression.py
@@ -61,6 +61,10 @@ def __init__(self, eta=0.01,
                  epochs=50,
                  random_seed=None,
                  print_progress=0):
+        _BaseModel.__init__(self)
+        _IterativeModel.__init__(self)
+        _Classifier.__init__(self)
+
         self.eta = eta
         self.epochs = epochs
         self.l2_lambda = l2_lambda
diff --git a/mlxtend/classifier/multilayerperceptron.py b/mlxtend/classifier/multilayerperceptron.py
index 42f723cf5..629b043e5 100644
--- a/mlxtend/classifier/multilayerperceptron.py
+++ b/mlxtend/classifier/multilayerperceptron.py
@@ -82,6 +82,12 @@ def __init__(self, eta=0.5, epochs=50,
                  minibatches=1,
                  random_seed=None,
                  print_progress=0):
+        _BaseModel.__init__(self)
+        _Classifier.__init__(self)
+        _IterativeModel.__init__(self)
+        _MultiClass.__init__(self)
+        _MultiLayer.__init__(self)
+
         if len(hidden_layers) > 1:
             raise AttributeError('Currently, only 1 hidden layer is supported')
         self.hidden_layers = hidden_layers
diff --git a/mlxtend/classifier/perceptron.py b/mlxtend/classifier/perceptron.py
index 0397fb3f4..8d60a1518 100644
--- a/mlxtend/classifier/perceptron.py
+++ b/mlxtend/classifier/perceptron.py
@@ -49,6 +49,10 @@ class Perceptron(_BaseModel, _IterativeModel, _Classifier):
 
     def __init__(self, eta=0.1, epochs=50,
                  random_seed=None, print_progress=0):
+        _BaseModel.__init__(self)
+        _IterativeModel.__init__(self)
+        _Classifier.__init__(self)
+
         self.eta = eta
         self.epochs = epochs
         self.random_seed = random_seed
diff --git a/mlxtend/classifier/softmax_regression.py b/mlxtend/classifier/softmax_regression.py
index 04e5d621b..8ccc3b80a 100644
--- a/mlxtend/classifier/softmax_regression.py
+++ b/mlxtend/classifier/softmax_regression.py
@@ -16,7 +16,7 @@
 from .._base import _Classifier
 
 
-class SoftmaxRegression(_BaseModel, _IterativeModel, _MultiClass, _Classifier):
+class SoftmaxRegression(_BaseModel, _IterativeModel, _Classifier, _MultiClass):
 
     """Softmax regression classifier.
@@ -66,6 +66,11 @@ def __init__(self, eta=0.01,
                  epochs=50,
                  random_seed=None,
                  print_progress=0):
+        _BaseModel.__init__(self)
+        _IterativeModel.__init__(self)
+        _Classifier.__init__(self)
+        _MultiClass.__init__(self)
+
         self.eta = eta
         self.epochs = epochs
         self.l2 = l2
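Note the pattern shared by all the classifier constructors above: each mixin's `__init__` is invoked explicitly, since none of the mixins chain through `super()`, and the `SoftmaxRegression` base-class reorder makes the bases match the order of those explicit calls. A toy illustration of why the explicit calls are needed (dummy classes, not the mlxtend ones):

```python
# Dummy mixins illustrating the explicit-__init__ pattern used above.
class Base(object):
    def __init__(self):
        self.init_time_set = True   # stands in for self._init_time


class IterativeMixin(object):
    def __init__(self):
        pass                        # defines __init__ but never chains up


class Model(Base, IterativeMixin):
    def __init__(self, eta=0.01):
        # Explicit calls guarantee Base.__init__ runs even though the
        # mixins never call super().__init__() themselves.
        Base.__init__(self)
        IterativeMixin.__init__(self)
        self.eta = eta


print(Model().init_time_set)                 # -> True
print([c.__name__ for c in Model.__mro__])   # MRO stays linear
```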
diff --git a/mlxtend/classifier/tests/test_adaline.py b/mlxtend/classifier/tests/test_adaline.py
index cf0a76d5c..ef390e9bc 100644
--- a/mlxtend/classifier/tests/test_adaline.py
+++ b/mlxtend/classifier/tests/test_adaline.py
@@ -79,6 +79,33 @@ def test_gradient_descent():
     assert((y1 == ada.predict(X_std)).all())
 
 
+def test_print_progress_1():
+    ada = Adaline(epochs=30,
+                  eta=0.01,
+                  minibatches=1,
+                  print_progress=1,
+                  random_seed=1)
+    ada.fit(X_std, y1)
+
+
+def test_print_progress_2():
+    ada = Adaline(epochs=30,
+                  eta=0.01,
+                  minibatches=1,
+                  print_progress=2,
+                  random_seed=1)
+    ada.fit(X_std, y1)
+
+
+def test_print_progress_3():
+    ada = Adaline(epochs=30,
+                  eta=0.01,
+                  minibatches=1,
+                  print_progress=3,
+                  random_seed=1)
+    ada.fit(X_std, y1)
+
+
 def test_score_function():
     ada = Adaline(epochs=30,
                   eta=0.01,
diff --git a/mlxtend/classifier/tests/test_logistic_regression.py b/mlxtend/classifier/tests/test_logistic_regression.py
index c4301bdea..1355cf04b 100644
--- a/mlxtend/classifier/tests/test_logistic_regression.py
+++ b/mlxtend/classifier/tests/test_logistic_regression.py
@@ -67,6 +67,33 @@ def test_logistic_regression_gd():
     assert acc == 1.0, "Acc: %s" % acc
 
 
+def test_print_progress_1():
+    lr = LogisticRegression(epochs=100,
+                            eta=0.01,
+                            minibatches=1,
+                            print_progress=1,
+                            random_seed=1)
+    lr.fit(X, y)
+
+
+def test_print_progress_2():
+    lr = LogisticRegression(epochs=100,
+                            eta=0.01,
+                            minibatches=1,
+                            print_progress=2,
+                            random_seed=1)
+    lr.fit(X, y)
+
+
+def test_print_progress_3():
+    lr = LogisticRegression(epochs=100,
+                            eta=0.01,
+                            minibatches=1,
+                            print_progress=3,
+                            random_seed=1)
+    lr.fit(X, y)
+
+
 def test_score_function():
     lr = LogisticRegression(epochs=100,
                             eta=0.01,
diff --git a/mlxtend/classifier/tests/test_multilayerperceptron.py b/mlxtend/classifier/tests/test_multilayerperceptron.py
index 2f883c6e5..e93415813 100644
--- a/mlxtend/classifier/tests/test_multilayerperceptron.py
+++ b/mlxtend/classifier/tests/test_multilayerperceptron.py
@@ -34,6 +34,36 @@ def test_multiclass_gd_acc():
     assert (y == mlp.predict(X)).all()
 
 
+def test_progress_1():
+    mlp = MLP(epochs=1,
+              eta=0.05,
+              hidden_layers=[10],
+              minibatches=1,
+              print_progress=1,
+              random_seed=1)
+    mlp.fit(X, y)
+
+
+def test_progress_2():
+    mlp = MLP(epochs=1,
+              eta=0.05,
+              hidden_layers=[10],
+              minibatches=1,
+              print_progress=2,
+              random_seed=1)
+    mlp.fit(X, y)
+
+
+def test_progress_3():
+    mlp = MLP(epochs=1,
+              eta=0.05,
+              hidden_layers=[10],
+              minibatches=1,
+              print_progress=3,
+              random_seed=1)
+    mlp.fit(X, y)
+
+
 def test_predict_proba():
     mlp = MLP(epochs=20,
               eta=0.05,
diff --git a/mlxtend/classifier/tests/test_perceptron.py b/mlxtend/classifier/tests/test_perceptron.py
index 6f79a6fd2..c1d71ce1a 100644
--- a/mlxtend/classifier/tests/test_perceptron.py
+++ b/mlxtend/classifier/tests/test_perceptron.py
@@ -59,6 +59,21 @@ def test_standardized_iris_data():
     assert (y0 == ppn.predict(X_std)).all(), ppn.predict(X_std)
 
 
+def test_progress_1():
+    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, print_progress=1)
+    ppn = ppn.fit(X_std, y0)
+
+
+def test_progress_2():
+    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, print_progress=2)
+    ppn = ppn.fit(X_std, y0)
+
+
+def test_progress_3():
+    ppn = Perceptron(epochs=15, eta=0.01, random_seed=1, print_progress=3)
+    ppn = ppn.fit(X_std, y0)
+
+
 def test_score_function():
     ppn = Perceptron(epochs=15, eta=0.01, random_seed=1)
     ppn = ppn.fit(X_std, y0)
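The new `*progress*` tests are smoke tests: they assert nothing and simply exercise the progress-reporting path that the `_init_time` fix repairs, since `_print_progress` previously read `self.init_time_`, which nothing in the pre-patch code shown here assigns. A sketch of the kind of call they protect (the exact pre-fix traceback is an assumption):

```python
# Sketch of what the smoke tests exercise. Before this patch, any
# fit() with print_progress >= 1 reached _print_progress, which read
# self.init_time_ and would plausibly fail with something like
# "AttributeError: ... object has no attribute 'init_time_'" (assumed).
from mlxtend.classifier import Adaline
from mlxtend.data import iris_data

X, y = iris_data()
X, y = X[:100, [0, 3]], y[:100]               # two classes, two features
X_std = (X - X.mean(axis=0)) / X.std(axis=0)  # standardize

ada = Adaline(epochs=30, eta=0.01, minibatches=1,
              print_progress=3, random_seed=1)
ada.fit(X_std, y)                             # passes once _init_time exists
```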
diff --git a/mlxtend/classifier/tests/test_softmax_regression.py b/mlxtend/classifier/tests/test_softmax_regression.py
index bc3dcd65d..19f975224 100644
--- a/mlxtend/classifier/tests/test_softmax_regression.py
+++ b/mlxtend/classifier/tests/test_softmax_regression.py
@@ -80,6 +80,36 @@ def test_binary_logistic_regression_sgd():
     assert (y_bin == lr.predict(X_bin)).all()
 
 
+def test_progress_1():
+    lr = SoftmaxRegression(epochs=1,
+                           eta=0.005,
+                           minibatches=1,
+                           print_progress=1,
+                           random_seed=1)
+
+    lr.fit(X_bin, y_bin)  # 0, 1 class
+
+
+def test_progress_2():
+    lr = SoftmaxRegression(epochs=1,
+                           eta=0.005,
+                           minibatches=1,
+                           print_progress=2,
+                           random_seed=1)
+
+    lr.fit(X_bin, y_bin)  # 0, 1 class
+
+
+def test_progress_3():
+    lr = SoftmaxRegression(epochs=1,
+                           eta=0.005,
+                           minibatches=1,
+                           print_progress=3,
+                           random_seed=1)
+
+    lr.fit(X_bin, y_bin)  # 0, 1 class
+
+
 def test_binary_l2_regularization_gd():
     t = np.array([[-0.17, 0.17],
                   [-2.26, 2.26]])
diff --git a/mlxtend/cluster/kmeans.py b/mlxtend/cluster/kmeans.py
index 07abd6f71..7bb181439 100644
--- a/mlxtend/cluster/kmeans.py
+++ b/mlxtend/cluster/kmeans.py
@@ -58,6 +58,9 @@ def __init__(self, k, max_iter=10,
                  convergence_tolerance=1e-05,
                  random_seed=None,
                  print_progress=0):
+        _BaseModel.__init__(self)
+        _Cluster.__init__(self)
+        _IterativeModel.__init__(self)
         self.k = k
         self.max_iter = max_iter
         self.convergence_tolerance = convergence_tolerance
@@ -71,7 +74,6 @@ def _fit(self, X, init_params=True):
         """Learn cluster centroids from training data.
 
         Called in self.fit
 
         """
-        n_samples = X.shape[0]
         if init_params:
diff --git a/mlxtend/cluster/tests/test_kmeans.py b/mlxtend/cluster/tests/test_kmeans.py
index edd7331d4..3b9060995 100644
--- a/mlxtend/cluster/tests/test_kmeans.py
+++ b/mlxtend/cluster/tests/test_kmeans.py
@@ -35,6 +35,30 @@ def test_three_blobs_multi():
     assert (y_pred == y).all()
 
 
+def test_print_progress_1():
+    km = Kmeans(k=3,
+                max_iter=50,
+                random_seed=1,
+                print_progress=1)
+    km.fit(X)
+
+
+def test_print_progress_2():
+    km = Kmeans(k=3,
+                max_iter=50,
+                random_seed=1,
+                print_progress=2)
+    km.fit(X)
+
+
+def test_print_progress_3():
+    km = Kmeans(k=3,
+                max_iter=50,
+                random_seed=1,
+                print_progress=3)
+    km.fit(X)
+
+
 def test_three_blobs_1sample():
     km = Kmeans(k=3,
                 max_iter=50,
diff --git a/mlxtend/regressor/linear_regression.py b/mlxtend/regressor/linear_regression.py
index 2a34dc205..053c78e3a 100644
--- a/mlxtend/regressor/linear_regression.py
+++ b/mlxtend/regressor/linear_regression.py
@@ -55,6 +55,9 @@ def __init__(self, eta=0.01, epochs=50,
                  minibatches=None,
                  random_seed=None,
                  print_progress=0):
+        _BaseModel.__init__(self)
+        _IterativeModel.__init__(self)
+        _Regressor.__init__(self)
         self.eta = eta
         self.epochs = epochs
         self.minibatches = minibatches
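One consequence of setting `self._init_time` both in `_BaseModel.__init__` and at the top of each `fit()` is worth spelling out: the "Elapsed" readout always measures the current `fit()` call, not the age of the estimator object. A small check of that semantics (it pokes at the private attribute purely for illustration):

```python
# Checks that fit() re-seeds _init_time, so elapsed time is measured
# per fit() call rather than from object construction.
from time import sleep

import numpy as np
from mlxtend.regressor import LinearRegression

X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([1.0, 2.0, 3.0, 4.0])

gd_lr = LinearRegression(minibatches=1, eta=0.001, epochs=5,
                         print_progress=0, random_seed=0)
t0 = gd_lr._init_time          # seeded by _BaseModel.__init__
sleep(0.05)
gd_lr.fit(X, y)
assert gd_lr._init_time > t0   # re-seeded by _Regressor.fit
```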
diff --git a/mlxtend/regressor/tests/test_linear_regression.py b/mlxtend/regressor/tests/test_linear_regression.py
index 207ea94b8..55dbb5063 100644
--- a/mlxtend/regressor/tests/test_linear_regression.py
+++ b/mlxtend/regressor/tests/test_linear_regression.py
@@ -51,6 +51,33 @@ def test_univariate_gradient_descent():
     assert_almost_equal(gd_lr.b_, b_exp, decimal=1)
 
 
+def test_progress_1():
+    gd_lr = LinearRegression(minibatches=1,
+                             eta=0.001,
+                             epochs=1,
+                             print_progress=1,
+                             random_seed=0)
+    gd_lr.fit(X_rm_std, y_std)
+
+
+def test_progress_2():
+    gd_lr = LinearRegression(minibatches=1,
+                             eta=0.001,
+                             epochs=1,
+                             print_progress=2,
+                             random_seed=0)
+    gd_lr.fit(X_rm_std, y_std)
+
+
+def test_progress_3():
+    gd_lr = LinearRegression(minibatches=1,
+                             eta=0.001,
+                             epochs=1,
+                             print_progress=3,
+                             random_seed=0)
+    gd_lr.fit(X_rm_std, y_std)
+
+
 def test_univariate_stochastic_gradient_descent():
     w_exp = np.array([[0.7]])
     b_exp = np.array([0.0])
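Finally, since every touched constructor now calls `_BaseModel.__init__`, a quick cross-estimator sanity check (not part of the patch, just a sketch) is that `_init_time` exists immediately after construction, before `fit()` is ever called:

```python
# Sanity sketch: every estimator touched by this patch should expose
# _init_time right after construction.
from mlxtend.classifier import (Adaline, LogisticRegression,
                                MultiLayerPerceptron, Perceptron,
                                SoftmaxRegression)
from mlxtend.cluster import Kmeans
from mlxtend.regressor import LinearRegression

estimators = [Adaline(), LogisticRegression(), MultiLayerPerceptron(),
              Perceptron(), SoftmaxRegression(), Kmeans(k=3),
              LinearRegression()]
for est in estimators:
    assert hasattr(est, '_init_time'), type(est).__name__
print('all constructors set _init_time')
```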