Fix scikit-learn warnings.

Yurii Shevchuk committed Oct 14, 2016
1 parent 1e4b340 commit 400bda6
Showing 31 changed files with 51 additions and 50 deletions.
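
For context: scikit-learn 0.18 moved the contents of sklearn.cross_validation and sklearn.grid_search into the new sklearn.model_selection module (the old modules were deprecated in 0.18 and removed in 0.20), and the cross-validation splitters now take only parameters at construction time, with the data handed to a separate split() call. A minimal before/after sketch of the pattern applied throughout this commit; the toy arrays are illustrative, not taken from the repository:

    import numpy as np
    from sklearn.model_selection import train_test_split, StratifiedKFold

    X = np.random.randn(100, 4)
    y = np.repeat([0, 1], 50)

    # Old: from sklearn.cross_validation import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

    # Old: skf = StratifiedKFold(y, n_folds=5), then iterate skf directly.
    skf = StratifiedKFold(n_splits=5)                 # parameters only, no data
    for train_index, test_index in skf.split(X, y):   # data goes to split()
        x_fold, y_fold = X[train_index], y[train_index]
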
2 changes: 1 addition & 1 deletion examples/autoencoder/stacked_conv_autoencoders.py
@@ -3,7 +3,7 @@
import theano
import numpy as np
from sklearn import datasets, metrics
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from neupy import algorithms, layers, environment, surgery

2 changes: 1 addition & 1 deletion examples/boltzmann_machine/rbm_faces_sampling.py
@@ -4,7 +4,7 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split
from skimage.filters import threshold_adaptive
from neupy import algorithms, environment
from neupy.utils import asfloat
4 changes: 2 additions & 2 deletions examples/cnn/mnist_cnn.py
@@ -1,7 +1,7 @@
import theano
import numpy as np
from sklearn.preprocessing import OneHotEncoder
-from sklearn import cross_validation, metrics, datasets
+from sklearn import model_selection, metrics, datasets
from neupy import algorithms, layers, environment


@@ -18,7 +18,7 @@
n_samples = data.shape[0]
data = data.reshape((n_samples, 1, 28, 28))

-x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+x_train, x_test, y_train, y_test = model_selection.train_test_split(
data.astype(np.float32),
target.astype(np.float32),
train_size=(6 / 7.)
2 changes: 1 addition & 1 deletion examples/mlp/boston_price_prediction.py
@@ -1,5 +1,5 @@
from sklearn import datasets, preprocessing
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from neupy import algorithms, layers, estimators, environment

4 changes: 2 additions & 2 deletions examples/mlp/mnist_mlp.py
@@ -1,7 +1,7 @@
import theano
import numpy as np
from sklearn.preprocessing import OneHotEncoder
-from sklearn import cross_validation, metrics, datasets
+from sklearn import model_selection, metrics, datasets
from neupy import algorithms, layers, environment


@@ -17,7 +17,7 @@
data = mnist.data / 255.
data = data - data.mean(axis=0)

-x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+x_train, x_test, y_train, y_test = model_selection.train_test_split(
data.astype(np.float32),
target.astype(np.float32),
train_size=(6 / 7.)
4 changes: 2 additions & 2 deletions examples/mlp/rectangles_mlp.py
@@ -1,4 +1,4 @@
-from sklearn import cross_validation, metrics
+from sklearn import model_selection, metrics
from skdata.larochelle_etal_2007 import dataset
from neupy import algorithms, layers, environment

@@ -9,7 +9,7 @@
rectangle_dataset.fetch(download_if_missing=True)

data, target = rectangle_dataset.classification_task()
-x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+x_train, x_test, y_train, y_test = model_selection.train_test_split(
data, target, train_size=0.5
)

2 changes: 1 addition & 1 deletion examples/rbfn/grnn_params_selection.py
@@ -2,7 +2,7 @@

import numpy as np
from sklearn import datasets, grid_search
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split
from neupy import algorithms, estimators, environment


7 changes: 4 additions & 3 deletions examples/rbfn/music_speech/train.py
@@ -12,7 +12,7 @@
"""
import numpy as np
from neupy import algorithms
-from sklearn import preprocessing, cross_validation, metrics, decomposition
+from sklearn import preprocessing, model_selection, metrics, decomposition
import matplotlib.pyplot as plt
from librosa.feature import mfcc
from sklearn.utils import shuffle
@@ -67,10 +67,11 @@ def extract_features(data, n_fft=2048):

print("\n> Train prediction")

-skf = cross_validation.StratifiedKFold(y_train, n_folds=5)
+skf = model_selection.StratifiedKFold(n_splits=5)
+skf_iterator = skf.split(x_train, y_train)
scores = []

-for i, (train_index, test_index) in enumerate(skf, start=1):
+for i, (train_index, test_index) in enumerate(skf_iterator, start=1):
print("\nK-fold #{}".format(i))
pnnet = algorithms.PNN(std=std, verbose=False)

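
One behavioural detail of the new splitter API, relevant to the loop above: split() returns a one-shot generator, so a second loop over the same iterator silently sees zero folds; call split() again for each fresh pass. A small runnable sketch with illustrative stand-ins for the features and labels in train.py:

    import numpy as np
    from sklearn import model_selection

    x_train = np.random.randn(40, 3)
    y_train = np.repeat([0, 1], 20)

    skf = model_selection.StratifiedKFold(n_splits=5)

    skf_iterator = skf.split(x_train, y_train)
    for train_index, test_index in skf_iterator:
        pass                                  # first pass exhausts the generator

    for train_index, test_index in skf.split(x_train, y_train):
        pass                                  # a new generator for a second pass
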
6 changes: 3 additions & 3 deletions examples/rbfn/pnn_iris.py
@@ -1,6 +1,6 @@
import numpy as np
from sklearn import datasets
-from sklearn.cross_validation import StratifiedKFold
+from sklearn.model_selection import StratifiedKFold

from neupy.algorithms import PNN

@@ -10,12 +10,12 @@
target = dataset.target

test_data_size = 10
-skfold = StratifiedKFold(target, test_data_size)
+skfold = StratifiedKFold(n_splits=test_data_size)
avarage_result = 0

print("> Start classify iris dataset")

-for i, (train, test) in enumerate(skfold, start=1):
+for i, (train, test) in enumerate(skfold.split(data, target), start=1):
x_train, x_test = data[train], data[test]
y_train, y_test = target[train], target[test]

2 changes: 1 addition & 1 deletion neupy/algorithms/associative/hebb.py
@@ -23,7 +23,7 @@ class HebbRule(BaseStepAssociative):
----------
decay_rate : float
Decay rate is control your network weights. It helps network
-'forgote' information and control weight sizes. Without this
+'forget' information and control weight sizes. Without this
parameter network weight will grow. Defaults to ``0.2``.
{BaseAssociative.n_inputs}
{BaseAssociative.n_outputs}
2 changes: 1 addition & 1 deletion neupy/algorithms/ensemble/dan.py
@@ -35,7 +35,7 @@ class DynamicallyAveragedNetwork(BaseEnsemble):
--------
>>> import numpy as np
>>> from sklearn import datasets, metrics
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from neupy import algorithms
>>>
>>> np.random.seed(50)
2 changes: 1 addition & 1 deletion neupy/algorithms/ensemble/mixture_of_experts.py
@@ -37,7 +37,7 @@ class MixtureOfExperts(BaseEnsemble):
--------
>>> import numpy as np
>>> from sklearn import datasets, preprocessing
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from neupy import algorithms, layers
>>> from neupy.estimators import rmsle
>>>
2 changes: 1 addition & 1 deletion neupy/algorithms/gd/conjgrad.py
@@ -79,7 +79,7 @@ class ConjugateGradient(NoMultipleStepSelection, GradientDescent):
Examples
--------
>>> from sklearn import datasets, preprocessing
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from neupy import algorithms, layers, estimators, environment
>>>
>>> environment.reproducible()
2 changes: 1 addition & 1 deletion neupy/algorithms/gd/hessdiag.py
@@ -54,7 +54,7 @@ class HessianDiagonal(NoMultipleStepSelection, GradientDescent):
Diabets dataset example
>>> import numpy as np
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from sklearn import datasets, preprocessing
>>> from neupy import algorithms, layers, environment
>>> from neupy.estimators import rmsle
2 changes: 1 addition & 1 deletion neupy/algorithms/rbfn/grnn.py
@@ -50,7 +50,7 @@ class GRNN(BaseNetwork, LazyLearningMixin):
Examples
--------
>>> from sklearn import datasets, preprocessing
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from neupy import algorithms, estimators, environment
>>>
>>> environment.reproducible()
2 changes: 1 addition & 1 deletion neupy/algorithms/rbfn/pnn.py
@@ -57,7 +57,7 @@ class PNN(BaseNetwork, LazyLearningMixin, MinibatchTrainingMixin):
>>> import numpy as np
>>>
>>> from sklearn import datasets, metrics
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from neupy import algorithms, environment
>>>
>>> environment.reproducible()
2 changes: 1 addition & 1 deletion neupy/algorithms/step_update/linear_search.py
@@ -31,7 +31,7 @@ class LinearSearch(SingleStepConfigurable):
Examples
--------
>>> from sklearn import datasets, preprocessing
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>> from neupy import algorithms, layers, estimators, environment
>>>
>>> environment.reproducible()
2 changes: 1 addition & 1 deletion requirements/test.txt
@@ -1,4 +1,4 @@
-scikit-learn>=0.15.2
+scikit-learn>=0.18.0
pandas==0.16.0
Pillow==3.0.0

2 changes: 1 addition & 1 deletion site/2015/07/04/boston_house_prices_dataset.rst
@@ -243,7 +243,7 @@ We use 85% of data for train.

.. code-block:: python
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split
from neupy import environment
environment.reproducible()
4 changes: 2 additions & 2 deletions site/docs/quickstart.rst
@@ -15,7 +15,7 @@ First of all we need to load data.

.. code-block:: python
->>> from sklearn import datasets, cross_validation
+>>> from sklearn import datasets, model_selection
>>> mnist = datasets.fetch_mldata('MNIST original')
>>> data, target = mnist.data, mnist.target
@@ -43,7 +43,7 @@ samples for training and 10,000 for test.
>>> from neupy import environment
>>> import numpy as np
->>> from sklearn.cross_validation import train_test_split
+>>> from sklearn.model_selection import train_test_split
>>>
>>> environment.reproducible()
>>>
2 changes: 1 addition & 1 deletion tests/algorithms/gd/test_levenberg_marquardt.py
@@ -2,7 +2,7 @@
import theano
import theano.tensor as T
from sklearn import datasets, preprocessing
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split

from neupy import algorithms, layers
from neupy.utils import asfloat
4 changes: 2 additions & 2 deletions tests/algorithms/gd/test_quickprop.py
@@ -1,6 +1,6 @@
from functools import partial

-from sklearn import datasets, cross_validation, preprocessing
+from sklearn import datasets, model_selection, preprocessing

from neupy import algorithms

@@ -16,7 +16,7 @@ def setUp(self):
random_state=33)
target_scaler = preprocessing.MinMaxScaler()
target = target_scaler.fit_transform(target.reshape(-1, 1))
-self.data = cross_validation.train_test_split(data, target,
+self.data = model_selection.train_test_split(data, target,
train_size=0.75)
self.connection = (5, 10, 1)

2 changes: 1 addition & 1 deletion tests/algorithms/rbfn/test_grnn.py
@@ -1,6 +1,6 @@
import numpy as np
from sklearn import datasets, metrics
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split

from neupy import algorithms
from base import BaseTestCase
6 changes: 3 additions & 3 deletions tests/algorithms/rbfn/test_pnn.py
@@ -3,7 +3,7 @@
import numpy as np
from sklearn import datasets
from sklearn import metrics
-from sklearn.cross_validation import StratifiedKFold, train_test_split
+from sklearn.model_selection import StratifiedKFold, train_test_split

from neupy import algorithms
from base import BaseTestCase
@@ -36,10 +36,10 @@ def test_simple_pnn(self):
target = dataset.target

test_data_size = 10
-skfold = StratifiedKFold(target, test_data_size)
+skfold = StratifiedKFold(n_splits=test_data_size)
avarage_result = 0

-for train, test in skfold:
+for train, test in skfold.split(data, target):
x_train, x_test = data[train], data[test]
y_train, y_test = target[train], target[test]

2 changes: 1 addition & 1 deletion tests/algorithms/steps/test_linear_search.py
@@ -1,6 +1,6 @@
import numpy as np
from sklearn import datasets, preprocessing
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split

from neupy import algorithms, layers
from neupy.estimators import rmsle
2 changes: 1 addition & 1 deletion tests/compatibilities/test_pandas.py
@@ -1,6 +1,6 @@
import pandas as pd
from sklearn import datasets, preprocessing
-from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split
from neupy import algorithms, layers, estimators

from base import BaseTestCase
6 changes: 3 additions & 3 deletions tests/compatibilities/test_sklearn_compatibility.py
@@ -1,8 +1,8 @@
from operator import itemgetter

import numpy as np
-from sklearn import datasets, preprocessing, metrics, grid_search
-from sklearn.cross_validation import train_test_split
+from sklearn import datasets, preprocessing, metrics, model_selection
+from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from neupy import algorithms, layers
from neupy.estimators import rmsle
@@ -81,7 +81,7 @@ def scorer(network, X, y):

self.assertAlmostEqual(0.513, error, places=3)

-random_search = grid_search.RandomizedSearchCV(
+random_search = model_selection.RandomizedSearchCV(
grnnet,
param_distributions={'std': np.arange(1e-2, 0.1, 1e-4)},
n_iter=10,
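
sklearn.grid_search moved into sklearn.model_selection in the same release, which is why RandomizedSearchCV above only changes its module prefix. A minimal sketch of the renamed class; the estimator and parameter range here are placeholders, not taken from the test suite:

    import numpy as np
    from sklearn.model_selection import RandomizedSearchCV
    from sklearn.neighbors import KNeighborsClassifier

    X = np.random.randn(60, 3)
    y = np.repeat([0, 1], 30)

    random_search = RandomizedSearchCV(
        KNeighborsClassifier(),                          # placeholder estimator
        param_distributions={'n_neighbors': np.arange(1, 10)},
        n_iter=5,
    )
    random_search.fit(X, y)           # same fit/score surface as the old class
    print(random_search.best_params_)
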
6 changes: 3 additions & 3 deletions tests/data.py
@@ -1,7 +1,7 @@
import numpy as np

from sklearn import datasets
-from sklearn.cross_validation import StratifiedShuffleSplit
+from sklearn.model_selection import StratifiedShuffleSplit


xor_input_train = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
@@ -68,10 +68,10 @@ def simple_classification(n_samples=100, n_features=10, random_state=33):
X, y = datasets.make_classification(n_samples=n_samples,
n_features=n_features,
random_state=random_state)
-shuffle_split = StratifiedShuffleSplit(y, 1, train_size=0.6,
+shuffle_split = StratifiedShuffleSplit(n_splits=1, train_size=0.6,
random_state=random_state)

-train_index, test_index = next(shuffle_split.__iter__())
+train_index, test_index = next(shuffle_split.split(X, y))
x_train, x_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]

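
The StratifiedShuffleSplit change above follows the same pattern: the labels and split count move out of the constructor, and split(X, y) replaces iterating the object directly. A short sketch mirroring the new tests/data.py usage, with toy data standing in for the generated classification set:

    import numpy as np
    from sklearn.model_selection import StratifiedShuffleSplit

    X = np.random.randn(100, 10)
    y = np.repeat([0, 1], 50)

    shuffle_split = StratifiedShuffleSplit(n_splits=1, train_size=0.6,
                                           random_state=33)
    train_index, test_index = next(shuffle_split.split(X, y))
    x_train, x_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
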
6 changes: 3 additions & 3 deletions tests/ensemble/test_dan.py
@@ -1,4 +1,4 @@
-from sklearn import datasets, cross_validation, metrics
+from sklearn import datasets, model_selection, metrics
from neupy import algorithms, init
from neupy.layers import Relu, Sigmoid, Input

@@ -9,7 +9,7 @@ class DANTestCase(BaseTestCase):
def test_handle_errors(self):
data, target = datasets.make_classification(300, n_features=4,
n_classes=2)
-x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+x_train, x_test, y_train, y_test = model_selection.train_test_split(
data, target, train_size=0.7
)

@@ -46,7 +46,7 @@ def test_handle_errors(self):
def test_dan(self):
data, target = datasets.make_classification(300, n_features=4,
n_classes=2)
-x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+x_train, x_test, y_train, y_test = model_selection.train_test_split(
data, target, train_size=0.7
)

4 changes: 2 additions & 2 deletions tests/ensemble/test_mixtures_of_experts.py
@@ -1,5 +1,5 @@
import numpy as np
-from sklearn import datasets, preprocessing, cross_validation
+from sklearn import datasets, preprocessing, model_selection
from neupy import algorithms, layers
from neupy.utils import asfloat
from neupy.estimators import rmsle
@@ -121,7 +121,7 @@ def test_mixture_of_experts(self):

input_scaler = preprocessing.MinMaxScaler((-1, 1))
output_scaler = preprocessing.MinMaxScaler()
-x_train, x_test, y_train, y_test = cross_validation.train_test_split(
+x_train, x_test, y_train, y_test = model_selection.train_test_split(
input_scaler.fit_transform(data),
output_scaler.fit_transform(target.reshape(-1, 1)),
train_size=0.8
