
Commit

Merge branch 'release/v0.7.0'
Yurii Shevchuk committed Dec 2, 2018
2 parents 025488e + 8e98f94 commit 58aa799
Showing 307 changed files with 7,800 additions and 7,066 deletions.
5 changes: 4 additions & 1 deletion .gitignore
@@ -11,6 +11,8 @@ build/
# Tests
*-failed-diff.png
.coverage
.coverage.*
.pytest_cache

# Other
.tox/*
@@ -22,6 +24,7 @@ notebooks/.ipynb_checkpoints/*
# Storage files
*.dill
*.h5
*.hdf5
*.pickle

# Vim
@@ -40,6 +43,6 @@ notebooks/caltech_101_images/*

# Pickle files important for VIN example
!examples/reinforcement_learning/vin/data/*.pickle
!examples/reinforcement_learning/vin/models/*.pickle
!examples/reinforcement_learning/vin/models/*.hdf5
examples/reinforcement_learning/vin/data/gridworld-16-*.pickle
examples/reinforcement_learning/vin/data/gridworld-28-*.pickle
6 changes: 3 additions & 3 deletions .travis.yml
@@ -1,7 +1,7 @@
language: python
cache: pip
python:
- "2.7"
- "3.5"
- "3.6"

install:
@@ -20,6 +20,7 @@ install:

- export MKL_THREADING_LAYER=GNU
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy>=1.8.0 scipy>=0.14.0 mkl-service>=1.1.2
- conda install -n test-environment tensorflow
- source activate test-environment
- pip install -r requirements/travis.txt
- python setup.py develop --no-deps
@@ -33,8 +34,7 @@ before_script:
# - sleep 3 # give xvfb some time to start

script:
- export THEANO_FLAGS='floatX=float32,gcc.cxxflags="-march=core2"' && export SKIP_PLOT_TEST='YES' && nosetests
- export THEANO_FLAGS='floatX=float64,gcc.cxxflags="-march=core2"' && export SKIP_PLOT_TEST='YES' && nosetests
- export SKIP_PLOT_TEST='YES' && pytest
- flake8 neupy

after_success:
10 changes: 2 additions & 8 deletions README.rst
@@ -21,17 +21,11 @@
</a>
</div>

.. raw:: html

<h3>⚠️ NeuPy will use Tensorflow as a backend starting from version 0.7.0</h3>
<p><i>About a year ago, it was officially announced that <a href="https://groups.google.com/forum/#!msg/theano-users/7Poq8BZutbY/rNCIfvAEAwAJ">Theano will stop being developed</a>. New features are no longer being added, and soon bug fixes will stop as well. NeuPy cannot evolve with a large number of features that depend on a dead library. For this reason, NeuPy was moved to Tensorflow.</i></p>
<p><i>All the Theano-based code has been fully migrated to Tensorflow and can be tested from the <a href="https://github.com/itdxer/neupy/tree/release/v0.7.0">release/v0.7.0</a> branch.</i></p>


NeuPy v0.6.5
NeuPy v0.7.0
============

NeuPy is a Python library for Artificial Neural Networks. NeuPy supports many different types of Neural Networks from a simple perceptron to deep learning models.
NeuPy is a Python library for prototyping and building neural networks. NeuPy uses Tensorflow as a computational backend for deep learning models.

Installation
------------
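For context, the Tensorflow-backed API keeps the same high-level style used across the examples in this commit: a network is a list of layers passed to a training algorithm. The following is a minimal sketch with randomly generated placeholder data (the data and network here are illustrative and not part of the repository):

import numpy as np
from neupy import algorithms, layers

# Placeholder data standing in for MNIST-like inputs:
# 784 features per sample, 10 one-hot encoded classes.
x_train = np.random.random((256, 784)).astype(np.float32)
y_train = np.eye(10)[np.random.randint(0, 10, 256)]

network = algorithms.Momentum(
    [
        layers.Input(784),
        layers.Relu(128),
        layers.Softmax(10),
    ],
    step=0.1,
    batch_size=64,
    error='categorical_crossentropy',
    verbose=True,
)
network.train(x_train, y_train, epochs=5)
y_predicted = network.predict(x_train)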
55 changes: 36 additions & 19 deletions examples/autoencoder/conv_autoencoder.py
@@ -5,64 +5,81 @@


environment.reproducible()
environment.speedup()

mnist = datasets.fetch_mldata('MNIST original')

data = (mnist.data / 255.).astype(np.float32)

np.random.shuffle(data)
x_train, x_test = data[:60000], data[60000:]
x_train_4d = x_train.reshape((60000, 1, 28, 28))
x_test_4d = x_test.reshape((10000, 1, 28, 28))
x_train_4d = x_train.reshape((60000, 28, 28, 1))
x_test_4d = x_test.reshape((10000, 28, 28, 1))

conv_autoencoder = algorithms.Momentum(
[
layers.Input((1, 28, 28)),
layers.Input((28, 28, 1)),

layers.Convolution((16, 3, 3)) > layers.Relu(),
layers.Convolution((16, 3, 3)) > layers.Relu(),
layers.Convolution((3, 3, 16)) > layers.Relu(),
layers.Convolution((3, 3, 16)) > layers.Relu(),
layers.MaxPooling((2, 2)),

layers.Convolution((32, 3, 3)) > layers.Relu(),
layers.Convolution((3, 3, 32)) > layers.Relu(),
layers.MaxPooling((2, 2)),

layers.Reshape(),

layers.Relu(128),
layers.Relu(16),

# Notice that every operation in the decoder reverts the changes
# from the encoder. Upscale replaces MaxPooling, and a convolution
# with large padding, which increases the size of the image, replaces
# the convolution without padding.
layers.Relu(128),

# 800 (= 5 * 5 * 32) is the flattened shape that came out of the
# Reshape layer in the encoder
layers.Relu(800),

layers.Reshape((32, 5, 5)),
layers.Reshape((5, 5, 32)),

# Upscaling layer reverts changes from the max pooling layer
layers.Upscale((2, 2)),
layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),

# If a convolution with zero padding in the encoder reduces the size
# of the image, then a convolution with padding=2 increases it. It
# just does the opposite of the matching encoder convolution
layers.Convolution((3, 3, 16), padding=2) > layers.Relu(),

layers.Upscale((2, 2)),
layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),
layers.Convolution((1, 3, 3), padding='full') > layers.Sigmoid(),
layers.Convolution((3, 3, 16), padding=2) > layers.Relu(),
layers.Convolution((3, 3, 1), padding=2) > layers.Sigmoid(),

# We have to convert the 4d tensor to 2d in order to be
# able to compute RMSE.
layers.Reshape(),
],

verbose=True,
step=0.1,
momentum=0.99,
shuffle_data=True,
step=0.02,
momentum=0.9,
batch_size=128,
error='rmse',

shuffle_data=True,
verbose=True,

decay_rate=0.01,
addons=[algorithms.WeightDecay],
)
conv_autoencoder.architecture()
conv_autoencoder.train(x_train_4d, x_train, x_test_4d, x_test, epochs=100)
conv_autoencoder.train(x_train_4d, x_train, x_test_4d, x_test, epochs=15)

n_samples = 4
n_samples = 6
images = x_test[:n_samples] * 255.
predicted_images = conv_autoencoder.predict(x_test_4d[:n_samples])
predicted_images = predicted_images * 255.

# Compare real and reconstructed images
fig, axes = plt.subplots(4, 2, figsize=(12, 8))
fig, axes = plt.subplots(n_samples, 2, figsize=(12, 8))
iterator = zip(axes, images, predicted_images)

for (left_ax, right_ax), real_image, predicted_image in iterator:
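The recurring change in this file is the move from Theano's channels-first layout to Tensorflow's channels-last layout: image batches go from (n, 1, 28, 28) to (n, 28, 28, 1), and convolution shapes from (n_filters, rows, cols) to (rows, cols, n_filters). A minimal sketch of the new convention, with placeholder data that is not part of the commit:

import numpy as np
from neupy import layers

# Channels-last layout expected by the Tensorflow backend:
# (n_samples, rows, cols, channels) instead of (n_samples, channels, rows, cols)
images = np.random.random((32, 784)).astype(np.float32)
images_4d = images.reshape((32, 28, 28, 1))

# Convolution shape is now (rows, cols, n_filters): a 3x3 kernel that
# produces 16 output channels is written as (3, 3, 16)
encoder = layers.join(
    layers.Input((28, 28, 1)),
    layers.Convolution((3, 3, 16)) > layers.Relu(),
    layers.MaxPooling((2, 2)),
)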
2 changes: 0 additions & 2 deletions examples/autoencoder/denoising_autoencoder.py
@@ -1,12 +1,10 @@
import theano
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from neupy import algorithms, layers, environment


environment.reproducible()
theano.config.floatX = 'float32'

mnist = datasets.fetch_mldata('MNIST original')

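After this deletion, the example needs no backend-specific configuration; seeding is the only setup left. As implied by the remaining lines above, the preamble of the script now reduces to:

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from neupy import algorithms, layers, environment

# theano.config.floatX is gone; reproducible seeding is all that remains
environment.reproducible()

mnist = datasets.fetch_mldata('MNIST original')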
67 changes: 46 additions & 21 deletions examples/autoencoder/stacked_conv_autoencoders.py
@@ -1,6 +1,5 @@
from __future__ import division

import theano
import numpy as np
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
@@ -9,8 +8,6 @@


environment.reproducible()
theano.config.floatX = 'float32'
theano.config.allow_gc = False

mnist = datasets.fetch_mldata('MNIST original')
data = mnist.data / 255.
@@ -19,6 +16,9 @@
target = mnist.target.reshape((-1, 1))
target = target_scaler.fit_transform(target).todense()

# Originally there are 70,000 images in the MNIST dataset, but we
# will use only 1,000 labeled training examples. All the data that
# doesn't have labels is used to train features in the convolutional
# autoencoder.
n_labeled = 1000
n_samples = len(data)
n_unlabeled = n_samples - n_labeled
@@ -29,17 +29,20 @@
test_size=(1 - n_labeled / n_samples)
)

x_labeled_4d = x_labeled.reshape((n_labeled, 1, 28, 28))
x_unlabeled_4d = x_unlabeled.reshape((n_unlabeled, 1, 28, 28))
x_labeled_4d = x_labeled.reshape((n_labeled, 28, 28, 1))
x_unlabeled_4d = x_unlabeled.reshape((n_unlabeled, 28, 28, 1))

# We will use the features trained in the encoder as the first part
# of the future classifier. At first we pre-train them with unlabeled
# data, since we have a lot of it and hope to learn some common
# features from it.
encoder = layers.join(
layers.Input((1, 28, 28)),
layers.Input((28, 28, 1)),

layers.Convolution((16, 3, 3)) > layers.Relu(),
layers.Convolution((16, 3, 3)) > layers.Relu(),
layers.Convolution((3, 3, 16)) > layers.Relu(),
layers.Convolution((3, 3, 16)) > layers.Relu(),
layers.MaxPooling((2, 2)),

layers.Convolution((32, 3, 3)) > layers.Relu(),
layers.Convolution((3, 3, 32)) > layers.Relu(),
layers.MaxPooling((2, 2)),

layers.Reshape(),
@@ -48,18 +51,21 @@
layers.Relu(128),
)

# Notice that every operation in the decoder reverts the changes from
# the encoder. Upscale replaces MaxPooling, and a convolution with
# large padding, which increases the size of the image, replaces the
# convolution without padding.
decoder = layers.join(
layers.Relu(256),
layers.Relu(32 * 5 * 5),

layers.Reshape((32, 5, 5)),
layers.Reshape((5, 5, 32)),

layers.Upscale((2, 2)),
layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),
layers.Convolution((3, 3, 16), padding=2) > layers.Relu(),

layers.Upscale((2, 2)),
layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),
layers.Convolution((1, 3, 3), padding='full') > layers.Sigmoid(),
layers.Convolution((3, 3, 16), padding=2) > layers.Relu(),
layers.Convolution((3, 3, 1), padding=2) > layers.Sigmoid(),

layers.Reshape(),
)
@@ -71,14 +77,20 @@
momentum=0.99,
shuffle_data=True,
batch_size=64,
error='binary_crossentropy',
error='rmse',
)
conv_autoencoder.architecture()
conv_autoencoder.train(x_unlabeled_4d, x_unlabeled,
x_labeled_4d, x_labeled, epochs=10)
conv_autoencoder.train(
x_unlabeled_4d, x_unlabeled,
x_labeled_4d, x_labeled,
epochs=1,
)

x_labeled_encoded = encoder.output(x_labeled_4d).eval()
x_unlabeled_encoded = encoder.output(x_unlabeled_4d).eval()
# In order to speed up training of the upper layers we pre-compute
# the output from the encoder. In this way we won't need to
# regenerate the encoded inputs on every epoch.
x_labeled_encoded = encoder.predict(x_labeled_4d)
x_unlabeled_encoded = encoder.predict(x_unlabeled_4d)

classifier_network = layers.join(
layers.PRelu(512),
@@ -95,19 +107,32 @@
error='categorical_crossentropy',
)
encoder_classifier.architecture()
encoder_classifier.train(x_labeled_encoded, y_labeled,
x_unlabeled_encoded, y_unlabeled, epochs=100)
encoder_classifier.train(
x_labeled_encoded, y_labeled,
x_unlabeled_encoded, y_unlabeled,
epochs=400,
)

# The final part of training joins the encoder and the classifier
# layers in order to fine-tune the network parameters before
# finalizing the predictions
classifier = algorithms.MinibatchGradientDescent(
encoder > classifier_network,
verbose=True,
step=0.01,
step=0.005,
shuffle_data=True,
batch_size=64,
error='categorical_crossentropy',

decay_rate=0.02,
addons=[algorithms.WeightDecay],
)
classifier.architecture()
classifier.train(x_labeled_4d, y_labeled, epochs=100)
classifier.train(
x_labeled_4d, y_labeled,
x_unlabeled_4d, y_unlabeled,
epochs=1,
)

unlabeled_predicted = classifier.predict(x_unlabeled_4d).argmax(axis=1)
y_unlabeled_classes = np.asarray(y_unlabeled).argmax(axis=1)
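Taken together, the updated script trains in three stages. The recap below is a condensed sketch that only restates calls already shown in the diff (names mirror the script above; data preparation is omitted):

# Stage 1: pre-train the convolutional autoencoder on the unlabeled
# images, so the encoder learns general-purpose features.
conv_autoencoder.train(
    x_unlabeled_4d, x_unlabeled,
    x_labeled_4d, x_labeled,
    epochs=1,
)

# Stage 2: train only the classifier head on pre-computed encoder
# outputs, which avoids re-running the encoder on every epoch.
x_labeled_encoded = encoder.predict(x_labeled_4d)
x_unlabeled_encoded = encoder.predict(x_unlabeled_4d)
encoder_classifier.train(
    x_labeled_encoded, y_labeled,
    x_unlabeled_encoded, y_unlabeled,
    epochs=400,
)

# Stage 3: join the encoder and the classifier head
# (encoder > classifier_network) and fine-tune end-to-end on raw images.
classifier.train(
    x_labeled_4d, y_labeled,
    x_unlabeled_4d, y_unlabeled,
    epochs=1,
)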
