specified keras backends in travis config (#49)
* specified keras backends in travis config

* squeeze singleton dimension

* handle keras backend differences between tensorflow and theano

* dtype fixes for lasagne model

* corrected test name

* set theano.config.floatX to float32

* fixed travis env config

* fixed dtypes in preprocessing
jonasrauber committed Jul 25, 2017
1 parent e77e20b commit 31d0bea
Showing 11 changed files with 48 additions and 24 deletions.
13 changes: 11 additions & 2 deletions .travis.yml
@@ -5,12 +5,20 @@ python:
- 2.7
- 3.5
- 3.6
env:
global:
- THEANO_FLAGS='floatX=float32'
matrix:
- KERAS_BACKEND=tensorflow
- KERAS_BACKEND=theano
before_install:
- pip install -U pip
install:
- travis_wait travis_retry pip install --upgrade numpy
- travis_wait travis_retry pip install --upgrade scipy
- if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then travis_wait travis_retry pip install -r requirements-dev-py2.txt; else travis_wait travis_retry pip install -r requirements-dev.txt; fi
- travis_wait travis_retry pip install -r requirements-dev.txt
- if [[ $TRAVIS_PYTHON_VERSION == 2.7 ]]; then pip install -r requirements-dev-py2.txt; fi
- if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install -r requirements-dev-py3.txt; fi
- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install -r requirements-dev-py3.txt; fi
- travis_wait travis_retry pip install --upgrade tensorflow
- travis_wait travis_retry pip install --upgrade theano
- travis_wait travis_retry pip install --upgrade https://github.com/Lasagne/Lasagne/archive/master.zip
@@ -19,6 +27,7 @@ install:
- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install http://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp36-cp36m-linux_x86_64.whl; fi
- travis_wait travis_retry pip install --upgrade keras
- travis_wait travis_retry pip install --upgrade mxnet==0.10.0
install:
- pip install -e .
script:
- pytest
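The matrix above runs the test suite once per Keras backend; Keras reads the backend from the KERAS_BACKEND environment variable and Theano reads THEANO_FLAGS, so both must be set before the libraries are imported. A minimal sketch of the same selection done locally (assumes keras and theano are installed; the prints are illustrative):

    import os
    # must be set before keras/theano are imported, same as the Travis env matrix
    os.environ['KERAS_BACKEND'] = 'theano'          # or 'tensorflow'
    os.environ['THEANO_FLAGS'] = 'floatX=float32'   # keep everything in float32

    from keras import backend as K
    import theano

    print(K.backend())           # 'theano'
    print(theano.config.floatX)  # 'float32'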
13 changes: 10 additions & 3 deletions foolbox/models/base.py
@@ -38,6 +38,7 @@ def __init__(self, bounds, channel_axis, preprocessing=(0, 1)):
self._bounds = bounds
assert channel_axis in [0, 1, 2, 3]
self._channel_axis = channel_axis
assert len(preprocessing) == 2
self._preprocessing = preprocessing

def __enter__(self):
@@ -52,11 +53,17 @@ def bounds(self):
def channel_axis(self):
return self._channel_axis

def _process_input(self, input):
return (input - self._preprocessing[0]) / self._preprocessing[1]
def _process_input(self, input_):
result = (input_ - self._preprocessing[0]) / self._preprocessing[1]
result = result.astype(input_.dtype, copy=False)
assert result.dtype == input_.dtype
return result

def _process_gradient(self, gradient):
return gradient / self._preprocessing[1]
result = gradient / self._preprocessing[1]
result = result.astype(gradient.dtype, copy=False)
assert result.dtype == gradient.dtype
return result

@abstractmethod
def batch_predictions(self, images):
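The astype calls above guard against NumPy's silent upcasting: subtracting a float64 preprocessing array (e.g. per-channel means) from a float32 image yields a float64 result. A small standalone illustration of the effect (not foolbox code; the mean values are made up):

    import numpy as np

    image = np.zeros((3, 5, 5), dtype=np.float32)
    mean = np.array([104.0, 117.0, 123.0]).reshape((3, 1, 1))  # float64 by default
    std = 1.0

    result = (image - mean) / std
    print(result.dtype)   # float64 -- broadcasting against float64 upcasts

    result = result.astype(image.dtype, copy=False)
    print(result.dtype)   # float32 -- what the backends expect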
16 changes: 15 additions & 1 deletion foolbox/models/keras.py
@@ -67,8 +67,22 @@ def __init__(
loss = K.sparse_categorical_crossentropy(
predictions, label_input, from_logits=predictions_are_logits)

# sparse_categorical_crossentropy returns 1-dim tensor,
# gradients wants 0-dim tensor (for some backends)
loss = K.squeeze(loss, axis=0)

grads = K.gradients(loss, images_input)
grad = grads[0]
if K.backend() == 'tensorflow':
# tensorflow backend returns a list with the gradient
# as the only element, even if loss is a single scalar
# tensor;
# theano always returns the gradient itself (and requires
# that loss is a single scalar tensor)
assert isinstance(grads, list)
grad = grads[0]
else:
assert not isinstance(grads, list)
grad = grads

self._loss_fn = K.function(
[images_input, label_input],
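The branch above works around a real backend difference: with TensorFlow, K.gradients returns a Python list, while with Theano it returns the gradient tensor directly when differentiating with respect to a single tensor. A hedged sketch of the same normalization as a standalone helper (the function name is illustrative, not part of foolbox):

    from keras import backend as K

    def single_gradient(loss, wrt):
        # normalize K.gradients across backends: TensorFlow wraps the result
        # in a list, Theano hands back the tensor itself for a single input
        grads = K.gradients(loss, wrt)
        if isinstance(grads, list):
            assert len(grads) == 1
            return grads[0]
        return grads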
2 changes: 2 additions & 0 deletions foolbox/models/lasagne.py
@@ -77,13 +77,15 @@ def predictions_and_gradient(self, image, label):
gradient = np.squeeze(gradient, axis=0)
assert predictions.shape == (self.num_classes(),)
assert gradient.shape == image.shape
gradient = gradient.astype(image.dtype, copy=False)
return predictions, gradient

def gradient(self, image, label):
label = np.array(label, dtype=np.int32)
gradient = self._gradient_fn(image[np.newaxis], label[np.newaxis])
gradient = np.squeeze(gradient, axis=0)
assert gradient.shape == image.shape
gradient = gradient.astype(image.dtype, copy=False)
return gradient

def num_classes(self):
2 changes: 2 additions & 0 deletions foolbox/models/theano.py
@@ -74,6 +74,7 @@ def predictions_and_gradient(self, image, label):
gradient = np.squeeze(gradient, axis=0)
assert predictions.shape == (self.num_classes(),)
assert gradient.shape == image.shape
assert gradient.dtype == image.dtype
return predictions, gradient

def gradient(self, image, label):
@@ -83,6 +84,7 @@ def gradient(self, image, label):
gradient = self._process_gradient(gradient)
gradient = np.squeeze(gradient, axis=0)
assert gradient.shape == image.shape
assert gradient.dtype == image.dtype
return gradient

def num_classes(self):
4 changes: 2 additions & 2 deletions foolbox/tests/test_models_keras.py
@@ -168,8 +168,8 @@ def test_keras_model_gradients():

_, g1 = model.predictions_and_gradient(test_image, test_label)

l1 = model._loss_fn([test_image[None] - eps / 2 * g1, [test_label]])[0][0]
l2 = model._loss_fn([test_image[None] + eps / 2 * g1, [test_label]])[0][0]
l1 = model._loss_fn([test_image[None] - eps / 2 * g1, [test_label]])[0]
l2 = model._loss_fn([test_image[None] + eps / 2 * g1, [test_label]])[0]

assert 1e5 * (l2 - l1) > 1

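The indexing changes because the loss is now a 0-dim scalar after K.squeeze, so one less [0] is needed. The surrounding test is a finite-difference check: nudging the image by ±eps/2 along the analytic gradient g1 should change the loss by roughly eps * ||g1||^2 > 0. A minimal numpy sketch of that kind of check (the quadratic loss is illustrative only):

    import numpy as np

    def loss(x):
        # illustrative scalar loss with a known gradient: grad = 2 * x
        return np.sum(x.astype(np.float64) ** 2)

    x = np.random.rand(5, 5, 3).astype(np.float32)
    g = 2 * x                      # analytic gradient of the loss above
    eps = 1e-3

    l1 = loss(x - eps / 2 * g)
    l2 = loss(x + eps / 2 * g)

    # moving along the gradient must increase the loss; for this quadratic
    # loss the difference equals eps * ||g||^2
    assert l2 - l1 > 0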
4 changes: 2 additions & 2 deletions foolbox/tests/test_models_lasagne.py
@@ -16,7 +16,7 @@ def mean_brightness_net(images):
logits = GlobalPoolLayer(images)
return logits

images_var = T.tensor4('images')
images_var = T.tensor4('images', dtype='float32')
images = InputLayer((None, channels, 5, 5), images_var)
logits = mean_brightness_net(images)

@@ -56,7 +56,7 @@ def mean_brightness_net(images):
logits = GlobalPoolLayer(images)
return logits

images_var = T.tensor4('images')
images_var = T.tensor4('images', dtype='float32')
images = InputLayer((None, channels, 5, 5), images_var)
logits = mean_brightness_net(images)

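The explicit dtype matters because a Theano tensor variable created without one follows theano.config.floatX, which is float64 unless overridden; pinning it to float32 keeps the test independent of the floatX setting. A small illustration (assumes theano is installed):

    import theano.tensor as T

    x_default = T.tensor4('images')                  # dtype follows theano.config.floatX
    x_fixed = T.tensor4('images', dtype='float32')   # pinned, independent of floatX

    print(x_default.dtype)   # 'float32' only if floatX=float32, otherwise 'float64'
    print(x_fixed.dtype)     # always 'float32'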
2 changes: 1 addition & 1 deletion foolbox/tests/test_models_theano.py
@@ -46,7 +46,7 @@ def mean_brightness_net(images):


@pytest.mark.parametrize('num_classes', [10, 1000])
def test_lasagne_gradient(num_classes):
def test_theano_gradient(num_classes):
bounds = (0, 255)
channels = num_classes

12 changes: 1 addition & 11 deletions requirements-dev-py2.txt
@@ -1,12 +1,2 @@
numpydoc >= 0.6.0
sphinx >= 1.6.2
sphinx-autobuild >= 0.6.0
sphinx_rtd_theme >= 0.2.4
twine >= 1.9.1
pytest >= 3.1.0
pytest-cov >= 2.5.1
flake8 >= 3.3.0
python-coveralls >= 2.9.1
pillow >= 4.1.1
flake8 >= 3.3.0
-r requirements-dev.txt
mock >= 2.0.0
2 changes: 2 additions & 0 deletions requirements-dev-py3.txt
@@ -0,0 +1,2 @@
-r requirements-dev.txt
mypy >= 0.511
2 changes: 0 additions & 2 deletions requirements-dev.txt
@@ -6,7 +6,5 @@ twine >= 1.9.1
pytest >= 3.1.0
pytest-cov >= 2.5.1
flake8 >= 3.3.0
mypy >= 0.511
python-coveralls >= 2.9.1
pillow >= 4.1.1
flake8 >= 3.3.0
