Commit
Merge pull request #117 from bartvm/fixes
Miscellaneous fixes reported by Landscape.io
bartvm committed Jan 20, 2015
2 parents 4ff20da + fc741f3 commit 7bbd347
Showing 26 changed files with 211 additions and 107 deletions.
1 change: 1 addition & 0 deletions .landscape-requirements.txt
@@ -0,0 +1 @@
scipy
3 changes: 3 additions & 0 deletions .landscape.yaml
@@ -0,0 +1,3 @@
ignore-paths: ['docs', 'examples']
requirements:
- .landscape-requirements.txt
File renamed without changes.
3 changes: 3 additions & 0 deletions README.rst
@@ -7,6 +7,9 @@
.. image:: https://readthedocs.org/projects/blocks/badge/?version=latest
:target: https://blocks.readthedocs.org/

.. image:: https://landscape.io/github/bartvm/blocks/master/landscape.svg
:target: https://landscape.io/github/bartvm/blocks/master

Blocks
======
Blocks is a framework that helps you build neural network models on top of
8 changes: 5 additions & 3 deletions blocks/algorithms/__init__.py
@@ -121,7 +121,8 @@ def add_updates(self, updates):
"""
if isinstance(updates, OrderedDict):
updates = list(updates.items())
assert isinstance(updates, list)
if not isinstance(updates, list):
raise ValueError
self.updates.extend(updates)
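
The move from assert to an explicit exception matters because assertions are stripped entirely when Python runs with -O, so they cannot be relied on to validate caller input. A minimal standalone sketch of the difference (not the actual Blocks code; the error message is an illustrative addition):

    def add_updates_with_assert(updates):
        assert isinstance(updates, list)  # silently skipped under "python -O"

    def add_updates_with_raise(updates):
        if not isinstance(updates, list):
            raise ValueError("updates must be a list (or an OrderedDict)")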


@@ -178,7 +179,8 @@ def initialize(self):
self._function = theano.function(self.inputs, [], updates=all_updates)

def process_batch(self, batch):
assert set(batch.keys()) == set([v.name for v in self.inputs])
if not set(batch.keys()) == set([v.name for v in self.inputs]):
raise ValueError
ordered_batch = [batch[v.name] for v in self.inputs]
self._function(*ordered_batch)

@@ -203,7 +205,7 @@ def compute_step(self, param, grad_wr_param):
A Theano expression for the descent step.
"""
raise NotImplemented()
raise NotImplementedError

def additional_updates(self):
"""Return updates to be done in addition to parameter modification.
34 changes: 22 additions & 12 deletions blocks/bricks/__init__.py
@@ -10,7 +10,7 @@
from theano import tensor

from blocks.utils import (pack, repr_attrs, reraise_as, shared_floatx_zeros,
unpack, update_instance, put_hook)
unpack, put_hook)

DEFAULT_SEED = [2014, 10, 5]

@@ -293,7 +293,7 @@ def push_allocation_config(self):
for child in self.children:
try:
child.push_allocation_config()
except:
except Exception:
self.allocation_config_pushed = False
raise
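
A bare except: also traps KeyboardInterrupt and SystemExit, which derive from BaseException rather than Exception, so except Exception: lets those propagate while the flag is still reset and the original error re-raised. The same pattern in isolation (a generic sketch, not the Brick machinery):

    def push_safely(obj, flag_name, push):
        try:
            push()
        except Exception:  # Ctrl-C and sys.exit() still propagate
            setattr(obj, flag_name, False)
            raise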

@@ -325,7 +325,7 @@ def push_initialization_config(self):
for child in self.children:
try:
child.push_initialization_config()
except:
except Exception:
self.initialization_config_pushed = False
raise

@@ -533,7 +533,8 @@ def __call__(self, *inputs, **kwargs):

return_dict = kwargs.pop('return_dict', False)
return_list = kwargs.pop('return_list', False)
assert not return_list or not return_dict
if return_list and return_dict:
raise ValueError

arg_names, varargs_name, _, _ = inspect.getargspec(
self.application_method)
@@ -582,7 +583,7 @@ def copy_and_tag(variable, role, name):
for i, output in enumerate(outputs):
try:
name = self.outputs[i]
except:
except Exception:
name = "output_{}".format(i)
if isinstance(output, tensor.Variable):
# TODO Tag with dimensions, axes, etc. for error-checking
@@ -758,7 +759,7 @@ class Random(Brick):
"""
def __init__(self, theano_rng=None, **kwargs):
super(Random, self).__init__(**kwargs)
update_instance(self, locals())
self.theano_rng = theano_rng

@property
def theano_rng(self):
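
Calls to update_instance(self, locals()) are replaced throughout this commit by explicit attribute assignments, which readers and static analysers can follow. The removed helper presumably did something close to the following (an assumption, not the actual blocks.utils implementation):

    def update_instance(instance, namespace):
        # Copy every local variable except self and kwargs onto the instance.
        for name, value in namespace.items():
            if name not in ('self', 'kwargs'):
                setattr(instance, name, value)

With explicit assignments only the intended attributes are created.
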
@@ -878,7 +879,8 @@ class Linear(Initializable):
@lazy
def __init__(self, input_dim, output_dim, **kwargs):
super(Linear, self).__init__(**kwargs)
update_instance(self, locals())
self.input_dim = input_dim
self.output_dim = output_dim

def _allocate(self):
self.params.append(shared_floatx_zeros((self.input_dim,
@@ -990,11 +992,16 @@ class LinearMaxout(Initializable):
-----
See :class:`Initializable` for initialization parameters.
.. todo:: Name of :attr:`linear_transformation` shouldn't be hardcoded.
"""
@lazy
def __init__(self, input_dim, output_dim, num_pieces, **kwargs):
super(LinearMaxout, self).__init__(**kwargs)
update_instance(self, locals())
self.input_dim = input_dim
self.output_dim = output_dim
self.num_pieces = num_pieces

self.linear_transformation = Linear(name='linear_to_maxout',
input_dim=input_dim,
output_dim=output_dim * num_pieces,
@@ -1106,8 +1113,9 @@ class Sequence(Brick):
def __init__(self, bricks, application_methods=None, **kwargs):
super(Sequence, self).__init__(**kwargs)
if application_methods is None:
application_methods = ['apply' for brick in bricks]
assert len(application_methods) == len(bricks)
application_methods = ['apply' for _ in bricks]
if not len(application_methods) == len(bricks):
raise ValueError
self.children = bricks
self.application_methods = application_methods
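
Naming the unused loop variable _ is the conventional way to tell checkers the value is deliberately ignored; the list still gets one 'apply' entry per brick. For example (placeholder values, for illustration only):

    bricks = ['brick_0', 'brick_1', 'brick_2']
    application_methods = ['apply' for _ in bricks]
    if not len(application_methods) == len(bricks):
        raise ValueError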

@@ -1155,7 +1163,8 @@ class MLP(Sequence, Initializable):
"""
@lazy
def __init__(self, activations, dims, **kwargs):
update_instance(self, locals())
self.activations = activations

self.linear_transformations = [Linear(name='linear_{}'.format(i))
for i in range(len(activations))]
# Interleave the transformations and activations
@@ -1167,7 +1176,8 @@ def __init__(self, activations, dims, **kwargs):
super(MLP, self).__init__(children, **kwargs)

def _push_allocation_config(self):
assert len(self.dims) - 1 == len(self.linear_transformations)
if not len(self.dims) - 1 == len(self.linear_transformations):
raise ValueError
for input_dim, output_dim, layer in zip(self.dims[:-1], self.dims[1:],
self.linear_transformations):
layer.input_dim = input_dim
24 changes: 15 additions & 9 deletions blocks/bricks/attention.py
@@ -12,7 +12,6 @@

from blocks.bricks import MLP, Identity, lazy, application, Initializable
from blocks.bricks.parallel import Parallel
from blocks.utils import update_instance


class SequenceContentAttention(Initializable):
@@ -70,17 +69,24 @@ def __init__(self, state_names, state_dims, sequence_dim, match_dim,
energy_computer=None,
**kwargs):
super(SequenceContentAttention, self).__init__(**kwargs)
update_instance(self, locals())
self.state_names = state_names
self.state_dims = state_dims
self.sequence_dim = sequence_dim
self.match_dim = match_dim
self.state_transformer = state_transformer

self.state_transformers = Parallel(channel_names=state_names,
prototype=self.state_transformer,
prototype=state_transformer,
name="state_trans")
if not self.sequence_transformer:
self.sequence_transformer = MLP([Identity()], name="seq_trans")
if not self.energy_computer:
self.energy_computer = MLP([Identity()], name="energy_comp")
self.children = [self.state_transformers, self.sequence_transformer,
self.energy_computer]
if not sequence_transformer:
sequence_transformer = MLP([Identity()], name="seq_trans")
if not energy_computer:
energy_computer = MLP([Identity()], name="energy_comp")
self.sequence_transformer = sequence_transformer
self.energy_computer = energy_computer

self.children = [self.state_transformers, sequence_transformer,
energy_computer]

def _push_allocation_config(self):
self.state_transformers.input_dims = self.state_dims
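
The constructor now resolves optional collaborators into local variables before storing them and registering the children, instead of assigning None and patching the attribute afterwards. The pattern reduced to its bones (a hedged sketch with a stand-in default, not the attention brick itself):

    def default_transformer():
        return lambda x: x  # stand-in for MLP([Identity()])

    class Holder(object):
        def __init__(self, transformer=None):
            if not transformer:
                transformer = default_transformer()
            self.transformer = transformer  # never None once stored
            self.children = [transformer]
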
2 changes: 1 addition & 1 deletion blocks/bricks/cost.py
@@ -29,7 +29,7 @@ def cost_matrix(self, y, y_hat):
class AbsoluteError(CostMatrix):
@application
def cost_matrix(self, y, y_hat):
cost = tensor.abs(y - y_hat)
cost = abs(y - y_hat)
return cost
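
The old line would fail because Theano's tensor module exposes the element-wise absolute value as tensor.abs_, not tensor.abs; the built-in abs() works because Theano variables implement __abs__ and build the same element-wise op. For instance:

    import theano.tensor as tensor

    y = tensor.matrix('y')
    y_hat = tensor.matrix('y_hat')
    cost = abs(y - y_hat)  # element-wise, equivalent to tensor.abs_(y - y_hat)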


6 changes: 3 additions & 3 deletions blocks/bricks/lookup.py
@@ -1,7 +1,6 @@
"""Introduces Lookup brick."""
from blocks.bricks import application, Initializable, lazy
from blocks.utils import (check_theano_variable, shared_floatx_zeros,
update_instance)
from blocks.utils import check_theano_variable, shared_floatx_zeros


class LookupTable(Initializable):
@@ -25,7 +24,8 @@ class LookupTable(Initializable):
@lazy
def __init__(self, length, dim, **kwargs):
super(LookupTable, self).__init__(**kwargs)
update_instance(self, locals())
self.length = length
self.dim = dim

@property
def W(self):
16 changes: 10 additions & 6 deletions blocks/bricks/parallel.py
@@ -7,7 +7,6 @@
import copy

from blocks.bricks import lazy, application, MLP, Identity, Initializable
from blocks.utils import update_instance


class Parallel(Initializable):
@@ -36,12 +35,16 @@ class Parallel(Initializable):
def __init__(self, channel_names, input_dims, output_dims,
prototype=None, **kwargs):
super(Parallel, self).__init__(**kwargs)
update_instance(self, locals())
self.channel_names = channel_names
self.input_dims = input_dims
self.output_dims = output_dims

if not prototype:
prototype = MLP([Identity()], use_bias=False)
self.prototype = prototype

if not self.prototype:
self.prototype = MLP([Identity()], use_bias=False)
self.transforms = []
for name in self.channel_names:
for name in channel_names:
self.transforms.append(copy.deepcopy(self.prototype))
self.transforms[-1].name = "transform_{}".format(name)
self.children = self.transforms
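
Each channel still receives its own copy of the prototype via copy.deepcopy, so the per-channel transforms do not share parameters; the default prototype is now simply built before it is stored. The copying idea in isolation (plain objects and illustrative channel names, not Bricks):

    import copy

    class Proto(object):
        def __init__(self):
            self.params = []

    prototype = Proto()
    transforms = []
    for name in ['channel_a', 'channel_b']:
        transforms.append(copy.deepcopy(prototype))  # independent parameters per channel
        transforms[-1].name = "transform_{}".format(name)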
@@ -158,7 +161,8 @@ def _push_allocation_config(self):
@application
def apply(self, **kwargs):
new = kwargs.pop(self.new_name)
assert set(kwargs.keys()) == set(self.old_names)
if not set(kwargs.keys()) == set(self.old_names):
raise ValueError
result = super(Mixer, self).apply(
return_list=True, **{name: new for name in self.old_names})
for i, name in enumerate(self.old_names):
15 changes: 11 additions & 4 deletions blocks/bricks/recurrent.py
@@ -9,7 +9,7 @@
from blocks.bricks import (Application, application, application_wrapper,
Brick, Initializable, Identity, Sigmoid, lazy)
from blocks.initialization import NdarrayInitialization
from blocks.utils import pack, shared_floatx_zeros, update_instance
from blocks.utils import pack, shared_floatx_zeros


class BaseRecurrent(Brick):
@@ -229,7 +229,9 @@ def __init__(self, dim, activation=None, **kwargs):
super(Recurrent, self).__init__(**kwargs)
if activation is None:
activation = Identity()
update_instance(self, locals())
self.dim = dim
self.activation = activation

self.children = [activation]

@property
@@ -319,13 +321,17 @@ class GatedRecurrent(BaseRecurrent, Initializable):
def __init__(self, activation, gate_activation, dim,
use_update_gate=True, use_reset_gate=True, **kwargs):
super(GatedRecurrent, self).__init__(**kwargs)
self.dim = dim
self.use_update_gate = use_update_gate
self.use_reset_gate = use_reset_gate

if not activation:
activation = Identity()
if not gate_activation:
gate_activation = Sigmoid()
self.activation = activation
self.gate_activation = gate_activation

update_instance(self, locals())
self.children = [activation, gate_activation]

@property
@@ -455,7 +461,8 @@ class Bidirectional(Initializable):
@lazy
def __init__(self, prototype, **kwargs):
super(Bidirectional, self).__init__(**kwargs)
update_instance(self, locals())
self.prototype = prototype

self.children = [copy.deepcopy(prototype) for i in range(2)]
self.children[0].name = 'forward'
self.children[1].name = 'backward'
