
Commit 359dafb
Merge pull request #170 from f0k/remove-args
Remove *args from get_output() and get_output_for()
benanne committed Mar 22, 2015
2 parents 4e41b68 + 2d7f25f commit 359dafb
Showing 15 changed files with 51 additions and 57 deletions.
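
The upshot for callers: extra options for the forward pass (such as deterministic for dropout) are now keyword-only and are forwarded through the layer chain via **kwargs instead of *args. A minimal sketch of the call pattern after this change, assuming the pre-0.1 Lasagne API in which get_output() is still a method on layer instances; the network below is purely illustrative:

    import theano.tensor as T
    import lasagne

    # Illustrative network; layer sizes are made up for this sketch.
    x = T.matrix('x')
    l_in = lasagne.layers.InputLayer(shape=(None, 784), input_var=x)
    l_hid = lasagne.layers.DenseLayer(l_in, num_units=100)
    l_out = lasagne.layers.DropoutLayer(l_hid, p=0.5)

    # Extra options must now be passed by keyword; positional extras are gone.
    train_expr = l_out.get_output()                    # dropout active
    test_expr = l_out.get_output(deterministic=True)   # forwarded to DropoutLayer.get_output_for()
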
16 changes: 8 additions & 8 deletions lasagne/layers/base.py
@@ -91,7 +91,7 @@ def get_output_shape(self):
"""
return self.get_output_shape_for(self.input_shape)

- def get_output(self, input=None, *args, **kwargs):
+ def get_output(self, input=None, **kwargs):
"""
Computes the output of the network at this layer. Optionally, you can
define an input to propagate through the network instead of using the
@@ -125,8 +125,8 @@ def get_output(self, input=None, *args, **kwargs):
"there isn't anything to get its input from. "
"Did you mean get_output_for()?")
else: # in all other cases, just pass the input on to the next layer.
- layer_input = self.input_layer.get_output(input, *args, **kwargs)
- return self.get_output_for(layer_input, *args, **kwargs)
+ layer_input = self.input_layer.get_output(input, **kwargs)
+ return self.get_output_for(layer_input, **kwargs)

def get_output_shape_for(self, input_shape):
"""
@@ -153,7 +153,7 @@ def get_output_shape_for(self, input_shape):
"""
return input_shape

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
"""
Propagates the given input through this layer (and only this layer).
@@ -272,7 +272,7 @@ def __init__(self, incomings, name=None):
def get_output_shape(self):
return self.get_output_shape_for(self.input_shapes)

- def get_output(self, input=None, *args, **kwargs):
+ def get_output(self, input=None, **kwargs):
if isinstance(input, dict) and (self in input):
# this layer is mapped to an expression or numpy array
return utils.as_theano_expression(input[self])
@@ -282,9 +282,9 @@ def get_output(self, input=None, *args, **kwargs):
"Did you mean get_output_for()?")
# In all other cases, just pass the network input on to the next layers
else:
- layer_inputs = [input_layer.get_output(input, *args, **kwargs) for
+ layer_inputs = [input_layer.get_output(input, **kwargs) for
input_layer in self.input_layers]
- return self.get_output_for(layer_inputs, *args, **kwargs)
+ return self.get_output_for(layer_inputs, **kwargs)

def get_output_shape_for(self, input_shapes):
"""
@@ -310,7 +310,7 @@ def get_output_shape_for(self, input_shapes):
"""
raise NotImplementedError

- def get_output_for(self, inputs, *args, **kwargs):
+ def get_output_for(self, inputs, **kwargs):
"""
Propagates the given inputs through this layer (and only this layer).
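
The Layer docstrings above direct custom layers to override get_output_for(); after this change the override accepts extra options through **kwargs only. A hedged sketch of that pattern with a hypothetical DoubleLayer (not part of the library):

    import lasagne

    class DoubleLayer(lasagne.layers.Layer):
        """Hypothetical example layer: multiplies its input by two."""

        def get_output_shape_for(self, input_shape):
            # element-wise operation, so the shape is unchanged
            return input_shape

        def get_output_for(self, input, **kwargs):
            # extra options (e.g. deterministic=True) arrive as keyword
            # arguments; this layer has no use for them and ignores **kwargs
            return 2 * input
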
4 changes: 2 additions & 2 deletions lasagne/layers/conv.py
@@ -81,7 +81,7 @@ def get_output_shape_for(self, input_shape):

return (input_shape[0], self.num_filters, output_length)

- def get_output_for(self, input, input_shape=None, *args, **kwargs):
+ def get_output_for(self, input, input_shape=None, **kwargs):
# the optional input_shape argument is for when get_output_for is
# called directly with a different shape than self.input_shape.
if input_shape is None:
@@ -171,7 +171,7 @@ def get_output_shape_for(self, input_shape):

return (input_shape[0], self.num_filters, output_rows, output_columns)

- def get_output_for(self, input, input_shape=None, *args, **kwargs):
+ def get_output_for(self, input, input_shape=None, **kwargs):
# the optional input_shape argument is for when get_output_for is
# called directly with a different shape than self.input_shape.
if input_shape is None:
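
The comments above note that input_shape is only needed when get_output_for() is called directly with data of a different shape than self.input_shape; with *args removed it must be given by keyword. A sketch under the assumption that Conv2DLayer took num_filters and filter_size at this point in the API (shapes are illustrative):

    import theano.tensor as T
    import lasagne

    x = T.tensor4('x')  # (batch, channels, rows, columns)
    l_in = lasagne.layers.InputLayer(shape=(None, 3, 32, 32))
    l_conv = lasagne.layers.Conv2DLayer(l_in, num_filters=16, filter_size=(5, 5))

    # Direct call on data whose shape differs from self.input_shape:
    # input_shape is now keyword-only.
    y = l_conv.get_output_for(x, input_shape=(None, 3, 64, 64))
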
2 changes: 1 addition & 1 deletion lasagne/layers/corrmm.py
@@ -108,7 +108,7 @@ def get_output_shape_for(self, input_shape):

return (batch_size, self.num_filters, output_rows, output_columns)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
filters = self.W
if self.flip_filters:
filters = filters[:, :, ::-1, ::-1] # flip top-down, left-right
10 changes: 5 additions & 5 deletions lasagne/layers/cuda_convnet.py
@@ -149,7 +149,7 @@ def get_output_shape_for(self, input_shape):
else:
return (self.num_filters, output_rows, output_columns, batch_size)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
if self.dimshuffle:
filters = self.W.dimshuffle(1, 2, 3, 0) # bc01 to c01b
input = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
@@ -243,7 +243,7 @@ def get_output_shape_for(self, input_shape):
return (num_input_channels, output_rows, output_columns,
batch_size)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
if self.dimshuffle:
input = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b

@@ -271,7 +271,7 @@ class ShuffleBC01ToC01BLayer(Layer):
def get_output_shape_for(self, input_shape):
return (input_shape[1], input_shape[2], input_shape[3], input_shape[0])

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return input.dimshuffle(1, 2, 3, 0)

bc01_to_c01b = ShuffleBC01ToC01BLayer # shortcut
@@ -286,7 +286,7 @@ class ShuffleC01BToBC01Layer(Layer):
def get_output_shape_for(self, input_shape):
return (input_shape[3], input_shape[0], input_shape[1], input_shape[2])

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return input.dimshuffle(3, 0, 1, 2)

c01b_to_bc01 = ShuffleC01BToBC01Layer # shortcut
@@ -332,7 +332,7 @@ def get_bias_params(self):
def get_output_shape_for(self, input_shape):
return (self.num_units,) + input_shape[1:]

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
# fc * c01b... = f01b...
out = T.tensordot(self.W, input, axes=[[1], [0]])

4 changes: 2 additions & 2 deletions lasagne/layers/dense.py
@@ -77,7 +77,7 @@ def get_bias_params(self):
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
@@ -131,7 +131,7 @@ def get_bias_params(self):
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units) + input_shape[2:]

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
# cf * bc01... = fb01...
out_r = T.tensordot(self.W, input, axes=[[0], [1]])
# input dims to broadcast over
4 changes: 2 additions & 2 deletions lasagne/layers/dnn.py
@@ -36,7 +36,7 @@ def get_output_shape_for(self, input_shape):
output_shape[3] = (output_shape[3] - self.ds[1]) // self.strides[1] + 1
return tuple(output_shape)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return dnn.dnn_pool(input, self.ds, self.strides, self.mode)


@@ -133,7 +133,7 @@ def get_output_shape_for(self, input_shape):

return (batch_size, self.num_filters, output_rows, output_columns)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
# by default we assume 'cross', consistent with corrmm.
conv_mode = 'conv' if self.flip_filters else 'cross'
# if 'border_mode' is one of 'valid' or 'full' use that.
2 changes: 1 addition & 1 deletion lasagne/layers/input.py
@@ -56,7 +56,7 @@ def __init__(self, shape, input_var=None, name=None, **kwargs):
def get_output_shape(self):
return self.shape

- def get_output(self, input=None, *args, **kwargs):
+ def get_output(self, input=None, **kwargs):
if isinstance(input, dict):
input = input.get(self, None)
if input is None:
4 changes: 2 additions & 2 deletions lasagne/layers/merge.py
@@ -21,7 +21,7 @@ def get_output_shape_for(self, input_shapes):
output_shape[self.axis] = sum(sizes)
return tuple(output_shape)

- def get_output_for(self, inputs, *args, **kwargs):
+ def get_output_for(self, inputs, **kwargs):
return T.concatenate(inputs, axis=self.axis)

concat = ConcatLayer # shortcut
@@ -66,7 +66,7 @@ def get_output_shape_for(self, input_shapes):
raise ValueError("Mismatch: not all input shapes are the same")
return input_shapes[0]

- def get_output_for(self, inputs, *args, **kwargs):
+ def get_output_for(self, inputs, **kwargs):
output = None
for coeff, input in zip(self.coeffs, inputs):
if coeff != 1:
4 changes: 2 additions & 2 deletions lasagne/layers/noise.py
@@ -20,7 +20,7 @@ def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
self.p = p
self.rescale = rescale

- def get_output_for(self, input, deterministic=False, *args, **kwargs):
+ def get_output_for(self, input, deterministic=False, **kwargs):
if deterministic or self.p == 0:
return input
else:
@@ -44,7 +44,7 @@ def __init__(self, incoming, sigma=0.1, **kwargs):
super(GaussianNoiseLayer, self).__init__(incoming, **kwargs)
self.sigma = sigma

- def get_output_for(self, input, deterministic=False, *args, **kwargs):
+ def get_output_for(self, input, deterministic=False, **kwargs):
if deterministic or self.sigma == 0:
return input
else:
2 changes: 1 addition & 1 deletion lasagne/layers/normalization.py
@@ -86,7 +86,7 @@ def __init__(self, incoming, alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
def get_output_shape_for(self, input_shape):
return input_shape

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
input_shape = self.input_shape
if any(s is None for s in input_shape):
input_shape = input.shape
8 changes: 4 additions & 4 deletions lasagne/layers/pool.py
@@ -36,7 +36,7 @@ def get_output_shape_for(self, input_shape):

return tuple(output_shape)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return downsample.max_pool_2d(input, self.ds, self.ignore_border)


@@ -74,7 +74,7 @@ def get_output_shape_for(self, input_shape):
output_shape[self.axis] = output_shape[self.axis] // self.ds
return tuple(output_shape)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
num_feature_maps = input.shape[self.axis]
num_feature_maps_out = num_feature_maps // self.ds

@@ -113,7 +113,7 @@ def __init__(self, incoming, ds, axis=1, **kwargs):
"multiple of the group size (ds=%d)" %
(num_feature_maps, self.ds))

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
num_feature_maps = input.shape[self.axis]
num_pools = num_feature_maps // self.ds

@@ -151,5 +151,5 @@ def __init__(self, incoming, pool_function=T.mean, **kwargs):
def get_output_shape_for(self, input_shape):
return input_shape[:2]

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return self.pool_function(input.flatten(3), axis=2)
10 changes: 5 additions & 5 deletions lasagne/layers/shape.py
@@ -21,7 +21,7 @@ class FlattenLayer(Layer):
def get_output_shape_for(self, input_shape):
return (input_shape[0], int(np.prod(input_shape[1:])))

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return input.flatten(2)

flatten = FlattenLayer # shortcut
@@ -80,7 +80,7 @@ def __init__(self, incoming, shape, **kwargs):
raise ValueError("`shape` cannot contain multiple -1")
self.shape = shape

- def get_output_shape_for(self, input_shape, *args, **kwargs):
+ def get_output_shape_for(self, input_shape, **kwargs):
# Initialize output shape from shape specification
output_shape = list(self.shape)
# First, replace all `[i]` with the corresponding input dimension, and
@@ -127,7 +127,7 @@ def get_output_shape_for(self, input_shape, *args, **kwargs):
(input_shape, self.shape))
return tuple(output_shape)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
# Replace all `[i]` with the corresponding input dimension
output_shape = list(self.shape)
for dim, o in enumerate(output_shape):
@@ -226,7 +226,7 @@ def get_output_shape_for(self, input_shape):

return tuple(output_shape)

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return input.dimshuffle(self.pattern)

dimshuffle = DimshuffleLayer # shortcut
@@ -249,7 +249,7 @@ def get_output_shape_for(self, input_shape):

return output_shape

- def get_output_for(self, input, *args, **kwargs):
+ def get_output_for(self, input, **kwargs):
return padding.pad(input, self.width, self.val, self.batch_ndim)

pad = PadLayer # shortcut
13 changes: 4 additions & 9 deletions lasagne/objectives.py
@@ -50,8 +50,7 @@ def __init__(self, input_layer, loss_function=mse, aggregation='mean'):
'or None, not {0}'.format(aggregation))
self.aggregation = aggregation

- def get_loss(self, input=None, target=None, aggregation=None,
-              *args, **kwargs):
+ def get_loss(self, input=None, target=None, aggregation=None, **kwargs):
"""
Get loss scalar expression
@@ -63,15 +62,13 @@ def get_loss(self, input=None, target=None,
given the input
- aggregation : None to use the value passed to the
constructor or a value to override it
- - args : additional arguments passed to `input_layer`'s
-     `get_output` method
- kwargs : additional keyword arguments passed to `input_layer`'s
`get_output` method
:returns:
- output : loss expressions
"""
- network_output = self.input_layer.get_output(input, *args, **kwargs)
+ network_output = self.input_layer.get_output(input, **kwargs)
if target is None:
target = self.target_var
if aggregation not in self._valid_aggregation:
@@ -130,7 +127,7 @@ def __init__(self, input_layer, loss_function=mse, aggregation='mean'):
self.aggregation = aggregation

def get_loss(self, input=None, target=None, mask=None,
- aggregation=None, *args, **kwargs):
+ aggregation=None, **kwargs):
"""
Get loss scalar expression
Expand All @@ -146,15 +143,13 @@ def get_loss(self, input=None, target=None, mask=None,
contributions of the resulting loss values
- aggregation : None to use the value passed to the
constructor or a value to override it
- - args : additional arguments passed to `input_layer`'s
-     `get_output` method
- kwargs : additional keyword arguments passed to `input_layer`'s
`get_output` method
:returns:
- output : loss expressions
"""
- network_output = self.input_layer.get_output(input, *args, **kwargs)
+ network_output = self.input_layer.get_output(input, **kwargs)
if target is None:
target = self.target_var
if mask is None:
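
Per the updated docstrings, get_loss() now forwards only keyword arguments to the network's get_output(). A small usage sketch under the same assumptions as above (illustrative network, pre-0.1 API):

    import theano.tensor as T
    import lasagne
    from lasagne.objectives import Objective, mse

    # Illustrative network for the sketch
    l_in = lasagne.layers.InputLayer(shape=(None, 20))
    l_out = lasagne.layers.DenseLayer(l_in, num_units=10)

    obj = Objective(l_out, loss_function=mse)
    t = T.matrix('t')

    # Options such as deterministic must be keywords; get_loss() passes
    # them on to l_out.get_output(**kwargs).
    train_loss = obj.get_loss(target=t)
    eval_loss = obj.get_loss(target=t, deterministic=True)
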
18 changes: 9 additions & 9 deletions lasagne/tests/layers/test_base.py
@@ -26,15 +26,15 @@ def test_get_output_without_arguments(self, layer):
layer.input_layer.get_output.assert_called_with(None)

def test_get_output_passes_on_arguments_to_input_layer(self, layer):
- input, arg, kwarg = object(), object(), object()
+ input, kwarg = object(), object()
layer.get_output_for = Mock()

- output = layer.get_output(input, arg, kwarg=kwarg)
+ output = layer.get_output(input, kwarg=kwarg)
assert output is layer.get_output_for.return_value
layer.get_output_for.assert_called_with(
- layer.input_layer.get_output.return_value, arg, kwarg=kwarg)
+ layer.input_layer.get_output.return_value, kwarg=kwarg)
layer.input_layer.get_output.assert_called_with(
- input, arg, kwarg=kwarg)
+ input, kwarg=kwarg)

def test_get_output_input_is_a_mapping(self, layer):
input = {layer: theano.tensor.matrix()}
@@ -140,19 +140,19 @@ def test_get_output_without_arguments(self, layer):
layer.input_layers[1].get_output.assert_called_with(None)

def test_get_output_passes_on_arguments_to_input_layer(self, layer):
- input, arg, kwarg = object(), object(), object()
+ input, kwarg = object(), object()
layer.get_output_for = Mock()

- output = layer.get_output(input, arg, kwarg=kwarg)
+ output = layer.get_output(input, kwarg=kwarg)
assert output is layer.get_output_for.return_value
layer.get_output_for.assert_called_with([
layer.input_layers[0].get_output.return_value,
layer.input_layers[1].get_output.return_value,
- ], arg, kwarg=kwarg)
+ ], kwarg=kwarg)
layer.input_layers[0].get_output.assert_called_with(
- input, arg, kwarg=kwarg)
+ input, kwarg=kwarg)
layer.input_layers[1].get_output.assert_called_with(
- input, arg, kwarg=kwarg)
+ input, kwarg=kwarg)

def test_get_output_input_is_a_mapping(self, layer):
input = {layer: theano.tensor.matrix()}