Merge pull request #41 from ragavvenkatesan/master
Sync
Ragav Venkatesan committed Mar 3, 2017
2 parents 0dfa0c0 + 57cdf7d commit 6a14420
Showing 7 changed files with 253 additions and 47 deletions.
1 change: 0 additions & 1 deletion pantry/tutorials/gan.py
@@ -117,7 +117,6 @@ def shallow_gan ( dataset= None, verbose = 1 ):
distribution = 'normal',
mu = 0,
sigma = 1,
limits = (0,1),
verbose = verbose)

#x - inputs come from dataset 1 X 784
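Dropping `limits` is consistent with the rest of this call: the noise layer samples from a normal distribution, which is unbounded, so a (0, 1) clipping range has nothing to act on. As a hedged sketch (the `id` and `num_neurons` values below are illustrative assumptions, not part of this diff), the call in shallow_gan now reads roughly:

# Sketch: the generator's noise layer after this change. A `limits` argument
# would only be meaningful for a bounded distribution such as 'uniform'.
net.add_layer ( type = 'random',
                id = 'z',                  # assumed layer id
                num_neurons = (100, 32),   # assumed (mini-batch size, noise dims)
                distribution = 'normal',
                mu = 0,
                sigma = 1,
                verbose = verbose)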
2 changes: 1 addition & 1 deletion pantry/tutorials/mlp.py
@@ -10,7 +10,7 @@ def mlp ( dataset, verbose = 1 ):
"""
optimizer_params = {
"momentum_type" : 'polyak',
"momentum_params" : (0.9, 0.95, 30),
"momentum_params" : (0.65, 0.9, 30),
"regularization" : (0.0001, 0.0001),
"optimizer_type" : 'adagrad',
"id" : "main"
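This retunes Polyak momentum to warm up more gently. A sketch of how the dictionary is consumed, assuming yann's usual convention that `momentum_params` is `(momentum_at_start, momentum_at_end, epoch_at_which_momentum_saturates)` and the tutorials' `net.add_module` call:

# Sketch: the tuned optimizer wired into a yann network; under the assumed
# convention, momentum ramps from 0.65 to 0.9 over the first 30 epochs.
optimizer_params = {
    "momentum_type"   : 'polyak',
    "momentum_params" : (0.65, 0.9, 30),
    "regularization"  : (0.0001, 0.0001),   # (l1_coeff, l2_coeff)
    "optimizer_type"  : 'adagrad',
    "id"              : "main"
}
net.add_module ( type = 'optimizer', params = optimizer_params, verbose = verbose )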
72 changes: 72 additions & 0 deletions yann/layers/input.py
@@ -114,6 +114,78 @@ def __init__(self,
if verbose >= 3:
print("... Dropped out")

class tensor_layer (layer):
"""
This converts a theano tensor or a shared value into a layer; the
value simply becomes the layer's output.
Args:
input: some tensor
input_shape: shape of the tensor
verbose: Similar to all of the toolbox.
Notes:
Use ``tensor_layer.output`` to continue onwards with the network;
``tensor_layer.output_shape`` will tell you the output size.
"""
def __init__(self,
id,
input,
input_shape,
verbose = 2):

if verbose >= 3:
print("... Creating the input layer")

super(tensor_layer, self).__init__(id = id, type = 'tensor', verbose = verbose)
self.output = input
self.output_shape = input_shape
self.inference = self.output

if verbose >= 3:
print("... Tensor layer is created with output shape " + str(self.output_shape))


class dropout_tensor_layer (tensor_layer):
"""
This converts a theano tensor or a shared value into a layer; the
value simply becomes the layer's output.
Args:
input: some tensor
input_shape: shape of the tensor
dropout_rate: the rate at which units are dropped; default is 0.5.
rng: Random number generator
verbose: Similar to all of the toolbox.
Notes:
Use ``dropout_tensor_layer.output`` to continue onwards with the network;
``dropout_tensor_layer.output_shape`` will tell you the output size.
"""
def __init__(self,
id,
input,
input_shape,
rng = None,
dropout_rate = 0.5,
verbose = 2):

if verbose >= 3:
print("... set up the dropout tensor layer")
if rng is None:
rng = numpy.random
super(dropout_tensor_layer, self).__init__(
id = id,
input = input,
input_shape = input_shape,
verbose = verbose)
if dropout_rate != 0:
self.output = _dropout(rng = rng,
params = self.output,
dropout_rate = dropout_rate)

if verbose >= 3:
print("... Dropped out")

if __name__ == '__main__':
pass
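A minimal usage sketch of the two new classes (the ids and shapes below are invented for illustration; it assumes theano is installed and yann is importable):

# Wrap an arbitrary theano tensor so the rest of the toolbox sees a layer.
import numpy
import theano.tensor as T
from yann.layers.input import tensor_layer, dropout_tensor_layer

z = T.matrix('z')                        # any symbolic tensor
zl = tensor_layer ( id = 'z',
                    input = z,
                    input_shape = (100, 784),
                    verbose = 3)
# zl.output is simply `z`; zl.output_shape is (100, 784).

dzl = dropout_tensor_layer ( id = 'dz',
                             input = z,
                             input_shape = (100, 784),
                             rng = numpy.random,
                             dropout_rate = 0.5,
                             verbose = 3)
# dzl.output is `z` with a dropout mask applied (keep probability 0.5).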
25 changes: 22 additions & 3 deletions yann/layers/merge.py
@@ -12,17 +12,24 @@ class merge_layer (layer):
x: a list of inputs (length must be exactly two)
input_shape: List of the shapes of all inputs.
type: ``'error'`` creates an error layer.
other options are ``'sum'`` and ``'concatenate'``
other options are ``'sum'``, ``'batch'`` and ``'concatenate'``
error: If the type was ``'error'``, then this variable is used.
options include ``'rmse'``, ``'l2'``, ``'l1'``, ``'cross_entropy'``.
input_type: If this argument was ``'tensor'``, we simply merge the outputs;
if this was not provided or was ``'layer'``, this merges the outputs
of the two layers.
Notes:
``'concatenate'`` concatenates the outputs along the channels, whereas ``'batch'``
concatenates across the batches; it will increase the batch size.
"""
def __init__ ( self,
x,
input_shape,
id = -1,
type = 'error',
error = 'rmse',
input_type = 'layer',
verbose = 2):

super(merge_layer,self).__init__(id = id, type = 'merge', verbose = verbose)
@@ -47,11 +54,13 @@ def __init__ ( self,
self.output = error(x[0], x[1])
self.output_shape = (1,)

"""
if len(input_shape) == 2:
self.num_neurons = self.output_shape[-1]
elif len(input_shape) == 4:
self.num_neurons = self.output_shape[1]

"""

self.generation = x[0] # assuming x[0] is the generation, in case
# this was an autoencoder network.

@@ -69,7 +78,17 @@ def __init__ ( self,
self.output_shape = (input_shape [0][0], input_shape[0][1] + input_shape[1][1])
elif len(input_shape[1]) == 4:
self.output_shape = (input_shape [0][0], input_shape[0][1] + input_shape[1][1],
input_shape[2], input_shape[3])
input_shape[0][2], input_shape[0][3])

elif type == 'batch':
self.output = T.concatenate([x[0],x[1]], axis = 0)
if len(input_shape[0]) == 2:
self.output_shape = (input_shape [0][0] + input_shape[1][0] , input_shape[0][1] )
elif len(input_shape[1]) == 4:
self.output_shape = (input_shape [0][0] + input_shape[1][0], input_shape[0][1],
input_shape[0][2], input_shape[0][3])
else:
raise Exception ( " This type is not allowed. " )
self.inference = self.output

def loss(self, type = None):
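The new ``'batch'`` branch mirrors the existing ``'concatenate'`` branch but stacks along axis 0, so the output batch size is the sum of the two input batch sizes while all other dimensions must match. A quick illustration in plain theano of the shape arithmetic the layer performs (shapes invented for the example):

# Illustration only, not merge_layer itself.
import numpy
import theano
import theano.tensor as T

a = T.tensor4('a')                        # say (100, 3, 32, 32)
b = T.tensor4('b')                        # say (60, 3, 32, 32)
merged = T.concatenate([a, b], axis = 0)  # -> (160, 3, 32, 32)

f = theano.function([a, b], merged)
out = f(numpy.zeros((100, 3, 32, 32), dtype = theano.config.floatX),
        numpy.zeros((60, 3, 32, 32), dtype = theano.config.floatX))
print(out.shape)                          # (160, 3, 32, 32)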
3 changes: 2 additions & 1 deletion yann/layers/output.py
@@ -282,12 +282,13 @@ def __init__(self,
else:
self.output = loss(y = labels, type = objective)


if L1 is not None:
self.output = self.output + l1_coeff * L1
if L2 is not None:
self.output = self.output + l2_coeff * L2
self.output_shape = (1,)

self.output_shape = (1,)
if verbose >= 3:
print("... Objective_layer is created with output shape " + str(self.output_shape))

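The fix moves `self.output_shape = (1,)` out of the regularization branch so the shape is set even when no `L1`/`L2` penalty is supplied. A runnable stand-in for the objective computation after this change (a schematic sketch, not the class itself; the coefficients correspond to the optimizer's regularization tuple):

# Schematic: penalized scalar objective; output_shape set in every branch.
import numpy
import theano
import theano.tensor as T

W = theano.shared(numpy.ones((5,), dtype = theano.config.floatX), name = 'W')
loss = T.mean(W ** 2)                 # stand-in for loss(y = labels, type = objective)
L1 = T.sum(abs(W))                    # in the layer, these may also be None
L2 = T.sum(W ** 2)
l1_coeff, l2_coeff = 0.0001, 0.0001

output = loss
if L1 is not None:
    output = output + l1_coeff * L1
if L2 is not None:
    output = output + l2_coeff * L2
output_shape = (1,)                   # the fix: unconditional, whatever the branch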
2 changes: 1 addition & 1 deletion yann/modules/optimizer.py
@@ -134,7 +134,7 @@ def calculate_gradients(self, params, objective, verbose = 1):
for param in params:
if verbose >=3 :
print "... Estimating gradient of parameter ",
print param
print param.name
try:
gradient = T.grad( objective ,param)
self.gradients.append ( gradient )
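A small logging fix: the verbose path now prints each parameter's `.name` rather than the variable itself, which keeps the log readable when a parameter's repr is noisy. A sketch of the pattern (assuming, as in theano, that params are shared variables labeled with `name` at creation):

# What the loop does per parameter, with a readable label in the log.
import numpy
import theano
import theano.tensor as T

W = theano.shared(numpy.zeros((784, 10), dtype = theano.config.floatX),
                  name = 'W_layer0')
objective = T.sum(W ** 2)             # stand-in scalar objective
gradient = T.grad(objective, W)       # as in calculate_gradients
print("... Estimating gradient of parameter " + str(W.name))   # -> W_layer0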
