Deconvolutional Layer added. Tutorials in Autoencoder.
Ragav Venkatesan committed Mar 8, 2017
1 parent 6bef805 commit fbc3f22
Showing 3 changed files with 29 additions and 17 deletions.
33 changes: 16 additions & 17 deletions pantry/tutorials/autoencoder.py
@@ -1,12 +1,14 @@
"""
TODO:
-Need a validation and testing thats better than just measuring rmse. Can't find something great.
+* Need a validation and testing that's better than just measuring rmse. Can't find something
+  great.
+* Loss increases after 3 epochs.
"""
from yann.network import network

-def autoencoder ( dataset= None, verbose = 1 ):
+def autoencoder ( dataset= None, verbose = 1 ):
"""
This function is a demo example of a sparse autoencoder.
This is an example code. You should study this code rather than merely run it.
@@ -27,16 +29,16 @@ def autoencoder ( dataset= None, verbose = 1 ):
"sample_size": 32,
"rgb_filters": False,
"debug_functions" : False,
-"debug_layers": False,
+"debug_layers": True,
"id" : 'main'
}

# intitialize the network
optimizer_params = {
-"momentum_type" : 'nesterov',
-"momentum_params" : (0.65, 0.95, 30),
+"momentum_type" : 'polyak',
+"momentum_params" : (0.5, 0.95, 20),
"regularization" : (0.0001, 0.0001),
-"optimizer_type" : 'rmsprop',
+"optimizer_type" : 'adagrad',
"id" : "main"
}
net = network( borrow = True,
@@ -71,16 +73,15 @@ def autoencoder ( dataset= None, verbose = 1 ):
origin = "flatten",
id = "encoder",
num_neurons = 64,
-activation = 'tanh',
-# regularize = True,
+activation = 'relu',
verbose = verbose
)

net.add_layer ( type = "dot_product",
origin = "encoder",
id = "decoder",
num_neurons = 784,
-activation = 'tanh',
+activation = 'relu',
input_params = [net.dropout_layers['encoder'].w.T, None],
# Use the same weights but transposed for decoder.
learnable = False,
@@ -117,7 +118,7 @@ def autoencoder ( dataset= None, verbose = 1 ):
verbose = verbose
)

-learning_rates = (0.05, 0.1, 0.01)
+learning_rates = (0.001, 0.1, 0.001)
net.cook( objective_layers = ['obj'],
datastream = 'data',
learning_rates = learning_rates,
@@ -126,7 +127,7 @@ def autoencoder ( dataset= None, verbose = 1 ):

# from yann.utils.graph import draw_network
# draw_network(net.graph, filename = 'autoencoder.png')
-# net.pretty_print()
+net.pretty_print()

net.train( epochs = (10, 10),
validate_after_epochs = 1,
@@ -200,7 +201,7 @@ def convolutional_autoencoder ( dataset= None, verbose = 1 ):
filter_size = (5,5),
pool_size = (1,1),
activation = 'tanh',
-regularize = False,
+regularize = True,
#stride = (2,2),
verbose = verbose
)
@@ -226,7 +227,6 @@ def convolutional_autoencoder ( dataset= None, verbose = 1 ):
id = "encoder",
num_neurons = 128,
activation = 'tanh',
-batch_norm = True,
dropout_rate = 0.5,
regularize = True,
verbose = verbose
@@ -277,7 +277,6 @@ def convolutional_autoencoder ( dataset= None, verbose = 1 ):
pool_size = (1,1),
output_shape = (28,28,1),
activation = 'tanh',
-regularize = True,
input_params = [net.dropout_layers['conv'].w, None],
learnable = False,
#stride = (2,2),
@@ -330,7 +329,7 @@ def convolutional_autoencoder ( dataset= None, verbose = 1 ):
dataset = None
if len(sys.argv) > 1:
if sys.argv[1] == 'create_dataset':
-from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist
+from yann.special.datasets import cook_mnist_normalized as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()
else:
@@ -340,9 +339,9 @@ def convolutional_autoencoder ( dataset= None, verbose = 1 ):

if dataset is None:
print " creating a new dataset to run through"
-from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist
+from yann.special.datasets import cook_mnist_normalized as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()

-autoencoder ( dataset, verbose = 2 )
+#autoencoder ( dataset, verbose = 2 )
convolutional_autoencoder ( dataset , verbose = 2 )
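
The decoder layer in the tutorial above reuses the encoder's weight matrix transposed (input_params = [net.dropout_layers['encoder'].w.T, None] with learnable = False), i.e. a tied-weights autoencoder: only the encoder's 784x64 matrix (plus biases) is learned, and the reconstruction runs it backwards. Below is a minimal numpy sketch of that idea, independent of yann; the 784 -> 64 -> 784 shapes mirror the tutorial, while the toy batch and variable names are illustrative only. The rmse computed at the end is the same crude reconstruction measure the TODO at the top of the file complains about.

    import numpy as np

    def relu(z):
        return np.maximum(z, 0.0)

    rng = np.random.RandomState(0)
    x = rng.rand(16, 784)                 # toy batch of flattened 28x28 images in [0, 1]
    w = rng.randn(784, 64) * 0.01         # encoder weights, shared with the decoder
    b_enc = np.zeros(64)
    b_dec = np.zeros(784)

    h = relu(np.dot(x, w) + b_enc)        # encoder: 784 -> 64
    x_hat = relu(np.dot(h, w.T) + b_dec)  # decoder: 64 -> 784, reusing w transposed
    rmse = np.sqrt(np.mean((x - x_hat) ** 2))
    print(rmse)
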
4 changes: 4 additions & 0 deletions pantry/tutorials/gan.py
@@ -4,6 +4,10 @@
Goodfellow, Ian, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair,
Aaron Courville, and Yoshua Bengio. "Generative adversarial nets." In Advances in Neural Information
Processing Systems, pp. 2672-2680. 2014.
+Todo:
+Deconvolutional GAN is throwing a weird error.
"""
from yann.special.gan import gan
from theano import tensor as T
9 changes: 9 additions & 0 deletions yann/core/conv.py
@@ -222,5 +222,14 @@ def __init__ ( self,
)
self.out_shp = (_out_height, _out_width)

+# Sanity check: convolving the requested output shape back with the same filter,
+# border mode and stride must recover the actual input shape.
+_,_,_in_height,_in_width = conv_shape (image_shape = output_shape,
+                                       kernel_shape = filter_shape,
+                                       border_mode = border_mode,
+                                       subsample = subsample)
+
+if not (_in_height == image_shape [2] and _in_width == image_shape [3]):
+    raise Exception ("This dimensionality of the output image cannot be achieved.")
+
if __name__ == '__main__':
pass
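
The block added above guards the new deconvolutional layer: the requested output_shape is accepted only if running the forward convolution shape arithmetic on it lands back on the layer's input shape. conv_shape and its keyword arguments are yann internals taken from the diff; the sketch below only reproduces the standard 'valid'-convolution size arithmetic such a check relies on, with made-up numbers.

    def valid_conv_dim(in_dim, kernel_dim, stride):
        # output size of a 'valid' convolution along one axis
        return (in_dim - kernel_dim) // stride + 1

    # hypothetical deconvolution: 12x12 input, 5x5 filters, stride 1, requested 16x16 output
    in_h, in_w, k, stride = 12, 12, 5, 1
    out_h, out_w = 16, 16

    # Convolving the requested output shape back must recover the input shape;
    # otherwise no deconvolution with these parameters can produce it.
    consistent = (valid_conv_dim(out_h, k, stride) == in_h and
                  valid_conv_dim(out_w, k, stride) == in_w)
    print(consistent)   # True, since (16 - 5) // 1 + 1 == 12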
