
Commit

Moved things around for ease of use.
ragav committed Jan 25, 2017
1 parent 89a3e1a commit 0028102
Showing 13 changed files with 348 additions and 418 deletions.
Binary file removed lenet5/visualizer/data/image_2.jpg
17 changes: 9 additions & 8 deletions pantry/tutorials/autoencoder.py
@@ -57,7 +57,7 @@ def autoencoder ( dataset= None, verbose = 1 ):
net.add_layer ( type = "input",
id = "input",
verbose = verbose,
datastream_origin = 'data', # if you didn't add a dataset module, now is
origin = 'data', # if you didn't add a dataset module, now is
# the time.
mean_subtract = True )

@@ -72,6 +72,7 @@ def autoencoder ( dataset= None, verbose = 1 ):
id = "encoder",
num_neurons = 64,
activation = 'tanh',
regularize = True,
verbose = verbose
)

@@ -103,17 +104,17 @@ def autoencoder ( dataset= None, verbose = 1 ):
verbose = verbose)

net.add_layer ( type = "objective",
origin = "merge",
id = "obj",
objective = None,
layer_type = 'generator',
origin = "merge", # this is useless anyway.
layer_type = 'value',
objective = net.layers['merge'].output,
datastream_origin = 'data',
verbose = verbose
)
)

learning_rates = (0, 0.1, 0.01)
net.cook( objective_layer = 'obj',
datastream = 'data',
generator = 'merge',
learning_rates = learning_rates,
verbose = verbose
)
@@ -135,7 +136,7 @@ def autoencoder ( dataset= None, verbose = 1 ):
dataset = None
if len(sys.argv) > 1:
if sys.argv[1] == 'create_dataset':
from yann.special.datasets import cook_mnist
from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()
else:
@@ -145,7 +146,7 @@ def autoencoder ( dataset= None, verbose = 1 ):

if dataset is None:
print " creating a new dataset to run through"
from yann.special.datasets import cook_mnist
from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()

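For context, the autoencoder's objective is now a plain value objective wrapped around the merge layer's output, and cook() is pointed at the generator. A minimal sketch of the new pattern, using only calls that appear in this diff (layer ids are the tutorial's own):

net.add_layer ( type = "objective",
                id = "obj",
                layer_type = 'value',
                objective = net.layers['merge'].output, # the merge layer's error tensor
                datastream_origin = 'data',
                verbose = verbose )

net.cook ( objective_layer = 'obj',
           datastream = 'data',
           generator = 'merge', # new cook argument introduced in this commit
           learning_rates = (0, 0.1, 0.01),
           verbose = verbose )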
8 changes: 3 additions & 5 deletions pantry/tutorials/gan.py
@@ -67,9 +67,7 @@ def simple_gan ( dataset= None, verbose = 1 ):
net.add_layer ( type = "input",
id = "x",
verbose = verbose,
datastream_origin = 'data', # if you didn't add a dataset module, now is
# the time.
mean_subtract = False )
datastream_origin = 'data' )

#G(z) contains params theta_g - 100 X 784 - creates images of 1 X 784
net.add_layer ( type = "dot_product",
@@ -187,7 +185,7 @@ def simple_gan ( dataset= None, verbose = 1 ):
dataset = None
if len(sys.argv) > 1:
if sys.argv[1] == 'create_dataset':
from yann.special.datasets import cook_mnist
from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()
else:
@@ -197,7 +195,7 @@ def simple_gan ( dataset= None, verbose = 1 ):

if dataset is None:
print " creating a new dataset to run through"
from yann.special.datasets import cook_mnist
from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()

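Both the GAN and autoencoder tutorials now pull MNIST through the zero-mean normalized cooker, aliased so the rest of each script stays unchanged. A short sketch of the dataset step, assuming only the import path shown above:

from yann.special.datasets import cook_mnist_normalized_zero_mean as cook_mnist

data = cook_mnist (verbose = 2)        # cooks and caches the normalized dataset
dataset = data.dataset_location()      # location string passed to the tutorial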
41 changes: 29 additions & 12 deletions pantry/tutorials/lenet.py
@@ -1,3 +1,10 @@
"""
TODO:
Something is off with the visualizations of the CNN filters. Need to check what is going on.
"""

from yann.network import network
from yann.utils.graph import draw_network

@@ -71,6 +78,7 @@ def lenet5 ( dataset= None, verbose = 1 ):
filter_size = (5,5),
pool_size = (2,2),
activation = 'relu',
regularize = True,
verbose = verbose
)

@@ -81,6 +89,7 @@ def lenet5 ( dataset= None, verbose = 1 ):
filter_size = (3,3),
pool_size = (2,2),
activation = 'relu',
regularize = True,
verbose = verbose
)

@@ -90,21 +99,24 @@ def lenet5 ( dataset= None, verbose = 1 ):
id = "dot_product_1",
num_neurons = 800,
activation = 'relu',
regularize = True,
verbose = verbose
)

net.add_layer ( type = "dot_product",
origin = "dot_product_1",
id = "dot_product_2",
num_neurons = 800,
activation = 'relu',
activation = 'relu',
regularize = True,
verbose = verbose
)

net.add_layer ( type = "classifier",
id = "softmax",
origin = "dot_product_2",
num_classes = 10,
regularize = True,
activation = 'softmax',
verbose = verbose
)
@@ -118,12 +130,13 @@ def lenet5 ( dataset= None, verbose = 1 ):
)

learning_rates = (0.05, 0.01, 0.001)
net.pretty_print()
#draw_network(net.graph, filename = 'lenet.png')
#net.pretty_print()
#draw_network(net.graph, filename = 'lenet.png')

net.cook( optimizer = 'main',
objective_layer = 'obj',
datastream = 'data',
classifier = 'softmax',
classifier_layer = 'softmax',
verbose = verbose
)

@@ -192,40 +205,42 @@ def lenet_maxout ( dataset= None, verbose = 1 ):
net.add_layer ( type = "input",
id = "input",
verbose = verbose,
datastream_origin = 'data', # if you didn't add a dataset module, now is
origin = 'data', # if you didn't add a dataset module, now is
# the time.
mean_subtract = False )

net.add_layer ( type = "conv_pool",
origin = "input",
id = "conv_pool_1",
num_neurons = 20,
num_neurons = 40,
filter_size = (5,5),
pool_size = (2,2),
activation = ('maxout', 'maxout', 2),
batch_norm = True,
batch_norm = True,
regularize = True,
verbose = verbose
)

net.add_layer ( type = "conv_pool",
origin = "conv_pool_1",
id = "conv_pool_2",
num_neurons = 50,
num_neurons = 100,
filter_size = (3,3),
pool_size = (2,2),
activation = ('maxout', 'maxout', 2),
batch_norm = True,
regularize = True,
dropout_rate = 0, # because of maxout
verbose = verbose
)

net.add_layer ( type = "dot_product",
origin = "conv_pool_2",
id = "dot_product_1",
num_neurons = 800,
activation = 'relu',
num_neurons = 1600,
regularize = True,
activation = ('maxout', 'maxout', 2),
batch_norm = True,
dropout_rate = 0.5,
verbose = verbose
)

@@ -236,13 +251,15 @@ def lenet_maxout ( dataset= None, verbose = 1 ):
activation = 'relu',
batch_norm = True,
dropout_rate = 0.5,
regularize = True,
verbose = verbose
)

net.add_layer ( type = "classifier",
id = "softmax",
origin = "dot_product_2",
num_classes = 10,
regularize = True,
activation = 'softmax',
verbose = verbose
)
@@ -283,7 +300,7 @@ def lenet_maxout ( dataset= None, verbose = 1 ):
dataset = None
if len(sys.argv) > 1:
if sys.argv[1] == 'create_dataset':
from yann.special.datasets import cook_mnist
from yann.special.datasets import cook_cifar10 as cook_mnist
data = cook_mnist (verbose = 2)
dataset = data.dataset_location()
else:
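For the classifier tutorials, cook() now takes classifier_layer instead of classifier, and every trainable layer sets regularize = True. A sketch of the renamed call, restricted to arguments visible in this diff:

net.cook ( optimizer = 'main',
           objective_layer = 'obj',
           datastream = 'data',
           classifier_layer = 'softmax', # renamed from classifier
           verbose = verbose )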
83 changes: 14 additions & 69 deletions pantry/tutorials/log_reg.py
@@ -1,77 +1,23 @@
from yann.network import network
from yann.utils.graph import draw_network

def log_reg ( dataset, verbose ):
def log_reg ( dataset ):
"""
This function is a demo example of multi-layer neural networks from the infamous paper by
Yann LeCun. This is an example code. You should study this code rather than merely run it.
This function is a demo example of logistic regression.
"""
dataset_params = {
"dataset" : dataset,
"svm" : False,
"n_classes" : 10,
"id" : 'data',
}


# intitialize the network
net = network( verbose = verbose )

# or you can add modules after you create the net.
net.add_module ( type = 'datastream',
params = dataset_params,
verbose = verbose )

# add an input layer
net.add_layer ( type = "input",
id = "input",
verbose = verbose,
datastream_origin = 'data',
# if you didn't add a dataset module, now is the time.
mean_subtract = False )

net.add_layer ( type = "classifier",
id = "softmax",
origin = "input",
num_classes = 10,
activation = 'softmax',
verbose = verbose
)

net.add_layer ( type = "objective",
id = "nll",
origin = "softmax",
objective = 'nll',
verbose = verbose
)

# objective provided by classifier layer
# nll-negative log likelihood,
# cce-categorical cross entropy,
# bce-binary cross entropy,
# hinge-hinge loss
learning_rates = (0.05, 0.01, 0.001)
# (initial_learning_rate, annealing, ft_learning_rate)

net.cook( objective_layer = 'nll',
datastream = 'data',
classifier = 'softmax',
learning_rates = learning_rates,
verbose = verbose
)
# visualization of the network.
# draw_network(net.graph, filename = 'log_reg.png')
dataset_params = { "dataset" : dataset,
"svm" : False,
"n_classes" : 10 }
net = network()
net.add_module ( type = 'datastream', params = dataset_params )
net.add_layer ( type = "input", datastream_origin = 'data')
net.add_layer ( type = "classifier", num_classes = 10 )
net.add_layer ( type = "objective" )
net.cook()
net.pretty_print()
net.train( epochs = (20, 20),
validate_after_epochs = 1,
training_accuracy = True,
show_progress = True,
early_terminate = True,
verbose = verbose)

net.test( show_progress = True,
verbose = verbose)
net.train()
net.test()


## Boiler Plate ##
@@ -94,5 +40,4 @@ def log_reg ( dataset, verbose ):
data = cook_mnist (verbose = 3)
dataset = data.dataset_location()

log_reg ( dataset, verbose = 2 )

log_reg ( dataset )
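With the defaults doing the wiring, a hypothetical end-to-end run of the condensed tutorial needs only the boilerplate that survives above:

from yann.special.datasets import cook_mnist

data = cook_mnist (verbose = 3)
log_reg ( data.dataset_location() )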
8 changes: 3 additions & 5 deletions pantry/tutorials/mlp.py
@@ -37,9 +37,7 @@ def mlp ( dataset, verbose = 1 ):
net.add_layer ( type = "input",
id = "input",
verbose = verbose,
datastream_origin = 'data', # if you didn't add a dataset module, now is
# the time.
mean_subtract = True )
datastream_origin = 'data')

net.add_layer ( type = "dot_product",
origin = "input",
@@ -66,15 +64,15 @@ def mlp ( dataset, verbose = 1 ):
)

net.add_layer ( type = "objective",
id = "nll",
id = "obj",
origin = "softmax",
verbose = verbose
)

learning_rates = (0.05, 0.01, 0.001)

net.cook( optimizer = 'main',
objective_layer = 'nll',
objective_layer = 'obj',
datastream = 'data',
classifier = 'softmax',
verbose = verbose
14 changes: 11 additions & 3 deletions yann/layers/abstract.py
@@ -110,18 +110,26 @@ def _graph_attributes(self, verbose = 2):
out["type"] = self.type
return out

def get_params (self ,verbose = 2):
def get_params (self , borrow = True, verbose = 2):
"""
This method returns the parameters of the layer in a numpy ndarray format.
Args:
borrow : Theano borrow, default is True.
verbose: As always
Notes:
This is a slow method, because we are taking the values out of GPU. Ordinarily, I should
have used get_value( borrow = True ), but I can't do this because some parameters are
theano.tensor.var.TensroVariable which needs to be run through eval.
theano.tensor.var.TensorVariable which needs to be run through eval.
"""
out = []

for p in self.params:
out.append(numpy.asarray(p.eval()))
try:
out.append(p.get_value(borrow = borrow))
except:
out.append(numpy.asarray(p.eval()))
return out

def _dropout(rng, params, dropout_rate, verbose = 2):
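get_params now tries the fast get_value path first and falls back to eval() only for parameters that are still symbolic. A hypothetical usage sketch (the layer id is borrowed from the lenet tutorial above; any cooked network's layers dict would do):

params = net.layers['dot_product_1'].get_params ( borrow = True, verbose = 2 )

for p in params:
    print p.shape   # every entry comes back as a numpy ndarray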
