
Commit

Pushing for the miniproject.
Ragav Venkatesan committed Feb 15, 2017
1 parent e72b79b commit 158f25a
Showing 5 changed files with 178 additions and 201 deletions.
5 changes: 3 additions & 2 deletions .gitignore
@@ -12,7 +12,8 @@
.cache
.eggs
*.png
/visualizer
/lenet5
visualizer
lenet5
.vscode
svhn
resultor
10 changes: 7 additions & 3 deletions pantry/tutorials/mat2yann.py
@@ -26,12 +26,16 @@ def cook_svhn_normalized( location, verbose = 1, **kwargs):
if not 'data_params' in kwargs.keys():

data_params = {
"source" : 'mat',
"source" : 'matlab',
"name" : 'yann_Svhn', # some name.
"location" : location, # some location
"location" : 'svhn', # some location
"height" : 32,
"width" : 32,
"channels" : 3 }
"channels" : 3,
"batches2test" : 42,
"batches2train" : 56,
"batches2validate" : 28,
"batch_size" : 500,}

else:
data_params = kwargs['data_params']
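For context, a minimal sketch of driving the tutorial cook with an explicit data_params dictionary that mirrors the defaults added above; the import path and the 'svhn' directory are assumptions, and when no data_params is supplied the hard-coded defaults in the hunk are used instead.

from pantry.tutorials.mat2yann import cook_svhn_normalized

# Mirrors the default dictionary above; 'svhn' is a placeholder directory that
# is assumed to already hold the SVHN .mat batches.
data_params = {
    "source"            : 'matlab',
    "name"              : 'yann_Svhn',
    "location"          : 'svhn',
    "height"            : 32,
    "width"             : 32,
    "channels"          : 3,
    "batches2train"     : 56,
    "batches2test"      : 42,
    "batches2validate"  : 28,
    "batch_size"        : 500, }

cook_svhn_normalized(location = 'svhn', verbose = 2, data_params = data_params)
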
69 changes: 45 additions & 24 deletions yann/modules/resultor.py
@@ -15,7 +15,7 @@ class resultor(module):
resultor_init_args = {
"root" : "<root directory to save stuff inside>",
"results" : "<results_file_name>.txt",
"accuracy" : "<error_file_name>.txt",
"errors" : "<error_file_name>.txt",
"costs" : "<cost_file_name>.txt",
"confusion" : "<confusion_file_name>.txt",
"network" : "<network_save_file_name>.pkl"
@@ -48,7 +48,7 @@ def __init__( self, resultor_init_args, verbose = 1):
if not "results" in resultor_init_args.keys():
resultor_init_args["results"] = "results.txt"

if not "accuracy" in resultor_init_args.keys():
if not "errors" in resultor_init_args.keys():
resultor_init_args["erros"] = "errors.txt"

if not "costs" in resultor_init_args.keys():
@@ -74,7 +74,7 @@ def __init__( self, resultor_init_args, verbose = 1):
self.root = value
elif item == "results":
self.results_file = value
elif item == "accuracy":
elif item == "errors":
self.error_file = value
elif item == "costs":
self.cost_file = value
@@ -94,30 +94,20 @@ def __init__( self, resultor_init_args, verbose = 1):
if verbose >= 3:
print "... Creating a root directory for save files"
os.makedirs(self.root)

for file in [self.results_file, self.error_file, self.cost_file, self.confusion_file,
self.learning_rate, self.momentum]:
f = open(self.root + "/" + file, 'w')
f.close()

if verbose >= 3:
print "... Resultor is initiliazed"

def cook ( self,
cost,
lr,
mom,
acc,
verbose = 2):
"""
Get the arrays from the network and make them available locally in this module.
Args:
cost: cost arrays (numpy array)
lr: learning rate array (theano tensor)
mom: momentum array (theano tensor)
acc: accuracy tuple of (training, validation)
print ( "... Resultor is initiliazed" )

"""



def process_results( ):
def process_results( self,
cost,
lr,
mom,
verbose = 2 ):
"""
This method will print results and also write them down in the appropriate files.
Expand All @@ -126,3 +116,34 @@ def process_results( ):
lr: Learning Rate, is a float
mom: Momentum, is a float.
"""
print ( ".. Cost : " + str(cost) )
if verbose >= 3:
print ( "... Learning Rate : " + str(lr) )
print ( "... Momentum : " + str(mom) )

f = open(self.root + "/" + self.cost_file, 'a')
f.write(str(cost))
f.write('\n')
f.close()

f = open(self.root + "/" + self.learning_rate, 'a')
f.write(str(lr))
f.write('\n')
f.close()

f = open(self.root + "/" + self.momentum, 'a')
f.write(str(mom))
f.write('\n')
f.close()

def plot (self, verbose = 2):
"""
This method will (should) plot all the values in the files.
"""
print "TBD"

def update_plot (self, verbose = 2):
"""
This method should update the open plots with costs and other values.
"""
print "TBD"
63 changes: 39 additions & 24 deletions yann/network.py
@@ -447,7 +447,17 @@ def _add_resultor(self, resultor_params = None, verbose = 2):
verbose: Similar to what is found in the rest of the toolbox.
"""
if resultor_params is None:
resultor_params = {}
resultor_params = {
"root" : "resultor",
"results" : "results.txt",
"errors" : "errors.txt",
"costs" : "costs.txt",
"confusion" : "confusion.txt",
"network" : "network.pkl",
"learning_rate" : "learning_rate.txt",
"momentum" : "momentum.txt",
"visualize" : False,
}

if not "id" in resultor_params.keys():
id = len(self.resultor) + 1
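
A sketch of overriding a few of the defaults above when adding the module by hand; net is assumed to be an already constructed yann.network.network object, and keys that are left out are presumably filled back in by resultor.__init__ (yann/modules/resultor.py above).

# net is assumed to be a constructed yann.network.network instance;
# 'experiment_1' is a placeholder root directory.
net._add_resultor(resultor_params = { "root"      : "experiment_1",
                                      "results"   : "results.txt",
                                      "costs"     : "costs.txt",
                                      "visualize" : False },
                  verbose = 2)
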
@@ -1794,15 +1804,11 @@ def _cook_resultor (self, resultor = None, verbose = 2):
Args:
verbose: as always
"""
if verbsoe >= 3:
if verbose >= 3:
print "... Cooking the resultor"

resultor.cook(cost = self.cost,
lr = self.learning_rate,
mom = self.mom,
acc = (self.training_accuracy, self.validation_accuracy)
)

# Nothing is needed to cook resultor.

if verbose >= 3:
print "... Resultor is cooked"

@@ -1830,6 +1836,7 @@ def visualize_filters( self, epoch = 0, verbose = 2):
self.cooked_visualizer.visualize_filters(layers = self.dropout_layers,
epoch = epoch,
verbose = verbose)

def visualize(self, epoch = 0, verbose =2 ):
"""
This method will use the cooked visualizer to save down the visualizations
@@ -1841,8 +1848,18 @@ def visualize(self, epoch = 0, verbose =2 ):
self.visualize_activities(epoch = epoch, verbose = verbose)
self.visualize_filters(epoch = epoch, verbose = verbose)

def write_results(self, epoch = 0, verbose =2 ):

"""
This method will use the cooked resultor to write down the results.

Args:
epoch: supply the epoch number (used to decide when the results are written).
"""
if (epoch % self.write_results_after_epochs == 0):
self.print_status(epoch = epoch, verbose = verbose)

def cook(self, verbose = 2, **kwargs):
"""
This function builds the backprop network, and makes the trainer, tester and validator
Expand Down Expand Up @@ -2032,22 +2049,18 @@ def print_status (self, epoch , verbose = 2):
if self.cooked_datastream is None:
raise Exception(" Cook first then run this.")

if verbose >=2 :
if len(self.cost) < self.batches2train * self.mini_batches_per_batch[0]:
print ".. Cost : " + str(self.cost[-1])
else:
print ".. Cost : " + str(numpy.mean(self.cost[-1 *
self.batches2train * self.mini_batches_per_batch[0]:]))
if verbose >= 3:
print "... Learning Rate : " + str(self.learning_rate.get_value(borrow=\
self.borrow))
print "... Momentum : " + str(self.current_momentum(epoch))
if len(self.cost) < self.batches2train * self.mini_batches_per_batch[0]:
cost = self.cost[-1]
else:
cost = numpy.mean(self.cost[-1 * self.batches2train * self.mini_batches_per_batch[0]:])

self.cooked_resultor.process_results(cost = self.cost[-1],
lr = self.learning_rate.get_value(borrow=self.borrow),
mom = self.current_momentum(epoch),
verbose = verbose)
lr = self.learning_rate.get_value(borrow = self.borrow)
mom = self.current_momentum(epoch)

self.cooked_resultor.process_results(cost = cost,
lr = lr,
mom = mom,
verbose = verbose)
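
As a small standalone illustration of the windowed mean above (made-up numbers): with batches2train = 2 and mini_batches_per_batch[0] = 3, the reported cost is the mean over the last 2 * 3 = 6 minibatch costs.

import numpy

cost_history = [0.9, 0.8, 0.7, 0.6, 0.55, 0.5, 0.45, 0.4]
window = 2 * 3                                    # batches2train * mini_batches_per_batch[0]
print ( numpy.mean(cost_history[-1 * window:]) )  # prints 0.5333...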

def _print_layer (self, id, prefix = " ", nest = True, last = True):
"""
@@ -2387,7 +2400,9 @@ def train(self, verbose = 2, **kwargs):
training_accuracy = training_accuracy,
show_progress = show_progress,
verbose = verbose )
self.visualize ( epoch = epoch_counter , verbose = verbose)
self.visualize ( epoch = epoch_counter , verbose = verbose )
self.print_status ( epoch = epoch_counter, verbose=verbose )

if best is True:
copy_params(source = self.active_params, destination= nan_insurance ,
borrow = self.borrow)
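With these changes the training loop both visualizes and prints status every epoch, and print_status hands the averaged cost, current learning rate and momentum to the cooked resultor, which appends them to its text files. A minimal sketch of inspecting those files after a run, assuming the default resultor_params from _add_resultor above (network construction, cook() and train() are omitted):

import numpy

# One value is appended per print_status call.
costs = numpy.loadtxt("resultor/costs.txt", ndmin = 1)
lrs   = numpy.loadtxt("resultor/learning_rate.txt", ndmin = 1)
moms  = numpy.loadtxt("resultor/momentum.txt", ndmin = 1)

print ( "epochs logged  : " + str(len(costs)) )
print ( "final cost     : " + str(costs[-1]) )
print ( "final lr       : " + str(lrs[-1]) )
print ( "final momentum : " + str(moms[-1]) )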
