This repository has been archived by the owner on Jul 10, 2021. It is now read-only.

Commit 84ff193
Merge d4c905d into 3a06089
2 parents: 3a06089 + d4c905d
alexjc committed Nov 20, 2015
Showing 4 changed files with 99 additions and 12 deletions.
2 changes: 2 additions & 0 deletions sknn/backend/lasagne/mlp.py
@@ -243,12 +243,14 @@ def _batch_impl(self, X, y, processor, output, shuffle):
         progress, batches = 0, X.shape[0] / self.batch_size
         loss, count = 0.0, 0
         for Xb, yb in self._iterate_data(X, y, self.batch_size, shuffle):
+            self._do_callback('on_batch_start', locals())
             loss += processor(Xb, yb)
             count += 1
             while count / batches > progress / 60:
                 sys.stdout.write(output)
                 sys.stdout.flush()
                 progress += 1
+            self._do_callback('on_batch_finish', locals())
         sys.stdout.write('\r')
         return loss / count
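The two hooks added to `_batch_impl` hand the loop's `locals()` to the user callback once per mini-batch, so a callback can inspect `Xb`, `yb`, and the running `loss` and `count` totals. A minimal sketch of a single-callable batch monitor (the function name and output format are illustrative, not part of this commit):

# Hypothetical batch monitor: receives the event name plus the training
# loop's locals() (with `self` stripped out by _do_callback below).
def watch_batches(event, **variables):
    if event == 'on_batch_finish':
        loss, count = variables['loss'], variables['count']
        print("batch %d: running average loss = %.4f" % (count, loss / count))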

42 changes: 30 additions & 12 deletions sknn/mlp.py
@@ -118,53 +118,69 @@ def _reshape(self, X, y=None):
             X = X.reshape((X.shape[0], numpy.product(X.shape[1:])))
         return X, y
 
+    def _do_callback(self, event, variables):
+        if self.callback is None:
+            return
+
+        del variables['self']
+        if isinstance(self.callback, dict):
+            function = self.callback.get(event, None)
+            return function(**variables) if function else None
+        else:
+            return self.callback(event, **variables)
+
     def _train(self, X, y):
         assert self.n_iter or self.n_stable,\
             "Neither n_iter nor n_stable were specified; training would loop forever."
 
         best_train_error, best_valid_error = float("inf"), float("inf")
         best_params = []
         n_stable = 0
+        self._do_callback('on_train_start', locals())
 
         for i in itertools.count(1):
-            start = time.time()
+            start_time = time.time()
+            self._do_callback('on_epoch_start', locals())
 
-            best_train = False
+            is_best_train = False
             avg_train_error = self._backend._train_impl(X, y)
             if avg_train_error is not None:
                 if math.isnan(avg_train_error):
                     raise RuntimeError("Training diverged and returned NaN.")
 
                 best_train_error = min(best_train_error, avg_train_error)
-                best_train = bool(avg_train_error < best_train_error * (1.0 + self.f_stable))
+                is_best_train = bool(avg_train_error < best_train_error * (1.0 + self.f_stable))
 
-            best_valid = False
+            is_best_valid = False
             avg_valid_error = None
             if self.valid_set is not None:
                 avg_valid_error = self._backend._valid_impl(*self.valid_set)
                 if avg_valid_error is not None:
                     best_valid_error = min(best_valid_error, avg_valid_error)
-                    best_valid = bool(avg_valid_error < best_valid_error * (1.0 + self.f_stable))
+                    is_best_valid = bool(avg_valid_error < best_valid_error * (1.0 + self.f_stable))
 
+            finish_time = time.time()
             log.debug("\r{:>5} {}{}{} {}{}{} {:>5.1f}s".format(
                 i,
-                ansi.BLUE if best_train else "",
+                ansi.BLUE if is_best_train else "",
                 "{0:>10.3e}".format(float(avg_train_error)) if (avg_train_error is not None) else " N/A ",
-                ansi.ENDC if best_train else "",
+                ansi.ENDC if is_best_train else "",
 
-                ansi.GREEN if best_valid else "",
+                ansi.GREEN if is_best_valid else "",
                 "{:>10.3e}".format(float(avg_valid_error)) if (avg_valid_error is not None) else " N/A ",
-                ansi.ENDC if best_valid else "",
+                ansi.ENDC if is_best_valid else "",
 
-                time.time() - start
+                finish_time - start_time
             ))
 
-            if best_valid or (self.valid_set is None and best_train):
+            if is_best_valid or (self.valid_set is None and is_best_train):
                 best_params = self._backend._mlp_to_array()
                 n_stable = 0
             else:
                 n_stable += 1
 
+            self._do_callback('on_epoch_finish', locals())
+
             if self.valid_set is not None and n_stable >= self.n_stable:
                 log.debug("")
                 log.info("Early termination condition fired at %i iterations.", i)
@@ -173,7 +189,8 @@ def _train(self, X, y):
log.debug("")
log.info("Terminating after specified %i total iterations.", i)
break


self._do_callback('on_train_finish', locals())
self._backend._array_to_mlp(best_params, self._backend.mlp)

     def _fit(self, X, y):
@@ -362,6 +379,7 @@ def partial_fit(self, X, y, classes=None):
             self.label_binarizers = [LB() for _ in range(y.shape[1])]
             for lb, cls in zip(self.label_binarizers, classes):
                 lb.fit(cls)
+
         return self.fit(X, y)

     def predict_proba(self, X):
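As `_do_callback` above shows, the `callback` attribute may be either a single callable, invoked as `callback(event, **variables)` for every event, or a dict mapping event names to functions, invoked as `function(**variables)` with the event name omitted. The variables are the caller's `locals()` minus `self`, and `_train` ignores the return value, so callbacks observe training rather than steer it. A sketch of both shapes (names are illustrative):

# Shape 1 -- one callable sees every event by name.
def on_any_event(event, **variables):
    print(event, sorted(variables.keys()))

# Shape 2 -- a dict dispatches per event; the event name is not passed.
def report_epoch(**variables):
    # avg_train_error is a local of _train, exposed through locals().
    print(variables['avg_train_error'])

handlers = {'on_epoch_finish': report_epoch}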
2 changes: 2 additions & 0 deletions sknn/nn.py
@@ -391,6 +391,7 @@ def __init__(
             mutator=None,
             debug=False,
             verbose=None,
+            callback=None,
             **params):
 
         assert warning is None,\
@@ -442,6 +443,7 @@ def __init__(
         self.mutator = mutator
         self.debug = debug
         self.verbose = verbose
+        self.callback = callback
 
         self._backend = None
         self._create_logger()
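With the parameter stored on the constructor, either shape can be passed when building a network. A usage sketch against the public estimator API (assuming the usual `sknn.mlp.Regressor` entry point, which this diff does not touch):

import numpy
from sknn.mlp import Regressor, Layer

def log_epochs(event, **variables):
    # `i` and `avg_train_error` are locals of _train (see sknn/mlp.py above).
    if event == 'on_epoch_finish':
        print("epoch %d: train error %r" % (variables['i'], variables['avg_train_error']))

X, y = numpy.zeros((8, 16)), numpy.zeros((8, 4))
nn = Regressor(layers=[Layer("Linear")], n_iter=4, callback=log_epochs)
nn.fit(X, y)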
65 changes: 65 additions & 0 deletions sknn/tests/test_callback.py
@@ -0,0 +1,65 @@
import unittest
from nose.tools import (assert_in, assert_raises, assert_equals)

import collections
import numpy
from sknn.mlp import MultiLayerPerceptron as MLP, Layer as L

import sknn.mlp


class TestSingleCallback(unittest.TestCase):

    def setUp(self):
        self.data = collections.defaultdict(list)

    def _callback(self, event, **variables):
        self.data[event].append(variables)

    def test_TrainingCallbacks(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn = MLP(layers=[L("Linear")], n_iter=4, callback=self._callback)
        nn._fit(a_in, a_out)
        assert_equals(len(self.data['on_train_start']), 1)
        assert_equals(len(self.data['on_train_finish']), 1)

    def test_EpochCallbacks(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn = MLP(layers=[L("Linear")], n_iter=4, callback=self._callback)
        nn._fit(a_in, a_out)
        assert_equals(len(self.data['on_epoch_start']), 4)
        assert_equals(len(self.data['on_epoch_finish']), 4)

    def test_BatchCallbacks(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn = MLP(layers=[L("Linear")], n_iter=1, batch_size=4, callback=self._callback)
        nn._fit(a_in, a_out)
        assert_equals(len(self.data['on_batch_start']), 2)
        assert_equals(len(self.data['on_batch_finish']), 2)


class TestSpecificCallback(unittest.TestCase):

    def setUp(self):
        self.data = []

    def _callback(self, **variables):
        self.data.append(variables)

    def test_TrainingCallback(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn = MLP(layers=[L("Linear")], n_iter=4, callback={'on_train_start': self._callback})
        nn._fit(a_in, a_out)
        assert_equals(len(self.data), 1)

    def test_EpochCallback(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn = MLP(layers=[L("Linear")], n_iter=4, callback={'on_epoch_start': self._callback})
        nn._fit(a_in, a_out)
        assert_equals(len(self.data), 4)

    def test_BatchCallbacks(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn = MLP(layers=[L("Linear")], n_iter=1, batch_size=4, callback={'on_batch_start': self._callback})
        nn._fit(a_in, a_out)
        assert_equals(len(self.data), 2)
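These tests follow the project's existing nose conventions (note the `nose.tools` imports), so they should run with something like `nosetests sknn.tests.test_callback`.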
