feat: added early stop patience
Avsecz committed Feb 9, 2017
1 parent 4f92562 commit bb21a40
Showing 3 changed files with 42 additions and 0 deletions.
12 changes: 12 additions & 0 deletions HISTORY.rst
@@ -39,3 +39,15 @@ History
------------------

* bugfix: multi-task learning

0.4.2 (2017-02-09)
------------------

* same as 0.4.1 (pypi upload failed for 0.4.1)

0.4.3 (2017-02-09)
------------------

* feat: added early_stop_patience argument


29 changes: 29 additions & 0 deletions concise/concise.py
@@ -50,6 +50,7 @@ class Concise(object):
optimizer (str): Which optimizer to use. Can be :code:`"adam"` or :code:`"lbfgs"`.
batch_size (int): Batch size - number of training samples used in one parameter update iteration.
n_epochs (int): Number of epochs - how many times each training sample is used in parameter update iterations.
early_stop_patience (int or None): Number of epochs with no improvement after which training will be stopped. If None, early stopping is not used.
n_iterations_checkpoint (int): Number of internal L-BFGS-B steps to perform at every step.
motif_length (int): Length of the trained motif (number), i.e. width of the convolutional filter.
n_motifs (int): Number of motifs to train.
@@ -84,6 +85,7 @@ class Concise(object):
"optimizer": {str},
"batch_size": {int, np.int64},
"n_epochs": {int, np.int64},
"early_stop_patience": {int, type(None)},
"n_iterations_checkpoint": {int, np.int64},
"motif_length": {int, np.int64},
"n_motifs": {int, np.int64},
@@ -116,6 +118,7 @@ def __init__(self,
optimizer="adam",
batch_size=32,
n_epochs=3,
early_stop_patience=None,
n_iterations_checkpoint=20,
# network details
motif_length=9,
@@ -621,6 +624,7 @@ def train(self, X_feat, X_seq, y,
self._var_res = _train(X_feat, X_seq, y,
X_feat_valid, X_seq_valid, y_valid,
graph=self._graph, var=self._var, other_var=self._other_var,
early_stop_patience=self._param["early_stop_patience"],
n_cores=n_cores)

self._model_fitted = True
@@ -652,6 +656,7 @@ def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y):
def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
X_feat_valid, X_seq_valid, y_valid,
graph, var, other_var,
early_stop_patience=None,
n_cores=3):
"""
Train the actual model
@@ -682,6 +687,8 @@ def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,

sess.run(other_var["init"])

best_performance = None
best_performance_epoch = 0
for step in range(n_epochs):
# run the model (sess.run)
# compute the optimizer, loss and train_prediction in the graph
@@ -710,6 +717,15 @@ def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
step_history.append(step / num_steps)
print('Step %4d: loss %f, train mse: %f, validation mse: %f' %
(step, l, train_accuracy, valid_accuracy))
# check if this is the best validation mse so far (lower is better)
if best_performance is None or valid_accuracy <= best_performance:
best_performance = valid_accuracy
best_performance_epoch = step

if early_stop_patience is not None and step > best_performance_epoch + early_stop_patience:
print("Early stopping. best_performance_epoch: %d, best_performance: %f" %
(best_performance_epoch, best_performance))
break

# get the test accuracies
train_accuracy_final = self._accuracy_in_session(sess, other_var,
@@ -762,6 +778,7 @@ def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
def _train_adam(self, X_feat_train, X_seq_train, y_train,
X_feat_valid, X_seq_valid, y_valid,
graph, var, other_var,
early_stop_patience=None,
n_cores=3):
"""
Train the actual model
@@ -797,6 +814,8 @@ def _train_adam(self, X_feat_train, X_seq_train, y_train,

print('Initialized')
epoch_count = 0
best_performance = None
best_performance_epoch = 0
for step in range(num_steps * n_epochs):
# where in the model are we
# get the batch data + batch labels
@@ -837,6 +856,16 @@ def _train_adam(self, X_feat_train, X_seq_train, y_train,
print('Step %4d (epoch %d): loss %f, train mse: %f, validation mse: %f' %
(step, epoch, l, train_accuracy, valid_accuracy))

# check if this is the best validation mse so far (lower is better)
if best_performance is None or valid_accuracy <= best_performance:
best_performance = valid_accuracy
best_performance_epoch = epoch

if early_stop_patience is not None and epoch > best_performance_epoch + early_stop_patience:
print("Early stopping. best_performance_epoch: %d, best_performance: %f" %
(best_performance_epoch, best_performance))
break

# get the test accuracies
train_accuracy_final = self._accuracy_in_session(sess, other_var,
X_feat_train, X_seq_train, y_train)
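For reference, the patience rule added to both training loops above tracks the best validation mse seen so far and the epoch in which it occurred, then stops once no improvement has happened for early_stop_patience epochs. Below is a minimal, self-contained sketch of that idea; it is an illustration of the rule, not part of the concise API.

# Standalone sketch of the patience rule used in the diff above (illustration only).
# Lower validation mse counts as an improvement; a tie (<=) also resets the patience.
def run_with_early_stopping(valid_mse_per_epoch, early_stop_patience=3):
    best_performance = None
    best_performance_epoch = 0
    for epoch, valid_mse in enumerate(valid_mse_per_epoch):
        if best_performance is None or valid_mse <= best_performance:
            best_performance = valid_mse
            best_performance_epoch = epoch
        if early_stop_patience is not None and epoch > best_performance_epoch + early_stop_patience:
            print("Early stopping. best_performance_epoch: %d, best_performance: %f" %
                  (best_performance_epoch, best_performance))
            break
    return best_performance_epoch, best_performance

# Example: the validation mse stops improving after epoch 2, so the loop breaks at epoch 6.
print(run_with_early_stopping([0.9, 0.5, 0.4, 0.41, 0.42, 0.43, 0.44], early_stop_patience=3))

Because the comparison uses <=, an epoch that merely ties the best value also resets the patience counter, mirroring the behaviour of the code added to _train_lbfgs and _train_adam.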
1 change: 1 addition & 0 deletions tests/test_Concise_precision.py
@@ -24,6 +24,7 @@ def setup_class(cls):
cls.data[0]["n_motifs"] = 1
cls.data[0]["motif_length"] = 1
cls.data[0]["step_size"] = 0.001
cls.data[0]["early_stop_patience"] = 3

def test_non_std(self):
# test the nice print:
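For completeness, a usage sketch of the new argument. The exact import path, validation-data handling, and data preparation are not shown in this diff, so the names below (X_feat, X_seq, y and the import) are assumptions rather than the documented API.

# Usage sketch (assumed API surface beyond what this diff shows).
from concise import Concise  # assumed import path

co = Concise(optimizer="adam",
             batch_size=32,
             n_epochs=100,
             early_stop_patience=5)  # stop after 5 epochs without validation-mse improvement
co.train(X_feat, X_seq, y)  # X_feat, X_seq, y: pre-prepared arrays (placeholders here)

Setting early_stop_patience=None (the default) keeps the previous behaviour of training for the full n_epochs.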
