better log for grad & model preparation
ppwwyyxx committed Apr 18, 2016
1 parent d04661e commit 0bd1e92
Showing 5 changed files with 14 additions and 9 deletions.
8 changes: 4 additions & 4 deletions README.md
@@ -1,11 +1,11 @@
 # tensorpack
 Neural Network Toolbox on TensorFlow
 
-In development. No document.
+In development. No document. See [examples](https://github.com/ppwwyyxx/tensorpack/tree/master/examples).
 
 ## Features:
 + Scoped abstraction of common models.
-+ Callbacks systems to control different aspects of training.
-+ Use `Dataflow` to gain fine-grained control on data preprocessing.
-+ Training and testing graph are modeled together. Just need to follow the conventions to setup stuffs.
++ Use `Dataflow` to define data preprocessing in pure Python.
++ Callbacks systems to control training.
++ Training and testing are described together. Just need to follow the conventions to setup stuffs.
 + Write summary easier for tensorboard.
2 changes: 1 addition & 1 deletion tensorpack/callbacks/common.py
@@ -36,7 +36,7 @@ def _get_vars(self):
         for v in vars:
             name = v.op.name
             if re.match('tower[1-9]', name):
-                logger.info("Skip {} when saving model.".format(name))
+                #logger.info("Skip {} when saving model.".format(name))
                 continue
             if 'tower0/' in name:
                 new_name = name.replace('tower0/', '')
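For context, the skip-and-rename convention this callback uses can be shown in isolation: replica towers `tower1`..`towerN` hold duplicates of `tower0`'s variables, so they are skipped, and the `tower0/` prefix is stripped from the names that do get saved. A minimal sketch in plain Python (the `vars_to_save` helper is hypothetical, not tensorpack's API):

```python
import re

# Map "name to save under" -> "original variable name", skipping
# replica towers and stripping the tower0/ prefix.
def vars_to_save(var_names):
    ret = {}
    for name in var_names:
        if re.match('tower[1-9]', name):
            continue                      # duplicate of tower0, don't save
        if 'tower0/' in name:
            name_to_save = name.replace('tower0/', '')
        else:
            name_to_save = name
        ret[name_to_save] = name
    return ret

print(vars_to_save(['tower0/conv1/W', 'tower1/conv1/W', 'global_step']))
# {'conv1/W': 'tower0/conv1/W', 'global_step': 'global_step'}
```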
2 changes: 1 addition & 1 deletion tensorpack/callbacks/param.py
@@ -50,7 +50,7 @@ def get_current_value(self):
         ret = self._get_current_value()
         if ret is not None and ret != self.last_value:
             logger.info("{} at epoch {} will change to {}".format(
-                self.op_name, self.epoch_num, ret))
+                self.op_name, self.epoch_num + 1, ret))
             self.last_value = ret
         return ret

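The `+ 1` reads as a fix to the log message only: the scheduler computes the value while epoch `epoch_num` is wrapping up, and the new value takes effect on the following epoch. A minimal sketch under that assumption (the `ScheduledParam` class is hypothetical, not tensorpack's API):

```python
# A scheduled hyperparameter that looks up the value for the *upcoming*
# epoch, which is why the log reports epoch_num + 1.
class ScheduledParam:
    def __init__(self, op_name, schedule):
        self.op_name = op_name
        self.schedule = dict(schedule)   # {epoch: value}
        self.epoch_num = 0
        self.last_value = None

    def get_current_value(self):
        ret = self.schedule.get(self.epoch_num + 1)
        if ret is not None and ret != self.last_value:
            print("{} at epoch {} will change to {}".format(
                self.op_name, self.epoch_num + 1, ret))
            self.last_value = ret
        return ret

p = ScheduledParam('learning_rate', [(1, 1e-2), (10, 1e-3)])
p.get_current_value()    # learning_rate at epoch 1 will change to 0.01
```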
4 changes: 2 additions & 2 deletions tensorpack/dataflow/imgaug/crop.py
@@ -36,8 +36,8 @@ def __init__(self, crop_shape):
 
     def _augment(self, img):
         orig_shape = img.arr.shape
-        h0 = (orig_shape[0] - self.crop_shape[0]) * 0.5
-        w0 = (orig_shape[1] - self.crop_shape[1]) * 0.5
+        h0 = int((orig_shape[0] - self.crop_shape[0]) * 0.5)
+        w0 = int((orig_shape[1] - self.crop_shape[1]) * 0.5)
         img.arr = img.arr[h0:h0+self.crop_shape[0],w0:w0+self.crop_shape[1]]
         if img.coords:
             raise NotImplementedError()
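The `int()` cast is needed because `* 0.5` always yields a float (even a whole-valued one like `4.0`), and NumPy rejects, or at best warns about, floating-point slice indices. A self-contained sketch of the fixed center-crop arithmetic (the `center_crop` helper is hypothetical):

```python
import numpy as np

def center_crop(arr, crop_h, crop_w):
    # Without int(), these offsets are floats and the slice below fails.
    h0 = int((arr.shape[0] - crop_h) * 0.5)
    w0 = int((arr.shape[1] - crop_w) * 0.5)
    return arr[h0:h0 + crop_h, w0:w0 + crop_w]

img = np.zeros((32, 30, 3))
print(center_crop(img, 24, 24).shape)   # (24, 24, 3)
```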
7 changes: 6 additions & 1 deletion tensorpack/train/trainer.py
@@ -88,8 +88,12 @@ def _average_grads(tower_grads):
     ret = []
     with tf.device('/gpu:0'):
         for grad_and_vars in zip(*tower_grads):
-            grad = tf.add_n([x[0] for x in grad_and_vars]) / float(len(tower_grads))
             v = grad_and_vars[0][1]
+            try:
+                grad = tf.add_n([x[0] for x in grad_and_vars]) / float(len(tower_grads))
+            except AssertionError:
+                logger.error("Error while processing gradients of {}".format(v.name))
+                raise
             ret.append((grad, v))
     return ret
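What `_average_grads` computes is easy to state outside TensorFlow: for each variable, average that variable's gradient across all towers and pair the result with the variable from tower 0. A NumPy sketch of the same reduction (the `average_grads` function below is a hypothetical stand-in, with arrays in place of TF tensors):

```python
import numpy as np

# tower_grads is one list of (grad, var) pairs per tower;
# zip(*tower_grads) regroups the pairs per variable.
def average_grads(tower_grads):
    ret = []
    for grad_and_vars in zip(*tower_grads):
        grads = [g for g, _ in grad_and_vars]   # this variable's grad on each tower
        avg = np.add.reduce(grads) / float(len(tower_grads))
        ret.append((avg, grad_and_vars[0][1]))  # keep the variable from tower 0
    return ret

w, b = "W", "b"   # stand-ins for variables
tower0 = [(np.ones(2), w), (np.array([2.0]), b)]
tower1 = [(np.full(2, 3.0), w), (np.array([4.0]), b)]
print(average_grads([tower0, tower1]))  # W -> [2., 2.], b -> [3.]
```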

@@ -129,6 +133,7 @@ def get_model_inputs():
                 tf.get_variable_scope().reuse_variables()
                 for k in coll_keys:
                     kept_summaries[k] = copy.copy(tf.get_collection(k))
+            logger.info("Graph built for tower {}.".format(i))
         for k in coll_keys:
             del tf.get_collection(k)[:]
             tf.get_collection(k).extend(kept_summaries[k])
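The surrounding bookkeeping is the interesting part: summaries recorded while building tower 0 are copied aside, the replica towers then append duplicates to the same global collections, and the saved copy is restored afterwards so each summary appears only once. A minimal pure-Python sketch of that pattern, with a dict standing in for TensorFlow's graph collections:

```python
# Hypothetical stand-in for tf.get_collection(): a mutable global dict.
graph_collections = {'summaries': []}

kept = {}
for i in range(2):                      # pretend to build two towers
    graph_collections['summaries'].append('loss_summary_tower{}'.format(i))
    if i == 0:
        kept['summaries'] = list(graph_collections['summaries'])
    print("Graph built for tower {}.".format(i))

# Restore the tower-0 snapshot, discarding the replicas' duplicates.
graph_collections['summaries'][:] = kept['summaries']
print(graph_collections['summaries'])   # ['loss_summary_tower0']
```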
