Add tensorboard support in Speedometer. (#5345)
* Add tensorboard support in Speedometer.

* fix pylint.

* Add tensorboard_callback.

* Refactor.

* fix lint.
zihaolucky authored and piiswrong committed Mar 24, 2017
1 parent b276a9d commit 1550f17
Showing 2 changed files with 58 additions and 0 deletions.
2 changes: 2 additions & 0 deletions python/mxnet/contrib/__init__.py
@@ -6,3 +6,5 @@

from . import symbol as sym
from . import ndarray as nd

from . import tensorboard
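
With these two added lines the new module is exposed under the contrib namespace. A quick import check, assuming an MXNet build that includes this commit (no tensorboard package is needed just to import, since that dependency is only loaded inside the constructor):

from mxnet.contrib import tensorboard
print(tensorboard.LogMetricsCallback)  # the class added in the file below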
56 changes: 56 additions & 0 deletions python/mxnet/contrib/tensorboard.py
@@ -0,0 +1,56 @@
# coding: utf-8
"""TensorBoard functions that can be used to log various statuses during an epoch."""
from __future__ import absolute_import

import logging


class LogMetricsCallback(object):
    """Log metrics periodically in TensorBoard.

    This callback works almost the same as `callback.Speedometer`, but writes a TensorBoard
    event file for visualization. For more usage, please refer to https://github.com/dmlc/tensorboard

    Parameters
    ----------
    logging_dir : str
        TensorBoard event file directory.
        After that, use `tensorboard --logdir=path/to/logs` to launch TensorBoard visualization.
    prefix : str
        Prefix for a metric name of `scalar` value.
        You might want to use this param to leverage the TensorBoard plot feature,
        where TensorBoard plots different curves in one graph when they have the same `name`.
        The following example shows the usage (how to compare a train and an eval metric
        in the same graph).

    Examples
    --------
    >>> # Log train and eval metrics under different directories.
    >>> training_log = 'logs/train'
    >>> evaluation_log = 'logs/eval'
    >>> # In this case, each training and evaluation metric pair has the same name;
    >>> # you can add a prefix to make them separate.
    >>> batch_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(training_log)]
    >>> eval_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(evaluation_log)]
    >>> # run
    >>> model.fit(train,
    >>>     ...
    >>>     batch_end_callback=batch_end_callbacks,
    >>>     eval_end_callback=eval_end_callbacks)
    >>> # Then use `tensorboard --logdir=logs/` to launch TensorBoard visualization.
    """
    def __init__(self, logging_dir, prefix=None):
        self.prefix = prefix
        try:
            from tensorboard import SummaryWriter
            self.summary_writer = SummaryWriter(logging_dir)
        except ImportError:
            logging.error('You can install tensorboard via `pip install tensorboard`.')
            # Re-raise so a missing dependency fails fast here rather than as an
            # AttributeError when the callback is first invoked.
            raise

    def __call__(self, param):
        """Callback to log training speed and metrics in TensorBoard."""
        if param.eval_metric is None:
            return
        name_value = param.eval_metric.get_name_value()
        for name, value in name_value:
            if self.prefix is not None:
                name = '%s-%s' % (self.prefix, name)
            self.summary_writer.add_scalar(name, value)
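
A minimal end-to-end usage sketch, not part of this commit: it wires the new LogMetricsCallback into the Module API on synthetic data. The toy network, data, and `logs/*` directories are illustrative assumptions, and it requires the legacy dmlc `tensorboard` package (`pip install tensorboard`) alongside mxnet.

"""Hypothetical usage sketch for LogMetricsCallback (not from this commit)."""
import numpy as np
import mxnet as mx
from mxnet.contrib.tensorboard import LogMetricsCallback

# Synthetic binary-classification data (illustrative only).
x = np.random.rand(1000, 10).astype('float32')
y = (x.sum(axis=1) > 5).astype('float32')
train_iter = mx.io.NDArrayIter(x[:800], y[:800], batch_size=32, shuffle=True)
eval_iter = mx.io.NDArrayIter(x[800:], y[800:], batch_size=32)

# A tiny MLP ending in the default 'softmax' output.
data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data, num_hidden=16)
net = mx.sym.Activation(net, act_type='relu')
net = mx.sym.FullyConnected(net, num_hidden=2)
net = mx.sym.SoftmaxOutput(net, name='softmax')
mod = mx.mod.Module(net)

# Batch-level training metrics are written to logs/train and epoch-level eval
# metrics to logs/eval; `tensorboard --logdir=logs/` then shows both sets of curves.
mod.fit(train_iter,
        eval_data=eval_iter,
        num_epoch=2,
        batch_end_callback=[LogMetricsCallback('logs/train')],
        eval_end_callback=[LogMetricsCallback('logs/eval')])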
