
Commit

update docstring
lanpa committed Aug 9, 2017
1 parent 6499c36 commit f35bb16

Showing 4 changed files with 64 additions and 28 deletions.
docs/conf.py (1 addition, 1 deletion)
@@ -21,7 +21,7 @@
 import sys
 # sys.path.insert(0, os.path.abspath('.'))
 sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
-import tensorboard
+#import tensorboard #uncomment to shadow pip installation
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
docs/tensorboard.rst (2 additions, 0 deletions)
@@ -4,4 +4,6 @@ tensorboard-pytorch

 .. autoclass:: SummaryWriter
    :members:
+
 .. automethod:: __init__
+.. autofunction:: tensorboard.embedding.add_embedding
tensorboard/embedding.py (32 additions, 15 deletions)
@@ -36,34 +36,51 @@ def make_pbtxt(save_path, metadata, label_img):
     f.write('}\n')
 
 
-'''
-mat: torch tensor. mat.size(0) is the number of data. mat.size(1) is the cardinality of feature dimensions
-save_path: self-explained.
-metadata: a list of {int, string} of length equals mat.size(0)
-label_img: 4D torch tensor. label_img.size(0) equals mat.size(0).
-'''
-
 def add_embedding(mat, save_path, metadata=None, label_img=None):
     """add embedding
     Args:
-        mat (torch.Tensor): Data identifier
-        save_path (string): Save location
-        metadata (list): A list of label
-        label_img (torch.Tensor): Images correspond to each point
+        mat (torch.Tensor): A matrix where each row is the feature vector of a data point
+        save_path (string): Save path (use ``writer.file_writer.get_logdir()`` to show the embedding along with other summaries)
+        metadata (list): A list of labels; each element will be converted to string
+        label_img (torch.Tensor): Images corresponding to each data point
     Shape:
-        mat
+        mat: :math:`(N, D)`, where N is the number of data points and D is the feature dimension
         label_img: :math:`(N, C, H, W)`
     .. note::
-        needs tensorflow
+        This function needs tensorflow installed, since it invokes tensorflow to dump the data.
+        Therefore I separate it from the SummaryWriter class. Please pass ``writer.file_writer.get_logdir()`` as ``save_path`` to prevent glitches.
+        If ``save_path`` differs from the SummaryWriter's save path, you need to pass the leaf directory to tensorboard's logdir argument,
+        otherwise it cannot display anything. E.g. if ``save_path`` equals 'path/to/embedding',
+        you need to call 'tensorboard --logdir=path/to/embedding' instead of 'tensorboard --logdir=path'.
+        Finally, this function breaks PyTorch if you have 'torch.nn.DataParallel' in your code. Use it after training completes.
+        See https://github.com/pytorch/pytorch/issues/2230
     Examples::
-        >>> # With square kernels and equal stride
-        >>> m = nn.Conv2d(16, 33, 3, stride=2)
+        from tensorboard.embedding import add_embedding
+        import keyword
+        import torch
+        meta = []
+        while len(meta) < 100:
+            meta = meta + keyword.kwlist  # get some strings
+        meta = meta[:100]
+        for i, v in enumerate(meta):
+            meta[i] = v + str(i)
+        label_img = torch.rand(100, 3, 10, 32)
+        for i in range(100):
+            label_img[i] *= i / 100.0
+        add_embedding(torch.randn(100, 5), 'embedding1', metadata=meta, label_img=label_img)
+        add_embedding(torch.randn(100, 5), 'embedding2', label_img=label_img)
+        add_embedding(torch.randn(100, 5), 'embedding3', metadata=meta)
     """
     try:
         os.makedirs(save_path)
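To make the docstring's note concrete, here is a minimal sketch of the recommended pattern (assuming this repository's ``SummaryWriter`` and its ``file_writer.get_logdir()`` accessor; the tag names and data are hypothetical):

    from tensorboard import SummaryWriter
    from tensorboard.embedding import add_embedding
    import torch

    writer = SummaryWriter()                # creates a logdir such as runs/<datetime>
    features = torch.randn(100, 5)          # (N, D) feature matrix
    labels = [str(i) for i in range(100)]   # one label per data point
    # Dump the embedding into the same logdir as the other summaries, so a single
    # 'tensorboard --logdir=runs' call can display everything together.
    add_embedding(features, writer.file_writer.get_logdir(), metadata=labels)
    writer.close()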
tensorboard/writer.py (29 additions, 12 deletions)
@@ -239,8 +239,8 @@ def add_scalar(self, name, scalar_value, global_step=None):
         Args:
             tag (string): Data identifier
-            scalar_value (float): value to save
-            global_step (int): global step value to record
+            scalar_value (float): Value to save
+            global_step (int): Global step value to record
         """
         self.file_writer.add_summary(scalar(name, scalar_value), global_step)
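A usage sketch (assuming a ``SummaryWriter`` instance named ``writer``; the tag is hypothetical):

    # record the training loss at step 10
    writer.add_scalar('loss/train', 0.42, global_step=10)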
@@ -250,8 +250,9 @@ def add_histogram(self, name, values, global_step=None, bins='tensorflow'):
         Args:
             tag (string): Data identifier
-            values (numpy.array):
-            global_step (int): global step value to record
+            values (numpy.array): Values to build histogram
+            global_step (int): Global step value to record
+            bins (string): One of {'tensorflow', 'auto', 'fd', ...}; this determines how the bins are made. You can find other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
         """
         if bins=='tensorflow':
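For example (a sketch assuming ``writer`` exists; the tag is hypothetical):

    import numpy as np
    # log the distribution of 1000 random values, letting numpy choose the bins
    writer.add_histogram('weights/fc1', np.random.randn(1000), global_step=10, bins='auto')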
@@ -263,30 +264,36 @@ def add_image(self, tag, img_tensor, global_step=None):
         Args:
             tag (string): Data identifier
-            img_tensor (torch.Tensor):
-            global_step (int): global step value to record
+            img_tensor (torch.Tensor): Image data
+            global_step (int): Global step value to record
         Shape:
-            img_tensor: :math:`(C, H, W)`
+            img_tensor: :math:`(3, H, W)`. Using ``torchvision.utils.make_grid()`` to prepare it is a good idea.
         """
         self.file_writer.add_summary(image(tag, img_tensor), global_step)
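For instance (a sketch assuming ``writer`` exists and torchvision is installed; the tag is hypothetical):

    import torch
    import torchvision
    images = torch.rand(16, 3, 28, 28)          # a batch of 16 RGB images
    grid = torchvision.utils.make_grid(images)  # tiles the batch into one (3, H, W) image
    writer.add_image('batch_preview', grid, global_step=10)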
     def add_audio(self, tag, snd_tensor, global_step=None):
         """Add audio data to summary.
         Args:
             tag (string): Data identifier
-            snd_tensor (torch.Tensor):
-            global_step (int): global step value to record
-            - snd_tensor: :math:`(1, L)`
+            snd_tensor (torch.Tensor): Sound data
+            global_step (int): Global step value to record
+        Shape:
+            snd_tensor: :math:`(1, L)`. The values should be between [-1, 1]. The sample rate is currently fixed at 44100 Hz.
         """
         self.file_writer.add_summary(audio(tag, snd_tensor), global_step)
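A sketch of logging one second of audio (assuming ``writer`` exists; the tag is hypothetical):

    import math
    import torch
    t = torch.arange(0, 44100).float() / 44100.0        # one second at the fixed 44100 Hz rate
    snd = torch.sin(2 * math.pi * 440 * t).view(1, -1)  # (1, L) tensor with values in [-1, 1]
    writer.add_audio('sine_440hz', snd, global_step=10)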
     def add_text(self, tag, text_string, global_step=None):
         """Add text data to summary.
         Args:
             tag (string): Data identifier
-            text_string (string):
-            global_step (int): global step value to record
+            text_string (string): String to save
+            global_step (int): Global step value to record
+        Examples::
+            writer.add_text('lstm', 'This is an lstm', 0)
+            writer.add_text('rnn', 'This is an rnn', 10)
         """
         self.file_writer.add_summary(text(tag, text_string), global_step)
@@ -302,9 +309,19 @@ def add_graph(self, model, lastVar):
         # no, let tensorboard handles it and show its warning message.
         """Add graph data to summary.
+        To draw the graph, you need a model ``m`` and an input variable ``t`` that has the correct size for ``m``.
+        Say you have run ``r = m(t)``; then you can use ``writer.add_graph(m, r)`` to save the graph.
+        By default, the input tensor does not require gradient, so it will be omitted when back tracing.
+        To draw the input node, pass an additional parameter ``requires_grad=True`` when creating the input tensor.
+        Args:
+            model (torch.nn.Module): Model to draw.
+            lastVar (torch.autograd.Variable): The root node to start from.
+        .. note::
+            This is an experimental feature. Graph drawing is based on autograd's backward tracing.
+            It recursively follows the ``next_functions`` attribute of a variable, drawing each node it encounters.
+            In some cases, the result is strange. See https://github.com/lanpa/tensorboard-pytorch/issues/7
         """
         import torch
         if not hasattr(torch.autograd.Variable, 'grad_fn'):
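A usage sketch of the pattern the docstring describes (assuming ``writer`` exists and the pre-0.4 ``torch.autograd.Variable`` API of this era; the model is hypothetical):

    import torch
    from torch.autograd import Variable

    model = torch.nn.Linear(10, 2)                             # any nn.Module works here
    inputs = Variable(torch.rand(1, 10), requires_grad=True)   # requires_grad=True draws the input node
    outputs = model(inputs)                                    # run a forward pass first
    writer.add_graph(model, outputs)                           # trace backward from the output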
