diff --git a/README.md b/README.md
index 2d5d25b..149e700 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # NamedTensor
 
-## Introductiono
+## Introduction
 
 A proposal for a named tensor for PyTorch. For now check out the blog post:
 
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
new file mode 100644
index 0000000..783b89c
--- /dev/null
+++ b/docs/source/modules.rst
@@ -0,0 +1,147 @@
+Named Tensor
+=============
+
+.. autoclass:: namedtensor.NamedTensor
+    :inherited-members:
+    :members:
+
+
+Basic Methods
+-------------
+
+These methods return a named tensor of the same form as the original.
+
+    .. method:: _basic(*args)
+
+
+.. jinja:: tensor
+
+    {% for k in noshift_methods %} :py:meth:`torch.Tensor.{{k}}` {% endfor %}
+
+
+Reduction Methods
+-----------------
+
+These methods return a named tensor with one or more reduced dimensions.
+
+    .. method:: _reduction(dims, *args)
+
+
+.. jinja:: tensor
+
+    {% for k in reduce_methods %} :py:meth:`torch.Tensor.{{k}}` {% endfor %}
+
+
+Tupled Reduction Methods
+-------------------------
+
+These methods return a tuple of named tensors with one or more reduced dimensions.
+
+    .. method:: _tuple_reduction(dims, *args)
+
+
+.. jinja:: tensor
+
+    {% for k in multi_reduce_methods %} :py:meth:`torch.Tensor.{{k}}` {% endfor %}
+
+
+Non-Tensor Methods
+-------------------
+
+These methods return non-tensor information.
+
+    .. method:: _info(*args)
+
+
+.. jinja:: tensor
+
+    {% for k in info_methods %} :py:meth:`torch.Tensor.{{k}}` {% endfor %}
+
+
+Broadcast Methods
+-----------------
+
+These methods broadcast the two named tensors against each other before applying the operation.
+
+    .. method:: _operate(other, *args)
+
+
+.. jinja:: tensor
+
+    {% for k in binop_methods %} :py:meth:`torch.Tensor.{{k}}` {% endfor %}
+
+
+Named Torch
+=============
+
+Named torch (``ntorch``) is a module that wraps the core torch operations
+with named variants. It contains named variants of most of the core
+torch functionality.
+
+
+Dictionary Builders
+----------------------
+
+These functions construct a new named tensor where the sizes are specified
+through an ordered dict of names.
+
+.. function:: _build(ordereddict, *args)
+
+
+.. jinja:: ntorch
+
+    {% for k in build %} :py:func:`torch.{{k}}` {% endfor %}
+
+
+Other Builders
+----------------
+
+These functions construct a new named tensor where the sizes are specified
+through an ordered dict of names.
+
+.. function:: _build(ordereddict, *args)
+
+
+.. jinja:: ntorch
+
+    {% for k in build %} :py:func:`torch.{{k}}` {% endfor %}
+
+
+Basic Functions
+----------------
+
+These functions return a named tensor of the same form as the original.
+
+    .. method:: _basic(*args)
+
+
+.. jinja:: ntorch
+
+    {% for k in noshift %} :py:func:`torch.{{k}}` {% endfor %}
+
+
+Distributions
+===============
+
+A wrapper around the torch distributions library that makes it clearer
+how to sample from and batch distribution objects.
+
+
+Builders
+----------------------
+
+These functions construct a new named distribution where the sizes are specified
+through an ordered dict of names.
+
+
+.. function:: _build(ordereddict, *args)
+
+
+.. jinja:: ndistributions
+
+    {% for k in build %} :py:class:`torch.distributions.{{k}}` {% endfor %}
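To make the method categories above concrete, here is a minimal usage sketch. It assumes the `ntorch` builders follow the `_build(ordereddict, *args)` signature documented above, and that reductions and broadcast methods accept dimension names, per `_reduction(dims, *args)` and `_operate(other, *args)`; the import path and the dimension names (`batch`, `hidden`) are illustrative rather than taken from the source.

```python
from collections import OrderedDict

from namedtensor import ntorch  # assumed import path for the ntorch wrapper

# Dictionary builder: sizes are given as an ordered dict of names (_build above).
x = ntorch.randn(OrderedDict([("batch", 4), ("hidden", 8)]))
w = ntorch.randn(OrderedDict([("hidden", 8)]))

# Reduction by name (_reduction above): collapses the "hidden" dimension.
totals = x.sum("hidden")

# Broadcast method (_operate above): operands are aligned on shared names
# rather than on positional dimensions.
scaled = x.mul(w)
```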
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/examples/mnist.py b/examples/mnist.py
new file mode 100644
index 0000000..e971f06
--- /dev/null
+++ b/examples/mnist.py
@@ -0,0 +1,116 @@
+from __future__ import print_function
+import argparse
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from torchvision import datasets, transforms
+
+
+class Net(nn.Module):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.conv1 = nn.Conv2d(1, 20, 5, 1)
+        self.conv2 = nn.Conv2d(20, 50, 5, 1)
+        self.fc1 = nn.Linear(4*4*50, 500)
+        self.fc2 = nn.Linear(500, 10)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))
+        x = F.max_pool2d(x, 2, 2)
+        x = F.relu(self.conv2(x))
+        x = F.max_pool2d(x, 2, 2)
+        x = x.view(-1, 4*4*50)
+        x = F.relu(self.fc1(x))
+        x = self.fc2(x)
+        return F.log_softmax(x, dim=1)
+
+def train(args, model, device, train_loader, optimizer, epoch):
+    model.train()
+    for batch_idx, (data, target) in enumerate(train_loader):
+        data, target = data.to(device), target.to(device)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = F.nll_loss(output, target)
+        loss.backward()
+        optimizer.step()
+        if batch_idx % args.log_interval == 0:
+            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+                epoch, batch_idx * len(data), len(train_loader.dataset),
+                100. * batch_idx / len(train_loader), loss.item()))
+
+def test(args, model, device, test_loader):
+    model.eval()
+    test_loss = 0
+    correct = 0
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
+            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
+            correct += pred.eq(target.view_as(pred)).sum().item()
+
+    test_loss /= len(test_loader.dataset)
+
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset),
+        100. * correct / len(test_loader.dataset)))
+
+def main():
+    # Training settings
+    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
+    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
+                        help='input batch size for training (default: 64)')
+    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
+                        help='input batch size for testing (default: 1000)')
+    parser.add_argument('--epochs', type=int, default=10, metavar='N',
+                        help='number of epochs to train (default: 10)')
+    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
+                        help='learning rate (default: 0.01)')
+    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
+                        help='SGD momentum (default: 0.5)')
+    parser.add_argument('--no-cuda', action='store_true', default=False,
+                        help='disables CUDA training')
+    parser.add_argument('--seed', type=int, default=1, metavar='S',
+                        help='random seed (default: 1)')
+    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
+                        help='how many batches to wait before logging training status')
+
+    parser.add_argument('--save-model', action='store_true', default=False,
+                        help='For Saving the current Model')
+    args = parser.parse_args()
+    use_cuda = not args.no_cuda and torch.cuda.is_available()
+
+    torch.manual_seed(args.seed)
+
+    device = torch.device("cuda" if use_cuda else "cpu")
+
+    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
+    train_loader = torch.utils.data.DataLoader(
+        datasets.MNIST('../data', train=True, download=True,
+                       transform=transforms.Compose([
+                           transforms.ToTensor(),
+                           transforms.Normalize((0.1307,), (0.3081,))
+                       ])),
+        batch_size=args.batch_size, shuffle=True, **kwargs)
+    test_loader = torch.utils.data.DataLoader(
+        datasets.MNIST('../data', train=False, transform=transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307,), (0.3081,))
+        ])),
+        batch_size=args.test_batch_size, shuffle=True, **kwargs)
+
+
+    model = Net().to(device)
+    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
+
+    for epoch in range(1, args.epochs + 1):
+        train(args, model, device, train_loader, optimizer, epoch)
+        test(args, model, device, test_loader)
+
+    if (args.save_model):
+        torch.save(model.state_dict(), "mnist_cnn.pt")
+
+if __name__ == '__main__':
+    main()
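For a quick sanity check of the `Net` module above, one can push a fake batch through it. This is a minimal sketch, assuming it is run from the `examples/` directory so that `mnist.py` is importable, and that inputs are 1x28x28 MNIST images, which is what the `4*4*50` flatten in `forward` relies on (28 becomes 24 after the first 5x5 conv, 12 after pooling, 8 after the second conv, and 4 after the final pooling).

```python
import torch

from mnist import Net  # assumes this is run from the examples/ directory

net = Net()
fake_batch = torch.randn(2, 1, 28, 28)  # two fake 1x28x28 MNIST-shaped images
log_probs = net(fake_batch)
print(log_probs.shape)  # expected: torch.Size([2, 10]), one log-probability per digit class
```

The full example can then be run directly, e.g. `python mnist.py --epochs 1 --save-model`, using the flags defined in `main`.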