instances of classification and generation
dingguanglei committed Nov 8, 2018
1 parent 0c6e574 commit 2248699
Showing 12 changed files with 311 additions and 831 deletions.
41 changes: 38 additions & 3 deletions README.md
@@ -25,12 +25,16 @@ pip install jdit-0.0.2-py3-none-any.whl
```

## Quick start
Here are some instances that use jdit.
After building and installing the jdit package, you can create a new directory for a quick test.
Assuming your new directory is `example`,
run the code below in `ipython`. (Creating a `main.py` file also works.)

### Fashion Classification
Run this code in `ipython`. (Creating a `main.py` file also works.)
```python
-from jdit.trainer.instances.fashingClassification import start_example
-start_example()
+from jdit.trainer.instances.fashingClassification import start_fashingClassTrainer
+
+start_fashingClassTrainer()
```
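Judging from the new signature `start_fashingClassTrainer(gpus=(), nepochs=100, lr=1e-3, depth=32)` introduced further down in this commit, the entry point also takes keyword arguments; a CPU-only smoke test might look like this (a sketch, not part of the committed README):
```python
from jdit.trainer.instances.fashingClassification import start_fashingClassTrainer

# gpus=() keeps training on the CPU; a couple of epochs is enough for a smoke test
start_fashingClassTrainer(gpus=(), nepochs=2)
```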
Then you will see output like the following.
```
@@ -61,6 +65,37 @@ For training process, you can find learning curves in `tensorboard`.

It will create a `log` directory in `example/`, which stores the training process data and configurations.

### Fashion Generation GAN

Run this code in `ipython`. (Creating a `main.py` file also works.)
```python
from jdit.trainer.instances.fashingGenerateGan import start_fashingGenerateGanTrainer

start_fashingGenerateGanTrainer()
```
Then you will see output like the following.

```
===> Build dataset
use 2 thread!
===> Building model
discriminator Total number of parameters: 100865
discriminator model use GPU(0)!
apply kaiming weight init!
generator Total number of parameters: 951361
generator model use GPU(0)!
apply kaiming weight init!
===> Building optimizer
===> Training
0%| | 0/200 [00:00<?, ?epoch/s]
0step [00:00, ?step/s]
1step [00:22, 22.23s/step]
```

It will create a `log` directory in `example/`, which stores the training process data and configurations.
Besides, you can watch the training process in tensorboard.
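Assuming `tensorboard` is installed, the trainer's own hint (`tensorboard --logdir=log`) applied to this layout gives:
```
tensorboard --logdir=example/log
```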

### Make your own instances
Although these are just examples, you can easily build your own project on the jdit framework; a rough sketch of the pattern follows below.
The jdit framework can deal with
* Data visualization. (learning curves, images during the training process)
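As a rough illustration of the pattern these instances follow, a custom trainer subclasses one of the provided base trainers and fills in the loss hooks. This sketch mirrors the `FashingClassTrainer` shown later in this commit; the exact return contract of `compute_loss` is not fully visible in the diff, so the `return loss, var_dic` line is an assumption:
```python
import torch.nn as nn
from jdit.trainer.classification import ClassificationTrainer


class MyTrainer(ClassificationTrainer):
    mode = "L"      # grayscale input images
    num_class = 10

    def compute_loss(self):
        # assumed contract: return the loss plus a dict of scalars to log
        var_dic = {}
        var_dic["CEP"] = loss = nn.CrossEntropyLoss()(self.output, self.labels.squeeze().long())
        return loss, var_dic

    def compute_valid(self):
        var_dic = {}
        var_dic["CEP"] = nn.CrossEntropyLoss()(self.output, self.labels.squeeze().long())
        return var_dic
```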
4 changes: 2 additions & 2 deletions docs/source/index.rst
@@ -33,8 +33,8 @@ run this code in `ipython` cmd.(Create a `main.py` file is also acceptable.)
.. code:: python
 from jdit.trainer.instances.fashingClassification
-import start_example
-start_example()
+import start_fashingClassTrainer
+start_fashingClassTrainer()
Then you will see output like the following.

29 changes: 27 additions & 2 deletions examples/class_trainer.py
@@ -42,6 +42,30 @@ def compute_valid(self):
var_dic["ACC"] = acc
return var_dic

    def quickStart(self):
        gpus = [0]
        batch_shape = (64, 1, 32, 32)
        nepochs = 100

        lr = 1e-3
        lr_decay = 0.9  # 0.94
        weight_decay = 2e-5  # 2e-5
        momentum = 0
        betas = (0.9, 0.999)

        opt_name = "RMSprop"
        # opt_name = "Adam"

        print('===> Build dataset')
        mnist = Fashion_mnist(batch_shape=batch_shape)
        torch.backends.cudnn.benchmark = True
        print('===> Building model')
        net = Model(LinearModel(depth=64), gpu_ids_abs=gpus, init_method="kaiming")
        print('===> Building optimizer')
        opt = Optimizer(net.parameters(), lr, lr_decay, weight_decay, momentum, betas, opt_name)
        print('===> Training')
        Trainer = FashingClassTrainer("log", nepochs, gpus, net, opt, mnist)
        Trainer.train()

class LinearModel(nn.Module):
    def __init__(self, depth=64):
@@ -54,7 +78,8 @@ def __init__(self, depth=64):
        self.drop = nn.Dropout(0.2)

    def forward(self, input):
-        out = self.layer1(input)
+        out = input.view(input.size()[0], -1)
+        out = self.layer1(out)
        out = self.drop(self.layer2(out))
        out = self.drop(self.layer3(out))
        out = self.drop(self.layer4(out))
@@ -63,7 +88,7 @@ def forward(self, input):


if __name__ == '__main__':
-    gpus = []
+    gpus = [0]
    batch_shape = (64, 1, 32, 32)
    nepochs = 100

29 changes: 10 additions & 19 deletions examples/generate_trainer.py
@@ -1,12 +1,11 @@
# coding=utf-8
-import os, torch
+import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.autograd import Variable
-from jdit.trainer.gan.generate import GanTrainer
+from jdit.trainer import GanTrainer
from jdit.model import Model
from jdit.optimizer import Optimizer
-from jdit.dataset import Cifar10
+from jdit.dataset import Fashion_mnist


def gradPenalty(D_net, real, fake, LAMBDA=10, use_gpu=False):
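The body of `gradPenalty` is collapsed in this view. For reference, a textbook WGAN-GP penalty with this signature would look roughly like the sketch below; treat it as an illustration of the technique, not the committed implementation:
```python
def gradPenalty(D_net, real, fake, LAMBDA=10, use_gpu=False):
    # interpolate randomly between real and fake samples
    alpha = torch.rand(real.size(0), 1, 1, 1).expand_as(real)
    if use_gpu:
        alpha = alpha.cuda()
    interpolates = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    d_out = D_net(interpolates)
    grads = torch.autograd.grad(outputs=d_out, inputs=interpolates,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True, retain_graph=True)[0]
    grads = grads.view(grads.size(0), -1)
    # penalize deviation of the gradient norm from 1
    return LAMBDA * ((grads.norm(2, dim=1) - 1) ** 2).mean()
```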
@@ -70,22 +69,17 @@ def compute_g_loss(self):
        return loss_g, var_dic

    def valid(self):
        # register a fixed input
        if self.fixed_input is None:
-            self.fixed_input = Variable()
-            if self.use_gpu:
-                self.fixed_input = self.fixed_input.cuda()
-            fixed_input_cpu = Variable(torch.randn((32, *self.latent_shape)))
-            self.mv_inplace(fixed_input_cpu, self.fixed_input)
+            self.fixed_input = Variable(torch.randn((32, *self.latent_shape))).to(self.device)

        self.netG.eval()
        # watching the variation during training by a fixed input
        with torch.no_grad():
            fake = self.netG(self.fixed_input).detach()
        self.watcher.image(fake, self.current_epoch, tag="Valid/Fixed_fake", grid_size=(4, 4), shuffle=False)
        # saving training processes to build a .gif
        self.watcher.set_training_progress_images(fake, grid_size=(4, 4))

        var_dic = {}
        # var_dic["FID_SCORE"] = self.metric.evaluate_model_fid(self.netG, (256, *self.latent_shape), amount=8)
        # self.watcher.scalars(var_dic, self.step, tag="Valid")
        self.netG.train()

@@ -147,29 +141,26 @@ def forward(self, input_data):

if __name__ == '__main__':

-    gpus = [2, 3]
-    batch_shape = (128, 3, 32, 32)
+    gpus = []  # set `gpus = []` to use cpu
+    batch_shape = (128, 1, 32, 32)
    image_channel = batch_shape[1]
    nepochs = 200
-    mid_channel = 8

    opt_G_name = "Adam"
    depth_G = 8
    lr = 1e-3
    lr_decay = 0.94  # 0.94
    weight_decay = 0  # 2e-5
    betas = (0.9, 0.999)
+    G_mid_channel = 8

    opt_D_name = "RMSprop"
    depth_D = 64
    momentum = 0
+    D_mid_channel = 16

    # the input shape of generator
    latent_shape = (256, 1, 1)
    print('===> Build dataset')
-    cifar10 = Cifar10(batch_shape=batch_shape)
+    cifar10 = Fashion_mnist(batch_shape=batch_shape)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
    D_net = discriminator(input_nc=image_channel, depth=depth_D)
28 changes: 20 additions & 8 deletions jdit/dataset.py
@@ -52,7 +52,7 @@ def buildDatasets(self):
"""

-    def __init__(self, root, batch_shape, num_workers=-1, shuffle=True, subdata_prop=0.1):
+    def __init__(self, root, batch_shape, num_workers=-1, shuffle=True, subdata_size=0.1):
        """ Build data loaders.
        :param root: root path of datasets.
@@ -81,7 +81,7 @@ def __init__(self, root, batch_shape, num_workers=-1, shuffle=True, subdata_prop
        self.nsteps_valid = None
        self.nsteps_test = None

-        self.sample_dataset_prop = subdata_prop
+        self.sample_dataset_size = subdata_size

        self.buildTransforms()
        self.buildDatasets()
@@ -133,34 +133,46 @@ def buildLoaders(self):

    @property
    def samples_train(self):
-        return self._get_samples(self.dataset_train, self.sample_dataset_prop)
+        return self._get_samples(self.dataset_train, self.sample_dataset_size)

    @property
    def samples_valid(self):
-        return self._get_samples(self.dataset_train, self.sample_dataset_prop)
+        return self._get_samples(self.dataset_train, self.sample_dataset_size)

    @property
    def samples_test(self):
-        return self._get_samples(self.dataset_train, self.sample_dataset_prop)
+        return self._get_samples(self.dataset_train, self.sample_dataset_size)

-    def _get_samples(self, dataset, sample_dataset_prop=0.1):
+    def _get_samples(self, dataset, sample_dataset_size=0.1):
        import math
        assert len(dataset) > 10, "Dataset (%d) is too small" % len(dataset)
-        subdata_size = math.floor(sample_dataset_prop * len(dataset))
+        size_is_prop = isinstance(sample_dataset_size, float)
+        size_is_amount = isinstance(sample_dataset_size, int)
+        if size_is_prop:
+            assert 0 < sample_dataset_size <= 1, \
+                "sample_dataset_size proportion should be between 0. and 1."
+            subdata_size = math.floor(sample_dataset_size * len(dataset))
+        elif size_is_amount:
+            assert sample_dataset_size < len(dataset), \
+                "sample_dataset_size amount should be smaller than length of dataset"
+            subdata_size = sample_dataset_size  # use the amount directly, not as a proportion
+        else:
+            raise Exception("sample_dataset_size should be float or int. "
+                            "%s was given" % str(sample_dataset_size))
        sample_dataset, _ = random_split(dataset, [subdata_size, len(dataset) - subdata_size])
        sample_loader = DataLoader(sample_dataset, batch_size=subdata_size, shuffle=True)
        [samples_data] = list(sample_loader)
        return samples_data
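With this change `sample_dataset_size` accepts either a float proportion or an int amount. A minimal sketch of the two call styles, using the `Fashion_mnist` dataset that appears elsewhere in this commit (normally you would go through the `samples_train` property rather than the private helper):
```python
from jdit.dataset import Fashion_mnist

mnist = Fashion_mnist(batch_shape=(64, 1, 32, 32))
few = mnist._get_samples(mnist.dataset_train, 0.05)  # float: 5% of the dataset
some = mnist._get_samples(mnist.dataset_train, 500)  # int: exactly 500 samples
```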

    @property
    def configure(self):
        # configs = dict(vars(self))
        configs = dict()
        configs["dataset_name"] = [str(self.dataset_train.__class__.__name__)]
        configs["batch_size"] = [str(self.batch_size)]
        configs["shuffle"] = [str(self.shuffle)]
        configs["root"] = [str(self.root)]
        configs["num_workers"] = [str(self.num_workers)]
        configs["sample_dataset_size"] = [str(self.sample_dataset_size)]
        configs["nsteps_train"] = [str(self.nsteps_train)]
        configs["nsteps_valid"] = [str(self.nsteps_valid)]
        configs["nsteps_test"] = [str(self.nsteps_test)]
5 changes: 3 additions & 2 deletions jdit/trainer/instances/__init__.py
@@ -1,3 +1,4 @@
-from .fashingClassification import FashingClassTrainer, start_example
+from .fashingClassification import FashingClassTrainer, start_fashingClassTrainer
+from .fashingGenerateGan import FashingGenerateGanTrainer, start_fashingGenerateGanTrainer

-__all__ = ['FashingClassTrainer', 'start_example']
+__all__ = ['FashingClassTrainer', 'start_fashingClassTrainer', 'FashingGenerateGanTrainer', 'start_fashingGenerateGanTrainer']
76 changes: 38 additions & 38 deletions jdit/trainer/instances/fashingClassification.py
@@ -1,38 +1,46 @@
# coding=utf-8
import torch
-from torch.nn import CrossEntropyLoss
+import torch.nn as nn
from jdit.trainer.classification import ClassificationTrainer
from jdit.model import Model
from jdit.optimizer import Optimizer
-from jdit.dataset import Cifar10, Fashion_mnist
-from .resnet import Resnet18
+from jdit.dataset import Fashion_mnist


-class FashingClassTrainer(ClassificationTrainer):
-    """this is an instance of how to use `ClassificationTrainer` to build your own trainer
-    """
+class LinearModel(nn.Module):
+    def __init__(self, depth=64):
+        super(LinearModel, self).__init__()
+        self.layer1 = nn.Linear(32 * 32, depth * 8)
+        self.layer2 = nn.Linear(depth * 8, depth * 4)
+        self.layer3 = nn.Linear(depth * 4, depth * 2)
+        self.layer4 = nn.Linear(depth * 2, depth * 1)
+        self.layer5 = nn.Linear(depth * 1, 10)  # 10 output logits, matching num_class below
+        self.drop = nn.Dropout(0.2)
+
+    def forward(self, input):
+        out = input.view(input.size()[0], -1)
+        out = self.layer1(out)
+        out = self.drop(self.layer2(out))
+        out = self.drop(self.layer3(out))
+        out = self.drop(self.layer4(out))
+        out = self.layer5(out)
+        return out
+
+
+class FashingClassTrainer(ClassificationTrainer):
mode = "L"
num_class = 10
every_epoch_checkpoint = 20 # 2
every_epoch_changelr = 1 # 1
every_epoch_changelr = 10 # 1

    def __init__(self, logdir, nepochs, gpu_ids, net, opt, dataset):
        super(FashingClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, dataset)
        print("using `tensorboard --logdir=%s` to see learning curves and net structure." % logdir)
        print("training and valid data, configuration info and checkpoints are saved in the `%s` directory." % logdir)

        self.watcher.graph(net, (4, 1, 32, 32), self.use_gpu)
        data, label = self.datasets.samples_train
        self.watcher.embedding(data, data, label)

    def compute_loss(self):
        var_dic = {}
-        # Input: (N,C) where C = number of classes
-        # Target: (N) where each value is 0≤targets[i]≤C−1
-        # ground_truth = self.ground_truth.long().squeeze()
-        # var_dic["GP"] = gp = gradPenalty()
-        # var_dic["SGP"] = gp = spgradPenalty(self.net, self.input, self.input)
-        var_dic["CEP"] = loss = CrossEntropyLoss()(self.output, self.labels.squeeze().long())
+        var_dic["CEP"] = loss = nn.CrossEntropyLoss()(self.output, self.labels.squeeze().long())

        _, predict = torch.max(self.output.detach(), 1)  # 0100=>1 0010=>2
        total = predict.size(0) * 1.0
@@ -44,10 +52,7 @@ def compute_loss(self):

    def compute_valid(self):
        var_dic = {}
-        # Input: (N,C) where C = number of classes
-        # Target: (N) where each value is 0≤targets[i]≤C−1
-        # ground_truth = self.ground_truth.long().squeeze()
-        var_dic["CEP"] = cep = CrossEntropyLoss()(self.output, self.labels.squeeze().long())
+        var_dic["CEP"] = cep = nn.CrossEntropyLoss()(self.output, self.labels.squeeze().long())

        _, predict = torch.max(self.output.detach(), 1)  # 0100=>1 0010=>2
        total = predict.size(0) * 1.0
@@ -58,31 +63,26 @@ def compute_valid(self):
        return var_dic


-def start_example():
-    """ run this to test a `FashingClassTrainer` instance
-    :return:
-    """
-    gpus = [0]
-    batch_shape = (32, 3, 32, 32)
-    nepochs = 10
-
-    lr = 1e-3
-    lr_decay = 0.94  # 0.94
-    weight_decay = 0  # 2e-5
+def start_fashingClassTrainer(gpus=(), nepochs=100, lr=1e-3, depth=32):
+    gpus = gpus
+    batch_shape = (64, 1, 32, 32)
+    nepochs = nepochs
+    opt_name = "RMSprop"
+    lr = lr
+    lr_decay = 0.9  # 0.94
+    weight_decay = 2e-5  # 2e-5
    momentum = 0
    betas = (0.9, 0.999)

-    opt_name = "RMSprop"
-    # opt_name = "Adam"
-
    print('===> Build dataset')
-    mnist = Fashion_mnist(batch_shape = batch_shape)
+    mnist = Fashion_mnist(batch_shape=batch_shape)
    torch.backends.cudnn.benchmark = True
    print('===> Building model')
-    net = Model(Resnet18(64), gpu_ids_abs=gpus, init_method="kaiming")
+    net = Model(LinearModel(depth=depth), gpu_ids_abs=gpus, init_method="kaiming")
    print('===> Building optimizer')
    opt = Optimizer(net.parameters(), lr, lr_decay, weight_decay, momentum, betas, opt_name)
    print('===> Training')
+    print("using `tensorboard --logdir=log` to see learning curves and net structure. "
+          "training and valid data, configuration info and checkpoints are saved in the `log` directory.")
    Trainer = FashingClassTrainer("log", nepochs, gpus, net, opt, mnist)
    Trainer.train()
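Since `jdit/trainer/instances/__init__.py` above now re-exports the new entry point, it can also be imported from the instances package directly (a usage sketch):
```python
from jdit.trainer.instances import start_fashingClassTrainer

start_fashingClassTrainer(gpus=(), nepochs=10)
```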
