Fix Instances name
Fix bug in unittests
dingguanglei committed Dec 22, 2019
1 parent 4f98733 commit a05d679
Showing 18 changed files with 153 additions and 151 deletions.
27 changes: 25 additions & 2 deletions jdit/trainer/gan/generate.py
@@ -49,7 +49,13 @@ def compute_d_loss(self):
You should return a **loss** in the first position.
You can return a ``dict`` of losses that you want to visualize in the second position (see the example below).
The train logic is:
self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
self.fake = self.netG(self.input)
self._train_iteration(self.optD, self.compute_d_loss, csv_filename="Train_D")
if (self.step % self.d_turn) == 0:
self._train_iteration(self.optG, self.compute_g_loss, csv_filename="Train_G")
So, you can use `self.input`, `self.ground_truth`, `self.fake`, `self.netG` and `self.optD` to compute the loss.
Example::
d_fake = self.netD(self.fake.detach())
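The diff view truncates this example. For orientation, here is a minimal sketch of a complete ``compute_d_loss`` override for a ``GenerateGanTrainer`` subclass; the ``BCEWithLogitsLoss`` criterion and the real=1/fake=0 label convention are illustrative assumptions, not something this commit prescribes:

    import torch
    import torch.nn as nn

    # a method override for a GenerateGanTrainer subclass (sketch)
    def compute_d_loss(self):
        d_fake = self.netD(self.fake.detach())  # detach so only netD receives gradients
        d_real = self.netD(self.ground_truth)
        criterion = nn.BCEWithLogitsLoss()
        loss_d = (criterion(d_real, torch.ones_like(d_real))
                  + criterion(d_fake, torch.zeros_like(d_fake)))
        return loss_d, {"LOSS_D": loss_d}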
@@ -70,7 +76,13 @@ def compute_g_loss(self):
You should return a **loss** in the first position.
You can return a ``dict`` of losses that you want to visualize in the second position (see the example below).
The train logic is:
self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
self.fake = self.netG(self.input)
self._train_iteration(self.optD, self.compute_d_loss, csv_filename="Train_D")
if (self.step % self.d_turn) == 0:
self._train_iteration(self.optG, self.compute_g_loss, csv_filename="Train_G")
So, you can use `self.input`, `self.ground_truth`, `self.fake`, `self.netG` and `self.optD` to compute the loss.
Example::
d_fake = self.netD(self.fake, self.input)
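The generator-side example is cut off as well. A matching ``compute_g_loss`` sketch, reusing the two-argument discriminator call shown above (the loss choice is again an assumption):

    import torch
    import torch.nn as nn

    # a method override for a GenerateGanTrainer subclass (sketch)
    def compute_g_loss(self):
        # no detach here: generator gradients must flow through the fake samples
        d_fake = self.netD(self.fake, self.input)
        loss_g = nn.BCEWithLogitsLoss()(d_fake, torch.ones_like(d_fake))
        return loss_g, {"LOSS_G": loss_g}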
@@ -85,6 +97,17 @@ def compute_g_loss(self):

@abstractmethod
def compute_valid(self):
"""
The train logic is:
self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
self.fake = self.netG(self.input)
self._train_iteration(self.optD, self.compute_d_loss, csv_filename="Train_D")
if (self.step % self.d_turn) == 0:
self._train_iteration(self.optG, self.compute_g_loss, csv_filename="Train_G")
So, you can use `self.input`, `self.ground_truth`, `self.fake`, `self.netG` and `self.optD` to compute validation metrics.
:return: a ``dict`` of values to visualize
"""
_, g_var_dic = self.compute_g_loss()
_, d_var_dic = self.compute_d_loss()
var_dic = dict(d_var_dic, **g_var_dic)
16 changes: 14 additions & 2 deletions jdit/trainer/gan/pix2pix.py
@@ -50,7 +50,13 @@ def compute_d_loss(self):
You should return a **loss** in the first position.
You can return a ``dict`` of losses that you want to visualize in the second position (see the example below).
The training logic is:
self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
self.fake = self.netG(self.input)
self._train_iteration(self.optD, self.compute_d_loss, csv_filename="Train_D")
if (self.step % self.d_turn) == 0:
self._train_iteration(self.optG, self.compute_g_loss, csv_filename="Train_G")
So, you can use `self.input`, `self.ground_truth`, `self.fake`, `self.netG` and `self.optD` to compute the loss.
Example::
d_fake = self.netD(self.fake.detach())
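As in ``generate.py``, only the first line of the example survives the diff view. A hedged sketch of a full pix2pix ``compute_d_loss``; whether ``netD`` is also conditioned on ``self.input`` (as in the ``compute_g_loss`` example below) depends on the discriminator, so the two-argument call here is an assumption:

    import torch
    import torch.nn as nn

    # a method override for the pix2pix trainer (sketch)
    def compute_d_loss(self):
        d_fake = self.netD(self.fake.detach(), self.input)
        d_real = self.netD(self.ground_truth, self.input)
        criterion = nn.BCEWithLogitsLoss()
        loss_d = (criterion(d_real, torch.ones_like(d_real))
                  + criterion(d_fake, torch.zeros_like(d_fake)))
        return loss_d, {"LOSS_D": loss_d}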
@@ -71,7 +77,13 @@ def compute_g_loss(self):
You should return a **loss** in the first position.
You can return a ``dict`` of losses that you want to visualize in the second position (see the example below).
The training logic is:
self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
self.fake = self.netG(self.input)
self._train_iteration(self.optD, self.compute_d_loss, csv_filename="Train_D")
if (self.step % self.d_turn) == 0:
self._train_iteration(self.optG, self.compute_g_loss, csv_filename="Train_G")
So, you can use `self.input`, `self.ground_truth`, `self.fake`, `self.netG` and `self.optD` to compute the loss.
Example::
d_fake = self.netD(self.fake, self.input)
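A corresponding generator-loss sketch; the weighted L1 reconstruction term is the usual pix2pix recipe and an assumption here, not part of this diff:

    import torch
    import torch.nn as nn

    # a method override for the pix2pix trainer (sketch)
    def compute_g_loss(self):
        d_fake = self.netD(self.fake, self.input)
        adv_loss = nn.BCEWithLogitsLoss()(d_fake, torch.ones_like(d_fake))
        l1_loss = nn.L1Loss()(self.fake, self.ground_truth)
        loss_g = adv_loss + 10.0 * l1_loss  # the weight 10.0 is illustrative
        return loss_g, {"LOSS_G": loss_g, "L1": l1_loss}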
1 change: 0 additions & 1 deletion jdit/trainer/gan/sup_gan.py
@@ -188,7 +188,6 @@ def valid_epoch(self):
if avg_dic == {}:
avg_dic: dict = dic
else:
-# sum
for key in dic.keys():
avg_dic[key] += dic[key]
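The loop above accumulates the per-batch metric dicts key by key; here is a self-contained restatement of the whole sum-then-average pattern (a sketch, not the library code):

    def average_metric_dicts(dics):
        if not dics:
            return {}
        avg_dic = {}
        for dic in dics:
            if avg_dic == {}:
                avg_dic = dict(dic)
            else:
                for key in dic.keys():
                    avg_dic[key] += dic[key]
        # divide the accumulated sums by the number of batches
        return {key: value / len(dics) for key, value in avg_dic.items()}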
16 changes: 8 additions & 8 deletions jdit/trainer/instances/__init__.py
@@ -1,9 +1,9 @@
-from .fashingClassification import FashingClassTrainer, start_fashingClassTrainer
-from .fashingGenerateGan import FashingGenerateGenerateGanTrainer, start_fashingGenerateGanTrainer
+from .fashionClassification import FashionClassTrainer, start_fashionClassTrainer
+from .fashionGenerateGan import FashionGenerateGenerateGanTrainer, start_fashionGenerateGanTrainer
 from .cifarPix2pixGan import start_cifarPix2pixGanTrainer
-from .fashionClassParallelTrainer import start_fashingClassPrarallelTrainer
-from .fashingAutoencoder import FashingAutoEncoderTrainer, start_fashingAotoencoderTrainer
-__all__ = ['FashingClassTrainer', 'start_fashingClassTrainer',
-'FashingGenerateGenerateGanTrainer', 'start_fashingGenerateGanTrainer',
-'cifarPix2pixGan', 'start_cifarPix2pixGanTrainer', 'start_fashingClassPrarallelTrainer',
-'start_fashingAotoencoderTrainer', 'FashingAutoEncoderTrainer']
+from .fashionClassParallelTrainer import start_fashionClassPrarallelTrainer
+from .fashionAutoencoder import FashionAutoEncoderTrainer, start_fashionAutoencoderTrainer
+__all__ = ['FashionClassTrainer', 'start_fashionClassTrainer',
+'FashionGenerateGenerateGanTrainer', 'start_fashionGenerateGanTrainer',
+'cifarPix2pixGan', 'start_cifarPix2pixGanTrainer', 'start_fashionClassPrarallelTrainer',
+'start_fashionAutoencoderTrainer', 'FashionAutoEncoderTrainer']
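After this rename, downstream code imports the ``Fashion*`` names instead. A quick smoke test of one renamed entry point (the ``debug`` run type is handled by the trainers below):

    from jdit.trainer.instances import start_fashionClassTrainer

    # run the bundled fashion-mnist classification example on CPU
    start_fashionClassTrainer(gpus=(), nepochs=10, run_type="debug")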
jdit/trainer/instances/fashingAutoencoder.py → jdit/trainer/instances/fashionAutoencoder.py
@@ -26,9 +26,9 @@ def forward(self, input):
return out


-class FashingAutoEncoderTrainer(AutoEncoderTrainer):
+class FashionAutoEncoderTrainer(AutoEncoderTrainer):
 def __init__(self, logdir, nepochs, gpu_ids, net, opt, datasets):
-super(FashingAutoEncoderTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, datasets)
+super(FashionAutoEncoderTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, datasets)
data, label = self.datasets.samples_train
self.watcher.embedding(data, data, label, 1)

@@ -46,8 +46,8 @@ def compute_valid(self):
return var_dic


-def start_fashingAotoencoderTrainer(gpus=(), nepochs=10, run_type="train"):
-"""" An example of fashing-mnist classification
+def start_fashionAutoencoderTrainer(gpus=(), nepochs=10, run_type="train"):
+"""" An example of fashion-mnist classification
"""
depth = 32
@@ -73,12 +73,12 @@ def start_fashingAotoencoderTrainer(gpus=(), nepochs=10, run_type="train"):
print('===> Training')
print("using `tensorboard --logdir=log` to see learning curves and net structure. "
"training and valid_epoch data, configure info and checkpoints are saved in the `log` directory.")
-Trainer = FashingAutoEncoderTrainer("log/fashion_classify", nepochs, gpus, net, opt, mnist)
+Trainer = FashionAutoEncoderTrainer("log/fashion_classify", nepochs, gpus, net, opt, mnist)
if run_type == "train":
Trainer.train()
elif run_type == "debug":
Trainer.debug()


if __name__ == '__main__':
-start_fashingAotoencoderTrainer()
+start_fashionAutoencoderTrainer()
15 changes: 10 additions & 5 deletions jdit/trainer/instances/fashionClassParallelTrainer.py
@@ -29,9 +29,9 @@ def forward(self, input):
return out


-class FashingClassTrainer(ClassificationTrainer):
+class FashionClassTrainer(ClassificationTrainer):
 def __init__(self, logdir, nepochs, gpu_ids, net, opt, dataset, num_class):
-super(FashingClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, dataset, num_class)
+super(FashionClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, dataset, num_class)

def compute_loss(self):
var_dic = {}
@@ -51,6 +51,11 @@ def compute_valid(self):


def build_task_trainer(unfixed_params):
"""build a task just like FashionClassTrainer.
:param unfixed_params: the parameters that vary between tasks, e.g. ``logdir``, ``gpu_ids_abs`` and ``depth``
:return: a configured ``FashionClassTrainer``
"""
logdir = unfixed_params['logdir']
gpu_ids_abs = unfixed_params["gpu_ids_abs"]
depth = unfixed_params["depth"]
@@ -70,7 +75,7 @@ def build_task_trainer(unfixed_params):
net = Model(SimpleModel(depth), gpu_ids_abs=gpu_ids_abs, init_method="kaiming", verbose=False)
opt = Optimizer(net.parameters(), opt_name, lr_decay, decay_position, position_type=position_type,
lr=lr, weight_decay=weight_decay, momentum=momentum)
-Trainer = FashingClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt, mnist, num_class)
+Trainer = FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt, mnist, num_class)
return Trainer
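Only the first three keys read from ``unfixed_params`` are visible in this hunk, so a call can only be sketched roughly; the remaining keys are read by lines the diff view collapses:

    # a sketch of the expected dict; only the first three keys are visible above
    unfixed_params = {
        "logdir": "log/task1",
        "gpu_ids_abs": [],
        "depth": 16,
        # ... further keys are consumed by the collapsed lines ...
    }
    trainer = build_task_trainer(unfixed_params)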


@@ -94,9 +99,9 @@ def trainerParallel():
return tp


-def start_fashingClassPrarallelTrainer(run_type="debug"):
+def start_fashionClassPrarallelTrainer(run_type="debug"):
tp = trainerParallel()
tp.train()

if __name__ == '__main__':
-start_fashingClassPrarallelTrainer()
+start_fashionClassPrarallelTrainer()
jdit/trainer/instances/fashingClassification.py → jdit/trainer/instances/fashionClassification.py
@@ -28,9 +28,9 @@ def forward(self, input):
return out


-class FashingClassTrainer(ClassificationTrainer):
+class FashionClassTrainer(ClassificationTrainer):
 def __init__(self, logdir, nepochs, gpu_ids, net, opt, datasets, num_class):
-super(FashingClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, datasets, num_class)
+super(FashionClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, datasets, num_class)
data, label = self.datasets.samples_train
self.watcher.embedding(data, data, label, 1)

@@ -51,8 +51,8 @@ def compute_valid(self):
return var_dic


-def start_fashingClassTrainer(gpus=(), nepochs=10, run_type="train"):
-"""" An example of fashing-mnist classification
+def start_fashionClassTrainer(gpus=(), nepochs=10, run_type="train"):
+"""" An example of fashion-mnist classification
"""
num_class = 10
@@ -80,7 +80,7 @@ def start_fashingClassTrainer(gpus=(), nepochs=10, run_type="train"):
print('===> Training')
print("using `tensorboard --logdir=log` to see learning curves and net structure. "
"training and valid_epoch data, configure info and checkpoints are saved in the `log` directory.")
-Trainer = FashingClassTrainer("log/fashion_classify", nepochs, gpus, net, opt, mnist, num_class)
+Trainer = FashionClassTrainer("log/fashion_classify", nepochs, gpus, net, opt, mnist, num_class)
if run_type == "train":
Trainer.train()
elif run_type == "debug":
@@ -89,4 +89,4 @@ def start_fashingClassTrainer(gpus=(), nepochs=10, run_type="train"):


if __name__ == '__main__':
-start_fashingClassTrainer()
+start_fashionClassTrainer()
jdit/trainer/instances/fashingGenerateGan.py → jdit/trainer/instances/fashionGenerateGan.py
@@ -61,11 +61,11 @@ def forward(self, input_data):
return out


-class FashingGenerateGenerateGanTrainer(GenerateGanTrainer):
+class FashionGenerateGenerateGanTrainer(GenerateGanTrainer):
d_turn = 1

def __init__(self, logdir, nepochs, gpu_ids_abs, netG, netD, optG, optD, dataset, latent_shape):
-super(FashingGenerateGenerateGanTrainer, self).__init__(logdir, nepochs, gpu_ids_abs, netG, netD, optG, optD,
+super(FashionGenerateGenerateGanTrainer, self).__init__(logdir, nepochs, gpu_ids_abs, netG, netD, optG, optD,
dataset,
latent_shape=latent_shape)

@@ -92,7 +92,7 @@ def compute_valid(self):
return var_dic


-def start_fashingGenerateGanTrainer(gpus=(), nepochs=50, lr=1e-3, depth_G=32, depth_D=32, latent_shape=(256, 1, 1),
+def start_fashionGenerateGanTrainer(gpus=(), nepochs=50, lr=1e-3, depth_G=32, depth_D=32, latent_shape=(256, 1, 1),
run_type="train"):
gpus = gpus # set `gpus = []` to use cpu
batch_size = 64
@@ -130,7 +130,7 @@ def start_fashingGenerateGanTrainer(gpus=(), nepochs=50, lr=1e-3, depth_G=32, de
print('===> Training')
print("using `tensorboard --logdir=log` to see learning curves and net structure. "
"training and valid_epoch data, configure info and checkpoints are saved in the `log` directory.")
-Trainer = FashingGenerateGenerateGanTrainer("log/fashion_generate", nepochs, gpus, G, D, opt_G, opt_D, mnist,
+Trainer = FashionGenerateGenerateGanTrainer("log/fashion_generate", nepochs, gpus, G, D, opt_G, opt_D, mnist,
latent_shape)
if run_type == "train":
Trainer.train()
@@ -139,4 +139,4 @@ def start_fashingGenerateGanTrainer(gpus=(), nepochs=50, lr=1e-3, depth_G=32, de


if __name__ == '__main__':
-start_fashingGenerateGanTrainer()
+start_fashionGenerateGanTrainer()
44 changes: 16 additions & 28 deletions jdit/trainer/single/autoencoder.py
@@ -15,14 +15,17 @@ def __init__(self, logdir, nepochs, gpu_ids, net, opt, datasets):

@abstractmethod
def compute_loss(self):
"""Compute the main loss and observed variables.
"""Compute the main loss and observed values.
Compute the loss and other caring variables.
Compute the loss and other values shown in tensorboard scalars visualization.
You should return a main loss for doing backward propagation.
For the caring variables will only be used in tensorboard scalars visualization.
So, if you want some variables visualized. Make a ``dict()`` with key name is the variable's name.
So, if you want some values visualized. Make a ``dict()`` with key name is the variable's name.
The training logic is:
self.input, self.ground_truth = self.get_data_from_batch(batch, self.device)
self.output = self.net(self.input)
self._train_iteration(self.opt, self.compute_loss, csv_filename="Train")
So, you have `self.net`, `self.input`, `self.output`, `self.ground_truth` to compute your own loss here.
.. note::
@@ -38,17 +41,7 @@ def compute_loss(self):
Example::
var_dic = {}
-# visualize the value of CrossEntropyLoss.
-var_dic["CEP"] = loss = CrossEntropyLoss()(self.output, self.labels.squeeze().long())
-_, predict = torch.max(self.output.detach(), 1) # 0100=>1 0010=>2
-total = predict.size(0) * 1.0
-labels = self.labels.squeeze().long()
-correct = predict.eq(labels).cpu().sum().float()
-acc = correct / total
-# visualize the value of accuracy.
-var_dic["ACC"] = acc
-# using CrossEntropyLoss as the main loss for backward, and return by visualized ``dict``
+var_dic["CEP"] = loss = nn.MSELoss(reduction="mean")(self.output, self.ground_truth)
return loss, var_dic
"""
@@ -68,18 +61,8 @@ def compute_valid(self):
So, you can compute some variables here purely for visualization.
Example::
var_dic = {}
-# visualize the valid_epoch curve of CrossEntropyLoss
-var_dic["CEP"] = loss = CrossEntropyLoss()(self.output, self.labels.squeeze().long())
-_, predict = torch.max(self.output.detach(), 1) # 0100=>1 0010=>2
-total = predict.size(0) * 1.0
-labels = self.labels.squeeze().long()
-correct = predict.eq(labels).cpu().sum().float()
-acc = correct / total
-# visualize the valid_epoch curve of accuracy
-var_dic["ACC"] = acc
+var_dic["CEP"] = loss = nn.MSELoss(reduction="mean")(self.output, self.ground_truth)
return var_dic
"""
@@ -94,7 +77,6 @@ def valid_epoch(self):
if avg_dic == {}:
avg_dic = dic
else:
-# sum
for key in dic.keys():
avg_dic[key] += dic[key]

@@ -106,6 +88,12 @@ def valid_epoch(self):
self.net.train()

def get_data_from_batch(self, batch_data, device):
"""If you need different behavior, rewrite this method together with `self.train_epoch()`.
:param batch_data: a batch of Tensors loaded from the dataset
:param device: the compute device
:return: the input and ground-truth Tensors
"""
input_tensor, ground_truth_tensor = batch_data[0], batch_data[1]
return input_tensor, ground_truth_tensor
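For instance, a hypothetical override for a dataset that yields dicts rather than tuples (the `"image"`/`"target"` field names are invented for illustration):

    # a method override for an AutoEncoderTrainer subclass (sketch)
    def get_data_from_batch(self, batch_data, device):
        input_tensor = batch_data["image"].to(device)
        ground_truth_tensor = batch_data["target"].to(device)
        return input_tensor, ground_truth_tensor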

