# forked from xuan-li/zi2zi-pytorch
# train.py
from data import DatasetFromObj
from torch.utils.data import DataLoader
from model import Zi2ZiModel
import os
import argparse
import torch
# Command-line interface for training. Argument order below determines the
# order shown in --help, so new options should be appended thoughtfully.
parser = argparse.ArgumentParser(description='Train')
parser.add_argument('--experiment_dir', required=True,
help='experiment directory, data, samples,checkpoints,etc')
# NOTE(review): nargs='+' yields a list of *strings*; presumably Zi2ZiModel
# accepts string ids — confirm whether ints are expected.
parser.add_argument('--gpu_ids', default=[], nargs='+', help="GPUs")
parser.add_argument('--image_size', type=int, default=256,
help="size of your input and output image")
# Loss-weight hyperparameters.
parser.add_argument('--L1_penalty', type=int, default=100, help='weight for L1 loss')
parser.add_argument('--Lconst_penalty', type=int, default=15, help='weight for const loss')
# parser.add_argument('--Ltv_penalty', dest='Ltv_penalty', type=float, default=0.0, help='weight for tv loss')
parser.add_argument('--Lcategory_penalty', type=float, default=1.0,
help='weight for category loss')
# Embedding table for style/category conditioning.
parser.add_argument('--embedding_num', type=int, default=41,
help="number for distinct embeddings")
parser.add_argument('--embedding_dim', type=int, default=128, help="dimension for embedding")
# Optimization schedule.
parser.add_argument('--epoch', type=int, default=100, help='number of epoch')
parser.add_argument('--batch_size', type=int, default=16, help='number of examples in batch')
parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--schedule', dest='schedule', type=int, default=10, help='number of epochs to half learning rate')
# NOTE(review): --resume is used both as a checkpoint id for load_networks()
# and as the starting epoch in main() — confirm the two share the same unit.
parser.add_argument('--resume', type=int, default=None, help='resume from previous training')
parser.add_argument('--freeze_encoder', action='store_true',
help="freeze encoder weights during training")
parser.add_argument('--fine_tune', type=str, default=None,
help='specific labels id to be fine tuned')
parser.add_argument('--inst_norm', action='store_true',
help='use conditional instance normalization in your model')
# Periodic-side-effect cadences, measured in batches.
parser.add_argument('--sample_steps', type=int, default=10,
help='number of batches in between two samples are drawn from validation set')
parser.add_argument('--checkpoint_steps', type=int, default=100,
help='number of batches in between two checkpoints')
parser.add_argument('--flip_labels', action='store_true',
help='whether flip training data labels or not, in fine tuning')
def main():
    """Train a Zi2Zi model.

    Loads the serialized training/validation datasets from the experiment
    directory, builds the model, and runs the optimization loop with
    periodic checkpointing and sample generation.
    """
    args = parser.parse_args()

    data_dir = os.path.join(args.experiment_dir, "data")
    checkpoint_dir = os.path.join(args.experiment_dir, "checkpoint")
    sample_dir = os.path.join(args.experiment_dir, "sample")
    log_dir = os.path.join(args.experiment_dir, "logs")

    # Fix: the output directories were never created, so checkpoint saves and
    # model.sample() writes would fail on a fresh experiment directory.
    for directory in (checkpoint_dir, sample_dir, log_dir):
        os.makedirs(directory, exist_ok=True)

    train_dataset = DatasetFromObj(os.path.join(data_dir, 'train.obj'))
    # NOTE(review): val_dataset is loaded but never used below — samples are
    # actually drawn from the current *training* batch, despite the
    # --sample_steps help text. Kept to preserve existing behavior.
    val_dataset = DatasetFromObj(os.path.join(data_dir, 'val.obj'))
    dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)

    model = Zi2ZiModel(
        embedding_num=args.embedding_num,
        embedding_dim=args.embedding_dim,
        Lconst_penalty=args.Lconst_penalty,
        Lcategory_penalty=args.Lcategory_penalty,
        save_dir=checkpoint_dir,
        gpu_ids=args.gpu_ids,
    )
    model.setup()
    model.print_networks(True)

    # Resume: load the saved networks and restart the epoch counter from the
    # resume id (presumably --resume is an epoch index — TODO confirm).
    start_epoch = 0
    if args.resume is not None:
        model.load_networks(args.resume)
        start_epoch = args.resume

    for epoch in range(start_epoch, args.epoch):
        for i, batch in enumerate(dataloader):
            # Batch layout assumed (a, b, c) with set_input taking
            # (batch[0], batch[2], batch[1]) — verify against DatasetFromObj.
            model.set_input(batch[0], batch[2], batch[1])
            model.optimize_parameters()
            print(model.g_loss.data, model.d_loss.data)
            if i % args.checkpoint_steps == 0:
                model.save_networks(epoch)
            if i % args.sample_steps == 0:
                model.sample(batch, os.path.join(sample_dir, "sample_{}_{}".format(epoch, i)))
        model.update_lr()
# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()