-
Notifications
You must be signed in to change notification settings - Fork 10
/
train.py
196 lines (173 loc) · 7.8 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
"""
Run this script to train the spine keypoint network
"""
import numpy as np
import load_utils
import spine_augmentation as aug
import confidence_map as cmap
import part_affinity_field_net
import ladder_shufflenet
import torch.optim as optim
import torch.nn as nn
import torch
import os.path as path
import torchvision
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import folders as f
import os
import argparse
def save_grid_images(img, gau, name, nrow=None):
    """Write a grid JPEG with heatmaps stacked above their input images.

    :param img: NCHW float tensor of input images, values expected in [0, 1]
    :param gau: NCHW float tensor of heatmaps with the same HW as ``img``
    :param name: output file stem; saved as "<name>.jpg" under f.train_results
    :param nrow: images per grid row. Defaults to ``img.size(0)`` so the
        heatmap row lines up above the image row. (The original read the
        module-global ``batch_size``, which misaligns on a short final
        batch and breaks standalone use; passing ``nrow`` explicitly or
        relying on the default is backward-compatible.)
    """
    if nrow is None:
        nrow = img.size(0)
    # gau = F.interpolate(gau, size=(img.size(2), img.size(3)), mode="bilinear")
    gau_img = torch.cat((gau, img), dim=0)
    gau_img = torchvision.utils.make_grid(gau_img, nrow=nrow)
    npimg = gau_img.detach().cpu().numpy()
    npimg = np.clip(npimg, 0., 1.)
    npimg = np.transpose(npimg, (1, 2, 0))  # CHW -> HWC for OpenCV
    npimg = (npimg*255.).astype(np.uint8)
    # npimg = cv2.resize(npimg, None, fx=4, fy=4) # Gaussian
    cv2.imwrite(path.join(f.train_results, "%s.jpg" % name), npimg)
def label_normalize_flatten(batch_labels, batch_imgs):
    """
    Scale keypoint coordinates into [0, 1] and flatten each sample's points.

    :param batch_labels: [N][J][2] pixel coordinates (x, y)
    :param batch_imgs: batch whose shape[2:4] gives the image (H, W)
    :return: float32 array of shape (N, J*2); x divided by W, y by H
    """
    height, width = np.asarray(batch_imgs).shape[2:4]
    normalized = np.array(batch_labels, np.float32)
    # x coordinates scale by image width, y by image height
    normalized[:, :, 0] /= width
    normalized[:, :, 1] /= height
    # Collapse the (J, 2) point axes into one vector per sample
    return normalized.reshape((normalized.shape[0], -1))
def plot_norm_pts(batch_imgs, batch_norm_pts, name):
    """Overlay denormalized keypoints on each image and save annotated figures.

    :param batch_imgs: NCHW numpy array of grayscale images; only channel 0
        is read. NOTE(review): pixel values are divided by 255 before
        plotting, so uint8-range input is assumed — confirm against caller.
    :param batch_norm_pts: tensor holding 68 (x, y) pairs per sample,
        normalized to [0, 1]; scaled back to pixels by (W, H) here
    :param name: output file stem; sample i is saved as
        "<name>_<i>_pts.jpg" under f.train_results
    """
    hw = batch_imgs.shape[2:4]  # (H, W)
    plt.style.use('grayscale')
    batch_norm_pts = batch_norm_pts.detach().cpu().numpy()
    batch_norm_pts = batch_norm_pts.reshape((batch_imgs.shape[0], 68, 2)) # Batchsize, joints*4, xy
    for i in range(batch_imgs.shape[0]):
        img = batch_imgs[i,0] # NCHW -> HW
        # img = np.repeat(img[..., np.newaxis], 3, axis=2) # HWC
        img = img / 255.
        # Float array -> PIL image (mode "F"); matplotlib renders it grayscale
        plt_img = Image.fromarray(img)
        plt.imshow(plt_img)
        xy_list = batch_norm_pts[i] # [J][XY]
        # Denormalize: x by width (hw[1]), y by height (hw[0])
        xy_list *= np.array((hw[1], hw[0]), np.float32)
        x_list, y_list = np.transpose(xy_list, axes=[1, 0]).tolist() # [XY][J]
        plt.scatter(x_list, y_list, color='yellow', s=9)
        for j in range(len(x_list)):
            # Label each point with its joint index for visual debugging
            plt.annotate(j, (x_list[j], y_list[j]), color='red', size=5)
        plt.axis("off")
        plt.savefig(path.join(f.train_results, "%s_%d_pts.jpg" % (name, i)), dpi=400)
        plt.clf()  # clear the current figure so the next sample starts clean
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Train network.')
    parser.add_argument('-s', type=int, default=4, help='batch size')
    parser.add_argument("--trainval", action='store_true', default=False)
    parser.add_argument("--lr", type=float, default=0.001, help="initial learning rate")
    parser.add_argument("--patience", type=int, default=5000, help="patience for decrease lr on plateau")
    args = parser.parse_args()
    os.makedirs(f.train_results, exist_ok=True)
    os.makedirs(f.checkpoint, exist_ok=True)
    net = ladder_shufflenet.LadderModelAdd()
    if not torch.cuda.is_available():
        raise RuntimeError("GPU not available")
    batch_size = args.s
    print("Training with batch size: %d" % batch_size)
    if args.trainval:  # Final training, use train and val set
        train_data_loader = load_utils.train_loader(batch_size, use_trainval=True)
        print("--- Using [train, val] set as training set!")
    else:
        train_data_loader = load_utils.train_loader(batch_size)
    test_data_loader = load_utils.test_loader(batch_size)
    device = torch.device("cuda")
    # Load checkpoint
    # If in trainval mode, no "trainval" checkpoint found,
    # and the checkpoint for "train" mode exists,
    # then load the "train" checkpoint for "trainval" training
    if not args.trainval:
        save_path = f.checkpoint_heat_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Model loaded")
        else:
            print("New model created")
    else:  # Trainval mode
        save_path = f.checkpoint_heat_trainval_path
        if path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            print("Load model weights from [trainval] checkpoint")
        elif path.exists(f.checkpoint_heat_path):
            net.load_state_dict(torch.load(f.checkpoint_heat_path))
            print("No [trainval] checkpoint but [train] checkpoint exists. Load [train]")
        else:
            print("No [trainval] or [train] checkpoint, training [train, val] from scratch")
    net.cuda().train()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=args.patience, verbose=True)  # Be patient for n steps
    # Loop invariants hoisted out of the training loop: the original
    # re-constructed the confidence-map generator and the MSE criterion
    # on every step, which is wasted allocation with identical behavior.
    cm = cmap.ConfidenceMap()
    criterion = nn.MSELoss()
    heat_scale = 1
    step = 0
    for train_imgs, train_labels in train_data_loader:
        # Random augmentation of images + keypoint labels, per batch
        train_imgs, train_labels = aug.augment_batch_img(train_imgs, train_labels)
        # Classify labels as (top left, top right, bottom left, bottom right, left center, right center)
        heat_hw = np.asarray(train_imgs).shape[1:3]
        # Ground-truth heatmaps: corner gaussians, center gaussians,
        # first left/right point gaussians; center lines serve as the PAF
        NCHW_corner_gau = cm.batch_gaussian_split_corner(train_imgs, train_labels, heat_scale)
        NCHW_center_gau = cm.batch_gaussian_LRCenter(train_imgs, train_labels, heat_scale)
        # NCHW_t_lines = cm.batch_lines_LRTop(heat_hw, train_labels)
        NCHW_c_lines = cm.batch_lines_LRCenter(heat_hw, train_labels, heat_scale)
        # NCHW_b_lines = cm.batch_lines_LRBottom(heat_hw, train_labels)
        NCHW_first_lrpt = cm.batch_gaussian_first_lrpt(train_imgs, train_labels)
        # NCHW_last_lrpt = cm.batch_gaussian_last_lrpt(train_imgs, train_labels)
        NCHW_paf = NCHW_c_lines
        NCHW_pcm = np.concatenate((NCHW_corner_gau, NCHW_center_gau, NCHW_first_lrpt), axis=1)
        # NCHW_spine_mask = cm.batch_spine_mask(heat_hw, train_labels)
        optimizer.zero_grad()
        # To numpy, NCHW. normalize to [0, 1]
        train_imgs = np.asarray(train_imgs, np.float32)[:, np.newaxis, :, :] / 255.0
        # Normalize train labels to [0, 1] to predict them directly
        # norm_labels = label_normalize_flatten(train_labels, train_imgs)
        # To tensor
        train_imgs = torch.from_numpy(np.asarray(train_imgs)).cuda()
        tensor_gt_pcm = torch.from_numpy(np.asarray(NCHW_pcm)).cuda()
        tensor_gt_paf = torch.from_numpy(np.asarray(NCHW_paf)).cuda()
        # tensor_gt_mask = torch.from_numpy(np.asarray(NCHW_spine_mask)).cuda()
        res_dict = net(train_imgs)
        out_pcm, out_paf = res_dict["pcm"], res_dict["paf"]
        # Loss: PAF term down-weighted by 5 relative to the PCM term
        loss1 = criterion(out_pcm, tensor_gt_pcm)
        loss2 = criterion(out_paf, tensor_gt_paf)
        # loss3 = criterion(out_mask, tensor_gt_mask)
        loss = loss1 + (loss2 / 5)  # + (loss3 / 50) # pcm + paf + mask
        loss.backward()
        optimizer.step()
        step = step + 1
        loss_value = loss.item()
        # Plateau scheduler is driven by the per-step training loss
        scheduler.step(loss_value)
        lr = optimizer.param_groups[0]['lr']
        print("Step: %d, Loss: %f, LR: %f" % (step, loss_value, lr))
        # Save a checkpoint every 200 steps
        if step % 200 == 0:
            torch.save(net.state_dict(), save_path)
            print("Model saved")
        # Stop once the scheduler has decayed the LR below the floor
        if lr <= 0.00005:
            print("Stop on plateau")
            break
        # Test: every 200 steps, render one test batch's PAF channel
        if step % 200 == 0:
            net.eval()
            test_imgs, test_labels = next(test_data_loader)
            test_imgs = np.asarray(test_imgs, np.float32)[:, np.newaxis, :, :]
            test_imgs_01 = test_imgs / 255.0
            with torch.no_grad():
                test_imgs_tensor = torch.from_numpy(test_imgs_01).to(device)
                test_res_dict = net(test_imgs_tensor)  # NCHW
                out_paf = test_res_dict["paf"]
                save_grid_images(test_imgs_tensor, out_paf[:, 0:1, ...], str(step))
                # plot_norm_pts(test_imgs, test_out_pts, str(step))
            net.train()