utils.py
import torch
# from tensorboard_logger import log_value  # optional: uncomment for TensorBoard logging

def loss_calc(pred, label, ignore_label):
    """
    Cross-entropy loss for semantic segmentation.

    pred:  logits of shape (batch_size, channels, h, w)
    label: class indices of shape (batch_size, h, w); pixels equal to
           ignore_label are excluded from the loss
    """
    label = label.long().cuda()
    criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label).cuda()
    return criterion(pred, label)
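
# Usage sketch (illustrative; `model`, `images`, and `labels` are assumed to
# come from the caller's training loop, with labels of shape (N, h, w)):
#   pred = model(images)                            # logits, (N, C, h, w)
#   loss = loss_calc(pred, labels, ignore_label=255)
#   loss.backward()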


def lr_poly(base_lr, i_iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - i_iter / max_iter) ** power."""
    return base_lr * ((1 - float(i_iter) / max_iter) ** power)
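
# For example, with base_lr=2.5e-4, max_iter=20000, and power=0.9 (the "poly"
# schedule commonly used with DeepLab), the rate halfway through training is:
#   lr_poly(2.5e-4, 10000, 20000, 0.9)  # ~= 1.34e-4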


def get_1x_lr_params(model):
    """
    This generator returns all the parameters of the net except for the last
    classification layers. Note that for each batchnorm layer, requires_grad
    is set to False in deeplab_resnet.py, so no batchnorm parameter is
    returned here.
    """
    b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4]
    for module in b:
        # Iterate module.parameters() directly: iterating parameters() inside
        # a modules() loop (as the original code did) yields each parameter
        # once per enclosing module, producing duplicates in the optimizer.
        for param in module.parameters():
            if param.requires_grad:
                yield param


def get_10x_lr_params(model):
    """
    This generator returns the parameters of the last layers of the net,
    which classify each pixel into a class; they are trained at 10x the
    base learning rate.
    """
    b = [model.layer5.parameters(), model.main_classifier.parameters()]
    for param_gen in b:
        for param in param_gen:
            yield param
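
# Usage sketch (assumes `model` is the DeepLab network and `base_lr` the base
# learning rate): build an optimizer with two parameter groups so the freshly
# initialized classifier trains 10x faster than the pretrained backbone.
#   optimizer = torch.optim.SGD(
#       [{'params': get_1x_lr_params(model), 'lr': base_lr},
#        {'params': get_10x_lr_params(model), 'lr': 10 * base_lr}],
#       lr=base_lr, momentum=0.9, weight_decay=5e-4)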


def adjust_learning_rate(optimizer, i_iter, learning_rate, num_steps, power):
    """Decays the learning rate polynomially with the iteration number.

    Assumes two parameter groups: group 0 (backbone) at the base rate and
    group 1 (classifier) at 10x the base rate.
    """
    lr = lr_poly(learning_rate, i_iter, num_steps, power)
    # log_value('learning', lr, i_iter)  # optional TensorBoard logging
    optimizer.param_groups[0]['lr'] = lr
    optimizer.param_groups[1]['lr'] = lr * 10
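
# Training-loop sketch (illustrative; `loader`, `num_steps`, and `power` are
# assumed, with num_steps/power matching the lr_poly arguments above):
#   for i_iter, (images, labels) in enumerate(loader):
#       optimizer.zero_grad()
#       loss = loss_calc(model(images), labels, ignore_label=255)
#       loss.backward()
#       optimizer.step()
#       adjust_learning_rate(optimizer, i_iter, base_lr, num_steps, power)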