-
Notifications
You must be signed in to change notification settings - Fork 9
/
train.py
57 lines (39 loc) · 1.63 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import torch
import torch.nn as nn
import numpy as np
class UM_loss(nn.Module):
    """Uncertainty-modeling loss combining three terms.

    - ``loss_cls``: BCE between action scores and the normalized
      multi-hot video label.
    - ``loss_be``: BCE between background scores and a uniform
      distribution over classes (background should look class-agnostic).
    - ``loss_um``: squared hinge separating the L2 magnitude of mean
      action features (pushed above ``margin``) from mean background
      features (pushed toward zero).

    Args:
        alpha: weight of the ``loss_um`` term in the total loss.
        beta: weight of the ``loss_be`` term in the total loss.
        margin: minimum expected L2 norm of the mean action feature.
    """

    def __init__(self, alpha, beta, margin):
        super(UM_loss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.margin = margin
        self.ce_criterion = nn.BCELoss()

    def forward(self, score_act, score_bkg, feat_act, feat_bkg, label):
        """Compute the total loss and a dict of its components.

        Args:
            score_act: per-class action probabilities in [0, 1].
            score_bkg: per-class background probabilities in [0, 1].
            feat_act: action features; mean is taken over dim 1,
                then an L2 norm over dim 1 of the result
                (assumes a (batch, temporal, feature) layout — TODO confirm).
            feat_bkg: background features, same layout as ``feat_act``.
            label: multi-hot video-level labels, one row per sample.

        Returns:
            Tuple ``(loss_total, loss)`` where ``loss`` maps
            ``loss_cls`` / ``loss_be`` / ``loss_um`` / ``loss_total``
            to their tensor values.
        """
        loss = {}

        # Normalize the multi-hot label so each row sums to 1.
        # NOTE(review): an all-zero label row would divide by zero — the
        # caller is expected to supply at least one positive class.
        label = label / torch.sum(label, dim=1, keepdim=True)

        loss_cls = self.ce_criterion(score_act, label)

        # Background target is uniform over classes. ones_like already
        # inherits label's device/dtype; the original hard-coded .cuda()
        # here, which crashed on CPU-only runs and was redundant on GPU.
        label_bkg = torch.ones_like(label)
        label_bkg /= torch.sum(label_bkg, dim=1, keepdim=True)
        loss_be = self.ce_criterion(score_bkg, label_bkg)

        # Hinge: penalize mean action features whose L2 norm falls below
        # the margin. clamp(min=0) produces the same values as the
        # original in-place masked assignment, without mutating a tensor
        # that participates in autograd.
        loss_act = torch.clamp(
            self.margin - torch.norm(torch.mean(feat_act, dim=1), p=2, dim=1),
            min=0)
        # Background features are driven toward zero magnitude.
        loss_bkg = torch.norm(torch.mean(feat_bkg, dim=1), p=2, dim=1)
        loss_um = torch.mean((loss_act + loss_bkg) ** 2)

        loss_total = loss_cls + self.alpha * loss_um + self.beta * loss_be

        loss["loss_cls"] = loss_cls
        loss["loss_be"] = loss_be
        loss["loss_um"] = loss_um
        loss["loss_total"] = loss_total

        return loss_total, loss
def train(net, loader_iter, optimizer, criterion, logger, step):
    """Run a single optimization step.

    Pulls one batch from ``loader_iter``, moves it to the model's device,
    runs a forward/backward pass, applies the optimizer, and logs every
    loss component.

    Args:
        net: model returning a 6-tuple
            ``(score_act, score_bkg, feat_act, feat_bkg, _, _)``.
        loader_iter: iterator yielding 5-tuples; only the first two
            entries (data, label) are used.
        optimizer: torch optimizer over ``net``'s parameters.
        criterion: callable returning ``(cost, loss_dict)``; the values
            of ``loss_dict`` must be scalar tensors.
        logger: object with ``log_value(name, value, step)``
            (presumably a tensorboard-style logger — verify against caller).
        step: integer step index passed through to the logger.
    """
    net.train()
    _data, _label, _, _, _ = next(loader_iter)

    # Move inputs to the model's own device instead of hard-coding
    # .cuda(): identical on the default-GPU setups the original supported,
    # and also correct on CPU or a non-default GPU.
    first_param = next(net.parameters(), None)
    device = first_param.device if first_param is not None else torch.device("cpu")
    _data = _data.to(device)
    _label = _label.to(device)

    optimizer.zero_grad()
    score_act, score_bkg, feat_act, feat_bkg, _, _ = net(_data)
    cost, loss = criterion(score_act, score_bkg, feat_act, feat_bkg, _label)
    cost.backward()
    optimizer.step()

    for key, value in loss.items():
        logger.log_value(key, value.cpu().item(), step)