# models.py
import math

import torch
import torch.nn as nn


class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding for sequence-first inputs."""

    def __init__(self,
                 emb_size: int,
                 dropout: float = 0.1,
                 maxlen: int = 750):
        super(PositionalEncoding, self).__init__()
        # Frequencies for the sin/cos pairs: 1 / 10000^(2i / emb_size).
        den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, maxlen).reshape(maxlen, 1)
        pos_embedding = torch.zeros((maxlen, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)  # even dimensions
        pos_embedding[:, 1::2] = torch.cos(pos * den)  # odd dimensions
        pos_embedding = pos_embedding.unsqueeze(-2)    # (maxlen, 1, emb_size)

        self.dropout = nn.Dropout(dropout)
        # Buffer: moves with the module (device/dtype) but is not trained.
        self.register_buffer('pos_embedding', pos_embedding)

    def forward(self, token_embedding: torch.Tensor):
        # token_embedding: (seq_len, batch, emb_size); the buffer broadcasts
        # over the batch dimension.
        return self.dropout(token_embedding + self.pos_embedding[:token_embedding.size(0), :])
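
# Usage sketch (illustrative only; the sizes below are arbitrary assumptions,
# not values taken from this repo). Input and output shapes match, with
# position information added before dropout:
#
#   pe = PositionalEncoding(emb_size=256)
#   x = torch.zeros(100, 4, 256)   # (seq_len, batch, emb_size)
#   y = pe(x)                      # (100, 4, 256)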


class MYNET(torch.nn.Module):
    def __init__(self, opt):
        super(MYNET, self).__init__()
        self.n_feature = opt["feat_dim"]
        n_class = opt["num_of_class"]
        n_embedding_dim = opt["hidden_dim"]
        n_enc_layer = opt["enc_layer"]
        n_enc_head = opt["enc_head"]
        n_dec_layer = opt["dec_layer"]
        n_dec_head = opt["dec_head"]
        n_seglen = opt["segment_size"]
        self.anchors = opt["anchors"]
        self.anchors_stride = []
        dropout = 0.3
        self.best_loss = 1000000
        self.best_map = 0

        # FC layers for the 2 streams: the first half of the input feature
        # vector is RGB, the second half is optical flow.
        self.feature_reduction_rgb = nn.Linear(self.n_feature // 2, n_embedding_dim // 2)
        self.feature_reduction_flow = nn.Linear(self.n_feature // 2, n_embedding_dim // 2)

        self.positional_encoding = PositionalEncoding(n_embedding_dim, dropout, maxlen=400)
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=n_embedding_dim,
                                       nhead=n_enc_head,
                                       dropout=dropout,
                                       activation='gelu'),
            n_enc_layer,
            nn.LayerNorm(n_embedding_dim))
        self.decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(d_model=n_embedding_dim,
                                       nhead=n_dec_head,
                                       dropout=dropout,
                                       activation='gelu'),
            n_dec_layer,
            nn.LayerNorm(n_embedding_dim))

        # Per-anchor prediction heads: class scores and a 2-dim regression.
        self.classifier = nn.Sequential(nn.Linear(n_embedding_dim, n_embedding_dim),
                                        nn.ReLU(),
                                        nn.Linear(n_embedding_dim, n_class))
        self.regressor = nn.Sequential(nn.Linear(n_embedding_dim, n_embedding_dim),
                                       nn.ReLU(),
                                       nn.Linear(n_embedding_dim, 2))

        # One learned decoder query per anchor.
        self.decoder_token = nn.Parameter(torch.zeros(len(self.anchors), 1, n_embedding_dim))

        self.relu = nn.ReLU(True)
        self.softmaxd1 = nn.Softmax(dim=-1)

    def forward(self, inputs):
        # inputs: (batch, seq_len, feat_dim)
        base_x_rgb = self.feature_reduction_rgb(inputs[:, :, :self.n_feature // 2])
        base_x_flow = self.feature_reduction_flow(inputs[:, :, self.n_feature // 2:])
        base_x = torch.cat([base_x_rgb, base_x_flow], dim=-1)
        base_x = base_x.permute([1, 0, 2])  # (seq_len, batch, hidden_dim)

        pe_x = self.positional_encoding(base_x)
        encoded_x = self.encoder(pe_x)

        # Broadcast the learned anchor queries across the batch.
        decoder_token = self.decoder_token.expand(-1, encoded_x.shape[1], -1)
        decoded_x = self.decoder(decoder_token, encoded_x)
        decoded_x = decoded_x.permute([1, 0, 2])  # (batch, num_anchors, hidden_dim)

        anc_cls = self.classifier(decoded_x)
        anc_reg = self.regressor(decoded_x)

        return anc_cls, anc_reg
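
# Shape walk-through (illustrative; batch and sequence sizes are arbitrary):
# with feat_dim=2048, hidden_dim=256, and 4 anchors, an input of shape
# (2, 100, 2048) yields anc_cls of (2, 4, num_of_class) and anc_reg of
# (2, 4, 2), i.e. one class distribution and one 2-dim regression per anchor.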


class SuppressNet(torch.nn.Module):
    def __init__(self, opt):
        super(SuppressNet, self).__init__()
        n_class = opt["num_of_class"] - 1  # drop the background class
        n_seglen = opt["segment_size"]
        n_embedding_dim = 2 * n_seglen
        dropout = 0.3
        self.best_loss = 1000000
        self.best_map = 0

        # Two-layer MLP applied per class over a segment of scores.
        self.mlp1 = nn.Linear(n_seglen, n_embedding_dim)
        self.mlp2 = nn.Linear(n_embedding_dim, 1)
        self.norm = nn.InstanceNorm1d(n_class)
        self.relu = nn.ReLU(True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        # inputs: (batch, seq_len, n_class) with seq_len == segment_size
        base_x = inputs.permute([0, 2, 1])  # (batch, n_class, seq_len)
        base_x = self.norm(base_x)
        x = self.relu(self.mlp1(base_x))
        x = self.sigmoid(self.mlp2(x))      # per-class suppression score
        x = x.squeeze(-1)                   # (batch, n_class)
        return x
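

# Minimal smoke test (illustrative only: the opt values below are assumptions
# chosen to exercise the tensor shapes, not settings from this repo's configs).
if __name__ == '__main__':
    opt = {
        "feat_dim": 2048,         # two 1024-dim streams (RGB + flow)
        "num_of_class": 21,
        "hidden_dim": 256,
        "enc_layer": 2,
        "enc_head": 4,
        "dec_layer": 2,
        "dec_head": 4,
        "segment_size": 16,
        "anchors": [1, 2, 4, 8],  # one decoder query per anchor
    }

    net = MYNET(opt)
    feats = torch.randn(2, 100, opt["feat_dim"])  # batch x seq_len x feat
    anc_cls, anc_reg = net(feats)
    print(anc_cls.shape, anc_reg.shape)           # (2, 4, 21), (2, 4, 2)

    sup = SuppressNet(opt)
    scores = torch.rand(2, opt["segment_size"], opt["num_of_class"] - 1)
    print(sup(scores).shape)                      # (2, 20)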