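"""DexiNet (a.k.a. DXtrem) edge-detection model in PyTorch.

forward() returns six intermediate edge maps plus one fused map, all cropped
back to the input resolution, so that every scale can be supervised during
training.
"""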
import torch
import torch.nn as nn
import torch.nn.functional as F


def weight_init(m):
    # Custom initialization, applied recursively via model.apply(weight_init).
    if isinstance(m, nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
        # Single-input-channel convs get a tighter normal init. The original
        # compared shape[1] (an int) against torch.Size([1]), which is never
        # equal, so the branch was dead code; comparing to 1 restores the intent.
        if m.weight.data.shape[1] == 1:
            torch.nn.init.normal_(m.weight, std=0.1)
        # The 1x1 fuse conv (6 scale maps -> 1 edge map) starts as a near-average blend.
        if m.weight.data.shape == torch.Size([1, 6, 1, 1]):
            torch.nn.init.constant_(m.weight, 0.2)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)
    if isinstance(m, nn.ConvTranspose2d):
        torch.nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
        if m.weight.data.shape[1] == 1:
            torch.nn.init.normal_(m.weight, std=0.1)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)
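
# Illustrative check (hypothetical layer, not part of the model): after
#   conv = nn.Conv2d(6, 1, kernel_size=1); weight_init(conv)
# conv.weight becomes all 0.2 and conv.bias all zero, i.e. the fuse-conv
# case handled above.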


class _DenseLayer(nn.Sequential):
    def __init__(self, input_features, out_features):
        super(_DenseLayer, self).__init__()
        # padding=1 on both 3x3 convs keeps the spatial size unchanged; without
        # it, new_features below would come out 2 px smaller than x2 and the
        # residual addition in forward() would fail.
        self.add_module('conv1', nn.Conv2d(input_features, out_features,
                                           kernel_size=3, stride=1, padding=1, bias=True))
        self.add_module('norm1', nn.BatchNorm2d(out_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(out_features, out_features,
                                           kernel_size=3, stride=1, padding=1, bias=True))
        self.add_module('norm2', nn.BatchNorm2d(out_features))

    def forward(self, x):
        x1, x2 = x
        # Pre-activate the running features, then average the layer output with
        # the skip tensor x2, which is threaded through the block unchanged.
        new_features = super(_DenseLayer, self).forward(F.relu(x1))
        return 0.5 * (new_features + x2), x2


class _DenseBlock(nn.Sequential):
    def __init__(self, num_layers, input_features, out_features):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(input_features, out_features)
            self.add_module('denselayer%d' % (i + 1), layer)
            input_features = out_features
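
# Shape sketch (illustrative tensors, not from this file): a dense block takes
# and returns a pair, and x2 must already have out_features channels, e.g.
#   block = _DenseBlock(2, input_features=128, out_features=256)
#   y, _ = block([torch.rand(1, 128, 88, 88), torch.rand(1, 256, 88, 88)])
#   # y.shape == (1, 256, 88, 88)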


class UpConvBlock(nn.Module):
    def __init__(self, in_features, up_scale, mode='deconv'):
        super(UpConvBlock, self).__init__()
        self.up_factor = 2
        self.constant_features = 16
        layers = None
        if mode == 'deconv':
            layers = self.make_deconv_layers(in_features, up_scale)
        elif mode == 'pixel_shuffle':
            layers = self.make_pixel_shuffle_layers(in_features, up_scale)
        assert layers is not None, 'mode must be "deconv" or "pixel_shuffle"'
        self.features = nn.Sequential(*layers)

    def make_deconv_layers(self, in_features, up_scale):
        # Each stage doubles the resolution. kernel_size = 2 ** up_scale with
        # stride 2 overshoots the target size; DexiNet.slice() crops the excess.
        layers = []
        kernel_size = 2 ** up_scale
        for i in range(up_scale):
            out_features = self.compute_out_features(i, up_scale)
            layers.append(nn.Conv2d(in_features, out_features, 1))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.ConvTranspose2d(
                out_features, out_features, kernel_size, stride=2))
            in_features = out_features
        return layers

    def make_pixel_shuffle_layers(self, in_features, up_scale):
        layers = []
        for i in range(up_scale):
            out_features = self.compute_out_features(i, up_scale)
            in_features = int(in_features / (self.up_factor ** 2))
            layers.append(nn.PixelShuffle(self.up_factor))
            layers.append(nn.Conv2d(in_features, out_features, 1))
            # The original guard `if i < up_scale` was always true inside this
            # loop; skipping the ReLU after the final 1-channel layer keeps the
            # output a raw logit, matching the deconv path.
            if i < up_scale - 1:
                layers.append(nn.ReLU(inplace=True))
            in_features = out_features
        return layers

    def compute_out_features(self, idx, up_scale):
        # Intermediate stages use a fixed width; the last stage emits 1 channel.
        return 1 if idx == up_scale - 1 else self.constant_features

    def forward(self, x):
        return self.features(x)
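
# Rough size check (illustrative numbers): with mode='deconv' and up_scale=2,
# each ConvTranspose2d(kernel_size=4, stride=2) maps H -> 2*H + 2, so a 50x50
# input comes out larger than the 200x200 target and is cropped later:
#   up = UpConvBlock(in_features=256, up_scale=2)
#   up(torch.rand(1, 256, 50, 50)).shape  # torch.Size([1, 1, 206, 206])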


class SingleConvBlock(nn.Module):
    def __init__(self, in_features, out_features, stride,
                 use_bs=True  # likely a typo for use_bn; kept to preserve call sites
                 ):
        super(SingleConvBlock, self).__init__()
        self.use_bn = use_bs
        self.conv = nn.Conv2d(in_features, out_features, 1, stride=stride)
        self.bn = nn.BatchNorm2d(out_features)

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        return x


class DoubleConvBlock(nn.Module):
    def __init__(self, in_features, mid_features,
                 out_features=None,
                 stride=1,
                 use_act=True):
        super(DoubleConvBlock, self).__init__()
        self.use_act = use_act
        if out_features is None:
            out_features = mid_features
        self.conv1 = nn.Conv2d(in_features, mid_features,
                               3, padding=1, stride=stride)
        self.bn1 = nn.BatchNorm2d(mid_features)
        self.conv2 = nn.Conv2d(mid_features, out_features, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_features)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        if self.use_act:
            x = self.relu(x)
        return x
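
# Note (an inference, not stated in the file): DexiNet builds block_2 with
# use_act=False, presumably because _DenseLayer.forward applies F.relu to its
# own input, so a second ReLU ahead of dblock_3 would be redundant.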


class DexiNet(nn.Module):
    """Definition of the DexiNet (DXtrem) edge-detection network."""

    def __init__(self):
        super(DexiNet, self).__init__()
        self.block_1 = DoubleConvBlock(3, 32, 64, stride=2)
        self.block_2 = DoubleConvBlock(64, 128, use_act=False)
        self.dblock_3 = _DenseBlock(2, 128, 256)
        self.dblock_4 = _DenseBlock(3, 256, 512)
        self.dblock_5 = _DenseBlock(3, 512, 512)
        self.dblock_6 = _DenseBlock(3, 512, 256)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # left skip connections (see the architecture figure in the journal paper)
        self.side_1 = SingleConvBlock(64, 128, 2)
        self.side_2 = SingleConvBlock(128, 256, 2)
        self.side_3 = SingleConvBlock(256, 512, 2)
        self.side_4 = SingleConvBlock(512, 512, 1)
        self.side_5 = SingleConvBlock(512, 256, 1)  # defined but not used in forward()

        # right skip connections (see the architecture figure in the journal paper)
        self.pre_dense_2 = SingleConvBlock(128, 256, 2, use_bs=False)
        self.pre_dense_3 = SingleConvBlock(128, 256, 1)
        self.pre_dense_4 = SingleConvBlock(256, 512, 1)
        self.pre_dense_5_0 = SingleConvBlock(256, 512, 2, use_bs=False)
        self.pre_dense_5 = SingleConvBlock(512, 512, 1)
        self.pre_dense_6 = SingleConvBlock(512, 256, 1)

        # upsampling heads, one per scale
        self.up_block_1 = UpConvBlock(64, 1)
        self.up_block_2 = UpConvBlock(128, 1)
        self.up_block_3 = UpConvBlock(256, 2)
        self.up_block_4 = UpConvBlock(512, 3)
        self.up_block_5 = UpConvBlock(512, 4)
        self.up_block_6 = UpConvBlock(256, 4)
        self.block_cat = nn.Conv2d(6, 1, kernel_size=1)  # fuse conv

        self.apply(weight_init)

    def slice(self, tensor, slice_shape):
        # Crop the upsampled map back to the input's height and width.
        height, width = slice_shape
        return tensor[..., :height, :width]
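
    # Resolution walkthrough for a 400x400 input (derived from the strides
    # above): block_1 -> 200x200; block_2 + maxpool -> 100x100; block_3 pooled
    # -> 50x50; block_4 pooled -> 25x25 (blocks 5 and 6 stay at 25x25). Each
    # up_block then maps its scale back above 400 and slice() crops to 400x400.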
    def forward(self, x):
        assert x.ndim == 4, x.shape

        # Block 1
        block_1 = self.block_1(x)
        block_1_side = self.side_1(block_1)

        # Block 2
        block_2 = self.block_2(block_1)
        block_2_down = self.maxpool(block_2)
        block_2_add = block_2_down + block_1_side
        block_2_side = self.side_2(block_2_add)

        # Block 3
        block_3_pre_dense = self.pre_dense_3(block_2_down)
        block_3, _ = self.dblock_3([block_2_add, block_3_pre_dense])
        block_3_down = self.maxpool(block_3)
        block_3_add = block_3_down + block_2_side
        block_3_side = self.side_3(block_3_add)

        # Block 4
        block_4_pre_dense_256 = self.pre_dense_2(block_2_down)
        block_4_pre_dense = self.pre_dense_4(
            block_4_pre_dense_256 + block_3_down)
        block_4, _ = self.dblock_4([block_3_add, block_4_pre_dense])
        block_4_down = self.maxpool(block_4)
        block_4_add = block_4_down + block_3_side
        block_4_side = self.side_4(block_4_add)

        # Block 5
        block_5_pre_dense_512 = self.pre_dense_5_0(block_4_pre_dense_256)
        block_5_pre_dense = self.pre_dense_5(
            block_5_pre_dense_512 + block_4_down)
        block_5, _ = self.dblock_5([block_4_add, block_5_pre_dense])
        block_5_add = block_5 + block_4_side

        # Block 6
        block_6_pre_dense = self.pre_dense_6(block_5)
        block_6, _ = self.dblock_6([block_5_add, block_6_pre_dense])

        # Upsample every scale back to the input resolution.
        height, width = x.shape[-2:]
        slice_shape = (height, width)
        out_1 = self.slice(self.up_block_1(block_1), slice_shape)
        out_2 = self.slice(self.up_block_2(block_2), slice_shape)
        out_3 = self.slice(self.up_block_3(block_3), slice_shape)
        out_4 = self.slice(self.up_block_4(block_4), slice_shape)
        out_5 = self.slice(self.up_block_5(block_5), slice_shape)
        out_6 = self.slice(self.up_block_6(block_6), slice_shape)
        results = [out_1, out_2, out_3, out_4, out_5, out_6]

        # Concatenate the six scale maps and fuse them into a single edge map.
        block_cat = torch.cat(results, dim=1)   # Bx6xHxW
        block_cat = self.block_cat(block_cat)   # Bx1xHxW
        results.append(block_cat)
        return results
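

# The outputs are raw (un-activated) maps. A probability edge map would
# typically be obtained with a sigmoid -- an assumption about downstream use,
# not something this file does itself:
#   probs = torch.sigmoid(model(images)[-1])  # fused map, Bx1xHxW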


if __name__ == '__main__':
    # Smoke test: run a random batch through the model and print the shapes.
    batch_size = 8
    img_height = 400
    img_width = 400
    # device = "cuda" if torch.cuda.is_available() else "cpu"
    device = "cpu"

    input = torch.rand(batch_size, 3, img_height, img_width).to(device)
    print(f"input shape: {input.shape}")
    model = DexiNet().to(device)
    output = model(input)
    print(f"output shapes: {[t.shape for t in output]}")
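
    # Hedged training sketch, adapted from the loop that was commented out
    # here. The MSE loss on the fused map follows that comment; the optimizer
    # choice is an assumption, not taken from this file.
    #
    #   target = torch.rand(batch_size, 1, img_height, img_width).to(device)
    #   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    #   for step in range(20000):
    #       optimizer.zero_grad()
    #       output = model(input)
    #       loss = nn.MSELoss()(output[-1], target)
    #       loss.backward()
    #       optimizer.step()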