-
Notifications
You must be signed in to change notification settings - Fork 42
/
model.lua
174 lines (145 loc) · 4.91 KB
/
model.lua
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
--------------------------------------------------------------------------------
-- Decoder for ResNet-18
--
-- Abhishek Chaurasia
-- September 2016
--------------------------------------------------------------------------------
-- Global setup: default tensor type is CPU float; cudnn supplies the GPU
-- convolution primitives aliased further down.
torch.setdefaulttensortype('torch.FloatTensor')
require 'cudnn'
local nn = require 'nn'
--------------------------------------------------------------------------------
-- NOTE(review): `opt` is a global expected to be set by the calling training
-- script (not visible in this file) — confirm against the caller.
-- histClasses: per-class pixel histogram, used below to weight the loss.
-- classes: list of class names; #classes sizes the final output map.
local histClasses = opt.datahistClasses
local classes = opt.dataClasses
-- Initialization functions
--- He (MSRA) initialization for a spatial convolution module:
-- weight ~ N(0, sqrt(2 / n)) with n = kW * kH * nOutputPlane.
-- Works on any module exposing kW, kH, nOutputPlane and a weight tensor.
local function ConvInit(v)
   local fanOut = v.kW * v.kH * v.nOutputPlane
   local stdv = math.sqrt(2 / fanOut)
   v.weight:normal(0, stdv)
end
--- Batch-norm initialization: unit scale (gamma = 1), zero shift (beta = 0).
local function BNInit(v)
   v.bias:zero()
   v.weight:fill(1)
end
print('\n\27[31m\27[4mConstructing Neural Network\27[0m')
print('Using pretrained ResNet-18')
-- loading model
-- Load the pretrained ResNet-18; the path comes from the training options.
local oModel = torch.load(opt.pretrained)
-- Getting rid of classifier
-- Strip the classifier head in reverse order so indices stay valid.
-- NOTE(review): assumes the fb.resnet.torch layout where modules 9-11 are
-- avg-pool, view, and linear — confirm against the checkpoint used.
oModel:remove(11)
oModel:remove(10)
oModel:remove(9)
-- Last layer is size 512x8x8
-- Function and variable definition
-- iChannels is mutated by layer() below and read by bypass2dec() to track
-- the channel count of the previous encoder stage.
local iChannels = 64
-- Shorthand aliases for layer constructors (Avg and Max are declared but
-- not used anywhere below in this file).
local Convolution = cudnn.SpatialConvolution
local Avg = nn.SpatialAveragePooling
local ReLU = nn.ReLU
local Max = nn.SpatialMaxPooling
local SBatchNorm = nn.SpatialBatchNormalization
-- Handles decoder
--- Builds one decoder bottleneck:
--   1x1 conv (channels / 4) -> BN -> ReLU
--   3x3 transposed conv (stride/adjS control upsampling) -> BN -> ReLU
--   1x1 conv (project to oFeatures) -> BN -> ReLU
-- @param iFeatures input channel count (must be divisible by 4)
-- @param oFeatures output channel count
-- @param stride    stride of the transposed conv (2 => 2x spatial upsample)
-- @param adjS      output adjustment (adjW/adjH) of the transposed conv
-- @return nn.Sequential with freshly initialized parameters
local function decode(iFeatures, oFeatures, stride, adjS)
   local mainBlock = nn.Sequential()
   mainBlock:add(Convolution(iFeatures, iFeatures/4, 1, 1, 1, 1, 0, 0))
   mainBlock:add(SBatchNorm(iFeatures/4, 1e-3))
   mainBlock:add(nn.ReLU(true))
   mainBlock:add(nn.SpatialFullConvolution(iFeatures/4, iFeatures/4, 3, 3, stride, stride, 1, 1, adjS, adjS))
   mainBlock:add(SBatchNorm(iFeatures/4, 1e-3))
   mainBlock:add(nn.ReLU(true))
   mainBlock:add(Convolution(iFeatures/4, oFeatures, 1, 1, 1, 1, 0, 0))
   mainBlock:add(SBatchNorm(oFeatures, 1e-3))
   mainBlock:add(nn.ReLU(true))
   -- Initialization. (The original wrapped these calls in a `for i = 1, 2`
   -- loop, re-running every initialization twice; once is sufficient.)
   for pos = 1, 7, 3 do
      ConvInit(mainBlock:get(pos))     -- conv layers at positions 1, 4, 7
      BNInit(mainBlock:get(pos + 1))   -- batch-norm layers at 2, 5, 8
   end
   return mainBlock
end
-- Creates count residual blocks with specified number of features
--- Wraps the two pretrained residual blocks of encoder stage `layerN`
-- (oModel modules 5..8 hold stages 1..4) in a fresh nn.Sequential.
-- Side effect: records `features` in the `iChannels` upvalue, which
-- bypass2dec() reads to size its decoder.
local function layer(layerN, features)
   iChannels = features
   local stage = oModel:get(4 + layerN)
   local seq = nn.Sequential()
   seq:add(stage:get(1))
   seq:add(stage:get(2))
   return seq
end
-- Creates bypass modules for decoders
--- Recursively builds the encoder/decoder ladder with identity bypasses:
--   out = ReLU( prim(x) + x )
-- where prim = [encoder stage `layers`] -> [deeper ladder] -> [decoder].
-- `iChannels` is read BEFORE layer() updates it, so oFeatures is the
-- channel count of the previous (shallower) stage that the decoder must
-- restore for the element-wise sum with the bypassed input.
-- @param features channel count of encoder stage `layers`
-- @param layers   stage index (1..4); recursion bottoms out at 4
-- @param stride   stride for this level's decoder
-- @param adjS     output adjustment for this level's decoder
local function bypass2dec(features, layers, stride, adjS)
   local container = nn.Sequential()
   local accum = nn.ConcatTable()
   local prim = nn.Sequential() -- Container for encoder
   local oFeatures = iChannels
   accum:add(prim):add(nn.Identity())
   -- Add the bottleneck modules
   prim:add(layer(layers, features))
   if layers == 4 then
      -- DECODER (innermost level). The original hard-coded stride=2, adjS=1
      -- here; every recursive call passes exactly those values, so using
      -- the parameters preserves behavior while keeping the function general.
      prim:add(decode(features, oFeatures, stride, adjS))
      return container:add(accum):add(nn.CAddTable()):add(nn.ReLU(true))
   end
   -- Move on to next bottleneck
   prim:add(bypass2dec(2*features, layers+1, 2, 1))
   -- Add decoder module
   prim:add(decode(features, oFeatures, stride, adjS))
   return container:add(accum):add(nn.CAddTable()):add(nn.ReLU(true))
end
-- Model definition starts
-- Resume from a checkpoint when one exists; otherwise assemble the network:
-- pretrained ResNet-18 stem + bypass ladder (encoder/decoder) + decoder tail.
local model
if paths.filep(opt.save .. '/all/model-last.net') then
   model = torch.load(opt.save .. '/all/model-last.net')
else
   model = nn.Sequential()
   -- ResNet-18 stem: 7x7 conv, BN, ReLU, max-pool (oModel modules 1-4)
   model:add(oModel:get(1))
   model:add(oModel:get(2))
   model:add(oModel:get(3))
   model:add(oModel:get(4))
   -- Encoder/decoder ladder with identity bypasses; outputs 64 channels
   model:add(bypass2dec(64, 1, 1, 0))
   -- Decoder section without bypassed information
   model:add(nn.SpatialFullConvolution(64, 32, 3, 3, 2, 2, 1, 1, 1, 1))
   model:add(SBatchNorm(32))
   model:add(ReLU(true))
   -- 64x128x128
   model:add(Convolution(32, 32, 3, 3, 1, 1, 1, 1))
   model:add(SBatchNorm(32, 1e-3))
   model:add(ReLU(true))
   -- 32x128x128
   model:add(nn.SpatialFullConvolution(32, #classes, 2, 2, 2, 2, 0, 0, 0, 0))
   -- Model definition ends here
   -- Initialize convolutions and batch norm existing in later stage of
   -- decoder. (The original wrapped duplicated calls in a 2-iteration loop,
   -- re-initializing each module four times; a single pass is sufficient.)
   ConvInit(model:get(#model))       -- final transposed conv (-> #classes)
   ConvInit(model:get(#model - 3))   -- 3x3 conv of the tail
   ConvInit(model:get(#model - 6))   -- first transposed conv of the tail
   BNInit(model:get(#model - 2))     -- BN after the 3x3 conv
   BNInit(model:get(#model - 5))     -- BN after the first transposed conv
end
-- Wrap the model in DataParallelTable when more than one GPU is visible.
-- NOTE(review): gpu_list always contains EVERY visible device regardless of
-- opt.nGPU; the log line previously reported opt.nGPU, which could disagree
-- with the number of GPUs actually used — it now reports the real count.
if cutorch.getDeviceCount() > 1 then
   local gpu_list = {}
   for i = 1, cutorch.getDeviceCount() do gpu_list[i] = i end
   model = nn.DataParallelTable(1, true, false):add(model:cuda(), gpu_list)
   print('\27[32m' .. #gpu_list .. " GPUs being used\27[0m")
end
-- Loss: NLL
print('Defining loss function...')
-- Inverse-log-frequency class weighting (ENet scheme):
--   w_c = 1 / log(1.02 + p_c),  p_c = hist_c / max(hist)
-- so rare classes receive larger weights; the 1.02 floor bounds the weight.
local classWeights = torch.pow(torch.log(1.02 + histClasses/histClasses:max()), -1)
-- classWeights[1] = 0
-- Fix: `loss` was an accidental global (the file already declares
-- `local model`); keep it local and expose it via the returned table.
local loss = cudnn.SpatialCrossEntropyCriterion(classWeights)
model:cuda()
loss:cuda()
----------------------------------------------------------------------
-- return package:
return {
   model = model,
   loss = loss,
}