# siren_res.py
# from ..builder import BACKBONES
import math
from collections import OrderedDict

import torch
import torch.nn as nn
from mmcv.runner import BaseModule

eps = 1.0e-5
class Sine(nn.Module):
    """Sine activation with frequency ``w0``, shifted by per-layer modulations."""

    def __init__(self, w0=30.):
        super(Sine, self).__init__()
        self.w0 = w0

    def forward(self, x, modulations):
        # Shift modulation: the modulation vector is added to the
        # pre-activation before the sine is applied.
        return torch.sin(self.w0 * (x + modulations))
class SirenLayer(BaseModule):
    """One or more ``Linear`` + activation blocks with SIREN-style init."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_fcs=1,
                 bias=True,
                 act_cfg=dict(type='Sine', w0=30.),
                 init_cfg=dict(type='Uniform', layer='Linear', a=-0.01, b=0.01),
                 **kwargs):
        super(SirenLayer, self).__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.act_cfg = act_cfg
        if act_cfg['type'] == 'Identity':
            self.activation_func = nn.Identity()
        else:
            self.activation_func = Sine(w0=act_cfg.get('w0', 30.))
        _in_channels = in_channels
        _out_channels = out_channels
        self.layers = []
        for i in range(num_fcs):
            self.add_module(f'layer_{i}_fc',
                            nn.Linear(_in_channels, _out_channels, bias=bias))
            self.add_module(f'layer_{i}_actfunc', self.activation_func)
            self.layers.append([f'layer_{i}_fc', f'layer_{i}_actfunc'])
            # Uniform init in [a, b]; both bounds come from init_cfg.
            fc = getattr(self, f'layer_{i}_fc')
            nn.init.uniform_(fc.weight, init_cfg['a'], init_cfg['b'])
            if bias:
                nn.init.uniform_(fc.bias, init_cfg['a'], init_cfg['b'])
            _in_channels = _out_channels

    def init_weights(self):
        super(SirenLayer, self).init_weights()

    def forward(self, x, *args):
        for fc_name, act_name in self.layers:
            x = getattr(self, fc_name)(x)
            # Extra args (the shift modulations) are forwarded to the
            # activation; the Identity output layer is called without them.
            x = getattr(self, act_name)(x, *args)
        return x
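

# Illustrative example (not part of the original file):
#   layer = SirenLayer(2, 256)
#   out = layer(torch.rand(1, 100, 2), torch.zeros(1, 256))  # -> (1, 100, 256)
# The second argument is broadcast-added to the pre-activation inside Sine.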
class Siren_Res(BaseModule):
    """SIREN MLP whose hidden pre-activations are shifted by a latent modulation."""

    def __init__(self,
                 inner_layers=6,
                 in_channels=2,
                 out_channels=3,
                 base_channels=512,
                 num_modulation=512,
                 bias=True,
                 expansions=[1],
                 init_cfg=None):
        super(Siren_Res, self).__init__(init_cfg)
        if len(expansions) == 1:
            # A single expansion factor is broadcast to every hidden layer.
            self.expansions = expansions * inner_layers
        else:
            self.expansions = expansions
        assert inner_layers == len(self.expansions)
        self.inner_layers = inner_layers
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.base_channels = base_channels
        self.bias = bias
        self.layers = []
        _in_channels = in_channels
        # Hidden widths, followed by the final output width.
        out_channels_list = [int(base_channels * e) for e in self.expansions]
        out_channels_list.append(self.out_channels)
        for i in range(self.inner_layers + 1):
            _out_channels = out_channels_list[i]
            w0 = 30.
            if i == 0:
                # First-layer init from the SIREN paper: U(-1/fan_in, 1/fan_in).
                w_std = 1. / _in_channels
            else:
                # Hidden-layer init: U(-sqrt(6/fan_in)/w0, sqrt(6/fan_in)/w0).
                c = 6
                w_std = math.sqrt(c / _in_channels) / w0
            init_cfg = dict(type='Uniform', layer='Linear', a=-w_std, b=w_std)
            if i == self.inner_layers:
                act_cfg = dict(type='Identity')  # linear output layer
            else:
                act_cfg = dict(type='Sine', w0=w0)
            layer = SirenLayer(
                _in_channels, _out_channels, num_fcs=1,
                bias=bias, act_cfg=act_cfg, init_cfg=init_cfg)
            _in_channels = _out_channels
            layer_name = f'SirenLayer_{i}'
            self.add_module(layer_name, layer)
            self.layers.append(layer_name)
        # The modulation MLP maps the latent vector to one shift per hidden
        # unit; the unmodulated output layer is excluded from the budget.
        self.modulation_size_dict = self.get_bias_size()
        _out_channels = sum(self.modulation_size_dict.values())
        self.shift_modulation_layer = nn.Sequential(
            nn.Linear(num_modulation, num_modulation * 2, bias=bias),
            nn.LeakyReLU(),
            nn.Linear(num_modulation * 2, _out_channels, bias=bias),
        )
    def get_bias_size(self):
        """Number of units per modulated layer, keyed by the bias parameter name.

        Called before ``shift_modulation_layer`` exists, so only SirenLayer
        parameters are visited; the final (unmodulated) layer is popped.
        """
        parameters_size = OrderedDict()
        for name, parm in self.named_parameters():
            if '.weight' in name:
                parameters_size[name.replace('.weight', '.bias')] = parm.size(0)
        parameters_size.popitem(last=True)
        return parameters_size

    def get_parameters_size(self):
        parameters_size = dict()
        for name, parm in self.named_parameters():
            parameters_size[name] = parm.size()
        return parameters_size
    # Helpers to freeze, unfreeze, read, and reset the weight and bias
    # subsets separately, e.g. for fitting per-sample biases with shared
    # weights.
    def freeze_model_w(self):
        for name, param in self.named_parameters():
            if 'weight' in name:
                param.requires_grad = False

    def freeze_model_b(self):
        for name, param in self.named_parameters():
            if 'bias' in name:
                param.requires_grad = False

    def train_model_w(self):
        for name, param in self.named_parameters():
            if 'weight' in name:
                param.requires_grad = True

    def train_model_b(self):
        for name, param in self.named_parameters():
            if 'bias' in name:
                param.requires_grad = True

    def get_model_b_data(self):
        data = {}
        for name, param in self.named_parameters():
            if 'bias' in name:
                data[name] = param.data
        return data

    def set_model_b_data(self, data):
        for name, param in self.named_parameters():
            if 'bias' in name:
                param.data = data[name]

    def zero_model_b(self):
        for name, param in self.named_parameters():
            if 'bias' in name:
                param.data = torch.zeros_like(param)

    def freeze_model_w_b(self):
        self.eval()
        for param in self.parameters():
            param.requires_grad = False
    def init_weights(self):
        super(Siren_Res, self).init_weights()
    def forward(self, x, modulations):
        # Map the latent code to per-layer shifts and split it so that each
        # modulated layer receives exactly one shift per hidden unit.
        shift_modulations = self.shift_modulation_layer(modulations)
        shift_modulations_split = torch.split(
            shift_modulations, list(self.modulation_size_dict.values()), dim=1)
        # The output layer has no modulation entry; pad with None.
        shift_modulations_split = shift_modulations_split + (None,)
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            if shift_modulations_split[i] is not None:
                x = layer(x, shift_modulations_split[i])
            else:
                x = layer(x)
        # Map the final linear output from roughly [-1, 1] to [0, 1].
        x = 0.5 * x + 0.5
        return x
    def train_model_w_b(self, mode=True):
        super(Siren_Res, self).train(mode)
        for param in self.parameters():
            param.requires_grad = True
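

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file; the layer counts,
# widths, and coordinate shapes below are illustrative assumptions). It fits
# nothing; it only checks that a latent modulation vector flows through the
# modulation MLP and shifts every hidden SIREN layer.
if __name__ == '__main__':
    model = Siren_Res(inner_layers=3, in_channels=2, out_channels=3,
                      base_channels=256, num_modulation=512)
    # A batch of 64*64 random 2-D coordinates in [-1, 1].
    coords = torch.rand(1, 64 * 64, 2) * 2 - 1
    latent = torch.randn(1, 512)  # one modulation vector per sample
    rgb = model(coords, latent)
    print(rgb.shape)  # expected: torch.Size([1, 4096, 3])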