-
Notifications
You must be signed in to change notification settings - Fork 1
/
aleatoric.py
executable file
·77 lines (62 loc) · 3.08 KB
/
aleatoric.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import torch.nn as nn
import torch
import torch.distributions as td
import torch.nn.functional as F
from utils1.distributions import ReshapedDistribution
# f_out is the number of channels of the input feature map fed to the 1x1 heads.
f_out = 16


# TODO: examine the effect of this feature-map size (f_out).
class StochasticDeepMedic(nn.Module):
    """Stochastic segmentation head producing a distribution over logits.

    Maps an ``f_out``-channel feature map to the parameters (mean, diagonal
    covariance, low-rank covariance factor) of a low-rank multivariate
    normal distribution over per-voxel class logits.

    Args:
        num_classes: number of output segmentation classes.
        rank: rank of the low-rank covariance factor.
        epsilon: small constant added to the covariance diagonal for
            numerical stability.
        diagonal: if True, use only the diagonal covariance
            (independent normals per logit).
        dim: spatial dimensionality of the input, 3 (Conv3d) or 2 (Conv2d).
    """

    def __init__(self,
                 num_classes,
                 rank: int = 10,
                 epsilon=1e-5,
                 diagonal=False,
                 dim=3):
        super().__init__()
        self.dim = dim
        conv_fn = nn.Conv3d if self.dim == 3 else nn.Conv2d
        self.rank = rank
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.diagonal = diagonal  # whether to use only the diagonal (independent normals)
        # 1x1(x1) convolutions mapping features to the distribution parameters.
        self.mean_l = conv_fn(f_out, num_classes, kernel_size=(1, ) * self.dim)
        self.log_cov_diag_l = conv_fn(f_out, num_classes, kernel_size=(1, ) * self.dim)
        self.cov_factor_l = conv_fn(f_out, num_classes * rank, kernel_size=(1, ) * self.dim)

    def forward(self, input, sampling_mask):
        """Compute the logit distribution for one batch.

        Args:
            input: feature tensor of shape (batch, f_out, *spatial).
            sampling_mask: tensor of shape (batch, *spatial) marking the
                region of interest; covariance is zeroed outside it.

        Returns:
            Tuple of:
                logit_mean: mean logits, shape (batch, num_classes, *spatial).
                output_dict: dict with detached 'logit_mean', 'cov_diag',
                    'cov_factor' views and the 'distribution' object.
        """
        logits = F.relu(input)
        batch_size = logits.shape[0]
        event_shape = (self.num_classes,) + logits.shape[2:]

        mean = self.mean_l(logits)
        # Diagonal is parameterized in log-space; exp + epsilon keeps it positive.
        cov_diag = self.log_cov_diag_l(logits).exp() + self.epsilon
        mean = mean.view((batch_size, -1))
        cov_diag = cov_diag.view((batch_size, -1))

        # Reshape the factor head output to (batch, flattened_event, rank).
        cov_factor = self.cov_factor_l(logits)
        cov_factor = cov_factor.view((batch_size, self.rank, self.num_classes, -1))
        cov_factor = cov_factor.flatten(2, 3)
        cov_factor = cov_factor.transpose(1, 2)

        # Covariance in the background tends to blow up to infinity,
        # hence set it to 0 outside the ROI given by sampling_mask.
        mask = sampling_mask
        mask = mask.unsqueeze(1).expand((batch_size, self.num_classes) + mask.shape[1:]).reshape(batch_size, -1)
        cov_factor = cov_factor * mask.unsqueeze(-1)
        cov_diag = cov_diag * mask + self.epsilon

        if self.diagonal:
            base_distribution = td.Independent(td.Normal(loc=mean, scale=torch.sqrt(cov_diag)), 1)
        else:
            try:
                base_distribution = td.LowRankMultivariateNormal(loc=mean, cov_factor=cov_factor, cov_diag=cov_diag)
            except (RuntimeError, ValueError):
                # Cholesky of the capacitance matrix can fail for
                # ill-conditioned covariances (RuntimeError, incl.
                # torch.linalg.LinAlgError) or argument validation can
                # reject the parameters (ValueError); fall back to
                # independent normals for this batch.
                print('Covariance became not invertible using independent normals for this batch!')
                base_distribution = td.Independent(td.Normal(loc=mean, scale=torch.sqrt(cov_diag)), 1)
        distribution = ReshapedDistribution(base_distribution, event_shape)

        shape = (batch_size,) + event_shape
        logit_mean = mean.view(shape)
        # Detached views are returned for logging only; gradients flow
        # through 'distribution'.
        cov_diag_view = cov_diag.view(shape).detach()
        cov_factor_view = cov_factor.transpose(2, 1).view((batch_size, self.num_classes * self.rank) + event_shape[1:]).detach()

        output_dict = {'logit_mean': logit_mean.detach(),
                       'cov_diag': cov_diag_view,
                       'cov_factor': cov_factor_view,
                       'distribution': distribution}

        return logit_mean, output_dict