-
Notifications
You must be signed in to change notification settings - Fork 0
/
helpers.py
123 lines (101 loc) · 3.61 KB
/
helpers.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import math
import pandas as pd
import numpy as np
import torch.nn as nn
from sklearn.metrics import confusion_matrix
from scipy.optimize import linear_sum_assignment
def npy(t, to_cpu=True):
    """
    Convert a tensor (or a nested list/tuple/dict of tensors) to numpy.

    Bug fix: the recursive calls previously dropped `to_cpu`, so containers
    were always converted with `to_cpu=True` regardless of the caller's
    choice. The flag is now propagated to every element.

    :param t: Input tensor, or a list/tuple/dict whose leaves are tensors
    :type t: th.Tensor
    :param to_cpu: Call the .cpu() method on `t`?
    :type to_cpu: bool
    :return: Numpy array (or list/dict of numpy arrays mirroring the input)
    :rtype: np.ndarray
    """
    if isinstance(t, (list, tuple)):
        # We got a list/tuple. Convert each element, keeping the flag.
        # NOTE: tuples come back as lists, matching the original behavior.
        return [npy(ti, to_cpu=to_cpu) for ti in t]
    elif isinstance(t, dict):
        # We got a dict. Convert each value, keeping the flag.
        return {k: npy(v, to_cpu=to_cpu) for k, v in t.items()}
    # Assuming t is a tensor.
    if to_cpu:
        return t.cpu().detach().numpy()
    return t.detach().numpy()
def ensure_iterable(elem, expected_length=1):
    """
    Return `elem` as an iterable of length `expected_length`.

    A scalar is replicated into a list of `expected_length` copies; a list
    or tuple is returned unchanged after its length is checked.

    :param elem: Scalar, list, or tuple
    :param expected_length: Required length of the result
    :type expected_length: int
    :return: Iterable of length `expected_length`
    :raises AssertionError: If `elem` is a list/tuple of the wrong length
    """
    if not isinstance(elem, (list, tuple)):
        # Scalar: replicate it.
        return expected_length * [elem]
    assert len(elem) == expected_length, f"Expected iterable {elem} with length {len(elem)} does not have " \
                                         f"expected length {expected_length}"
    return elem
def dict_means(dicts):
    """
    Compute the per-key mean over a list of dicts.

    Uses pandas so that keys missing from some dicts are averaged over the
    dicts that do contain them (NaN entries are skipped by `.mean`).

    :param dicts: Input dicts
    :type dicts: List[dict]
    :return: Mean values
    :rtype: dict
    """
    frame = pd.DataFrame(dicts)
    column_means = frame.mean(axis=0)
    return column_means.to_dict()
def add_prefix(dct, prefix, sep="/"):
    """
    Add a prefix to all keys in `dct`.

    :param dct: Input dict
    :type dct: dict
    :param prefix: Prefix
    :type prefix: str
    :param sep: Separator between prefix and key
    :type sep: str
    :return: Dict with prefix prepended to all keys
    :rtype: dict
    """
    out = {}
    for key, value in dct.items():
        out[f"{prefix}{sep}{key}"] = value
    return out
def ordered_cmat(labels, pred):
    """
    Compute the confusion matrix and accuracy corresponding to the best cluster-to-class assignment.

    The Hungarian algorithm (`linear_sum_assignment` on the negated matrix)
    picks the cluster-to-class permutation that maximizes the diagonal mass.

    :param labels: Label array
    :type labels: np.array
    :param pred: Predictions array
    :type pred: np.array
    :return: Accuracy and confusion matrix
    :rtype: Tuple[float, np.array]
    """
    raw = confusion_matrix(labels, pred)
    row_idx, col_idx = linear_sum_assignment(-raw)
    reordered = raw[np.ix_(row_idx, col_idx)]
    accuracy = np.trace(reordered) / np.sum(reordered)
    return accuracy, reordered
def he_init_weights(module):
    """
    Initialize network weights using the He (Kaiming) initialization strategy.

    Only Conv2d and Linear modules are touched; every other module type is
    left unchanged, so this is safe to pass to `nn.Module.apply`.

    :param module: Network module
    :type module: nn.Module
    """
    if not isinstance(module, (nn.Conv2d, nn.Linear)):
        return
    nn.init.kaiming_normal_(module.weight)
def num2tuple(num):
    """Duplicate a scalar into an (num, num) pair; pass lists/tuples through unchanged."""
    if isinstance(num, (tuple, list)):
        return num
    return (num, num)
def conv2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
    """
    Compute the output shape of a convolution operation.

    Implements the standard conv arithmetic
    ``floor((size + 2*pad - dilation*(kernel - 1) - 1) / stride + 1)``
    independently for the height and width axes.

    :param h_w: Height and width of input
    :type h_w: Tuple[int, int]
    :param kernel_size: Size of kernel
    :type kernel_size: Union[int, Tuple[int, int]]
    :param stride: Stride of convolution
    :type stride: Union[int, Tuple[int, int]]
    :param pad: Padding (in pixels)
    :type pad: Union[int, Tuple[int, int]]
    :param dilation: Dilation
    :type dilation: Union[int, Tuple[int, int]]
    :return: Height and width of output
    :rtype: Tuple[int, int]
    """
    def _pair(v):
        # Inline mirror of the module-level num2tuple helper.
        return v if isinstance(v, (tuple, list)) else (v, v)

    h_w, kernel_size, stride = _pair(h_w), _pair(kernel_size), _pair(stride)
    pad, dilation = _pair(pad), _pair(dilation)

    def _axis(axis):
        span = h_w[axis] + 2 * pad[axis] - dilation[axis] * (kernel_size[axis] - 1) - 1
        return math.floor(span / stride[axis] + 1)

    return _axis(0), _axis(1)