-
Notifications
You must be signed in to change notification settings - Fork 0
/
batch_norm.py
50 lines (40 loc) · 1.42 KB
/
batch_norm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
"""Implementation of BatchNorm.
Sources:
* https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm1d.html#torch.nn.BatchNorm1d
"""
import numpy as np
import torch
class BatchNorm:
    """NumPy reimplementation of ``torch.nn.BatchNorm1d`` for 2-D inputs.

    Normalizes each feature column of a ``(batch, num_features)`` array
    using batch statistics in training mode and running statistics in
    eval mode, then applies a learnable affine transform
    ``weight * z + bias``.

    Args:
        num_features: number of feature columns in the input.
        eps: small constant added to the variance for numerical stability.
        momentum: weight given to the *new* batch statistic when updating
            the running statistics (PyTorch convention).
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        # Learnable affine parameters, initialised like PyTorch (gamma=1, beta=0).
        self.weight = np.ones((1, num_features))
        self.bias = np.zeros((1, num_features))
        # Running statistics used at inference time.
        self.running_mean = np.zeros((1, num_features))
        self.running_var = np.ones((1, num_features))
        self.eps = eps
        self.momentum = momentum
        self.training = True

    def __call__(self, x):
        """Normalize ``x`` of shape ``(batch, num_features)`` and return the result."""
        assert len(x.shape) == 2
        if self.training:
            m = np.mean(x, axis=0, keepdims=True)
            # Biased (population) variance is used for the normalization itself.
            v = np.var(x, axis=0, keepdims=True)
            # PyTorch convention: running = (1 - momentum) * running + momentum * batch_stat.
            # (The previous code had the two weights swapped, so the running stats
            # were dominated by the latest batch instead of the history.)
            # PyTorch also tracks the *unbiased* batch variance in running_var.
            n = x.shape[0]
            unbiased_v = v * n / (n - 1) if n > 1 else v
            self.running_mean = (1. - self.momentum) * self.running_mean + self.momentum * m
            self.running_var = (1. - self.momentum) * self.running_var + self.momentum * unbiased_v
        else:
            m = self.running_mean
            v = self.running_var
        z = (x - m) / np.sqrt(v + self.eps)
        return self.weight * z + self.bias
if __name__ == "__main__":
    # Sanity check: training-mode output must agree with PyTorch's BatchNorm1d.
    np.random.seed(0)
    num_features = 100
    data = np.random.randn(20, num_features)
    tensor_data = torch.FloatTensor(data)
    # Reference result from PyTorch's own implementation.
    reference_layer = torch.nn.BatchNorm1d(num_features)
    our_layer = BatchNorm(num_features)
    reference = np.array(reference_layer(tensor_data).tolist())
    ours = our_layer(data)
    # Agreement to 6 decimal places is enough given float32 tensors.
    np.testing.assert_almost_equal(ours, reference, decimal=6)