Skip to content

Commit

Permalink
Fix layernorm cpu precision issues (#108089)
Browse files — browse the repository at this point in the history
  • Loading branch information
mikaylagawarecki authored and pytorchmergebot committed Aug 30, 2023
1 parent 8a089f6 commit 3817de5
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 3 deletions.
6 changes: 3 additions & 3 deletions aten/src/ATen/native/cpu/layer_norm_kernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,17 +55,17 @@ void LayerNormKernelImplInternal(
std::tie(mean_val, rstd_val) = RowwiseMoments(X_ptr, N);
rstd_val = T(1) / std::sqrt(rstd_val + eps);
const T scale = rstd_val;
const T bias = -rstd_val * mean_val;
const T bias = - mean_val;
if (gamma_null || beta_null) {
for (const auto j : c10::irange(N)) {
const T gamma_v = gamma_null ? T(1) : gamma_data[j];
const T beta_v = beta_null ? T(0) : beta_data[j];
Y_ptr[j] = (X_ptr[j] * scale + bias) * gamma_v + beta_v;
Y_ptr[j] = (X_ptr[j] + bias) * rstd_val * gamma_v + beta_v;
}
} else {
vec::map3<T>(
[scale, bias](Vec x, Vec gamma, Vec beta) {
return (x * Vec(scale) + Vec(bias)) * gamma + beta;
return (x + Vec(bias)) * Vec(scale) * gamma + beta;
},
Y_ptr,
X_ptr,
Expand Down
6 changes: 6 additions & 0 deletions test/test_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -7273,6 +7273,12 @@ def test_layer_norm_grads_with_create_graph_flag(self):

self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)

def test_layer_norm_eps(self):
# test for https://github.com/pytorch/pytorch/issues/108072
x = torch.Tensor([[[2.0, 2.0], [14.0, 14.0]], [[2.0, 2.0], [14.0, 14.0]]])
ln = torch.nn.LayerNorm(2, eps=1e-6, elementwise_affine=False)
self.assertEqual(ln.forward(x), torch.zeros_like(x))

def test_padding_list(self):
# Padding can be a list, or tuple (regression test for gh-54452)
x = torch.randn(4, 8, 32, 32)
Expand Down

0 comments on commit 3817de5

Please sign in to comment.