diff --git a/test/test_autograd.py b/test/test_autograd.py
index 937dd391ede9..0bc86915d2d5 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -5624,7 +5624,7 @@ def test_ctc_loss_cudnn(self, device):
         self.assertEqual(grad_cudnn, grad_native, atol=1e-4)
 
     @skipCUDAIfRocm
-    def test_leaky_relu_inplace_with_zero_or_neg_slope(self, device):
+    def test_leaky_relu_inplace_with_neg_slope(self, device):
         a = torch.tensor([-1., 1.], device=device, requires_grad=True)
         b = torch.nn.functional.leaky_relu_(a.clone(), -2)
         with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
@@ -5635,10 +5635,13 @@ def test_leaky_relu_inplace_with_zero_or_neg_slope(self, device):
         with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
             b.backward(torch.ones(2, device=device))
 
+    @skipCUDAIfRocm
+    def test_leaky_relu_inplace_with_zero_slope(self, device):
         a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
         b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
         b.backward(torch.ones(3, device=device))
-        self.assertEqual(a.grad, torch.tensor([0., 0., 1.], device=device))
+        expected = torch.tensor([0., 0., 1.], device=device)
+        self.assertEqual(a.grad, expected)
 
     @onlyCUDA
     def test_free_unneeded_tensor(self, device):
diff --git a/torch/testing/_internal/common_nn.py b/torch/testing/_internal/common_nn.py
index 59fc6506eb45..b1f66a717b29 100644
--- a/torch/testing/_internal/common_nn.py
+++ b/torch/testing/_internal/common_nn.py
@@ -225,6 +225,14 @@ def get_weight(m):
         check_inplace=True,
         desc='with_negval'
     ),
+    dict(
+        module_name='LeakyReLU',
+        constructor_args=(0.0,),
+        cpp_constructor_args='torch::nn::LeakyReLUOptions().negative_slope(0.0)',
+        input_fn=lambda: torch.randn(10, 10),
+        check_inplace=True,
+        desc='with_zero_negval'
+    ),
     dict(
         module_name='LogSigmoid',
         input_size=(2, 3, 4),