
Commit

fix vmap test and remove fractional values from test case
janeyx99 committed Oct 13, 2020
1 parent 611731d commit f3765e9
Showing 2 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion aten/src/ATen/native/Pow.cpp
@@ -11,7 +11,7 @@ DEFINE_DISPATCH(pow_tensor_tensor_stub);
 DEFINE_DISPATCH(pow_tensor_scalar_stub);
 
 Tensor& pow_out(Tensor& result, const Tensor& base, const Tensor& exp) {
-  if (exp.dim() == 0) {
+  if (exp.dim() == 0 && base.dim() != 0) {
     return native::pow_out(result, base, exp.item());
   }
   auto iter = TensorIterator::binary_op(result, base, exp);
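For context, here is a minimal sketch of the two situations the new base.dim() != 0 guard separates. The Python-level torch.pow calls and the CUDA device are illustrative assumptions, not part of the commit:

    import torch

    base = torch.randn((3, 3), device='cuda')   # non-0-dim base
    exp = torch.tensor(5)                        # 0-dim exponent tensor on CPU

    # exp.dim() == 0 and base.dim() != 0: eligible for the exp.item() fast path.
    print(torch.pow(base, exp).shape)            # torch.Size([3, 3])

    # With the guard, a 0-dim base no longer takes the exp.item() shortcut and
    # instead falls through to the TensorIterator binary op below the guard.
    scalar_base = torch.tensor(2.0, device='cuda')
    print(torch.pow(scalar_base, torch.tensor(3.0)))  # tensor(8., device='cuda:0')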
6 changes: 3 additions & 3 deletions test/test_torch.py
@@ -13793,9 +13793,9 @@ def test_float_scalar_pow_float_tensor(self, device):
     @onlyCUDA
     @unittest.skipIf(not TEST_NUMPY, 'Numpy not found')
     def test_cuda_tensor_pow_scalar_tensor(self, device):
-        cuda_tensor = torch.randn((3, 3), device='cuda')
-        scalars_tensors = [torch.tensor(5), torch.tensor(4.2), torch.tensor(-0.5)]
-        for exp in scalars_tensors:
+        cuda_tensor = torch.randn((3, 3), device=device)
+        scalar_tensors = [torch.tensor(5), torch.tensor(-3), torch.tensor(1)]
+        for exp in scalar_tensors:
             self._test_pow(cuda_tensor, exp)
 
     @onlyOnCPUAndCUDA
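As a rough standalone illustration of what the updated test exercises, assuming a CUDA-enabled build (a sketch only: the real test goes through the NumPy-backed self._test_pow helper, and the CPU reference comparison below is an assumption):

    import torch

    cuda_tensor = torch.randn((3, 3), device='cuda')
    for exp in [torch.tensor(5), torch.tensor(-3), torch.tensor(1)]:
        reference = cuda_tensor.cpu().pow(exp)   # reference result computed on CPU
        result = cuda_tensor.pow(exp)            # CUDA result under test
        assert torch.allclose(result.cpu(), reference)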
