[numpy] torch.a{cosh, sinh} : promote integer inputs to float #47152

Closed
8 changes: 4 additions & 4 deletions aten/src/ATen/native/UnaryOps.cpp
@@ -351,17 +351,17 @@ Tensor& cosh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(
 Tensor cosh(const Tensor& self) { return unary_op_impl(self, at::cosh_out); }
 Tensor& cosh_(Tensor& self) { return unary_op_impl_(self, at::cosh_out); }

-Tensor& acosh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, acosh_stub); }
-Tensor acosh(const Tensor& self) { return unary_op_impl(self, at::acosh_out); }
+Tensor& acosh_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, acosh_stub); }
+Tensor acosh(const Tensor& self) { return unary_op_impl_float(self, acosh_stub); }
 Tensor& acosh_(Tensor& self) { return unary_op_impl_(self, at::acosh_out); }

 // arccosh, alias for acosh
 Tensor& arccosh_out(Tensor& result, const Tensor& self) { return at::acosh_out(result, self); }
 Tensor arccosh(const Tensor& self) { return at::acosh(self); }
 Tensor& arccosh_(Tensor& self) { return at::acosh_(self); }

-Tensor& asinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, asinh_stub); }
-Tensor asinh(const Tensor& self) { return unary_op_impl(self, at::asinh_out); }
+Tensor& asinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, asinh_stub); }
+Tensor asinh(const Tensor& self) { return unary_op_impl_float(self, asinh_stub); }
 Tensor& asinh_(Tensor& self) { return unary_op_impl_(self, at::asinh_out); }

 // arcsinh, alias for asinh
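The change above routes acosh and asinh through the float-promoting unary-op helpers, so integral and bool inputs are computed in (and returned as) the default floating-point dtype, matching NumPy's arccosh/arcsinh. Below is a minimal sketch of the user-facing effect, assuming a CPU tensor and the default dtype of torch.float32; it is an illustration, not part of the PR.

import numpy as np
import torch

# Integer (int64) input: with the float-promoting path the result is computed
# in the default floating-point dtype instead of being rejected.
x = torch.tensor([1, 2, 3])
y = torch.acosh(x)
print(y.dtype)  # torch.float32

# The values agree with NumPy's promoted computation.
np.testing.assert_allclose(y.numpy(), np.arccosh(np.array([1.0, 2.0, 3.0])),
                           rtol=1e-6, atol=1e-6)

# bool inputs promote the same way.
print(torch.asinh(torch.tensor([True, False])).dtype)  # torch.float32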
4 changes: 2 additions & 2 deletions aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
@@ -75,15 +75,15 @@ void tanh_kernel_cuda(TensorIterator& iter) {
 }

 void acosh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "acosh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::acosh(a);
     });
   });
 }

 void asinh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "asinh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::asinh(a);
     });
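On the CUDA side, dispatching on iter.common_dtype() rather than iter.dtype() lets the kernel instantiate scalar_t as the promoted computation type when the input tensor is integral or bool but the result is floating point; gpu_kernel then handles the casts. A quick sanity check of the resulting behavior (a sketch only, requires a CUDA build, and assumes the default dtype is torch.float32):

import torch

if torch.cuda.is_available():
    # Integral input promotes to the default float dtype on CUDA as well.
    xi = torch.arange(1, 5, device='cuda')           # int64
    print(torch.acosh(xi).dtype)                      # torch.float32

    # Half/BFloat16 inputs are already floating point, so they keep their dtype.
    xh = torch.ones(4, device='cuda', dtype=torch.half)
    print(torch.asinh(xh).dtype)                      # torch.float16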
12 changes: 8 additions & 4 deletions torch/testing/_internal/common_methods_invocations.py
@@ -233,8 +233,10 @@ def sample_inputs(self, device, dtype, requires_grad=False):
     UnaryUfuncInfo('acosh',
                    ref=np.arccosh,
                    domain=(1, float('inf')),
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+                   dtypes=all_types_and(torch.bool),
+                   dtypesIfCPU=all_types_and(torch.bool),
+                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                    test_inplace_grad=False),
     UnaryUfuncInfo('asin',
@@ -251,8 +253,10 @@ def sample_inputs(self, device, dtype, requires_grad=False):
     # NOTE: derivative for inplace asinh is not implemented
     UnaryUfuncInfo('asinh',
                    ref=np.arcsinh,
-                   dtypesIfCPU=floating_types(),
-                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+                   dtypes=all_types_and(torch.bool),
+                   dtypesIfCPU=all_types_and(torch.bool),
+                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                    test_inplace_grad=False),
     UnaryUfuncInfo('atan',
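The OpInfo entries now advertise integer and bool dtypes and set promotes_integers_to_float=True, so the generic unary-ufunc tests exercise the promotion against the NumPy references. The snippet below is a simplified, hypothetical sketch of that kind of check; the actual OpInfo-driven harness in the PyTorch test suite is more involved.

import numpy as np
import torch

def check_integer_promotion(torch_fn, np_ref, values):
    # For every integral/bool dtype, the result should be the default float
    # dtype and should match the NumPy reference computed in float64.
    for dtype in (torch.bool, torch.uint8, torch.int8, torch.int16,
                  torch.int32, torch.int64):
        t = torch.tensor(values, dtype=dtype)
        result = torch_fn(t)
        assert result.dtype == torch.get_default_dtype()
        expected = np_ref(t.numpy().astype(np.float64))
        np.testing.assert_allclose(result.numpy(), expected, rtol=1e-5, atol=1e-6)

check_integer_promotion(torch.acosh, np.arccosh, [1, 2, 3])   # domain is [1, inf)
check_integer_promotion(torch.asinh, np.arcsinh, [0, 1, 2])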