[numpy] torch.sinh: promote integer inputs to float #48644

4 changes: 2 additions & 2 deletions aten/src/ATen/native/UnaryOps.cpp
@@ -343,8 +343,8 @@ Tensor& cos_out(Tensor& result, const Tensor& self) { return unary_op_impl_float
Tensor cos(const Tensor& self) { return unary_op_impl_float(self, cos_stub); }
Tensor& cos_(Tensor& self) { return unary_op_impl_(self, at::cos_out); }

Tensor& sinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, sinh_stub); }
Tensor sinh(const Tensor& self) { return unary_op_impl(self, at::sinh_out); }
Tensor& sinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, sinh_stub); }
Tensor sinh(const Tensor& self) { return unary_op_impl_float(self, sinh_stub); }
Tensor& sinh_(Tensor& self) { return unary_op_impl_(self, at::sinh_out); }

Tensor& cosh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, cosh_stub); }
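For illustration (not part of the diff): after this change, integer and bool inputs to torch.sinh are promoted to the default floating-point dtype before the op runs, so, assuming the default dtype is float32:

>>> import torch
>>> torch.sinh(torch.arange(4)).dtype
torch.float32
>>> torch.sinh(torch.tensor([0, 1, 2, 3]))
tensor([ 0.0000,  1.1752,  3.6269, 10.0179])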
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
@@ -51,7 +51,7 @@ void cos_kernel_cuda(TensorIterator& iter) {
}

void sinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "sinh_cuda", [&]() {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
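Since the CUDA kernel now dispatches on the iterator's common dtype rather than its output dtype, the same promotion applies on GPU. A quick illustrative check (assumes a CUDA device is available):

>>> torch.sinh(torch.arange(4, device='cuda')).dtype
torch.float32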
5 changes: 3 additions & 2 deletions torch/csrc/jit/tensorexpr/kernel.cpp
@@ -1148,8 +1148,9 @@ Tensor* TensorExprKernel::computeValue(const torch::jit::Value* v) {
} break;

case aten::sinh: {
  return computeOneOperand(
      "aten_sinh", v, [](const ExprHandle& a) { return sinh(a); });
  return computeOneOperand("aten_sinh", v, [](const ExprHandle& a) {
    return sinh(promoteIntegerToFloat(a));
  });
} break;

case aten::atan: {
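The TensorExpr lowering above wraps the operand in promoteIntegerToFloat so the fused kernel matches eager-mode promotion. A hedged sketch of that consistency, which is the property test_variant_consistency_jit exercises (assuming the scripted op follows the same promotion as eager mode):

>>> import torch
>>> @torch.jit.script
... def f(x):
...     return torch.sinh(x)
...
>>> f(torch.arange(4)).dtype
torch.float32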
42 changes: 38 additions & 4 deletions torch/testing/_internal/common_methods_invocations.py
@@ -1,4 +1,4 @@
from functools import reduce
from functools import reduce, wraps
from operator import mul, itemgetter
import collections

@@ -21,7 +21,8 @@
random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, make_nonzero_det,
random_fullrank_matrix_distinct_singular_value, set_rng_seed,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY)
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY,
torch_to_numpy_dtype_dict)

if TEST_SCIPY:
import scipy.special
@@ -262,6 +263,34 @@ def sample_inputs_addmm(self, device, dtype, requires_grad):
low=None, high=None,
requires_grad=False))),)

def np_integer_promotion_wrapper(fn):
    # NumPy promotes integer types to double while
    # PyTorch promotes integers to float.
    #
    # This affects operators whose outputs grow very fast,
    # e.g. sinh, exp, etc.
    # To mitigate that, we force NumPy to compute with the
    # dtype that PyTorch uses for those computations.
    #
    # Code Example
    # >>> x = torch.tensor(501.)
    # >>> x.exp()
    # tensor(inf)
    # >>> x = torch.tensor(501., dtype=torch.double)
    # >>> x.exp()
    # tensor(3.8154e+217, dtype=torch.float64)
    def is_integral(dtype):
        return dtype in [np.bool, np.uint8, np.int8, np.int16, np.int32, np.int64]

    np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]

    @wraps(fn)
    def wrapped_fn(x):
        if is_integral(x.dtype):
            return fn(x, dtype=np_dtype)
        return fn(x)

    return wrapped_fn
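# Usage sketch (illustration only, not part of this diff): with the default
# dtype at float32, the wrapped reference computes in float32 for integer
# inputs instead of NumPy's default float64, e.g.
# >>> np.sinh(np.arange(3)).dtype
# dtype('float64')
# >>> np_integer_promotion_wrapper(np.sinh)(np.arange(3)).dtype
# dtype('float32')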

# Operator database (sorted alphabetically)
op_db: List[Any] = [
@@ -508,8 +537,10 @@ def sample_inputs_addmm(self, device, dtype, requires_grad):
                                dtypes=[torch.float], active_if=TEST_WITH_ROCM),
                   )),
    UnaryUfuncInfo('sinh',
                   ref=np.sinh,
                   dtypesIfCPU=floating_and_complex_types(),
                   ref=np_integer_promotion_wrapper(np.sinh),
                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
                   promotes_integers_to_float=True,
                   assert_autodiffed=True,
                   decorators=(precisionOverride({torch.float16: 1e-2}),),
                   skips=(
@@ -519,6 +550,9 @@ def sample_inputs_addmm(self, device, dtype, requires_grad):
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
                                active_if=IS_WINDOWS),
                       # Reference: https://github.com/pytorch/pytorch/issues/48641
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                device_type='cpu', dtypes=[torch.int8]),
                       SkipInfo('TestCommon', 'test_variant_consistency_jit',
                                device_type='cuda', dtypes=[torch.float16]),
                   )),