[numpy] torch.sinh: promote integer inputs to float #48644
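As a quick, hedged sketch of the user-visible effect of this change (assuming the default dtype is `torch.float32`): integer and bool inputs to `torch.sinh` now produce floating-point outputs, analogous to NumPy's promotion of integer inputs to floating point (NumPy promotes to float64, PyTorch to its default dtype).

```python
# Illustrative sketch only; assumes torch.get_default_dtype() is torch.float32.
import torch

x = torch.arange(5, dtype=torch.int64)   # integer input
print(torch.sinh(x).dtype)               # torch.float32: integers are promoted to float

b = torch.tensor([True, False])
print(torch.sinh(b).dtype)               # torch.float32: bool is promoted as well
```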

4 changes: 2 additions & 2 deletions aten/src/ATen/native/UnaryOps.cpp
@@ -343,8 +343,8 @@ Tensor& cos_out(Tensor& result, const Tensor& self) { return unary_op_impl_float
Tensor cos(const Tensor& self) { return unary_op_impl_float(self, cos_stub); }
Tensor& cos_(Tensor& self) { return unary_op_impl_(self, at::cos_out); }

-Tensor& sinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, sinh_stub); }
-Tensor sinh(const Tensor& self) { return unary_op_impl(self, at::sinh_out); }
+Tensor& sinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, sinh_stub); }
+Tensor sinh(const Tensor& self) { return unary_op_impl_float(self, sinh_stub); }
Tensor& sinh_(Tensor& self) { return unary_op_impl_(self, at::sinh_out); }

Tensor& cosh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, cosh_stub); }
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
@@ -51,7 +51,7 @@ void cos_kernel_cuda(TensorIterator& iter) {
}

void sinh_kernel_cuda(TensorIterator& iter) {
-AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "sinh_cuda", [&]() {
+AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
5 changes: 3 additions & 2 deletions torch/csrc/jit/tensorexpr/kernel.cpp
@@ -1147,8 +1147,9 @@ Tensor* TensorExprKernel::computeValue(const torch::jit::Value* v) {
} break;

case aten::sinh: {
-return computeOneOperand(
-    "aten_sinh", v, [](const ExprHandle& a) { return sinh(a); });
+return computeOneOperand("aten_sinh", v, [](const ExprHandle& a) {
+  return sinh(promoteIntegerToFloat(a));
+});
} break;

case aten::atan: {
39 changes: 36 additions & 3 deletions torch/testing/_internal/common_methods_invocations.py
@@ -21,7 +21,8 @@
random_symmetric_matrix, random_symmetric_psd_matrix,
random_symmetric_pd_matrix, make_nonzero_det,
random_fullrank_matrix_distinct_singular_value, set_rng_seed,
-TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY)
+TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY,
+torch_to_numpy_dtype_dict)

if TEST_SCIPY:
import scipy.special
@@ -220,6 +221,33 @@ def sample_inputs(self, device, dtype, requires_grad=False):
requires_grad=requires_grad)),)


+def fast_growing_function_wrapper(fn):
+    # NumPy promotes integer types to double while
+    # PyTorch promotes integer types to float.
+    #
+    # This affects operators whose outputs grow very fast,
+    # e.g. sinh, exp, etc.
+    # To mitigate that, we force NumPy to compute with the
+    # dtype that PyTorch uses for those computations.
+    #
+    # Code example:
+    # >>> x = torch.tensor(501.)
+    # >>> x.exp()
+    # tensor(inf)
+    # >>> x = torch.tensor(501., dtype=torch.double)
+    # >>> x.exp()
+    # tensor(3.8154e+217, dtype=torch.float64)
+    def is_integral(dtype):
+        return dtype in [np.uint8, np.int8, np.int16, np.int32, np.int64]
+
+    np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
+
+    def wrapped_fn(x):
+        if is_integral(x.dtype):
+            return fn(x, dtype=np_dtype)
+        return fn(x)
+
+    return wrapped_fn
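# Illustrative usage of the wrapper above (hedged sketch, not part of this
# diff): with the default dtype left at float32, the wrapped NumPy reference
# computes in float32 on integer inputs, mirroring PyTorch's promotion,
# whereas plain NumPy would compute in float64.
#
#   >>> ref = fast_growing_function_wrapper(np.sinh)
#   >>> np.sinh(np.array([3], dtype=np.int64)).dtype
#   dtype('float64')
#   >>> ref(np.array([3], dtype=np.int64)).dtype
#   dtype('float32')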

# Operator database (sorted alphabetically)
op_db = [
@@ -407,8 +435,10 @@ def sample_inputs(self, device, dtype, requires_grad=False):
dtypes=[torch.float], active_if=TEST_WITH_ROCM),
)),
UnaryUfuncInfo('sinh',
-ref=np.sinh,
-dtypesIfCPU=floating_and_complex_types(),
+ref=fast_growing_function_wrapper(np.sinh),
+dtypesIfCPU=all_types_and_complex_and(torch.bool),
+dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
+promotes_integers_to_float=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
@@ -417,6 +447,9 @@
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
+# Reference: https://github.com/pytorch/pytorch/issues/48641
+SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+         device_type='cpu', dtypes=[torch.int8]),
)),
UnaryUfuncInfo('tan',
ref=np.tan,