diff --git a/aten/src/ATen/Declarations.cwrap b/aten/src/ATen/Declarations.cwrap
index 7594f4eb7e8c9..6e3a3a8bd3dd4 100644
--- a/aten/src/ATen/Declarations.cwrap
+++ b/aten/src/ATen/Declarations.cwrap
@@ -1193,7 +1193,6 @@
   types:
     - floating_point
   backends:
-    - CPU
     - CUDA
   variants: function
   return: argument 0
@@ -1236,7 +1235,6 @@
   types:
     - floating_point
   backends:
-    - CPU
     - CUDA
   variants: function
   return: argument 0
diff --git a/aten/src/ATen/native/UnaryOps.cpp b/aten/src/ATen/native/UnaryOps.cpp
index 23cb36f8abb1d..caf603c6877d4 100644
--- a/aten/src/ATen/native/UnaryOps.cpp
+++ b/aten/src/ATen/native/UnaryOps.cpp
@@ -157,22 +157,6 @@ Tensor& _sigmoid_out_cpu(Tensor& result, const Tensor& self) {
     return result;                                               \
   }
 
-#define IMPLEMENT_UNARY_OP_TH(op)                                \
-  Tensor op(const Tensor& self) {                                \
-    Tensor result = at::empty({0}, self.options());              \
-    at::op##_out(result, self);                                  \
-    return result;                                               \
-  }                                                              \
-  Tensor& _##op##__cpu(Tensor& self) {                           \
-    return at::op##_out(self, self);                             \
-  }                                                              \
-  Tensor& _##op##_out_cpu(Tensor& result, const Tensor& self) {  \
-    checkBackend(#op, {result}, Backend::CPU);                   \
-    assert_no_internal_overlap(result, #op);                     \
-    result.resize_(self.sizes());                                \
-    return legacy::cpu::_th_##op##_out(result, self);            \
-  }
-
 // NB: Temp. defaulting to TH implementation of abs due to issues with Apple
 IMPLEMENT_UNARY_OP_VEC(abs)
@@ -181,7 +165,7 @@ IMPLEMENT_UNARY_OP_VEC(asin)
 IMPLEMENT_UNARY_OP_VEC(atan)
 IMPLEMENT_UNARY_OP_VEC(ceil)
 IMPLEMENT_UNARY_OP_VEC(cos)
-IMPLEMENT_UNARY_OP_TH(cosh)
+IMPLEMENT_UNARY_OP_VEC(cosh)
 IMPLEMENT_UNARY_OP_VEC(erf)
 IMPLEMENT_UNARY_OP_VEC(erfc)
 IMPLEMENT_UNARY_OP_VEC(exp)
@@ -197,7 +181,7 @@ IMPLEMENT_UNARY_OP_VEC(reciprocal)
 IMPLEMENT_UNARY_OP_VEC(round)
 IMPLEMENT_UNARY_OP_VEC(rsqrt)
 IMPLEMENT_UNARY_OP_VEC(sin)
-IMPLEMENT_UNARY_OP_TH(sinh)
+IMPLEMENT_UNARY_OP_VEC(sinh)
 IMPLEMENT_UNARY_OP_VEC(sqrt)
 IMPLEMENT_UNARY_OP_VEC(tan)
 IMPLEMENT_UNARY_OP_VEC(tanh)
@@ -209,6 +193,7 @@ DEFINE_DISPATCH(asin_stub);
 DEFINE_DISPATCH(atan_stub);
 DEFINE_DISPATCH(ceil_stub);
 DEFINE_DISPATCH(cos_stub);
+DEFINE_DISPATCH(cosh_stub);
 DEFINE_DISPATCH(erf_stub);
 DEFINE_DISPATCH(erfc_stub);
 DEFINE_DISPATCH(exp_stub);
@@ -225,6 +210,7 @@ DEFINE_DISPATCH(round_stub);
 DEFINE_DISPATCH(rsqrt_stub);
 DEFINE_DISPATCH(sigmoid_stub);
 DEFINE_DISPATCH(sin_stub);
+DEFINE_DISPATCH(sinh_stub);
 DEFINE_DISPATCH(sqrt_stub);
 DEFINE_DISPATCH(tan_stub);
 DEFINE_DISPATCH(tanh_stub);
diff --git a/aten/src/ATen/native/UnaryOps.h b/aten/src/ATen/native/UnaryOps.h
index a74d2b72763f0..bea35f1ce79d6 100644
--- a/aten/src/ATen/native/UnaryOps.h
+++ b/aten/src/ATen/native/UnaryOps.h
@@ -19,7 +19,7 @@ DECLARE_DISPATCH(unary_fn, asin_stub);
 DECLARE_DISPATCH(unary_fn, atan_stub);
 DECLARE_DISPATCH(unary_fn, ceil_stub);
 DECLARE_DISPATCH(unary_fn, cos_stub);
-// DECLARE_DISPATCH(unary_fn, cosh_stub);
+DECLARE_DISPATCH(unary_fn, cosh_stub);
 DECLARE_DISPATCH(unary_fn, erf_stub);
 DECLARE_DISPATCH(unary_fn, erfc_stub);
 DECLARE_DISPATCH(unary_fn, exp_stub);
@@ -36,7 +36,7 @@ DECLARE_DISPATCH(unary_fn, round_stub);
 DECLARE_DISPATCH(unary_fn, rsqrt_stub);
 DECLARE_DISPATCH(unary_fn, sigmoid_stub);
 DECLARE_DISPATCH(unary_fn, sin_stub);
-// DECLARE_DISPATCH(unary_fn, sinh_stub);
+DECLARE_DISPATCH(unary_fn, sinh_stub);
 DECLARE_DISPATCH(unary_fn, sqrt_stub);
 DECLARE_DISPATCH(unary_fn, tan_stub);
 DECLARE_DISPATCH(unary_fn, tanh_stub);
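With the stubs in UnaryOps.h uncommented and the `IMPLEMENT_UNARY_OP_VEC` entries above in place, a plain ATen call on a CPU tensor should now reach the native kernels added in the next file instead of the legacy TH path. A minimal sanity-check sketch for reviewers; `at::sinh`/`at::cosh` are the real ATen entry points, and building and linking against ATen is assumed:

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // CPU float tensor; sinh/cosh should now dispatch through
  // sinh_stub/cosh_stub to the native CPU kernels.
  at::Tensor x = at::randn({4});
  std::cout << at::sinh(x) << std::endl;
  std::cout << at::cosh(x) << std::endl;
  return 0;
}
```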
diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
index 3f9aecac5474e..5c5c443e733ab 100644
--- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
+++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
@@ -100,6 +100,22 @@ static void neg_kernel(TensorIterator& iter) {
   });
 }
 
+static void sinh_kernel(TensorIterator& iter) {
+  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "sinh_cpu", [&]() {
+    unary_kernel(
+        iter,
+        [=](scalar_t a) -> scalar_t { return std::sinh(a); });
+  });
+}
+
+static void cosh_kernel(TensorIterator& iter) {
+  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "cosh_cpu", [&]() {
+    unary_kernel(
+        iter,
+        [=](scalar_t a) -> scalar_t { return std::cosh(a); });
+  });
+}
+
 #if !AT_MKL_ENABLED()
 void bernoulli_mkl_kernel(Tensor &output, const double p, Generator* gen) {
   // Use AT_ASSERTM because this should never be reached, and AT_ASSERTM tells
@@ -212,6 +228,8 @@ REGISTER_DISPATCH(frac_stub, &frac_kernel);
 REGISTER_DISPATCH(reciprocal_stub, &reciprocal_kernel);
 REGISTER_DISPATCH(neg_stub, &neg_kernel);
 REGISTER_DISPATCH(fill_stub, &fill_kernel);
+REGISTER_DISPATCH(sinh_stub, &sinh_kernel);
+REGISTER_DISPATCH(cosh_stub, &cosh_kernel);
 
 // IMPLEMENT_FLOAT_KERNEL(ALL, abs)
 IMPLEMENT_FLOAT_KERNEL(FLOATING, acos)
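For context on the `DECLARE_DISPATCH`/`DEFINE_DISPATCH`/`REGISTER_DISPATCH` triple used above: the stub is effectively a per-op function-pointer slot that each backend's kernel file fills in at load time, so the generic `at::native` code can call the op without knowing which kernel was compiled in. Below is a simplified, self-contained model of that pattern, not ATen's actual implementation; `FakeIter`, `DispatchStub`, and the single `cpu_impl` slot are illustrative stand-ins (the real stub also selects among CPU vectorization capabilities and other device types):

```cpp
#include <cmath>
#include <cstdio>

// Hypothetical stand-in for ATen's TensorIterator: just a span of floats.
struct FakeIter {
  float* data;
  int n;
};

using unary_fn = void (*)(FakeIter&);

// Modeled DEFINE_DISPATCH: a callable slot holding the registered kernel.
struct DispatchStub {
  unary_fn cpu_impl = nullptr;
  void operator()(FakeIter& it) const { cpu_impl(it); }
};

DispatchStub sinh_stub;  // analogous to DEFINE_DISPATCH(sinh_stub)

// Kernel body, analogous to sinh_kernel in UnaryOpsKernel.cpp.
static void sinh_kernel(FakeIter& it) {
  for (int i = 0; i < it.n; ++i) {
    it.data[i] = std::sinh(it.data[i]);
  }
}

// Modeled REGISTER_DISPATCH: a static registrar installs the kernel
// into the stub before main() runs.
static struct Registrar {
  Registrar() { sinh_stub.cpu_impl = &sinh_kernel; }
} register_sinh;

int main() {
  float v[3] = {0.0f, 1.0f, -1.0f};
  FakeIter it{v, 3};
  sinh_stub(it);  // the call site dispatches through the registered kernel
  for (int i = 0; i < it.n; ++i) {
    std::printf("sinh -> %f\n", v[i]);
  }
  return 0;
}
```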