diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
index 32ebaf7752f7..6ed4b3af6f23 100644
--- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
+++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
@@ -336,7 +336,7 @@ static void cosh_kernel(TensorIterator& iter) {
 }
 
 static void acosh_kernel(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "acosh_cpu", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "acosh_cpu", [&]() {
       cpu_kernel(
         iter,
         [=](scalar_t a) -> scalar_t { return std::acosh(a); });
@@ -344,7 +344,7 @@ static void acosh_kernel(TensorIterator& iter) {
 }
 
 static void asinh_kernel(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "asinh_cpu", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "asinh_cpu", [&]() {
       cpu_kernel(
         iter,
         [=](scalar_t a) -> scalar_t { return std::asinh(a); });
@@ -352,7 +352,7 @@ static void asinh_kernel(TensorIterator& iter) {
 }
 
 static void atanh_kernel(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "atanh_cpu", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "atanh_cpu", [&]() {
       cpu_kernel(
         iter,
         [=](scalar_t a) -> scalar_t { return std::atanh(a); });
diff --git a/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu b/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
index 867855217092..bac3a05439d2 100644
--- a/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
+++ b/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
@@ -75,7 +75,7 @@ void tanh_kernel_cuda(TensorIterator& iter) {
 }
 
 void acosh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::acosh(a);
     });
@@ -83,7 +83,7 @@ void acosh_kernel_cuda(TensorIterator& iter) {
 }
 
 void asinh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::asinh(a);
     });
@@ -91,7 +91,7 @@ void asinh_kernel_cuda(TensorIterator& iter) {
 }
 
 void atanh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::atanh(a);
     });
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index c25c724559f6..08b6672d96fd 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -80,7 +80,7 @@
     'unbind', 'split', 'split_with_sizes', 'unsafe_split', 'split_with_sizes_backward',
     'dot', 'vdot', 'cholesky', 'triangular_solve', 'mm', '_unsafe_view', 'mv', 'ger',
     'bmm', 'diagonal', 'alias', 'atan', 'log', 'log10', 'log1p', 'log2', 'reciprocal',
-    'tan', 'pow', 'rsqrt', 'tanh', 'tanh_backward', 'asinh', 'acosh', 'take', 'fill_',
+    'tan', 'pow', 'rsqrt', 'tanh', 'tanh_backward', 'asinh', 'acosh', 'atanh', 'take', 'fill_',
     'exp', 'nonzero', 'mean', 'inverse', 'solve', 'linalg_cholesky', 'addcmul', 'addcdiv',
     'matrix_exp', 'linalg_eigh', 'cholesky_solve', 'linalg_qr', '_svd_helper', '_fft_c2c', '_fft_r2c',
     'linalg_solve', 'sqrt', 'stack', 'gather', 'index_select', 'index_add_', 'linalg_inv',
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 6c983abffba0..52a2c75da97e 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -773,9 +773,9 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
     UnaryUfuncInfo('acosh',
                    ref=np.arccosh,
                    domain=(1, float('inf')),
-                   dtypes=all_types_and(torch.bool),
-                   dtypesIfCPU=all_types_and(torch.bool),
-                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypes=all_types_and_complex_and(torch.bool),
+                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                    test_inplace_grad=False,
@@ -783,6 +783,11 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
                        # RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
                        SkipInfo('TestCommon', 'test_variant_consistency_jit',
                                 device_type='cuda', dtypes=[torch.bfloat16]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+                                active_if=IS_WINDOWS),
                    )),
     OpInfo('addmm',
            dtypes=floating_types(),
@@ -827,9 +832,9 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
     # NOTE: derivative for inplace asinh is not implemented
     UnaryUfuncInfo('asinh',
                    ref=np.arcsinh,
-                   dtypes=all_types_and(torch.bool),
-                   dtypesIfCPU=all_types_and(torch.bool),
-                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypes=all_types_and_complex_and(torch.bool),
+                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                    test_inplace_grad=False,
@@ -837,6 +842,11 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
                        # RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
                        SkipInfo('TestCommon', 'test_variant_consistency_jit',
                                 device_type='cuda', dtypes=[torch.bfloat16]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+                                active_if=IS_WINDOWS),
                    )),
     UnaryUfuncInfo('atan',
                    ref=np.arctan,
@@ -857,12 +867,19 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
     UnaryUfuncInfo('atanh',
                    ref=np.arctanh,
                    domain=(-1, 1),
-                   dtypes=all_types_and(torch.bool),
-                   dtypesIfCPU=all_types_and(torch.bool),
-                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypes=all_types_and_complex_and(torch.bool),
+                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   test_inplace_grad=False),
+                   test_inplace_grad=False,
+                   skips=(
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+                                active_if=IS_WINDOWS),
+                   )),
     OpInfo('broadcast_to',
            dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
            supports_tensor_out=False,
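
Not part of the diff: a minimal usage sketch of what the dispatch and OpInfo changes above enable, assuming a build that carries them. The tensor values and tolerance are illustrative only; the comparison mirrors the NumPy references (np.arccosh, np.arcsinh, np.arctanh) that the OpInfo entries use.

```python
# Illustrative sketch only, not part of the patch. Assumes a PyTorch build that
# includes the dispatch changes above, which enable complex dtypes for the
# inverse hyperbolic ops on CPU and CUDA.
import numpy as np
import torch

x = torch.tensor([0.5 + 0.5j, 1.5 - 0.25j], dtype=torch.cfloat)

# Compare against the NumPy reference functions for a couple of ordinary
# (non-extremal) complex inputs.
for torch_fn, np_fn in ((torch.acosh, np.arccosh),
                        (torch.asinh, np.arcsinh),
                        (torch.atanh, np.arctanh)):
    result = torch_fn(x)
    assert np.allclose(result.numpy(), np_fn(x.numpy()), atol=1e-6)
```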