From 98e01b6645fefe40cb398992b5807891dcf58a71 Mon Sep 17 00:00:00 2001
From: anjali411
Date: Mon, 11 Jan 2021 11:17:31 -0800
Subject: [PATCH 1/4] Add complex support for `torch.{cosh, sinh, tanh}`

[ghstack-poisoned]
---
 aten/src/ATen/native/cpu/UnaryOpsKernel.cpp        |  6 +++---
 test/test_torch.py                                 |  6 +++---
 tools/autograd/gen_variable_type.py                |  2 +-
 .../_internal/common_methods_invocations.py        | 18 +++++++++---------
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
index 32ebaf7752f7..6ed4b3af6f23 100644
--- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
+++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
@@ -336,7 +336,7 @@ static void cosh_kernel(TensorIterator& iter) {
 }
 
 static void acosh_kernel(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "acosh_cpu", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "acosh_cpu", [&]() {
     cpu_kernel(
       iter,
       [=](scalar_t a) -> scalar_t { return std::acosh(a); });
@@ -344,7 +344,7 @@ static void acosh_kernel(TensorIterator& iter) {
 }
 
 static void asinh_kernel(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "asinh_cpu", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "asinh_cpu", [&]() {
     cpu_kernel(
       iter,
       [=](scalar_t a) -> scalar_t { return std::asinh(a); });
@@ -352,7 +352,7 @@ static void asinh_kernel(TensorIterator& iter) {
 }
 
 static void atanh_kernel(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "atanh_cpu", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "atanh_cpu", [&]() {
     cpu_kernel(
       iter,
       [=](scalar_t a) -> scalar_t { return std::atanh(a); });
diff --git a/test/test_torch.py b/test/test_torch.py
index 874a8a6ac9f6..3ac66c5d7c08 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -6904,9 +6904,9 @@ def inner(self, device, dtype):
      torch.testing.get_all_fp_dtypes() + _complex_types, [torch.bfloat16]),
     ('asin', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
     ('atan', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
-    ('acosh', '', lambda t, d: _small_3d(t, d) + 1, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes()),
-    ('asinh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes()),
-    ('atanh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes()),
+    ('acosh', '', lambda t, d: _small_3d(t, d) + 1, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
+    ('asinh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
+    ('atanh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
     ('erf', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes(), [torch.bfloat16]),
     ('erfc', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
     ('rad2deg', '', _small_3d, lambda t, d: [], 1e-1, 1e-0, 1e-5, torch.testing.get_all_fp_dtypes(), [torch.bfloat16]),
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 19555caa537c..7db4abf116cf 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -80,7 +80,7 @@
     'unbind', 'split', 'split_with_sizes', 'unsafe_split', 'split_with_sizes_backward',
     'dot', 'vdot', 'cholesky', 'triangular_solve', 'mm', '_unsafe_view', 'mv', 'ger', 'bmm', 'diagonal',
     'alias', 'atan', 'log', 'log10', 'log1p', 'log2', 'reciprocal',
-    'tan', 'pow', 'rsqrt', 'tanh', 'tanh_backward', 'asinh', 'acosh', 'take', 'fill_',
+    'tan', 'pow', 'rsqrt', 'tanh', 'tanh_backward', 'asinh', 'acosh', 'atanh', 'take', 'fill_',
     'exp', 'nonzero', 'mean', 'inverse', 'solve', 'linalg_cholesky', 'addcmul', 'addcdiv',
     'matrix_exp', 'linalg_eigh', 'cholesky_solve', 'linalg_qr', '_svd_helper', '_fft_c2c', '_fft_r2c',
     'linalg_solve', 'sqrt', 'stack', 'gather', 'index_select', 'index_add_', 'linalg_inv',
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index cbe90a61237d..4196d7a6af91 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -724,9 +724,9 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
     UnaryUfuncInfo('acosh',
                    ref=np.arccosh,
                    domain=(1, float('inf')),
-                   dtypes=all_types_and(torch.bool),
-                   dtypesIfCPU=all_types_and(torch.bool),
-                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypes=all_types_and_complex_and(torch.bool),
+                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                    test_inplace_grad=False,
@@ -777,9 +777,9 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
     # NOTE: derivative for inplace asinh is not implemented
     UnaryUfuncInfo('asinh',
                    ref=np.arcsinh,
-                   dtypes=all_types_and(torch.bool),
-                   dtypesIfCPU=all_types_and(torch.bool),
-                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypes=all_types_and_complex_and(torch.bool),
+                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                    test_inplace_grad=False,
@@ -807,9 +807,9 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
     UnaryUfuncInfo('atanh',
                    ref=np.arctanh,
                    domain=(-1, 1),
-                   dtypes=all_types_and(torch.bool),
-                   dtypesIfCPU=all_types_and(torch.bool),
-                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   dtypes=all_types_and_complex_and(torch.bool),
+                   dtypesIfCPU=all_types_and_complex_and(torch.bool),
+                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                    test_inplace_grad=False),
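Patch 1 wires complex dtypes through the CPU kernels, adds 'atanh' to the complex-autograd whitelist, and widens the OpInfo dtype lists. A minimal sketch of what this enables, assuming a PyTorch build with the patch applied; np.arccosh, np.arcsinh, and np.arctanh are the same references the UnaryUfuncInfo entries above declare:

import numpy as np
import torch

# Complex inputs now reach the std::acosh/asinh/atanh overloads via
# AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES instead of raising a dtype error.
z = torch.tensor([2.0 + 1.0j, 0.5 - 0.3j], dtype=torch.cdouble)
for torch_fn, np_fn in [(torch.acosh, np.arccosh),
                        (torch.asinh, np.arcsinh),
                        (torch.atanh, np.arctanh)]:
    np.testing.assert_allclose(torch_fn(z).numpy(), np_fn(z.numpy()),
                               rtol=1e-5, atol=1e-5)
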
From 53ee41d43ebfb6bfb653829a2ead8ef60432fb48 Mon Sep 17 00:00:00 2001
From: anjali411
Date: Mon, 11 Jan 2021 11:25:04 -0800
Subject: [PATCH 2/4] Update on "Add complex support for `torch.{cosh, sinh, tanh}`"

[ghstack-poisoned]
---
 test/test_torch.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/test_torch.py b/test/test_torch.py
index 3ac66c5d7c08..f362de10521e 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -6904,7 +6904,8 @@ def inner(self, device, dtype):
      torch.testing.get_all_fp_dtypes() + _complex_types, [torch.bfloat16]),
     ('asin', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
     ('atan', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
-    ('acosh', '', lambda t, d: _small_3d(t, d) + 1, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
+    ('acosh', '', lambda t, d: _small_3d(t, d) + 1, lambda t, d: [], 1e-3, 1e-2, 1e-5,
+     torch.testing.get_all_fp_dtypes() + _complex_types),
     ('asinh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
     ('atanh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
     ('erf', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes(), [torch.bfloat16]),
From c905cf02e87e14e860da8cd303483b9022f6bb85 Mon Sep 17 00:00:00 2001
From: anjali411
Date: Fri, 15 Jan 2021 13:40:01 -0800
Subject: [PATCH 3/4] Update on "Add complex support for `torch.{acosh, asinh, atanh}`"

[ghstack-poisoned]
---
 .../ATen/native/cuda/UnaryGeometricKernels.cu |  6 +++---
 test/test_torch.py                            |  4 ----
 .../_internal/common_methods_invocations.py   | 19 ++++++++++++++++++-
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu b/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
index 867855217092..bac3a05439d2 100644
--- a/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
+++ b/aten/src/ATen/native/cuda/UnaryGeometricKernels.cu
@@ -75,7 +75,7 @@ void tanh_kernel_cuda(TensorIterator& iter) {
 }
 
 void acosh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::acosh(a);
     });
@@ -83,7 +83,7 @@ void acosh_kernel_cuda(TensorIterator& iter) {
 }
 
 void asinh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::asinh(a);
     });
@@ -91,7 +91,7 @@ void asinh_kernel_cuda(TensorIterator& iter) {
 }
 
 void atanh_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::atanh(a);
    });
diff --git a/test/test_torch.py b/test/test_torch.py
index f362de10521e..446fd135b75c 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -6904,10 +6904,6 @@ def inner(self, device, dtype):
      torch.testing.get_all_fp_dtypes() + _complex_types, [torch.bfloat16]),
     ('asin', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
     ('atan', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
-    ('acosh', '', lambda t, d: _small_3d(t, d) + 1, lambda t, d: [], 1e-3, 1e-2, 1e-5,
-     torch.testing.get_all_fp_dtypes() + _complex_types),
-    ('asinh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
-    ('atanh', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes() + _complex_types),
     ('erf', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, torch.testing.get_all_fp_dtypes(), [torch.bfloat16]),
     ('erfc', '', _small_3d, lambda t, d: [], 1e-3, 1e-2, 1e-5, _float_types, [torch.bfloat16]),
     ('rad2deg', '', _small_3d, lambda t, d: [], 1e-1, 1e-0, 1e-5, torch.testing.get_all_fp_dtypes(), [torch.bfloat16]),
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 4196d7a6af91..c2ab3ff3612f 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -734,6 +734,11 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
                        # RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
                        SkipInfo('TestCommon', 'test_variant_consistency_jit',
                                 device_type='cuda', dtypes=[torch.bfloat16]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+                                active_if=IS_WINDOWS),
                    )),
     OpInfo('addmm',
            dtypes=floating_types(),
@@ -787,6 +792,11 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
                        # RuntimeError: "rsqrt_cuda" not implemented for 'BFloat16'
                        SkipInfo('TestCommon', 'test_variant_consistency_jit',
                                 device_type='cuda', dtypes=[torch.bfloat16]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+                                active_if=IS_WINDOWS),
                    )),
     UnaryUfuncInfo('atan',
                    ref=np.arctan,
@@ -812,7 +822,14 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
                    dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                    promotes_integers_to_float=True,
                    decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
-                   test_inplace_grad=False),
+                   test_inplace_grad=False,
+                   skips=(
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
+                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+                                active_if=IS_WINDOWS),
+                   )),
     OpInfo('broadcast_to',
            dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
            supports_tensor_out=False,
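Patch 3 mirrors the CPU change in the CUDA kernels and skips the complex test_reference_numerics comparisons on CPU, and on CUDA under Windows. A rough consistency check one could run by hand, sketched under the assumption of a CUDA-enabled build with this series applied:

import torch

# The CUDA dispatch now covers complex dtypes, so CPU and GPU results for
# the same complex input can be compared directly.
if torch.cuda.is_available():
    z = 0.5 * torch.complex(torch.randn(8), torch.randn(8))  # cfloat input
    for fn in (torch.acosh, torch.asinh, torch.atanh):
        assert torch.allclose(fn(z), fn(z.cuda()).cpu(), rtol=1e-4, atol=1e-4)
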
From 46f516a910a69f7175bedde7b9abcf7993262121 Mon Sep 17 00:00:00 2001
From: anjali411
Date: Mon, 18 Jan 2021 10:12:50 -0800
Subject: [PATCH 4/4] Update on "Add complex support for `torch.{acosh, asinh, atanh}`"

[ghstack-poisoned]
---
 torch/testing/_internal/common_methods_invocations.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 52a2c75da97e..619a88c61cec 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -788,6 +788,11 @@ def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad):
                        SkipInfo('TestUnaryUfuncs', 'test_reference_numerics',
                                 device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
                                 active_if=IS_WINDOWS),
+                       # Reference: https://github.com/pytorch/pytorch/issues/50692
+                       SkipInfo('TestGradients', 'test_fn_grad',
+                                device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+                       SkipInfo('TestGradients', 'test_method_grad',
+                                device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
                    )),
     OpInfo('addmm',
            dtypes=floating_types(),
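Patch 4 only disables the complex-double gradient tests on Windows CUDA builds, pointing at pytorch/pytorch#50692; TestGradients still runs everywhere else. What those entries exercise boils down to torch.autograd.gradcheck on a complex input, which works here because patch 1 also whitelists 'atanh' for complex autograd in gen_variable_type.py. A minimal sketch, assuming a build with this series applied; the 0.5 scaling is just an illustrative way to keep the sample away from atanh's singularities at +1 and -1:

import torch
from torch.autograd import gradcheck

# gradcheck compares the analytical backward of atanh against numerical
# derivatives; double precision (cdouble) keeps the finite differences stable.
x = (0.5 * torch.complex(torch.randn(4, dtype=torch.double),
                         torch.randn(4, dtype=torch.double))).requires_grad_()
assert gradcheck(torch.atanh, (x,), eps=1e-6, atol=1e-4)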