diff --git a/aten/src/ATen/native/ReduceAllOps.cpp b/aten/src/ATen/native/ReduceAllOps.cpp
index 34a4b58cbce0a..2ac14a76fbc66 100644
--- a/aten/src/ATen/native/ReduceAllOps.cpp
+++ b/aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,6 +8,7 @@
 #include
 #include
 #else
+#include
 #include
 #include
 #include
@@ -65,4 +66,11 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
+// DEPRECATED: Use at::aminmax instead
+std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
+  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
+      " This warning will only appear once per process.");
+  return at::aminmax(self);
+}
+
 } // namespace at::native
diff --git a/aten/src/ATen/native/TensorCompare.cpp b/aten/src/ATen/native/TensorCompare.cpp
index cbb79dfabc7eb..974ad302ca0c8 100644
--- a/aten/src/ATen/native/TensorCompare.cpp
+++ b/aten/src/ATen/native/TensorCompare.cpp
@@ -20,6 +20,7 @@
 #include
 #include
 #else
+#include
 #include
 #include
 #include
@@ -681,6 +682,13 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
+// DEPRECATED: Use at::aminmax instead
+std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
+  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
+      " This warning will only appear once per process.");
+  return at::aminmax(self, dim, keepdim);
+}
+
 TORCH_IMPL_FUNC(clamp_out)
 (
  const Tensor& /*self*/,
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 119c0b8572301..00c131fd9f326 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3762,6 +3762,18 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
+# DEPRECATED: Use torch.aminmax instead
+- func: _aminmax(Tensor self) -> (Tensor, Tensor)
+  dispatch:
+    CPU, CUDA: _aminmax_all
+  autogen: _aminmax.out
+
+# DEPRECATED: Use torch.aminmax instead
+- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+  dispatch:
+    CPU, CUDA: _aminmax
+  autogen: _aminmax.dim_out
+
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
diff --git a/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu b/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
index c28f095bb9074..d75a10c0db897 100644
--- a/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
+++ b/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include
 #else
-#include
+#include
 #include
 #include
 #include
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::aminmax(x, 1);
+    std::tie(x_min, x_max) = at::_aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@ void _calculate_moving_average(
         size);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::aminmax(x);
+    std::tie(x_min, x_max) = at::_aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
diff --git a/test/allowlist_for_publicAPI.json b/test/allowlist_for_publicAPI.json
index 8dc5807b9a940..ac77325188ee0 100644
--- a/test/allowlist_for_publicAPI.json
+++ b/test/allowlist_for_publicAPI.json
@@ -1109,6 +1109,7 @@
     "_add_relu",
     "_add_relu_",
     "_addmm_activation",
+    "_aminmax",
     "_amp_foreach_non_finite_check_and_unscale_",
     "_amp_update_scale_",
     "_assert_async",
@@ -1318,7 +1319,6 @@
     "_values_copy",
     "_weight_norm",
     "_weight_norm_interface",
-    "aminmax",
     "autocast",
     "broadcast_shapes",
     "candidate",
diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect
index c28bfb8832ae5..669c3d91e849d 100644
--- a/test/expect/HasDecompTest.test_has_decomposition.expect
+++ b/test/expect/HasDecompTest.test_has_decomposition.expect
@@ -18,6 +18,10 @@ aten::_add_relu.Tensor
 aten::_add_relu.out
 aten::_add_relu_.Scalar
 aten::_add_relu_.Tensor
+aten::_aminmax
+aten::_aminmax.dim
+aten::_aminmax.dim_out
+aten::_aminmax.out
 aten::_amp_foreach_non_finite_check_and_unscale
 aten::_amp_foreach_non_finite_check_and_unscale.out
 aten::_amp_foreach_non_finite_check_and_unscale_
diff --git a/test/forward_backward_compatibility/check_forward_backward_compatibility.py b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
index c458110c859c5..285e410a79edc 100644
--- a/test/forward_backward_compatibility/check_forward_backward_compatibility.py
+++ b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
@@ -9,9 +9,6 @@
 
 from torch._C import parse_schema
 
-# Run by backwards_compat CI job
-
-
 # How to run this test locally:
 # 1 Have two virtual environments (eg conda env), one without PyTorch installed (venv_nightly)
 #   one with your local changes (venv_yours).
@@ -143,10 +140,6 @@
     ("onednn::qconv2d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv3d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv2d_pointwise.binary", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax.out", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax.dim_out", datetime.date(2024, 12, 31)),
 ]
 
 ALLOW_LIST_COMPILED = [
diff --git a/test/mobile/model_test/coverage.yaml b/test/mobile/model_test/coverage.yaml
index 679fd7f85c4c2..5433fea4df102 100644
--- a/test/mobile/model_test/coverage.yaml
+++ b/test/mobile/model_test/coverage.yaml
@@ -1048,6 +1048,7 @@ uncovered_ops:
   aten::__is__: 83
   aten::__isnot__: 81
   aten::__not__: 32
+  aten::_aminmax: 4
   aten::_convolution: 12
   aten::_convolution.deprecated: 3
   aten::_make_per_tensor_quantized_tensor: 2
diff --git a/test/mobile/model_test/model_ops.yaml b/test/mobile/model_test/model_ops.yaml
index e62f5fd35117c..43e4876451e38 100644
--- a/test/mobile/model_test/model_ops.yaml
+++ b/test/mobile/model_test/model_ops.yaml
@@ -30,6 +30,7 @@ root_operators:
   aten::__range_length: 106
   aten::__rshift__.int: 2
   aten::__xor__.bool: 16
+  aten::_aminmax: 18
   aten::_convolution: 27
   aten::_convolution.deprecated: 3
   aten::_infer_size: 9
diff --git a/test/test_reductions.py b/test/test_reductions.py
index 90f192199d2da..d1f72b49694f0 100644
--- a/test/test_reductions.py
+++ b/test/test_reductions.py
@@ -1220,10 +1220,18 @@ def test_amax(self, device, dtype):
     def test_aminmax(self, device, dtype):
 
         def _amin_wrapper(x, dim=None, keepdims=False):
-            return torch.aminmax(x, dim=dim, keepdim=keepdims)[0]
+            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
+                if dim is None:
+                    return torch._aminmax(x)[0]
+                else:
+                    return torch._aminmax(x, dim, keepdims)[0]
 
         def _amax_wrapper(x, dim=None, keepdims=False):
-            return torch.aminmax(x, dim=dim, keepdim=keepdims)[1]
+            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
+                if dim is None:
+                    return torch._aminmax(x)[1]
+                else:
+                    return torch._aminmax(x, dim, keepdims)[1]
 
         self._test_minmax_helper(_amin_wrapper, np.amin, device, dtype)
         self._test_minmax_helper(_amax_wrapper, np.amax, device, dtype)
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index 6441ac3b0e84b..8a2c12ee4e84e 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -373,6 +373,7 @@
         "torch._add_relu_",
         "torch._add_relu",
         "torch._addmm_activation",
+        "torch._aminmax",
         "torch._amp_foreach_non_finite_check_and_unscale_",
         "torch._amp_update_scale_",
         "torch._assert_async",