diff --git a/aten/src/ATen/native/ReduceAllOps.cpp b/aten/src/ATen/native/ReduceAllOps.cpp
index 2ac14a76fbc66..34a4b58cbce0a 100644
--- a/aten/src/ATen/native/ReduceAllOps.cpp
+++ b/aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,7 +8,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/max.h>
 #include <ATen/ops/max_native.h>
@@ -66,11 +65,4 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self);
-}
-
 } // namespace at::native
diff --git a/aten/src/ATen/native/TensorCompare.cpp b/aten/src/ATen/native/TensorCompare.cpp
index 04d8e8cbf8313..e9599b4898fcd 100644
--- a/aten/src/ATen/native/TensorCompare.cpp
+++ b/aten/src/ATen/native/TensorCompare.cpp
@@ -20,7 +20,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_make_per_tensor_quantized_tensor.h>
@@ -682,13 +681,6 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self, dim, keepdim);
-}
-
 TORCH_IMPL_FUNC(clamp_out)
 (
  const Tensor& /*self*/,
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 8c84694c0ec37..1ea973f93261b 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3762,18 +3762,6 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax_all
-  autogen: _aminmax.out
-
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax
-  autogen: _aminmax.dim_out
-
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck   # TensorIterator
   structured_delegate: aminmax.out
diff --git a/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu b/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
index d75a10c0db897..c28f095bb9074 100644
--- a/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
+++ b/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
 #else
-#include <ATen/ops/_aminmax.h>
+#include <ATen/ops/aminmax.h>
 #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
 #include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
 #include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::_aminmax(x, 1);
+    std::tie(x_min, x_max) = at::aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@ void _calculate_moving_average(
         size);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::_aminmax(x);
+    std::tie(x_min, x_max) = at::aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
diff --git a/test/allowlist_for_publicAPI.json b/test/allowlist_for_publicAPI.json
index 9bc60578ea7a2..52f623ba87d6b 100644
--- a/test/allowlist_for_publicAPI.json
+++ b/test/allowlist_for_publicAPI.json
@@ -1104,7 +1104,6 @@
     "_add_relu",
     "_add_relu_",
     "_addmm_activation",
-    "_aminmax",
     "_amp_foreach_non_finite_check_and_unscale_",
     "_amp_update_scale_",
     "_assert_async",
@@ -1314,6 +1313,7 @@
     "_values_copy",
     "_weight_norm",
     "_weight_norm_interface",
+    "aminmax",
     "autocast",
     "broadcast_shapes",
     "candidate",
diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect
index 764379473b012..5540ee6259551 100644
--- a/test/expect/HasDecompTest.test_has_decomposition.expect
+++ b/test/expect/HasDecompTest.test_has_decomposition.expect
@@ -18,10 +18,6 @@ aten::_add_relu.Tensor
 aten::_add_relu.out
 aten::_add_relu_.Scalar
 aten::_add_relu_.Tensor
-aten::_aminmax
-aten::_aminmax.dim
-aten::_aminmax.dim_out
-aten::_aminmax.out
 aten::_amp_foreach_non_finite_check_and_unscale
 aten::_amp_foreach_non_finite_check_and_unscale.out
 aten::_amp_foreach_non_finite_check_and_unscale_
diff --git a/test/forward_backward_compatibility/check_forward_backward_compatibility.py b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
index 285e410a79edc..c458110c859c5 100644
--- a/test/forward_backward_compatibility/check_forward_backward_compatibility.py
+++ b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
@@ -9,6 +9,9 @@
 from torch._C import parse_schema
 
 
+# Run by backwards_compat CI job
+
+
 # How to run this test locally:
 # 1 Have two virtual environments (eg conda env), one without PyTorch installed (venv_nightly)
 #   one with your local changes (venv_yours).
@@ -140,6 +143,10 @@
     ("onednn::qconv2d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv3d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv2d_pointwise.binary", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.out", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim_out", datetime.date(2024, 12, 31)),
 ]
 
 ALLOW_LIST_COMPILED = [
diff --git a/test/mobile/model_test/coverage.yaml b/test/mobile/model_test/coverage.yaml
index 5433fea4df102..679fd7f85c4c2 100644
--- a/test/mobile/model_test/coverage.yaml
+++ b/test/mobile/model_test/coverage.yaml
@@ -1048,7 +1048,6 @@ uncovered_ops:
   aten::__is__: 83
   aten::__isnot__: 81
   aten::__not__: 32
-  aten::_aminmax: 4
   aten::_convolution: 12
   aten::_convolution.deprecated: 3
   aten::_make_per_tensor_quantized_tensor: 2
diff --git a/test/mobile/model_test/model_ops.yaml b/test/mobile/model_test/model_ops.yaml
index 43e4876451e38..e62f5fd35117c 100644
--- a/test/mobile/model_test/model_ops.yaml
+++ b/test/mobile/model_test/model_ops.yaml
@@ -30,7 +30,6 @@ root_operators:
   aten::__range_length: 106
   aten::__rshift__.int: 2
   aten::__xor__.bool: 16
-  aten::_aminmax: 18
   aten::_convolution: 27
   aten::_convolution.deprecated: 3
   aten::_infer_size: 9
diff --git a/test/test_reductions.py b/test/test_reductions.py
index bb43d1ae3a2c4..acce789586733 100644
--- a/test/test_reductions.py
+++ b/test/test_reductions.py
@@ -1219,18 +1219,10 @@ def test_amax(self, device, dtype):
     def test_aminmax(self, device, dtype):
 
         def _amin_wrapper(x, dim=None, keepdims=False):
-            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
-                if dim is None:
-                    return torch._aminmax(x)[0]
-                else:
-                    return torch._aminmax(x, dim, keepdims)[0]
+            return torch.aminmax(x, dim=dim, keepdim=keepdims)[0]
 
         def _amax_wrapper(x, dim=None, keepdims=False):
-            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
-                if dim is None:
-                    return torch._aminmax(x)[1]
-                else:
-                    return torch._aminmax(x, dim, keepdims)[1]
+            return torch.aminmax(x, dim=dim, keepdim=keepdims)[1]
 
         self._test_minmax_helper(_amin_wrapper, np.amin, device, dtype)
         self._test_minmax_helper(_amax_wrapper, np.amax, device, dtype)
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index 8a2c12ee4e84e..6441ac3b0e84b 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -373,7 +373,6 @@
         "torch._add_relu_",
         "torch._add_relu",
         "torch._addmm_activation",
-        "torch._aminmax",
         "torch._amp_foreach_non_finite_check_and_unscale_",
         "torch._amp_update_scale_",
         "torch._assert_async",
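
Migration note (not part of the diff above): callers of the removed private op can
switch to the public torch.aminmax, which the deprecation warning has pointed to
since PyTorch 1.11. A minimal sketch of the replacement, assuming a float tensor x:

    import torch

    x = torch.tensor([[1.0, 5.0], [-2.0, 4.0]])

    # Full reduction: what torch._aminmax(x) used to return.
    mn, mx = torch.aminmax(x)

    # Per-dimension reduction: what torch._aminmax(x, dim, keepdim) used to
    # return. dim and keepdim are keyword-only in the public API (see the
    # native_functions.yaml schema above: "*, int? dim=None, bool keepdim=False").
    mn_row, mx_row = torch.aminmax(x, dim=1, keepdim=False)

Both calls return a (min, max) named tuple, so the tuple unpacking and [0]/[1]
indexing used with the old op keep working unchanged.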