diff --git a/aten/src/ATen/native/ReduceAllOps.cpp b/aten/src/ATen/native/ReduceAllOps.cpp
index 2ac14a76fbc66..34a4b58cbce0a 100644
--- a/aten/src/ATen/native/ReduceAllOps.cpp
+++ b/aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,7 +8,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/max_native.h>
 #include <ATen/ops/min_native.h>
@@ -66,11 +65,4 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self);
-}
-
 } // namespace at::native
diff --git a/aten/src/ATen/native/TensorCompare.cpp b/aten/src/ATen/native/TensorCompare.cpp
index 04d8e8cbf8313..e9599b4898fcd 100644
--- a/aten/src/ATen/native/TensorCompare.cpp
+++ b/aten/src/ATen/native/TensorCompare.cpp
@@ -20,7 +20,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_make_per_tensor_quantized_tensor.h>
@@ -682,13 +681,6 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self, dim, keepdim);
-}
-
 TORCH_IMPL_FUNC(clamp_out) (
  const Tensor& /*self*/,
  const OptionalScalarRef min,
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 8c84694c0ec37..1ea973f93261b 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3762,18 +3762,6 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax_all
-  autogen: _aminmax.out
-
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax
-  autogen: _aminmax.dim_out
-
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
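For reference, the two removed overloads map one-to-one onto the public `torch.aminmax` op declared just above; note that in the public signature `dim` and `keepdim` are keyword-only. A minimal migration sketch, assuming a float tensor `x`:

```python
import torch

x = torch.randn(4, 8)

# Replaces torch._aminmax(x): reduce over the whole tensor.
mn, mx = torch.aminmax(x)

# Replaces torch._aminmax(x, 1, False): dim/keepdim become keyword-only.
mn_rows, mx_rows = torch.aminmax(x, dim=1, keepdim=False)
```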
diff --git a/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu b/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
index d75a10c0db897..c28f095bb9074 100644
--- a/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
+++ b/aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
 #else
-#include <ATen/ops/_aminmax.h>
+#include <ATen/ops/aminmax.h>
 #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
 #include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
 #include <ATen/ops/ones.h>
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::_aminmax(x, 1);
+    std::tie(x_min, x_max) = at::aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@ void _calculate_moving_average(
         size);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::_aminmax(x);
+    std::tie(x_min, x_max) = at::aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index 8a2c12ee4e84e..6441ac3b0e84b 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -373,7 +373,6 @@
         "torch._add_relu_",
         "torch._add_relu",
         "torch._addmm_activation",
-        "torch._aminmax",
         "torch._amp_foreach_non_finite_check_and_unscale_",
         "torch._amp_update_scale_",
         "torch._assert_async",
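The kernel-side change in `_calculate_moving_average` is mechanical: the per-row branch reduces along `dim=1`, the other branch reduces the whole tensor. A rough Python sketch of the moving-average min/max step this function performs on the GPU (the helper name and the `averaging_const` default are illustrative, not the actual API):

```python
import torch

def moving_average_minmax(x, running_min, running_max,
                          averaging_const=0.01, per_row_fq=False):
    # Per-row reduction (dim=1) for per-channel fake-quant;
    # whole-tensor reduction otherwise, matching the two branches above.
    if per_row_fq:
        x_min, x_max = torch.aminmax(x, dim=1)
    else:
        x_min, x_max = torch.aminmax(x)
    # Exponential moving average of the observed range.
    running_min += averaging_const * (x_min - running_min)
    running_max += averaging_const * (x_max - running_max)
    return running_min, running_max
```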