Remove deprecated _aminmax operator (#125995)
It has been deprecated for a long time.

Co-authored-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: #125995
Approved by: https://github.com/ezyang
cyyever authored and pytorchmergebot committed May 12, 2024
1 parent 037615b commit 0116ffa
Showing 11 changed files with 13 additions and 49 deletions.
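For code that still calls the private operator, both removed overloads map one-to-one onto the public torch.aminmax API. A minimal migration sketch (illustrative, not part of the patch):

    import torch

    x = torch.arange(12, dtype=torch.float32).reshape(3, 4)

    # Before (removed): mn, mx = torch._aminmax(x)
    mn, mx = torch.aminmax(x)                       # full-tensor reduction

    # Before (removed): mn, mx = torch._aminmax(x, 1, True)
    mn, mx = torch.aminmax(x, dim=1, keepdim=True)  # per-dim reduction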
8 changes: 0 additions & 8 deletions aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,7 +8,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/empty.h>
 #include <ATen/ops/max.h>
@@ -66,11 +65,4 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-    " This warning will only appear once per process.");
-  return at::aminmax(self);
-}
-
 } // namespace at::native
8 changes: 0 additions & 8 deletions aten/src/ATen/native/TensorCompare.cpp
@@ -20,7 +20,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_print_native.h>
@@ -682,13 +681,6 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-    " This warning will only appear once per process.");
-  return at::aminmax(self, dim, keepdim);
-}
-
 TORCH_IMPL_FUNC(clamp_out)
 (
   const Tensor& /*self*/,
12 changes: 0 additions & 12 deletions aten/src/ATen/native/native_functions.yaml
@@ -3762,18 +3762,6 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax_all
-  autogen: _aminmax.out
-
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax
-  autogen: _aminmax.dim_out
-
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
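The surviving aminmax schema delegates to the structured aminmax.out kernel, so an out variant remains available on the public API. A small usage sketch of that out= form (illustrative, assuming float32 inputs; not from the patch):

    import torch

    x = torch.randn(3, 4)
    mn = torch.empty(3)
    mx = torch.empty(3)
    # Writes results into preallocated tensors via the out overload.
    torch.aminmax(x, dim=1, out=(mn, mx))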
6 changes: 3 additions & 3 deletions aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
 #else
-#include <ATen/ops/_aminmax.h>
+#include <ATen/ops/aminmax.h>
 #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
 #include <ATen/ops/fake_quantize_per_channel_affine.h>
 #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::_aminmax(x, 1);
+    std::tie(x_min, x_max) = at::aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@
       size);
   C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::_aminmax(x);
+    std::tie(x_min, x_max) = at::aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
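For context, _calculate_moving_average folds these min/max values into running observer statistics. A rough Python sketch of the usual moving-average update rule (an assumption about the kernel's behavior, not code from this diff):

    import torch

    def moving_average_minmax(running_min, running_max, x, averaging_const=0.01):
        # Per-batch statistics; the per-row branch in the kernel would use
        # torch.aminmax(x, dim=1) instead of the full-tensor form.
        cur_min, cur_max = torch.aminmax(x)
        # Exponential moving average toward the newest observations.
        running_min = running_min + averaging_const * (cur_min - running_min)
        running_max = running_max + averaging_const * (cur_max - running_max)
        return running_min, running_max

    r_min, r_max = moving_average_minmax(
        torch.tensor(0.0), torch.tensor(1.0), torch.randn(8, 16))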
2 changes: 1 addition & 1 deletion test/allowlist_for_publicAPI.json
@@ -1104,7 +1104,6 @@
     "_add_relu",
     "_add_relu_",
     "_addmm_activation",
-    "_aminmax",
     "_amp_foreach_non_finite_check_and_unscale_",
     "_amp_update_scale_",
     "_assert_async",
@@ -1314,6 +1313,7 @@
     "_values_copy",
     "_weight_norm",
     "_weight_norm_interface",
+    "aminmax",
     "autocast",
     "broadcast_shapes",
     "candidate",
4 changes: 0 additions & 4 deletions test/expect/HasDecompTest.test_has_decomposition.expect
@@ -18,10 +18,6 @@ aten::_add_relu.Tensor
 aten::_add_relu.out
 aten::_add_relu_.Scalar
 aten::_add_relu_.Tensor
-aten::_aminmax
-aten::_aminmax.dim
-aten::_aminmax.dim_out
-aten::_aminmax.out
 aten::_amp_foreach_non_finite_check_and_unscale
 aten::_amp_foreach_non_finite_check_and_unscale.out
 aten::_amp_foreach_non_finite_check_and_unscale_
7 changes: 7 additions & 0 deletions test/forward_backward_compatibility/check_forward_backward_compatibility.py
@@ -9,6 +9,9 @@
 from torch._C import parse_schema
 
 
+# Run by backwards_compat CI job
+
+
 # How to run this test locally:
 # 1 Have two virtual environments (eg conda env), one without PyTorch installed (venv_nightly)
 #   one with your local changes (venv_yours).
@@ -140,6 +143,10 @@
     ("onednn::qconv2d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv3d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv2d_pointwise.binary", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.out", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim_out", datetime.date(2024, 12, 31)),
 ]
 
 ALLOW_LIST_COMPILED = [
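These entries exempt the deleted schemas from the forward/backward-compatibility check until the listed expiry date. A minimal sketch of how such an allow list is typically consulted (hypothetical helper, not the test's actual code):

    import datetime
    import re

    ALLOW_LIST = [
        ("aten::_aminmax", datetime.date(2024, 12, 31)),
        ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
    ]

    def is_allowlisted(schema_name: str, today: datetime.date) -> bool:
        # A removed schema is exempt while its entry has not expired.
        return any(
            re.match(pattern, schema_name) and today < expiry
            for pattern, expiry in ALLOW_LIST
        )

    print(is_allowlisted("aten::_aminmax", datetime.date(2024, 5, 12)))  # True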
1 change: 0 additions & 1 deletion test/mobile/model_test/coverage.yaml
@@ -1048,7 +1048,6 @@ uncovered_ops:
   aten::__is__: 83
   aten::__isnot__: 81
   aten::__not__: 32
-  aten::_aminmax: 4
   aten::_convolution: 12
   aten::_convolution.deprecated: 3
   aten::_make_per_tensor_quantized_tensor: 2
1 change: 0 additions & 1 deletion test/mobile/model_test/model_ops.yaml
@@ -30,7 +30,6 @@ root_operators:
   aten::__range_length: 106
   aten::__rshift__.int: 2
   aten::__xor__.bool: 16
-  aten::_aminmax: 18
   aten::_convolution: 27
   aten::_convolution.deprecated: 3
   aten::_infer_size: 9
12 changes: 2 additions & 10 deletions test/test_reductions.py
@@ -1219,18 +1219,10 @@ def test_amax(self, device, dtype):
     def test_aminmax(self, device, dtype):
 
         def _amin_wrapper(x, dim=None, keepdims=False):
-            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
-                if dim is None:
-                    return torch._aminmax(x)[0]
-                else:
-                    return torch._aminmax(x, dim, keepdims)[0]
+            return torch.aminmax(x, dim=dim, keepdim=keepdims)[0]
 
         def _amax_wrapper(x, dim=None, keepdims=False):
-            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
-                if dim is None:
-                    return torch._aminmax(x)[1]
-                else:
-                    return torch._aminmax(x, dim, keepdims)[1]
+            return torch.aminmax(x, dim=dim, keepdim=keepdims)[1]
 
         self._test_minmax_helper(_amin_wrapper, np.amin, device, dtype)
         self._test_minmax_helper(_amax_wrapper, np.amax, device, dtype)
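The slimmed-down wrappers now exercise torch.aminmax directly against NumPy's reductions; a standalone version of that comparison (illustrative only, outside the test harness) is:

    import numpy as np
    import torch

    x = torch.randn(4, 5)
    mn, mx = torch.aminmax(x, dim=1)
    # torch.aminmax returns a (min, max) named tuple; compare with NumPy.
    assert np.allclose(mn.numpy(), np.amin(x.numpy(), axis=1))
    assert np.allclose(mx.numpy(), np.amax(x.numpy(), axis=1))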
1 change: 0 additions & 1 deletion torch/_dynamo/trace_rules.py
@@ -373,7 +373,6 @@
         "torch._add_relu_",
         "torch._add_relu",
         "torch._addmm_activation",
-        "torch._aminmax",
         "torch._amp_foreach_non_finite_check_and_unscale_",
         "torch._amp_update_scale_",
         "torch._assert_async",

1 comment on commit 0116ffa

@pytorchmergebot
Reverted #125995 on behalf of https://github.com/huydhn due to Sorry for reverting your change but we need to reland this after I get rid of all usage of _aminmax internally in Meta (comment)