Remove deprecated _aminmax operator #125995

Open · wants to merge 2 commits into main
8 changes: 0 additions & 8 deletions aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,7 +8,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/empty.h>
 #include <ATen/ops/max.h>
@@ -66,11 +65,4 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self);
-}
-
 } // namespace at::native
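
Migration note: for callers of the removed all-reduce overload, torch.aminmax is a drop-in replacement. A minimal sketch (tensor values are illustrative):

import torch

t = torch.tensor([[1.0, -2.0], [3.0, 0.5]])

# torch._aminmax(t) previously returned (min, max) over all elements;
# torch.aminmax returns the same pair as a named tuple.
mn, mx = torch.aminmax(t)
print(mn.item(), mx.item())  # -2.0 3.0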
8 changes: 0 additions & 8 deletions aten/src/ATen/native/TensorCompare.cpp
@@ -20,7 +20,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_print_native.h>
@@ -682,13 +681,6 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self, dim, keepdim);
-}
-
 TORCH_IMPL_FUNC(clamp_out)
 (
   const Tensor& /*self*/,
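
The dim overload removed here maps onto the same public API. A minimal sketch of the equivalent call (shapes chosen for illustration):

import torch

t = torch.tensor([[1.0, -2.0], [3.0, 0.5]])

# torch._aminmax(t, 1, True) becomes:
mn, mx = torch.aminmax(t, dim=1, keepdim=True)
print(mn.shape, mx.shape)  # torch.Size([2, 1]) torch.Size([2, 1])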
12 changes: 0 additions & 12 deletions aten/src/ATen/native/native_functions.yaml
@@ -3762,18 +3762,6 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax_all
-  autogen: _aminmax.out
-
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax
-  autogen: _aminmax.dim_out
-
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
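
The surviving aminmax schema covers both removed variants through its optional dim argument, and the structured aminmax.out delegate provides the out= form. A short sketch, assuming preallocated output tensors of the right shape:

import torch

t = torch.rand(4, 3)

torch.aminmax(t)         # reduce over all elements, as _aminmax did
torch.aminmax(t, dim=0)  # reduce along one dimension, as _aminmax.dim did

# out= writes into preallocated tensors via the structured aminmax.out kernel.
mn, mx = torch.empty(3), torch.empty(3)
torch.aminmax(t, dim=0, out=(mn, mx))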
2 changes: 1 addition & 1 deletion test/allowlist_for_publicAPI.json
@@ -1109,7 +1109,6 @@
     "_add_relu",
     "_add_relu_",
     "_addmm_activation",
-    "_aminmax",
     "_amp_foreach_non_finite_check_and_unscale_",
     "_amp_update_scale_",
     "_assert_async",
@@ -1319,6 +1318,7 @@
     "_values_copy",
     "_weight_norm",
     "_weight_norm_interface",
+    "aminmax",
     "autocast",
     "broadcast_shapes",
     "candidate",
4 changes: 0 additions & 4 deletions test/expect/HasDecompTest.test_has_decomposition.expect
@@ -18,10 +18,6 @@ aten::_add_relu.Tensor
 aten::_add_relu.out
 aten::_add_relu_.Scalar
 aten::_add_relu_.Tensor
-aten::_aminmax
-aten::_aminmax.dim
-aten::_aminmax.dim_out
-aten::_aminmax.out
 aten::_amp_foreach_non_finite_check_and_unscale
 aten::_amp_foreach_non_finite_check_and_unscale.out
 aten::_amp_foreach_non_finite_check_and_unscale_
@@ -9,6 +9,9 @@
 from torch._C import parse_schema
 
 
+# Run by backwards_compat CI job
+
+
 # How to run this test locally:
 # 1 Have two virtual environments (eg conda env), one without PyTorch installed (venv_nightly)
 # one with your local changes (venv_yours).
@@ -142,6 +145,10 @@
     ("onednn::qconv2d_pointwise.binary", datetime.date(2024, 12, 31)),
     # BC-breaking change in can_cast signature: 'from' -> 'from_'
     ("aten::can_cast", datetime.date(2024, 5, 31)),
+    ("aten::_aminmax", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.out", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim_out", datetime.date(2024, 12, 31)),
 ]
 
 ALLOW_LIST_COMPILED = [
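
For context, entries in this allowlist let the backward-compatibility check tolerate a removed schema until the entry's expiry date. A hypothetical sketch of how such entries could be consulted (the helper name and matching logic are illustrative, not the actual test internals):

import datetime

ALLOW_LIST = [
    ("aten::_aminmax", datetime.date(2024, 12, 31)),
    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
]

def removal_is_allowlisted(schema_name: str, today: datetime.date) -> bool:
    # A missing schema does not fail the check while its entry is unexpired.
    return any(schema_name == name and today <= expiry
               for name, expiry in ALLOW_LIST)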
1 change: 0 additions & 1 deletion test/mobile/model_test/coverage.yaml
@@ -1048,7 +1048,6 @@ uncovered_ops:
   aten::__is__: 83
   aten::__isnot__: 81
   aten::__not__: 32
-  aten::_aminmax: 4
   aten::_convolution: 12
   aten::_convolution.deprecated: 3
   aten::_make_per_tensor_quantized_tensor: 2
1 change: 0 additions & 1 deletion test/mobile/model_test/model_ops.yaml
@@ -30,7 +30,6 @@ root_operators:
   aten::__range_length: 106
   aten::__rshift__.int: 2
   aten::__xor__.bool: 16
-  aten::_aminmax: 18
  aten::_convolution: 27
   aten::_convolution.deprecated: 3
   aten::_infer_size: 9
1 change: 0 additions & 1 deletion torch/_dynamo/trace_rules.py
@@ -372,7 +372,6 @@
         "torch._add_relu_",
         "torch._add_relu",
         "torch._addmm_activation",
-        "torch._aminmax",
         "torch._amp_foreach_non_finite_check_and_unscale_",
         "torch._amp_update_scale_",
         "torch._assert_async",