Revert "Remove deprecated _aminmax operator (#125995)"
This reverts commit 0116ffa.

Reverted #125995 on behalf of https://github.com/huydhn: "Sorry for reverting your change, but we need to reland this after I get rid of all usage of _aminmax internally at Meta" ([comment](#125995 (comment)))
pytorchmergebot authored and ZelboK committed May 19, 2024
1 parent 4417b4c commit f30d086
Showing 11 changed files with 49 additions and 13 deletions.
8 changes: 8 additions & 0 deletions aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,6 +8,7 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
+#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/empty.h>
 #include <ATen/ops/max.h>
@@ -65,4 +66,11 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
+// DEPRECATED: Use at::aminmax instead
+std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
+  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
+      " This warning will only appear once per process.");
+  return at::aminmax(self);
+}
+
 } // namespace at::native
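The shim above keeps the deprecated entry point callable while routing everything to the supported `aminmax`. A minimal sketch of the behavior being restored, assuming a build that includes this revert:

```python
import torch

x = torch.tensor([[1.0, -2.0], [3.0, 0.5]])

# The deprecated entry point forwards to the supported API,
# so both must agree exactly.
lo, hi = torch._aminmax(x)   # deprecated full-tensor reduction
ref = torch.aminmax(x)       # supported replacement
assert torch.equal(lo, ref.min) and torch.equal(hi, ref.max)
assert lo.ndim == 0 and hi.ndim == 0  # full reduction returns 0-d tensors
```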
8 changes: 8 additions & 0 deletions aten/src/ATen/native/TensorCompare.cpp
@@ -20,6 +20,7 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
+#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_print_native.h>
@@ -681,6 +682,13 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
+// DEPRECATED: Use at::aminmax instead
+std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
+  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
+      " This warning will only appear once per process.");
+  return at::aminmax(self, dim, keepdim);
+}
+
 TORCH_IMPL_FUNC(clamp_out)
 (
 const Tensor& /*self*/,
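Same pattern for the `dim` overload, which takes positional `(self, dim, keepdim)` rather than the keyword arguments of `aminmax`. A hedged sketch of the equivalence, again assuming this revert is in the build:

```python
import torch

x = torch.arange(6, dtype=torch.float32).reshape(2, 3)

# Deprecated overload: positional (self, dim, keepdim).
mn, mx = torch._aminmax(x, 1, True)

# Supported replacement: keyword arguments, named-tuple result.
out = torch.aminmax(x, dim=1, keepdim=True)
assert torch.equal(mn, out.min) and torch.equal(mx, out.max)
assert mn.shape == (2, 1)  # keepdim=True keeps the reduced dim as size 1
```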
12 changes: 12 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
@@ -3762,6 +3762,18 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
+# DEPRECATED: Use torch.aminmax instead
+- func: _aminmax(Tensor self) -> (Tensor, Tensor)
+  dispatch:
+    CPU, CUDA: _aminmax_all
+  autogen: _aminmax.out
+
+# DEPRECATED: Use torch.aminmax instead
+- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+  dispatch:
+    CPU, CUDA: _aminmax
+  autogen: _aminmax.dim_out
+
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
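These YAML entries re-register the two schemas with CPU/CUDA kernels and autogen'd out variants (`_aminmax.out`, `_aminmax.dim_out`). A sketch of how the restored overloads are reachable through the dispatcher; the `torch.ops.aten` spellings below follow the usual overload-naming convention and are an assumption, not part of this diff:

```python
import torch

x = torch.randn(4, 5)

# `default` is the full reduction, `dim` the per-dimension overload,
# matching the two schemas registered above.
lo, hi = torch.ops.aten._aminmax.default(x)
lo0, hi0 = torch.ops.aten._aminmax.dim(x, 0, False)

# Both route to at::aminmax under the hood (see the C++ shims above).
ref = torch.aminmax(x, dim=0)
assert torch.equal(lo0, ref.min) and torch.equal(hi0, ref.max)
```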
6 changes: 3 additions & 3 deletions aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
 #else
-#include <ATen/ops/aminmax.h>
+#include <ATen/ops/_aminmax.h>
 #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
 #include <ATen/ops/fake_quantize_per_channel_affine.h>
 #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::aminmax(x, 1);
+    std::tie(x_min, x_max) = at::_aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@
         size);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::aminmax(x);
+    std::tie(x_min, x_max) = at::_aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
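`_calculate_moving_average` feeds the per-row (or whole-tensor) extrema into a running min/max for the fused observer. A rough Python sketch of that update rule; the exponential-moving-average form and the `averaging_const` name are drawn from the observer convention, not from lines in this diff:

```python
import torch

def moving_average_minmax(running_min, running_max, x,
                          averaging_const=0.01, per_row=False):
    # Take the batch extrema with the (deprecated) fused-path op...
    if per_row:
        x_min, x_max = torch._aminmax(x, 1)  # one (min, max) pair per row
    else:
        x_min, x_max = torch._aminmax(x)     # a single pair for the tensor
    # ...then fold them into the running values.
    new_min = running_min + averaging_const * (x_min - running_min)
    new_max = running_max + averaging_const * (x_max - running_max)
    return new_min, new_max
```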
2 changes: 1 addition & 1 deletion test/allowlist_for_publicAPI.json
@@ -1109,6 +1109,7 @@
     "_add_relu",
     "_add_relu_",
     "_addmm_activation",
+    "_aminmax",
     "_amp_foreach_non_finite_check_and_unscale_",
     "_amp_update_scale_",
     "_assert_async",
@@ -1318,7 +1319,6 @@
     "_values_copy",
     "_weight_norm",
     "_weight_norm_interface",
-    "aminmax",
     "autocast",
     "broadcast_shapes",
     "candidate",
4 changes: 4 additions & 0 deletions test/expect/HasDecompTest.test_has_decomposition.expect
@@ -18,6 +18,10 @@ aten::_add_relu.Tensor
 aten::_add_relu.out
 aten::_add_relu_.Scalar
 aten::_add_relu_.Tensor
+aten::_aminmax
+aten::_aminmax.dim
+aten::_aminmax.dim_out
+aten::_aminmax.out
 aten::_amp_foreach_non_finite_check_and_unscale
 aten::_amp_foreach_non_finite_check_and_unscale.out
 aten::_amp_foreach_non_finite_check_and_unscale_
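The expect file records that these overloads have no registered decomposition. A hedged way to check the same thing directly, assuming the usual `torch._decomp` table:

```python
import torch
from torch._decomp import decomposition_table

# Mirrors the expect-file entries: neither overload should decompose.
for op in (torch.ops.aten._aminmax.default, torch.ops.aten._aminmax.dim):
    assert op not in decomposition_table, f"{op} unexpectedly has a decomposition"
```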
7 changes: 0 additions & 7 deletions test/forward_backward_compatibility/check_forward_backward_compatibility.py
@@ -9,9 +9,6 @@
 from torch._C import parse_schema
 
 
-# Run by backwards_compat CI job
-
-
 # How to run this test locally:
 # 1 Have two virtual environments (eg conda env), one without PyTorch installed (venv_nightly)
 #   one with your local changes (venv_yours).
@@ -143,10 +140,6 @@
     ("onednn::qconv2d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv3d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv2d_pointwise.binary", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax.out", datetime.date(2024, 12, 31)),
-    ("aten::_aminmax.dim_out", datetime.date(2024, 12, 31)),
 ]
 
 ALLOW_LIST_COMPILED = [
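With the operator restored, the four allowlist entries that #125995 added to silence the forward/backward-compatibility check can be dropped: the old schema exists again, so the check passes on its own. A sketch of the kind of comparison that check performs, assuming the `FunctionSchema` API behind the `parse_schema` import visible above:

```python
from torch._C import parse_schema

schema = "aten::_aminmax(Tensor self) -> (Tensor, Tensor)"
old = parse_schema(schema)  # schema from the nightly baseline
new = parse_schema(schema)  # schema in the current build, restored by this revert
assert new.is_backward_compatible_with(old)
```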
1 change: 1 addition & 0 deletions test/mobile/model_test/coverage.yaml
@@ -1048,6 +1048,7 @@ uncovered_ops:
   aten::__is__: 83
   aten::__isnot__: 81
   aten::__not__: 32
+  aten::_aminmax: 4
   aten::_convolution: 12
   aten::_convolution.deprecated: 3
   aten::_make_per_tensor_quantized_tensor: 2
1 change: 1 addition & 0 deletions test/mobile/model_test/model_ops.yaml
@@ -30,6 +30,7 @@ root_operators:
   aten::__range_length: 106
   aten::__rshift__.int: 2
   aten::__xor__.bool: 16
+  aten::_aminmax: 18
   aten::_convolution: 27
   aten::_convolution.deprecated: 3
   aten::_infer_size: 9
12 changes: 10 additions & 2 deletions test/test_reductions.py
@@ -1220,10 +1220,18 @@ def test_amax(self, device, dtype):
     def test_aminmax(self, device, dtype):
 
         def _amin_wrapper(x, dim=None, keepdims=False):
-            return torch.aminmax(x, dim=dim, keepdim=keepdims)[0]
+            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
+                if dim is None:
+                    return torch._aminmax(x)[0]
+                else:
+                    return torch._aminmax(x, dim, keepdims)[0]
 
         def _amax_wrapper(x, dim=None, keepdims=False):
-            return torch.aminmax(x, dim=dim, keepdim=keepdims)[1]
+            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
+                if dim is None:
+                    return torch._aminmax(x)[1]
+                else:
+                    return torch._aminmax(x, dim, keepdims)[1]
 
         self._test_minmax_helper(_amin_wrapper, np.amin, device, dtype)
         self._test_minmax_helper(_amax_wrapper, np.amax, device, dtype)
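`TORCH_WARN_ONCE` fires once per process, which is why the test reaches for `assertWarnsOnceRegex` rather than a plain warning assertion. A rough plain-Python equivalent of what it checks, assuming `torch.set_warn_always` to defeat the once-only gating during the check:

```python
import warnings
import torch

x = torch.randn(3, 4)

prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)  # surface TORCH_WARN_ONCE on every call
try:
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        torch._aminmax(x)
finally:
    torch.set_warn_always(prev)

assert any("_aminmax is deprecated" in str(w.message) for w in caught)
```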
1 change: 1 addition & 0 deletions torch/_dynamo/trace_rules.py
@@ -373,6 +373,7 @@
         "torch._add_relu_",
         "torch._add_relu",
         "torch._addmm_activation",
+        "torch._aminmax",
         "torch._amp_foreach_non_finite_check_and_unscale_",
         "torch._amp_update_scale_",
         "torch._assert_async",
