Remove deprecated _aminmax operator
cyyever committed May 11, 2024
1 parent 1115a25 commit bf63de4
Showing 5 changed files with 3 additions and 32 deletions.
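
For code that still calls the removed operator, the replacement is at::aminmax in C++ and torch.aminmax in Python. A minimal migration sketch (the tensor x is illustrative, not taken from this commit):

    import torch

    x = torch.randn(4, 8)
    # Before (removed by this commit): min_val, max_val = torch._aminmax(x)
    min_val, max_val = torch.aminmax(x)  # reduces over the whole tensor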
8 changes: 0 additions & 8 deletions aten/src/ATen/native/ReduceAllOps.cpp
@@ -8,7 +8,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/empty.h>
 #include <ATen/ops/max.h>
@@ -66,11 +65,4 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-                  " This warning will only appear once per process.");
-  return at::aminmax(self);
-}
-
 } // namespace at::native
8 changes: 0 additions & 8 deletions aten/src/ATen/native/TensorCompare.cpp
@@ -20,7 +20,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_print_native.h>
@@ -682,13 +681,6 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-                  " This warning will only appear once per process.");
-  return at::aminmax(self, dim, keepdim);
-}
-
 TORCH_IMPL_FUNC(clamp_out)
 (
  const Tensor& /*self*/,
12 changes: 0 additions & 12 deletions aten/src/ATen/native/native_functions.yaml
@@ -3762,18 +3762,6 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax_all
-  autogen: _aminmax.out
-
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax
-  autogen: _aminmax.dim_out
-
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
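The surviving aminmax schema above takes dim and keepdim as keyword-only arguments and returns a named (min, max) pair, so the removed _aminmax.dim overload maps onto it directly. A short sketch of the per-dimension form (shapes illustrative):

    # Before (removed): mn, mx = torch._aminmax(x, 1)
    result = torch.aminmax(x, dim=1)  # x: (4, 8) -> result.min, result.max: (4,)
    mn, mx = result.min, result.max
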
6 changes: 3 additions & 3 deletions aten/src/ATen/native/quantized/cuda/FusedObsFakeQuant.cu
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
 #else
-#include <ATen/ops/_aminmax.h>
+#include <ATen/ops/aminmax.h>
 #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
 #include <ATen/ops/fake_quantize_per_channel_affine.h>
 #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::_aminmax(x, 1);
+    std::tie(x_min, x_max) = at::aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@
         size);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::_aminmax(x);
+    std::tie(x_min, x_max) = at::aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
1 change: 0 additions & 1 deletion torch/_dynamo/trace_rules.py
@@ -373,7 +373,6 @@
"torch._add_relu_",
"torch._add_relu",
"torch._addmm_activation",
"torch._aminmax",
"torch._amp_foreach_non_finite_check_and_unscale_",
"torch._amp_update_scale_",
"torch._assert_async",
Expand Down
