diff --git a/aten/src/ATen/core/interned_strings.h b/aten/src/ATen/core/interned_strings.h
index f14d4b165499..d8c265069047 100644
--- a/aten/src/ATen/core/interned_strings.h
+++ b/aten/src/ATen/core/interned_strings.h
@@ -290,8 +290,6 @@ namespace c10 {
   _(aten, movedim) \
   _(aten, moveaxis) \
   _(aten, has_torch_function) \
-  _(aten, has_torch_function_unary) \
-  _(aten, has_torch_function_variadic) \
   FORALL_ATEN_BASE_SYMBOLS(_) \
   _(onnx, Add) \
   _(onnx, Concat) \
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index 89d8799cb4bc..5192bc242c9b 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -497,7 +497,7 @@ def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines
 def _is_xnnpack_enabled() -> _bool: ... # THPModule_isEnabledXNNPACK
 def _has_torch_function(Iterable[Any]) -> _bool: ... # THPModule_has_torch_function
 def _has_torch_function_unary(Any) -> _bool: ... # THPModule_has_torch_function_unary
-def _has_torch_function_variadic(...: Any) -> _bool: ... # THPModule_has_torch_function_variadic
+def _has_torch_function_variadic(*args: Any) -> _bool: ... # THPModule_has_torch_function_variadic
 def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting
 def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting
 def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython
diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp
index 3fa2a3089558..84e9d5d44c63 100644
--- a/torch/csrc/jit/frontend/ir_emitter.cpp
+++ b/torch/csrc/jit/frontend/ir_emitter.cpp
@@ -1227,10 +1227,7 @@ struct to_ir {
       auto kind = expr_out->node()->kind();
       if (kind == aten::is_scripting) {
         static_if = true;
-      } else if (
-          kind == aten::has_torch_function ||
-          kind == aten::has_torch_function_unary ||
-          kind == aten::has_torch_function_variadic) {
+      } else if (kind == aten::has_torch_function) {
         static_if = false;
       }
       // MetaCompile on boolean literals and constants
diff --git a/torch/csrc/jit/runtime/register_special_ops.cpp b/torch/csrc/jit/runtime/register_special_ops.cpp
index db4169c4f0c6..2cd5a13d3f4b 100644
--- a/torch/csrc/jit/runtime/register_special_ops.cpp
+++ b/torch/csrc/jit/runtime/register_special_ops.cpp
@@ -373,17 +373,7 @@ RegisterOperators reg({
         [](Stack* stack) { push(stack, true); },
         aliasAnalysisFromSchema()),
     OperatorGenerator(
-        TORCH_SELECTIVE_SCHEMA("aten::has_torch_function(Any args) -> bool"),
-        [](Stack* stack) { push(stack, false); },
-        aliasAnalysisFromSchema()),
-    OperatorGenerator(
-        TORCH_SELECTIVE_SCHEMA(
-            "aten::has_torch_function_unary(Any obj) -> bool"),
-        [](Stack* stack) { push(stack, false); },
-        aliasAnalysisFromSchema()),
-    OperatorGenerator(
-        TORCH_SELECTIVE_SCHEMA(
-            "aten::has_torch_function_variadic(...) -> bool"),
+        TORCH_SELECTIVE_SCHEMA("aten::has_torch_function(...) -> bool"),
         [](Stack* stack) { push(stack, false); },
         aliasAnalysisFromSchema()),
     OperatorGenerator(
diff --git a/torch/csrc/utils/disable_torch_function.cpp b/torch/csrc/utils/disable_torch_function.cpp
index 13232d76594a..6dc8526e56c5 100644
--- a/torch/csrc/utils/disable_torch_function.cpp
+++ b/torch/csrc/utils/disable_torch_function.cpp
@@ -179,7 +179,7 @@ auto check_has_torch_function(PyObject* obj) -> bool
 }
 } // namespace torch
 
-inline bool _sequence_has_torch_function(PyObject* args) {
+inline bool sequence_has_torch_function(PyObject* args) {
   Py_ssize_t nargs = PySequence_Fast_GET_SIZE(args);
   for (Py_ssize_t i = 0; i < nargs; i++) {
     PyObject* obj = PySequence_Fast_GET_ITEM(args, i);
@@ -189,7 +189,7 @@ inline bool _sequence_has_torch_function(PyObject* args) {
   return false;
 }
 
-inline bool _array_has_torch_function(PyObject *const *args, Py_ssize_t nargs) {
+inline bool array_has_torch_function(PyObject *const *args, Py_ssize_t nargs) {
   for (Py_ssize_t i = 0; i < nargs; i++) {
     if (torch::check_has_torch_function(args[i]))
       return true;
@@ -198,18 +198,18 @@ inline bool _array_has_torch_function(PyObject *const *args, Py_ssize_t nargs) {
 }
 
 PyObject* THPModule_has_torch_function(PyObject*, PyObject *arg) {
-  bool result;
+  bool result; // NOLINT(cppcoreguidelines-init-variables)
   if (PyTuple_CheckExact(arg) || PyList_CheckExact(arg)) {
     // Fast path:
     //   If we know that we have a tuple or list, we can skip an INCREF and
     //   DECREF from PySequence_Fast. Core functions will always follow this
     //   convention (almost always tuples), and it shaves ~3.5% off the cost of
     //   the check.
-    result = _sequence_has_torch_function(arg);
+    result = sequence_has_torch_function(arg);
   } else {
     auto args = py::reinterpret_steal<py::object>(
         PySequence_Fast(arg, "expected a sequence"));
-    result = _sequence_has_torch_function(args.ptr());
+    result = sequence_has_torch_function(args.ptr());
   }
 
   if (result)
@@ -226,7 +226,7 @@ PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject *obj) {
 }
 
 PyObject* THPModule_has_torch_function_variadic(PyObject*, PyObject *const *args, Py_ssize_t nargs) {
-  if (_array_has_torch_function(args, nargs))
+  if (array_has_torch_function(args, nargs))
     Py_RETURN_TRUE;
 
   Py_RETURN_FALSE;
diff --git a/torch/jit/_script.py b/torch/jit/_script.py
index dc852fbb2f92..537cc04fe6d2 100644
--- a/torch/jit/_script.py
+++ b/torch/jit/_script.py
@@ -1084,5 +1084,5 @@ def _unwrap_optional(x):
 _register_builtin(_unwrap_optional, "aten::_unwrap_optional")
 _register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
 _register_builtin(has_torch_function, "aten::has_torch_function")
-_register_builtin(has_torch_function_unary, "aten::has_torch_function_unary")
-_register_builtin(has_torch_function_variadic, "aten::has_torch_function_variadic")
+_register_builtin(has_torch_function_unary, "aten::has_torch_function")
+_register_builtin(has_torch_function_variadic, "aten::has_torch_function")