From 75b954b7157ef26102796606944210b1ba7555a3 Mon Sep 17 00:00:00 2001
From: cyy
Date: Sun, 17 Sep 2023 17:11:50 +0000
Subject: [PATCH] [4/N] Enable clang-tidy in torch/csrc/autograd (#109455)

The PR enables clang-tidy checks in torch/csrc/autograd.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/109455
Approved by: https://github.com/Skylion007
---
 .lintrunner.toml                                          | 4 +++-
 tools/autograd/templates/VariableType.h                   | 2 +-
 torch/csrc/autograd/TraceTypeManual.cpp                   | 6 ++++--
 torch/csrc/autograd/VariableTypeManual.cpp                | 5 ++---
 torch/csrc/autograd/autograd_not_implemented_fallback.cpp | 6 +++---
 torch/csrc/autograd/engine.cpp                            | 2 ++
 torch/csrc/autograd/functions/basic_ops.h                 | 2 +-
 torch/csrc/autograd/functions/comm.cpp                    | 5 ++---
 torch/csrc/autograd/functions/comm.h                      | 4 ++--
 torch/csrc/autograd/functions/init.cpp                    | 2 +-
 torch/csrc/autograd/profiler_python.cpp                   | 2 ++
 torch/csrc/autograd/python_hook.cpp                       | 4 ++++
 torch/csrc/autograd/python_saved_variable_hooks.cpp       | 1 +
 torch/csrc/autograd/record_function_ops.cpp               | 2 +-
 torch/csrc/autograd/variable.h                            | 3 +--
 15 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/.lintrunner.toml b/.lintrunner.toml
index 3259a3d63cfe2..f51737db3c381 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -274,7 +274,9 @@ exclude_patterns = [
     'c10/test/**',
     'third_party/**/*',
     'torch/csrc/api/**',
-    'torch/csrc/autograd/**',
+    'torch/csrc/autograd/functions/**',
+    'torch/csrc/autograd/generated/**',
+    'torch/csrc/autograd/profiler_legacy.cpp',
     'torch/csrc/CudaIPCTypes.cpp',
     'torch/csrc/cuda/**',
     'torch/csrc/dynamo/*',
diff --git a/tools/autograd/templates/VariableType.h b/tools/autograd/templates/VariableType.h
index abad2f9338435..5ff1a6dc567d3 100644
--- a/tools/autograd/templates/VariableType.h
+++ b/tools/autograd/templates/VariableType.h
@@ -52,7 +52,7 @@ namespace VariableType {
   at::Tensor & unpack(Tensor & t, const char * name, int pos);
   const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
   at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
-  std::vector<at::Tensor> unpack(at::ITensorListRef tl, const char *name, int pos);
+  std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
 };

 }} // namespace torch::autograd
diff --git a/torch/csrc/autograd/TraceTypeManual.cpp b/torch/csrc/autograd/TraceTypeManual.cpp
index 8d2b383eb9ca0..4134ef6d992ba 100644
--- a/torch/csrc/autograd/TraceTypeManual.cpp
+++ b/torch/csrc/autograd/TraceTypeManual.cpp
@@ -176,7 +176,8 @@ static void general_trace_function(
       tracer::recordSourceLocation(node);
       const auto& args = op.schema().arguments();
       int i = 0;
-      for (auto iter = stack->end() - input_size; iter != stack->end();
+      for (auto iter = stack->end() - static_cast(input_size);
+           iter != stack->end();
            ++iter, ++i) {
         // TODO we need to refactor graph APIs (e.g., addInputs)
         // appropriately; after that, we can get rid of the giant if-else
@@ -266,7 +267,8 @@ static void general_trace_function(
   if (tracer_state) {
     tracer::setTracingState(std::move(tracer_state));
     int i = 0;
-    for (auto iter = stack->end() - output_size; iter != stack->end();
+    for (auto iter = stack->end() - static_cast(output_size);
+         iter != stack->end();
          ++iter, ++i) {
       const auto& type = op.schema().returns()[i].type();
       if (type->isSubtypeOf(*TensorType::get())) {
diff --git a/torch/csrc/autograd/VariableTypeManual.cpp b/torch/csrc/autograd/VariableTypeManual.cpp
index c9d99d23eeb77..2622018c3df39 100644
--- a/torch/csrc/autograd/VariableTypeManual.cpp
+++ b/torch/csrc/autograd/VariableTypeManual.cpp
@@ -97,7 +97,7 @@ Tensor unpack_opt(const Tensor& t, const char* name, int pos) {
 }

 std::vector<at::Tensor> unpack(
-    at::ITensorListRef tl,
+    const at::ITensorListRef& tl,
     const char* name,
     int pos) {
   std::vector<at::Tensor> ret;
@@ -447,13 +447,12 @@ static Tensor detach(c10::DispatchKeySet ks, const Tensor& self) {
   // NB: we can't make detach() a normal view operator because the codegen
   // generates allow_tensor_metadata_change = True for them. In the future we
   // should have an option for this in the codegen.
-  std::function func = nullptr;
   auto result = as_view(
       /* base */ self,
       /* output */ out,
       /* is_bw_differentiable */ false,
       /* is_fw_differentiable */ false,
-      /* view_func */ std::move(func),
+      /* view_func */ nullptr,
       /* creation_meta */ CreationMeta::DEFAULT,
       /*allow_tensor_metadata_change=*/false);

diff --git a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp
index 4dc9ab0cbb6c0..1808d83c5a0b2 100644
--- a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp
+++ b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp
@@ -270,7 +270,7 @@ static void autogradNotImplementedFallbackImpl(
     }
   }

-  int aliased_input_idx = -1;
+  int64_t aliased_input_idx = -1;
   for (const auto i : c10::irange(num_arguments)) {
     if (schema.is_aliasing({c10::SchemaArgType::input, i}) &&
         !schema.is_mutable({c10::SchemaArgType::input, i})) {
@@ -279,7 +279,7 @@ static void autogradNotImplementedFallbackImpl(
           "Expected only a single input in the operator schema to have a non-write alias annotation (i.e., 'Tensor(a)'). "
           "Non-composite functions where multiple inputs are aliased with outputs aren't supported. "
           "Please rewrite your function as a composite function.");
-      aliased_input_idx = i;
+      aliased_input_idx = static_cast<int64_t>(i);
     }
   }

@@ -479,7 +479,7 @@ static void autogradNotImplementedInplaceOrViewFallbackImpl(
           "non-write alias annotation (i.e., 'Tensor(a)'). "
           "Non-composite functions where multiple outputs are aliased with inputs aren't supported."
           "Please rewrite your function as a composite function.");
-      aliased_output_idx = i;
+      aliased_output_idx = static_cast<int64_t>(i);
     }
   }

diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp
index 6dc5766252f7a..ec7e40efdf94b 100644
--- a/torch/csrc/autograd/engine.cpp
+++ b/torch/csrc/autograd/engine.cpp
@@ -639,6 +639,7 @@ void Engine::reentrant_thread_init() {
 }

 void Engine::thread_on_exception(
+    // NOLINTNEXTLINE(performance-unnecessary-value-param)
     std::shared_ptr<GraphTask> graph_task,
     const std::shared_ptr<Node>& fn,
     std::exception& e) {
@@ -953,6 +954,7 @@ static variable_list call_function(
       });

   if (has_post_hooks) {
+    // NOLINTNEXTLINE(bugprone-use-after-move)
     return call_post_hooks(fn, std::move(outputs), inputs);
   }
   return outputs;
diff --git a/torch/csrc/autograd/functions/basic_ops.h b/torch/csrc/autograd/functions/basic_ops.h
index cf05c60ef6ee7..2248c7082d8cb 100644
--- a/torch/csrc/autograd/functions/basic_ops.h
+++ b/torch/csrc/autograd/functions/basic_ops.h
@@ -44,7 +44,7 @@ struct TORCH_API NotImplemented : public Error {
 // Identity in forward, Error in backward. Used to implement
 // @once_differentiable
 struct TORCH_API DelayedError : public Node {
-  DelayedError(std::string msg, int num_inputs) : msg(std::move(msg)) {
+  DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
     // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
     for (const auto i : c10::irange(num_inputs)) {
       (void)i; // Suppress unused variable warning
diff --git a/torch/csrc/autograd/functions/comm.cpp b/torch/csrc/autograd/functions/comm.cpp
index 5ae5476e68dd0..9bcd511285734 100644
--- a/torch/csrc/autograd/functions/comm.cpp
+++ b/torch/csrc/autograd/functions/comm.cpp
@@ -8,7 +8,6 @@
 #include
 #include

-#include
 #include
 #include

@@ -44,8 +43,8 @@ variable_list Scatter::apply(variable_list&& inputs) {
   auto device_indices = fmap(devices_, [](const at::Device& device) -> int64_t {
     return device.index();
   });
-  auto tensors = torch::cuda::scatter(
-      std::move(input), device_indices, chunk_sizes_, dim_, streams_);
+  auto tensors =
+      torch::cuda::scatter(input, device_indices, chunk_sizes_, dim_, streams_);

   std::vector<Variable> variables;
   variables.reserve(tensors.size());
diff --git a/torch/csrc/autograd/functions/comm.h b/torch/csrc/autograd/functions/comm.h
index 5a99e83099035..9b1f0daf50bce 100644
--- a/torch/csrc/autograd/functions/comm.h
+++ b/torch/csrc/autograd/functions/comm.h
@@ -5,8 +5,8 @@
 #include
 #include

-#include
-#include
+#include
+#include
 #include
 #include

diff --git a/torch/csrc/autograd/functions/init.cpp b/torch/csrc/autograd/functions/init.cpp
index 3a00570e2d51b..70c2f54e18aaa 100644
--- a/torch/csrc/autograd/functions/init.cpp
+++ b/torch/csrc/autograd/functions/init.cpp
@@ -31,7 +31,7 @@ struct DelayedErrorCtor {
     auto arg2 = PyTuple_GET_ITEM(args, 1);
     TORCH_CHECK(
         THPUtils_checkLong(arg2), "argument 'num_inputs' must be an int");
-    int num_inputs = THPUtils_unpackLong(arg2);
+    auto num_inputs = THPUtils_unpackLong(arg2);
     return new DelayedError(std::move(msg), num_inputs);
   }
 };
diff --git a/torch/csrc/autograd/profiler_python.cpp b/torch/csrc/autograd/profiler_python.cpp
index 4ca367a68dd16..86766ba49a787 100644
--- a/torch/csrc/autograd/profiler_python.cpp
+++ b/torch/csrc/autograd/profiler_python.cpp
@@ -676,6 +676,7 @@ struct ThreadLocalResults {
 class PythonTracer final : public python_tracer::PythonTracerBase {
  public:
   PythonTracer(torch::profiler::impl::RecordQueue* queue);
+  // NOLINTNEXTLINE(bugprone-exception-escape)
   ~PythonTracer() override;

   static int pyProfileFn(
@@ -816,6 +817,7 @@ void PythonTracer::stop() {
   }
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 PythonTracer::~PythonTracer() {
   if (active_) {
     TORCH_WARN("`PythonTracer::stop()` was not called.");
diff --git a/torch/csrc/autograd/python_hook.cpp b/torch/csrc/autograd/python_hook.cpp
index df7a9d2717c2f..27b5a506d9a52 100644
--- a/torch/csrc/autograd/python_hook.cpp
+++ b/torch/csrc/autograd/python_hook.cpp
@@ -99,6 +99,7 @@ PyFunctionTensorPreHook::PyFunctionTensorPreHook(
   Py_INCREF(dict);
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 PyFunctionTensorPreHook::~PyFunctionTensorPreHook() {
   // If python is already dead, leak the wrapped python objects
   if (Py_IsInitialized()) {
@@ -127,6 +128,7 @@ PyFunctionPreHook::PyFunctionPreHook(PyObject* dict) : dict(dict) {
   Py_INCREF(dict);
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 PyFunctionPreHook::~PyFunctionPreHook() {
   // If python is already dead, leak the wrapped python objects
   if (Py_IsInitialized()) {
@@ -149,6 +151,7 @@ PyFunctionPostHook::PyFunctionPostHook(PyObject* dict) : dict(dict) {
   Py_INCREF(dict);
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 PyFunctionPostHook::~PyFunctionPostHook() {
   // If python is already dead, leak the wrapped python objects
   if (Py_IsInitialized()) {
@@ -205,6 +208,7 @@ PyFunctionTensorPostAccGradHooks::PyFunctionTensorPostAccGradHooks(
   Py_INCREF(dict);
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 PyFunctionTensorPostAccGradHooks::~PyFunctionTensorPostAccGradHooks() {
   // If python is already dead, leak the wrapped python objects
   if (Py_IsInitialized()) {
diff --git a/torch/csrc/autograd/python_saved_variable_hooks.cpp b/torch/csrc/autograd/python_saved_variable_hooks.cpp
index 1f16036eb04a7..ef7ae89dc3492 100644
--- a/torch/csrc/autograd/python_saved_variable_hooks.cpp
+++ b/torch/csrc/autograd/python_saved_variable_hooks.cpp
@@ -45,6 +45,7 @@ at::Tensor PySavedVariableHooks::call_unpack_hook() {
   // unpack_hook_ will be manually decrefed when the saved variable is released
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 PySavedVariableHooks::~PySavedVariableHooks() {
   // If python is already dead, leak the wrapped python objects
   if (Py_IsInitialized()) {
diff --git a/torch/csrc/autograd/record_function_ops.cpp b/torch/csrc/autograd/record_function_ops.cpp
index 7d91aa850b4ac..e5153ae4028aa 100644
--- a/torch/csrc/autograd/record_function_ops.cpp
+++ b/torch/csrc/autograd/record_function_ops.cpp
@@ -82,7 +82,7 @@ c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut(
     const c10::intrusive_ptr<c10::ivalue::Future>& fut) {
   // Profiling callback that ends the associated record_function
   // and returns the value of the passed in future.
-  std::function futureProfilingFunc =
+  auto futureProfilingFunc =
       [get_record = std::move(get_record)](c10::ivalue::Future& fut) {
         auto& rec = get_record();
         rec.end();
diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h
index f5d95cf2f5b25..c67182e2856d1 100644
--- a/torch/csrc/autograd/variable.h
+++ b/torch/csrc/autograd/variable.h
@@ -267,8 +267,7 @@ struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface {
   /// Sets the `requires_grad` property of `Variable`. This should be true for
   /// leaf variables that want to accumulate gradients, and false for all other
   /// variables.
-  void set_requires_grad(bool requires_grad, at::TensorImpl* self_impl)
-      override {
+  void set_requires_grad(bool requires_grad, at::TensorImpl* self_impl) final {
     TORCH_CHECK(
         !requires_grad ||
             isDifferentiableType(at::typeMetaToScalarType(self_impl->dtype())),
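Note (not part of the patch above): a minimal, self-contained C++ sketch of the signed/unsigned narrowing pattern that several hunks address, such as the iterator arithmetic in TraceTypeManual.cpp and the index assignments in autograd_not_implemented_fallback.cpp. The vector contents, the input_size value, and the choice of std::ptrdiff_t here are illustrative assumptions only, not code taken from the PR.

// Illustrative only: clang-tidy's narrowing-conversion checks flag mixing an
// unsigned size with signed iterator/index arithmetic; an explicit cast
// documents the conversion and silences the warning.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> stack{10, 20, 30, 40, 50};
  std::size_t input_size = 3; // unsigned, like an operator schema's argument count

  // Cast explicitly to a signed type before subtracting from an iterator.
  for (auto iter = stack.end() - static_cast<std::ptrdiff_t>(input_size);
       iter != stack.end();
       ++iter) {
    std::cout << *iter << ' ';
  }
  std::cout << '\n'; // prints: 30 40 50

  // Same idea for indices: convert explicitly instead of implicitly.
  int64_t aliased_input_idx = -1;
  for (std::size_t i = 0; i < stack.size(); ++i) {
    if (stack[i] == 40) {
      aliased_input_idx = static_cast<int64_t>(i);
    }
  }
  std::cout << "aliased_input_idx = " << aliased_input_idx << '\n'; // prints: 3
  return 0;
}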