[4/N] Enable clang-tidy in torch/csrc/autograd (#109455)
The PR enables clang-tidy checks in torch/csrc/autograd.

Pull Request resolved: #109455
Approved by: https://github.com/Skylion007
cyyever authored and pytorchmergebot committed Sep 17, 2023
1 parent c7017ff commit 75b954b
Showing 15 changed files with 30 additions and 20 deletions.
4 changes: 3 additions & 1 deletion .lintrunner.toml
@@ -274,7 +274,9 @@ exclude_patterns = [
'c10/test/**',
'third_party/**/*',
'torch/csrc/api/**',
- 'torch/csrc/autograd/**',
+ 'torch/csrc/autograd/functions/**',
+ 'torch/csrc/autograd/generated/**',
+ 'torch/csrc/autograd/profiler_legacy.cpp',
'torch/csrc/CudaIPCTypes.cpp',
'torch/csrc/cuda/**',
'torch/csrc/dynamo/*',
2 changes: 1 addition & 1 deletion tools/autograd/templates/VariableType.h
@@ -52,7 +52,7 @@ namespace VariableType {
at::Tensor & unpack(Tensor & t, const char * name, int pos);
const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
- std::vector<at::Tensor> unpack(at::ITensorListRef tl, const char *name, int pos);
+ std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
};

}} // namespace torch::autograd
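For context, a minimal sketch of the pattern this signature change follows (the motivating check is assumed to be something like clang-tidy's performance-unnecessary-value-param; the diff itself does not name it): a read-only parameter of a non-trivial type is cheaper to take by const reference than by value. The example below is hypothetical, not from the PR.

    #include <cstddef>
    #include <string>
    #include <vector>

    // Flagged form: `names` is copied on every call even though it is only read.
    std::size_t count_by_value(std::vector<std::string> names) {
      return names.size();
    }

    // Preferred form: pass by const reference, so no copy is made.
    std::size_t count_by_ref(const std::vector<std::string>& names) {
      return names.size();
    }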
6 changes: 4 additions & 2 deletions torch/csrc/autograd/TraceTypeManual.cpp
@@ -176,7 +176,8 @@ static void general_trace_function(
tracer::recordSourceLocation(node);
const auto& args = op.schema().arguments();
int i = 0;
- for (auto iter = stack->end() - input_size; iter != stack->end();
+ for (auto iter = stack->end() - static_cast<std::ptrdiff_t>(input_size);
+      iter != stack->end();
++iter, ++i) {
// TODO we need to refactor graph APIs (e.g., addInputs)
// appropriately; after that, we can get rid of the giant if-else
@@ -266,7 +267,8 @@ static void general_trace_function(
if (tracer_state) {
tracer::setTracingState(std::move(tracer_state));
int i = 0;
- for (auto iter = stack->end() - output_size; iter != stack->end();
+ for (auto iter = stack->end() - static_cast<std::ptrdiff_t>(output_size);
+      iter != stack->end();
++iter, ++i) {
const auto& type = op.schema().returns()[i].type();
if (type->isSubtypeOf(*TensorType::get())) {
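A small illustrative sketch of the cast added above (the exact check is an assumption, e.g. one of the *-narrowing-conversions checks): subtracting a count from an iterator uses the signed difference type std::ptrdiff_t, so an unsigned size_t operand introduces an implicit sign conversion that clang-tidy flags, while the explicit static_cast states the intent. Hypothetical example, not from the PR.

    #include <cstddef>
    #include <vector>

    // Visit the last n elements; assumes n <= stack.size().
    void visit_last_n(std::vector<int>& stack, std::size_t n) {
      // The implicit size_t -> ptrdiff_t conversion here would be flagged:
      //   for (auto it = stack.end() - n; it != stack.end(); ++it) ...
      for (auto it = stack.end() - static_cast<std::ptrdiff_t>(n);
           it != stack.end();
           ++it) {
        *it += 1; // touch each of the last n elements
      }
    }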
5 changes: 2 additions & 3 deletions torch/csrc/autograd/VariableTypeManual.cpp
@@ -97,7 +97,7 @@ Tensor unpack_opt(const Tensor& t, const char* name, int pos) {
}

std::vector<at::Tensor> unpack(
-     at::ITensorListRef tl,
+     const at::ITensorListRef& tl,
const char* name,
int pos) {
std::vector<at::Tensor> ret;
@@ -447,13 +447,12 @@ static Tensor detach(c10::DispatchKeySet ks, const Tensor& self) {
// NB: we can't make detach() a normal view operator because the codegen
// generates allow_tensor_metadata_change = True for them. In the future we
// should have an option for this in the codegen.
- std::function<at::Tensor(const at::Tensor&)> func = nullptr;
auto result = as_view(
/* base */ self,
/* output */ out,
/* is_bw_differentiable */ false,
/* is_fw_differentiable */ false,
- /* view_func */ std::move(func),
+ /* view_func */ nullptr,
/* creation_meta */ CreationMeta::DEFAULT,
/*allow_tensor_metadata_change=*/false);

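A rough sketch of the simplification in detach() above: the previous code built a named, empty std::function only to move it into the call, whereas passing nullptr directly expresses "no view function" without the intermediate object. The callee below is hypothetical, not PyTorch's as_view.

    #include <functional>
    #include <utility>

    // Hypothetical callee that accepts an optional callback.
    void run(const std::function<int(int)>& callback) {
      if (callback) {
        (void)callback(42);
      }
    }

    void before() {
      std::function<int(int)> func = nullptr; // empty object created just to pass it
      run(std::move(func));
    }

    void after() {
      run(nullptr); // same meaning, no intermediate std::function
    }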
6 changes: 3 additions & 3 deletions torch/csrc/autograd/autograd_not_implemented_fallback.cpp
@@ -270,7 +270,7 @@ static void autogradNotImplementedFallbackImpl(
}
}

- int aliased_input_idx = -1;
+ int64_t aliased_input_idx = -1;
for (const auto i : c10::irange(num_arguments)) {
if (schema.is_aliasing({c10::SchemaArgType::input, i}) &&
!schema.is_mutable({c10::SchemaArgType::input, i})) {
@@ -279,7 +279,7 @@
"Expected only a single input in the operator schema to have a non-write alias annotation (i.e., 'Tensor(a)'). "
"Non-composite functions where multiple inputs are aliased with outputs aren't supported. "
"Please rewrite your function as a composite function.");
- aliased_input_idx = i;
+ aliased_input_idx = static_cast<int64_t>(i);
}
}

@@ -479,7 +479,7 @@ static void autogradNotImplementedInplaceOrViewFallbackImpl(
"non-write alias annotation (i.e., 'Tensor(a)'). "
"Non-composite functions where multiple outputs are aliased with inputs aren't supported."
"Please rewrite your function as a composite function.");
- aliased_output_idx = i;
+ aliased_output_idx = static_cast<int64_t>(i);
}
}

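A brief sketch of the index-type change above: the variable must stay signed because -1 serves as a "not found" sentinel, while the loop index is unsigned, so the assignment gets an explicit cast rather than a silent unsigned-to-signed conversion. The exact clang-tidy check is an assumption, and c10::irange is replaced by a plain loop in this hypothetical example.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Return the index of the first negative entry, or -1 if there is none.
    std::int64_t first_negative(const std::vector<int>& values) {
      std::int64_t found = -1; // signed, so the -1 sentinel is representable
      for (std::size_t i = 0; i < values.size(); ++i) {
        if (values[i] < 0) {
          found = static_cast<std::int64_t>(i); // explicit, no silent conversion
          break;
        }
      }
      return found;
    }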
2 changes: 2 additions & 0 deletions torch/csrc/autograd/engine.cpp
@@ -639,6 +639,7 @@ void Engine::reentrant_thread_init() {
}

void Engine::thread_on_exception(
+ // NOLINTNEXTLINE(performance-unnecessary-value-param)
std::shared_ptr<GraphTask> graph_task,
const std::shared_ptr<Node>& fn,
std::exception& e) {
@@ -953,6 +954,7 @@ static variable_list call_function(
});

if (has_post_hooks) {
+ // NOLINTNEXTLINE(bugprone-use-after-move)
return call_post_hooks(fn, std::move(outputs), inputs);
}
return outputs;
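For readers unfamiliar with the suppression comments added above: NOLINTNEXTLINE(<check>) silences only the named clang-tidy check, and only on the line that follows. The example below is hypothetical, and the justification in its comment is illustrative rather than taken from engine.cpp.

    #include <memory>

    struct Task {};

    // This toy function takes the shared_ptr by value; suppose shared ownership
    // at the callee is intended, so the value-param warning is suppressed rather
    // than the signature being changed to a const reference.
    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    void on_exception(std::shared_ptr<Task> task) {
      (void)task;
    }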
2 changes: 1 addition & 1 deletion torch/csrc/autograd/functions/basic_ops.h
@@ -44,7 +44,7 @@ struct TORCH_API NotImplemented : public Error {
// Identity in forward, Error in backward. Used to implement
// @once_differentiable
struct TORCH_API DelayedError : public Node {
- DelayedError(std::string msg, int num_inputs) : msg(std::move(msg)) {
+ DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
for (const auto i : c10::irange(num_inputs)) {
(void)i; // Suppress unused variable warning
5 changes: 2 additions & 3 deletions torch/csrc/autograd/functions/comm.cpp
@@ -8,7 +8,6 @@

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
- #include <c10/util/Optional.h>

#include <cstddef>
#include <memory>
@@ -44,8 +43,8 @@ variable_list Scatter::apply(variable_list&& inputs) {
auto device_indices = fmap(devices_, [](const at::Device& device) -> int64_t {
return device.index();
});
- auto tensors = torch::cuda::scatter(
-     std::move(input), device_indices, chunk_sizes_, dim_, streams_);
+ auto tensors =
+     torch::cuda::scatter(input, device_indices, chunk_sizes_, dim_, streams_);

std::vector<Variable> variables;
variables.reserve(tensors.size());
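A short sketch of why the std::move was dropped above: if the callee binds the argument to a const reference (which torch::cuda::scatter is assumed to do for this argument), std::move performs no move at all and only suggests an ownership transfer that never happens, so passing the lvalue directly is equivalent and clearer. Hypothetical example, not from the PR.

    #include <vector>

    std::vector<int> doubled(const std::vector<int>& v) {
      std::vector<int> out;
      out.reserve(v.size());
      for (int x : v) {
        out.push_back(2 * x);
      }
      return out;
    }

    void caller() {
      std::vector<int> input{1, 2, 3};
      // auto r = doubled(std::move(input)); // misleading: nothing is moved
      auto r = doubled(input);               // equivalent, and `input` stays valid
      (void)r;
    }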
4 changes: 2 additions & 2 deletions torch/csrc/autograd/functions/comm.h
@@ -5,8 +5,8 @@
#include <torch/csrc/autograd/variable.h>

#include <ATen/ATen.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/util/Optional.h>

#include <cstddef>
#include <vector>
2 changes: 1 addition & 1 deletion torch/csrc/autograd/functions/init.cpp
@@ -31,7 +31,7 @@ struct DelayedErrorCtor {
auto arg2 = PyTuple_GET_ITEM(args, 1);
TORCH_CHECK(
THPUtils_checkLong(arg2), "argument 'num_inputs' must be an int");
- int num_inputs = THPUtils_unpackLong(arg2);
+ auto num_inputs = THPUtils_unpackLong(arg2);
return new DelayedError(std::move(msg), num_inputs);
}
};
2 changes: 2 additions & 0 deletions torch/csrc/autograd/profiler_python.cpp
@@ -676,6 +676,7 @@ struct ThreadLocalResults {
class PythonTracer final : public python_tracer::PythonTracerBase {
public:
PythonTracer(torch::profiler::impl::RecordQueue* queue);
+ // NOLINTNEXTLINE(bugprone-exception-escape)
~PythonTracer() override;

static int pyProfileFn(
@@ -816,6 +817,7 @@ void PythonTracer::stop() {
}
}

+ // NOLINTNEXTLINE(bugprone-exception-escape)
PythonTracer::~PythonTracer() {
if (active_) {
TORCH_WARN("`PythonTracer::stop()` was not called.");
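The NOLINTNEXTLINE(bugprone-exception-escape) annotations added in this file and the following ones share one cause: destructors are noexcept by default, so clang-tidy warns when a destructor calls anything that could throw. A hypothetical sketch of the pattern, not taken from the PyTorch sources:

    #include <stdexcept>

    // A helper that can throw. Calling it from a destructor is what
    // bugprone-exception-escape warns about: an escaping exception would hit
    // the implicit noexcept and call std::terminate().
    void flush_or_throw(bool ok) {
      if (!ok) {
        throw std::runtime_error("flush failed");
      }
    }

    struct Tracer {
      bool ok = true;
      // NOLINTNEXTLINE(bugprone-exception-escape)
      ~Tracer() {
        flush_or_throw(ok); // the suppression records that this risk is accepted
      }
    };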
4 changes: 4 additions & 0 deletions torch/csrc/autograd/python_hook.cpp
@@ -99,6 +99,7 @@ PyFunctionTensorPreHook::PyFunctionTensorPreHook(
Py_INCREF(dict);
}

+ // NOLINTNEXTLINE(bugprone-exception-escape)
PyFunctionTensorPreHook::~PyFunctionTensorPreHook() {
// If python is already dead, leak the wrapped python objects
if (Py_IsInitialized()) {
@@ -127,6 +128,7 @@ PyFunctionPreHook::PyFunctionPreHook(PyObject* dict) : dict(dict) {
Py_INCREF(dict);
}

+ // NOLINTNEXTLINE(bugprone-exception-escape)
PyFunctionPreHook::~PyFunctionPreHook() {
// If python is already dead, leak the wrapped python objects
if (Py_IsInitialized()) {
@@ -149,6 +151,7 @@ PyFunctionPostHook::PyFunctionPostHook(PyObject* dict) : dict(dict) {
Py_INCREF(dict);
}

+ // NOLINTNEXTLINE(bugprone-exception-escape)
PyFunctionPostHook::~PyFunctionPostHook() {
// If python is already dead, leak the wrapped python objects
if (Py_IsInitialized()) {
@@ -205,6 +208,7 @@ PyFunctionTensorPostAccGradHooks::PyFunctionTensorPostAccGradHooks(
Py_INCREF(dict);
}

+ // NOLINTNEXTLINE(bugprone-exception-escape)
PyFunctionTensorPostAccGradHooks::~PyFunctionTensorPostAccGradHooks() {
// If python is already dead, leak the wrapped python objects
if (Py_IsInitialized()) {
1 change: 1 addition & 0 deletions torch/csrc/autograd/python_saved_variable_hooks.cpp
@@ -45,6 +45,7 @@ at::Tensor PySavedVariableHooks::call_unpack_hook() {
// unpack_hook_ will be manually decrefed when the saved variable is released
}

+ // NOLINTNEXTLINE(bugprone-exception-escape)
PySavedVariableHooks::~PySavedVariableHooks() {
// If python is already dead, leak the wrapped python objects
if (Py_IsInitialized()) {
2 changes: 1 addition & 1 deletion torch/csrc/autograd/record_function_ops.cpp
@@ -82,7 +82,7 @@ c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut(
const c10::intrusive_ptr<c10::ivalue::Future>& fut) {
// Profiling callback that ends the associated record_function
// and returns the value of the passed in future.
- std::function<c10::IValue(c10::ivalue::Future&)> futureProfilingFunc =
+ auto futureProfilingFunc =
[get_record = std::move(get_record)](c10::ivalue::Future& fut) {
auto& rec = get_record();
rec.end();
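A small sketch of the auto change above: assigning a lambda to a named std::function erases its type immediately (a possible allocation plus indirect calls), while auto keeps the concrete closure type and converts to std::function only where an API actually requires it. The callee below is hypothetical.

    #include <functional>

    // Hypothetical API that needs type erasure at its boundary.
    int apply_twice(const std::function<int(int)>& f, int x) {
      return f(f(x));
    }

    int demo() {
      // std::function<int(int)> add_one = [](int v) { return v + 1; }; // eager erasure
      auto add_one = [](int v) { return v + 1; }; // plain closure, no erasure yet
      return apply_twice(add_one, 1);             // converted here, where it is needed
    }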
3 changes: 1 addition & 2 deletions torch/csrc/autograd/variable.h
@@ -267,8 +267,7 @@ struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface {
/// Sets the `requires_grad` property of `Variable`. This should be true for
/// leaf variables that want to accumulate gradients, and false for all other
/// variables.
- void set_requires_grad(bool requires_grad, at::TensorImpl* self_impl)
-     override {
+ void set_requires_grad(bool requires_grad, at::TensorImpl* self_impl) final {
TORCH_CHECK(
!requires_grad ||
isDifferentiableType(at::typeMetaToScalarType(self_impl->dtype())),
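For the override to final change above, a minimal sketch of the distinction (the motivation is not stated in the PR and is not asserted here): final still overrides the base declaration, but additionally forbids further overrides in subclasses, which documents that the hierarchy ends there and can let the compiler devirtualize calls. The types below are hypothetical.

    struct MetaInterface {
      virtual void set_requires_grad(bool value) = 0;
      virtual ~MetaInterface() = default;
    };

    struct Meta : MetaInterface {
      // Overrides the base declaration and seals it against further overriding.
      void set_requires_grad(bool value) final {
        flag = value;
      }
      bool flag = false;
    };

    // struct SubMeta : Meta {
    //   void set_requires_grad(bool) override; // error: Meta's override is final
    // };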
