Fix modernize-use-equals-default nolint failures in torch/csrcs (#61142)
Summary:
Test-plan: Compile + clang-tidy

Pull Request resolved: #61142

Reviewed By: VitalyFedyunin

Differential Revision: D29529372

Pulled By: malfet

fbshipit-source-id: 2ccde7712a51c28243b16bbb4d1d68086e0414a6
malfet authored and facebook-github-bot committed Jul 6, 2021
1 parent 718db96 commit 635d864
Showing 50 changed files with 96 additions and 202 deletions.
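
For context: clang-tidy's modernize-use-equals-default check flags empty, hand-written special member functions and suggests declaring them `= default` instead. This commit applies that rewrite and drops the NOLINT suppressions that had been hiding the findings. A minimal sketch of the transformation, using a hypothetical Widget type rather than any of the classes below:

    struct WidgetOld {
      WidgetOld() {}   // flagged: empty user-provided default constructor
      ~WidgetOld() {}  // flagged: empty user-provided destructor
    };

    struct WidgetNew {
      WidgetNew() = default;   // explicitly defaulted instead
      ~WidgetNew() = default;
    };

Beyond readability, the defaulted form can matter semantically: WidgetNew is a trivial type, while WidgetOld is not, because its special members are user-provided.
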
.github/workflows/lint.yml (2 changes: 1 addition & 1 deletion)

@@ -366,7 +366,7 @@ jobs:
   # FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
   # in a follow up PR.
   # /torch/csrc/generic/*.cpp is excluded because those files aren't actually built.
-  # deploy/interpreter files are excluded due to using macros and other techniquies
+  # deploy/interpreter files are excluded due to using macros and other techniques
   # that are not easily converted to accepted c++
   python3 tools/linter/clang_tidy.py \
     --parallel \

aten/src/ATen/core/dispatch/Dispatcher.cpp (6 changes: 2 additions & 4 deletions)

@@ -32,8 +32,7 @@ class RegistrationListenerList final {
 };
 }

-// NOLINTNEXTLINE(modernize-use-equals-default)
-OpRegistrationListener::~OpRegistrationListener() {}
+OpRegistrationListener::~OpRegistrationListener()= default;

 Dispatcher::Dispatcher()
   : operators_()
@@ -42,8 +41,7 @@ Dispatcher::Dispatcher()
   , listeners_(std::make_unique<detail::RegistrationListenerList>())
   , mutex_() {}

-// NOLINTNEXTLINE(modernize-use-equals-default)
-Dispatcher::~Dispatcher() {}
+Dispatcher::~Dispatcher() = default;

 C10_EXPORT Dispatcher& Dispatcher::realSingleton() {
   static Dispatcher _singleton;

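A special member does not have to be defaulted in the class body; it can also be defaulted at its out-of-line definition, which is what the two hunks above do for OpRegistrationListener and Dispatcher. A small sketch of the pattern, with a hypothetical Listener type:

    // listener.h
    struct Listener {
      ~Listener();  // declared only; the definition stays out of the header
    };

    // listener.cpp
    Listener::~Listener() = default;  // satisfies the clang-tidy check while
                                      // keeping the destructor non-inline

Keeping the definition in the .cpp file matters when the class holds members whose types are incomplete in the header (the pimpl pattern), since the compiler-generated destructor is emitted where those types are complete.
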
aten/src/ATen/native/RNN.cpp (10 changes: 4 additions & 6 deletions)

@@ -729,9 +729,8 @@ struct Cell {
   using hidden_type = hidden_type_tmpl;
   using cell_params = cell_params_tmpl;

-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  virtual ~Cell() {} // This is really dumb, but enables projects with
-                     // -Wnon-virtual-dtor to compile...
+  virtual ~Cell() = default; // This is really dumb, but enables projects with
+                             // -Wnon-virtual-dtor to compile...

   virtual hidden_type operator()(
       const Tensor& input,
@@ -846,9 +845,8 @@ template<typename io_type, typename hidden_type, typename param_type>
 struct Layer {
   using output_type = LayerOutput<io_type, hidden_type>;

-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  virtual ~Layer() {} // This is really dumb, but enables projects with
-                      // -Wnon-virtual-dtor to compile...
+  virtual ~Layer() = default; // This is really dumb, but enables projects with
+                              // -Wnon-virtual-dtor to compile...
   virtual output_type operator()(
       const io_type& input,
       const hidden_type& input_hidden,

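The comment the diff preserves is about deleting a derived object through a base-class pointer: without a virtual destructor that is undefined behavior, and -Wnon-virtual-dtor warns about it. A minimal sketch of why the defaulted virtual destructor is kept, using hypothetical cell types:

    #include <memory>

    struct BaseCell {
      virtual ~BaseCell() = default;  // without this, the delete below would be UB
      virtual int step(int x) = 0;
    };

    struct TanhCell : BaseCell {
      int step(int x) override { return x; }  // placeholder body
    };

    int main() {
      std::unique_ptr<BaseCell> cell = std::make_unique<TanhCell>();
      return cell->step(0);  // ~TanhCell runs correctly when cell goes out of scope
    }
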
aten/src/ATen/native/metal/MetalGuardImpl.cpp (3 changes: 1 addition & 2 deletions)

@@ -5,8 +5,7 @@ namespace at {
 namespace detail {

 struct MetalGuardImpl final : public c10::impl::DeviceGuardImplInterface {
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  MetalGuardImpl() {}
+  MetalGuardImpl() = default;

   explicit MetalGuardImpl(DeviceType t) {
     TORCH_INTERNAL_ASSERT(t == DeviceType::Metal);

aten/src/ATen/nnapi/nnapi_bind.cpp (14 changes: 5 additions & 9 deletions)

@@ -44,16 +44,12 @@ MAKE_SMART_PTR(Execution)
 #undef MAKE_SMART_PTR

 struct NnapiCompilation : torch::jit::CustomClassHolder {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,modernize-use-equals-default)
-  NnapiCompilation() {
-    // Could possibly call load_platform_library here, but error reporting
-    // can be complicated if the constructor is called during model loading.
-    // Instead, delay all work until the explicit init call.
-  }
+  // Could possibly call load_platform_library here, but error reporting
+  // can be complicated if the constructor is called during model loading.
+  // Instead, delay all work until the explicit init call.
+  NnapiCompilation() = default;

-  // NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-  ~NnapiCompilation() {
-  }
+  ~NnapiCompilation() override = default;

   void init(
       at::Tensor serialized_model_tensor,

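Note that the replacement destructor also resolves the modernize-use-override finding named in the deleted NOLINT: override and = default combine on a single declaration. A small sketch with hypothetical types:

    struct HolderBase {
      virtual ~HolderBase() = default;
    };

    struct Compilation : HolderBase {
      ~Compilation() override = default;  // explicit override, compiler-generated body
    };
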
aten/src/ATen/quantized/Quantizer.cpp (3 changes: 1 addition & 2 deletions)

@@ -208,8 +208,7 @@ Tensor PerChannelAffineFloatQParamsQuantizer::dequantize(const Tensor& qtensor)
   return rtensor;
 }

-// NOLINTNEXTLINE(modernize-use-equals-default)
-Quantizer::~Quantizer() {}
+Quantizer::~Quantizer() = default;

 C10_EXPORT void set_quantizer_(const Tensor& self, ConstQuantizerPtr quantizer) {
   get_qtensorimpl(self)->set_quantizer_(quantizer);

aten/src/ATen/test/variant_test.cpp (9 changes: 3 additions & 6 deletions)

@@ -11,12 +11,9 @@ namespace enumtype {
 // error: default initialization of an object of const type 'const enumtype::Enum1'
 // without a user-provided default constructor
 // ```
-// NOLINTNEXTLINE(modernize-use-equals-default)
-struct Enum1 { Enum1() {}; };
-// NOLINTNEXTLINE(modernize-use-equals-default)
-struct Enum2 { Enum2() {}; };
-// NOLINTNEXTLINE(modernize-use-equals-default)
-struct Enum3 { Enum3() {}; };
+struct Enum1 { Enum1() = default; };
+struct Enum2 { Enum2() = default; };
+struct Enum3 { Enum3() = default; };
 } // namespace enumtype

 struct enum_name {

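The error quoted in the preserved comment turns on the distinction between user-provided and merely defaulted constructors: Enum1() {} is user-provided, while Enum1() = default; is not, and some compilers reject const default-initialization of an empty type without a user-provided default constructor. An illustrative sketch, with hypothetical types and the caveat that acceptance varies by compiler and standard revision:

    struct UserProvided {
      UserProvided() {}  // user-provided: the body is written by hand
    };

    struct Defaulted {
      Defaulted() = default;  // user-declared, but not user-provided
    };

    const UserProvided a;  // accepted everywhere
    // const Defaulted b;  // older clang rejects this with the error quoted above;
    //                     // later standard revisions relax the rule for empty types

    int main() { return 0; }
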
aten/src/ATen/test/vulkan_test.cpp (3 changes: 1 addition & 2 deletions)

@@ -436,8 +436,7 @@ class Conv2d : public BaseOp {

 class OpsList {
  public:
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  OpsList() {}
+  OpsList() = default;
   OpsList(std::vector<std::unique_ptr<BaseOp>>& _ops) : ops(std::move(_ops)) {}

   auto runDual(at::Tensor& in, at::Tensor& vin) {

c10/core/Allocator.cpp (3 changes: 1 addition & 2 deletions)

@@ -51,7 +51,6 @@ void reportMemoryUsageToProfiler(void* ptr, int64_t alloc_size, Device device) {
   }
 }

-// NOLINTNEXTLINE(modernize-use-equals-default)
-MemoryReportingInfoBase::MemoryReportingInfoBase() {}
+MemoryReportingInfoBase::MemoryReportingInfoBase() = default;

 } // namespace c10

c10/core/CPUAllocator.cpp (5 changes: 1 addition & 4 deletions)

@@ -103,10 +103,7 @@ void free_cpu(void* data) {
 }

 struct C10_API DefaultCPUAllocator final : at::Allocator {
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  DefaultCPUAllocator() {}
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  ~DefaultCPUAllocator() override {}
+  DefaultCPUAllocator() = default;
   at::DataPtr allocate(size_t nbytes) const override {
     void* data = alloc_cpu(nbytes);
     profiledCPUMemoryReporter().New(data, nbytes);

c10/core/TensorImpl.cpp (3 changes: 1 addition & 2 deletions)

@@ -449,8 +449,7 @@ at::DataPtr PlacementDeleteContext::makeDataPtr(
       device};
 }

-// NOLINTNEXTLINE(modernize-use-equals-default)
-AutogradMetaInterface::~AutogradMetaInterface() {}
+AutogradMetaInterface::~AutogradMetaInterface() = default;

 // Setting requires_grad to true on inference tensor outside InferenceMode
 // is forbidden. Ideally it would also be illegal inside InferenceMode.

c10/cuda/CUDACachingAllocator.h (2 changes: 1 addition & 1 deletion)

@@ -19,7 +19,7 @@ class C10_CUDA_API CUDAOutOfMemoryError : public c10::Error {
 // block inside of already allocated area.
 class C10_CUDA_API FreeMemoryCallback {
  public:
-  virtual ~FreeMemoryCallback(){};
+  virtual ~FreeMemoryCallback() = default;
   virtual bool Execute() = 0;
 };

c10/test/util/either_test.cpp (10 changes: 3 additions & 7 deletions)

@@ -1161,20 +1161,16 @@ class ClassWithDestructorCallback {
  public:
   ClassWithDestructorCallback(const DestructorCallback* destructorCallback)
       : _destructorCallback(destructorCallback) {}
-  // NOLINTNEXTLINE(modernize-use-equals-default)
   ClassWithDestructorCallback(const ClassWithDestructorCallback& rhs)
       : _destructorCallback(rhs._destructorCallback) {}

   ~ClassWithDestructorCallback() {
     _destructorCallback->call();
   }

- private:
-  const DestructorCallback* _destructorCallback;
-
-  // NOLINTNEXTLINE(modernize-use-equals-delete)
   ClassWithDestructorCallback& operator=(
       const ClassWithDestructorCallback& rhs) = delete;
+
+ private:
+  const DestructorCallback* _destructorCallback;
 };
 class OnlyMoveableClassWithDestructorCallback {
  public:

c10/test/util/registry_test.cpp (3 changes: 1 addition & 2 deletions)

@@ -13,8 +13,7 @@ class Foo {
   explicit Foo(int x) {
     // LOG(INFO) << "Foo " << x;
   }
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  virtual ~Foo() {}
+  virtual ~Foo() = default;
 };

 C10_DECLARE_REGISTRY(FooRegistry, Foo, int);

c10/test/util/typeid_test.cpp (3 changes: 1 addition & 2 deletions)

@@ -76,8 +76,7 @@ TEST(TypeMetaTest, TypeMeta) {
 class ClassAllowAssignment {
  public:
   ClassAllowAssignment() : x(42) {}
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  ClassAllowAssignment(const ClassAllowAssignment& src) : x(src.x) {}
+  ClassAllowAssignment(const ClassAllowAssignment& src) = default;
   ClassAllowAssignment& operator=(const ClassAllowAssignment& src) = default;
   int x;
 };

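The hand-written copy constructor above did exactly what the compiler-generated one does, copy each member, so defaulting it preserves behavior. A minimal sketch with a hypothetical type:

    struct Point {
      Point() : x(42) {}
      Point(const Point&) = default;  // same memberwise copy the hand-written
                                      // `: x(src.x)` initializer performed
      int x;
    };

    int main() {
      Point p;
      Point q(p);  // q.x == 42
      return q.x == 42 ? 0 : 1;
    }
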
c10/util/ThreadLocalDebugInfo.h (2 changes: 1 addition & 1 deletion)

@@ -23,7 +23,7 @@ enum class C10_API_ENUM DebugInfoKind : uint8_t {
 class C10_API DebugInfoBase {
  public:
   DebugInfoBase() {}
-  virtual ~DebugInfoBase() {}
+  virtual ~DebugInfoBase() = default;
 };

 // Thread local debug information is propagated across the forward

tools/linter/clang_tidy.py (4 changes: 4 additions & 0 deletions)

@@ -373,6 +373,10 @@ def main() -> None:
             {"name": name, "lines": lines} for name, lines, in changed_files.items()
         ]
         files = list(changed_files.keys())
+        # Since header files are excluded, add .cpp file if it exists in the same folder
+        cpp_files = [f[:-1] + "cpp" for f in files if f.endswith(".h")]
+        cpp_files = [f for f in cpp_files if os.path.exists(f)]
+        files = list(set(files + cpp_files))
     else:
         line_filters = []
         files = get_all_files(paths)

torch/csrc/CudaIPCTypes.cpp (13 changes: 5 additions & 8 deletions)

@@ -26,14 +26,12 @@ void warnProducerTerminatedBeforeSharedTensorsReleased() {

 struct CudaIPCGlobalEntities {
   std::mutex ref_counters_mutex_;
-  std::atomic<int64_t> sync_events_used_;
+  std::atomic<int64_t> sync_events_used_{0};
   std::map<std::string, std::shared_ptr<CudaIPCRefCountersFile>>
       ref_counters_files_;
   std::shared_ptr<CudaIPCRefCountersFile> next_available_ref_counters_file_;
   CudaIPCSentDataLimbo CudaIPCSentDataLimbo_;
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  CudaIPCGlobalEntities() : ref_counters_files_() {}
+  CudaIPCGlobalEntities() = default;
   ~CudaIPCGlobalEntities() {
     CudaIPCSentDataLimbo_.collect();
     // Clear shared blocks to avoid releasing shared blocks after
@@ -109,7 +107,6 @@ void CudaIPCSentDataLimbo::add(std::unique_ptr<CudaIPCSentData> shared_block) {
 }

 void CudaIPCSentDataDelete(void* ptr) {
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   std::unique_ptr<CudaIPCSentData> sent_data(
       static_cast<CudaIPCSentData*>(ptr));
   if (sent_data->counter_value() > 0) {
@@ -133,13 +130,12 @@ void ReturnRefCounter(const std::string& handle, uint64_t offset /* unused */) {

 } // namespace

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 CudaIPCSentData::CudaIPCSentData(
-    const std::string& handle,
+    std::string handle,
     int64_t offset,
     int64_t* counter_ptr,
     at::Device device)
-    : handle_(handle),
+    : handle_(std::move(handle)),
       offset_(offset),
       counter_ptr_(counter_ptr),
       original_ptr_(),
@@ -172,6 +168,7 @@ CudaIPCSentData::CudaIPCSentData(
   } else {
     auto stream = c10::cuda::getCurrentCUDAStream(device.index());
     C10_CUDA_CHECK(cudaStreamSynchronize(stream));
+    event_ = nullptr;
     event_sync_required_ = false;
   }
 #else

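The signature change from const std::string& to pass-by-value with std::move is the usual sink-parameter idiom: a caller passing an rvalue pays one move instead of a copy, while a caller passing an lvalue still pays the single copy it paid before. A sketch of the idiom with a hypothetical Holder type:

    #include <string>
    #include <utility>

    class Holder {
     public:
      explicit Holder(std::string handle)  // by value: binds to lvalues and rvalues
          : handle_(std::move(handle)) {}  // then moved into the member

     private:
      std::string handle_;
    };

    int main() {
      std::string name = "shm_segment";
      Holder a(name);             // one copy into the parameter, one move
      Holder b(std::move(name));  // no copy, moves only
      Holder c("literal");        // parameter constructed in place, then moved
      return 0;
    }
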
torch/csrc/CudaIPCTypes.h (8 changes: 3 additions & 5 deletions)

@@ -29,7 +29,7 @@ struct CudaIPCSentData final {
   at::Device device_;

   CudaIPCSentData(
-      const std::string& handle,
+      std::string handle,
       int64_t offset,
       int64_t* counter_ptr,
       at::Device device);
@@ -78,13 +78,13 @@ struct CudaIPCSentDataLimbo final {

 struct CudaIPCRefCountersFile final {
   CudaIPCRefCountersFile(
-      const std::string& handle,
+      std::string handle,
       uint64_t size,
       at::DataPtr data_ptr)
       : next_offset_(0),
         size_(size),
         used_slots_(0),
-        handle_(handle),
+        handle_(std::move(handle)),
         refcounted_shared_mem_(std::move(data_ptr)) {}

   int64_t* counter_ptr() {
@@ -135,8 +135,6 @@ namespace c10 {
 namespace {
 class CudaIPCCollectCallback : public FreeMemoryCallback {
  public:
-  // NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-  ~CudaIPCCollectCallback() {};
   bool Execute() override {
     return torch::CudaIPCCollect();
   }

torch/csrc/autograd/forward_grad.cpp (14 changes: 3 additions & 11 deletions)

@@ -6,19 +6,11 @@ namespace {
 // See discussion in forward_grad.h for why these are global variables and not
 // thread local

-// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-static std::mutex all_forward_levels_mutex_;
-// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-static uint64_t next_forward_idx_ = 0;
-// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
-static std::vector<std::shared_ptr<ForwardADLevel>> all_forward_levels_;
+std::mutex all_forward_levels_mutex_;
+uint64_t next_forward_idx_ = 0;
+std::vector<std::shared_ptr<ForwardADLevel>> all_forward_levels_;

 const static at::Tensor singleton_undefined_tensor;
-
-// Temporary flag to disable forward mode
-// TODO(alband) remove these when perf issues are solved
-// NOLINTNEXTLINE(clang-diagnostic-unused-variable,cppcoreguidelines-avoid-non-const-global-variables)
-static bool is_forward_grad_enabled = false;
 }

 uint64_t ForwardADLevel::get_next_idx() {

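Dropping static on these globals is behavior-preserving: names declared in an unnamed namespace already have internal linkage, so the keyword was redundant there. A minimal sketch:

    namespace {
    int counter_plain = 0;          // internal linkage from the unnamed namespace
    static int counter_static = 0;  // identical linkage; `static` adds nothing
    }  // namespace

    int main() {
      return counter_plain + counter_static;
    }
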
torch/csrc/autograd/forward_grad.h (4 changes: 1 addition & 3 deletions)

@@ -84,7 +84,6 @@ struct ForwardGrad;
 #define EXPECTED_MAX_LEVEL 2

 struct TORCH_API ForwardADLevel {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   ForwardADLevel(uint64_t idx) : idx_(idx) {}
   ~ForwardADLevel();

@@ -111,8 +110,7 @@ struct TORCH_API ForwardADLevel {
 };

 struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
-  // NOLINTNEXTLINE(modernize-use-equals-default,cppcoreguidelines-pro-type-member-init)
-  ForwardGrad() {}
+  ForwardGrad() = default;

   // This function must only be called when AutogradMeta or SavedVariable is
   // being destructed as it ensures that:

torch/csrc/autograd/functions/basic_ops.h (3 changes: 1 addition & 2 deletions)

@@ -63,8 +63,7 @@ struct TORCH_API UndefinedGradBackward : public Node {
   UndefinedGradBackward(edge_list&& next_edges)
       : Node(std::move(next_edges)) {}

-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  UndefinedGradBackward() {}
+  UndefinedGradBackward() = default;

   variable_list apply(variable_list&& inputs) override;
 };

torch/csrc/autograd/functions/comm.cpp (7 changes: 2 additions & 5 deletions)

@@ -31,8 +31,7 @@ Scatter::Scatter(
       streams_(streams),
       unsqueeze_scalars_(unsqueeze_scalars) {}

-// NOLINTNEXTLINE(modernize-use-equals-default)
-Scatter::~Scatter() {}
+Scatter::~Scatter() = default;

 variable_list Scatter::apply(variable_list&& inputs) {
   AT_ASSERT(inputs.size() == 1);
@@ -52,7 +51,6 @@ variable_list Scatter::apply(variable_list&& inputs) {
       // NOLINTNEXTLINE(performance-move-const-arg)
       std::move(input), device_indices, chunk_sizes_, dim_, streams_);

-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   std::vector<Variable> variables;
   variables.reserve(tensors.size());
   for (auto& tensor : tensors) {
@@ -75,8 +73,7 @@ variable_list Scatter::apply(variable_list&& inputs) {
 Gather::Gather(const at::Device& destination_device, int64_t dim)
     : destination_device_(destination_device), dim_(dim) {}

-// NOLINTNEXTLINE(modernize-use-equals-default)
-Gather::~Gather() {}
+Gather::~Gather() = default;

 variable_list Gather::apply(variable_list&& inputs) {
   bool all_are_zero_dim = true;

