[2/N] Fixes clang-tidy warnings in header files (#113727)
This PR fixes more clang-tidy warnings in common headers.

Pull Request resolved: #113727
Approved by: https://github.com/Skylion007
cyyever authored and pytorchmergebot committed Nov 16, 2023
1 parent ecf1295 commit f9bf104
Showing 11 changed files with 24 additions and 24 deletions.
10 changes: 5 additions & 5 deletions aten/src/ATen/core/op_registration/op_registration.h
@@ -163,7 +163,7 @@ class TORCH_API RegisterOperators final {
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");

return std::move(*this).kernel(
- std::move(dispatch_key),
+ dispatch_key,
KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
impl::CppSignature::make<KernelFunctor>(),
detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
@@ -243,7 +243,7 @@ class TORCH_API RegisterOperators final {
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");

return std::move(*this).kernel(
- std::move(dispatch_key),
+ dispatch_key,
KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
impl::CppSignature::make<FuncType>(),
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
@@ -287,7 +287,7 @@ class TORCH_API RegisterOperators final {
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");

return std::move(*this).kernel(
- std::move(dispatch_key),
+ dispatch_key,
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
impl::CppSignature::make<FuncType>(),
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
@@ -343,7 +343,7 @@ class TORCH_API RegisterOperators final {
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");

return std::move(*this).kernel(
- std::move(dispatch_key),
+ dispatch_key,
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
impl::CppSignature::make<Lambda>(),
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
@@ -403,7 +403,7 @@ class TORCH_API RegisterOperators final {
KernelRegistrationConfig config;
config.dispatch_key = dispatch_key;
config.func = std::move(func);
- config.cpp_signature = std::move(cpp_signature);
+ config.cpp_signature = cpp_signature;
config.inferred_function_schema = std::move(inferred_function_schema);
kernels.push_back(std::move(config));
return std::move(*this);
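All five op_registration.h hunks are the same fix, which looks like clang-tidy's performance-move-const-arg: std::move on a trivially copyable argument (the dispatch key, and presumably CppSignature as well) compiles to a plain copy, so the cast only obscures the call site. A minimal sketch of the pattern, with a hypothetical DispatchKey enum and sink() function:

    #include <cstdint>
    #include <utility>

    enum class DispatchKey : std::uint16_t { CPU, CUDA };

    void sink(DispatchKey k) { (void)k; }

    void register_kernel(DispatchKey dispatch_key) {
      // Flagged: DispatchKey is trivially copyable, so std::move
      // degenerates to a copy and adds nothing.
      sink(std::move(dispatch_key));
      // The form this PR switches to:
      sink(dispatch_key);
    }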
2 changes: 1 addition & 1 deletion aten/src/ATen/record_function.h
@@ -97,7 +97,7 @@ struct ObserverContext {
virtual ~ObserverContext() = default;

protected:
- ObserverContext() {}
+ ObserverContext() = default;
};

typedef c10::SmallVector<uint64_t, kSoftLimitCallbacks> CallbackHandles;
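The record_function.h change is the usual modernize-use-equals-default cleanup (check name assumed): an empty user-provided constructor body makes a type non-trivial where = default preserves the compiler-generated semantics. Here ObserverContext has a virtual destructor anyway, so the fix is mostly stylistic consistency. Sketch:

    struct ObserverA {
      ObserverA() {}          // user-provided body: flagged
    };

    struct ObserverB {
      ObserverB() = default;  // compiler-generated: what the PR uses
    };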
2 changes: 1 addition & 1 deletion c10/cuda/CUDAStream.h
@@ -177,7 +177,7 @@ class C10_CUDA_API CUDAStream {
// Note: this returns the range of priority **supported by PyTorch**, not
// the range of priority **supported by CUDA**. The former is a subset of
// the latter.
- int least_priority, greatest_priority;
+ int least_priority = 0, greatest_priority = 0;
C10_CUDA_CHECK(
cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
#ifdef USE_ROCM
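The CUDAStream.h change looks like cppcoreguidelines-init-variables: the two ints are out-parameters for the CUDA call on the next line, but if an error path ever skipped the assignment they would be read uninitialized. A sketch, using a hypothetical get_range() as a stand-in for cudaDeviceGetStreamPriorityRange:

    #include <cstdio>

    // Hypothetical stand-in for an out-parameter API such as
    // cudaDeviceGetStreamPriorityRange.
    int get_range(int* lo, int* hi) { *lo = -1; *hi = 0; return 0; }

    void query_range() {
      // Initialization is essentially free here and guarantees defined
      // values even if the call fails before writing its out-parameters.
      int least_priority = 0, greatest_priority = 0;
      get_range(&least_priority, &greatest_priority);
      std::printf("%d..%d\n", greatest_priority, least_priority);
    }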
2 changes: 1 addition & 1 deletion c10/util/Exception.h
@@ -210,7 +210,7 @@ class C10_API WarningHandlerGuard {
/// setWarnAlways(true) to turn it into TORCH_WARN, which can be
/// tested for more easily.
C10_API void set_warnAlways(bool) noexcept(true);
- C10_API bool get_warnAlways(void) noexcept(true);
+ C10_API bool get_warnAlways() noexcept(true);

// A RAII guard that sets warn_always (not thread-local) on
// construction, and sets it back to the original value upon destruction.
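The Exception.h tweak is the classic modernize-redundant-void-arg fix: in C++, an empty parameter list already means "no arguments", so the C-style (void) spelling is redundant. Illustrative declarations (hypothetical names):

    bool get_warnAlways_c(void);  // C-ism; flagged
    bool get_warnAlways_cpp();    // identical meaning, idiomatic C++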
5 changes: 2 additions & 3 deletions c10/util/Float8_e4m3fn-inl.h
@@ -13,9 +13,8 @@ namespace c10 {

/// Constructors

- inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value) {
-   x = detail::fp8e4m3fn_from_fp32_value(value);
- }
+ inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value)
+     : x(detail::fp8e4m3fn_from_fp32_value(value)) {}

/// Implicit conversions

5 changes: 2 additions & 3 deletions c10/util/Float8_e5m2-inl.h
@@ -17,9 +17,8 @@ namespace c10 {

/// Constructors

- inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value) {
-   x = detail::fp8e5m2_from_fp32_value(value);
- }
+ inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value)
+     : x(detail::fp8e5m2_from_fp32_value(value)) {}

/// Implicit conversions

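Both Float8 constructor rewrites follow the same pattern, plausibly cppcoreguidelines-prefer-member-initializer: assigning x in the body first default-initializes the member and then overwrites it, whereas a member-initializer list initializes it exactly once. A sketch with a hypothetical encode() helper:

    #include <cstdint>

    std::uint8_t encode(float v) { return static_cast<std::uint8_t>(v); }

    struct Fp8 {
      std::uint8_t x;
      // Before: Fp8(float value) { x = encode(value); }  // init, then assign
      // After: x is initialized directly from the argument, exactly once.
      Fp8(float value) : x(encode(value)) {}
    };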
4 changes: 2 additions & 2 deletions c10/util/FunctionRef.h
@@ -36,7 +36,7 @@ class function_ref;
template <typename Ret, typename... Params>
class function_ref<Ret(Params...)> {
Ret (*callback)(intptr_t callable, Params... params) = nullptr;
- intptr_t callable;
+ intptr_t callable{};

template <typename Callable>
static Ret callback_fn(intptr_t callable, Params... params) {
@@ -55,7 +55,7 @@ class function_ref<Ret(Params...)> {
typename std::remove_reference<Callable>::type,
function_ref>::value>::type* = nullptr,
typename std::enable_if<std::is_convertible<
- typename c10::invoke_result_t<Callable, Params...>,
+ typename std::invoke_result_t<Callable, Params...>,
Ret>::value>::type* = nullptr)
: callback(callback_fn<typename std::remove_reference<Callable>::type>),
callable(reinterpret_cast<intptr_t>(&callable)) {}
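FunctionRef.h picks up two separate fixes: the callable member gets a default member initializer, so a default-constructed function_ref holds a defined value alongside the already-initialized callback, and the c10 backport of invoke_result_t gives way to the C++17 standard trait. A sketch (callback_ref is a hypothetical stand-in):

    #include <type_traits>

    struct callback_ref {
      void (*callback)(long) = nullptr;
      long callable{};  // value-initialized; previously left indeterminate
    };

    // With C++17 available, the standard trait replaces the c10:: backport:
    static_assert(
        std::is_same_v<std::invoke_result_t<int (*)(float), float>, int>);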
2 changes: 1 addition & 1 deletion c10/util/Load.h
@@ -15,7 +15,7 @@ struct LoadImpl {
template <>
struct LoadImpl<bool> {
C10_HOST_DEVICE static bool apply(const void* src) {
- static_assert(sizeof(bool) == sizeof(char), "");
+ static_assert(sizeof(bool) == sizeof(char));
// NOTE: [Loading boolean values]
// Protect against invalid boolean values by loading as a byte
// first, then converting to bool (see gh-54789).
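The Load.h change relies on C++17's single-argument static_assert; the empty message string was only ever needed in C++11/14:

    static_assert(sizeof(bool) == sizeof(char), "");  // pre-C++17 spelling
    static_assert(sizeof(bool) == sizeof(char));      // C++17 form used here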
4 changes: 2 additions & 2 deletions torch/csrc/cuda/Module.cpp
@@ -674,7 +674,7 @@ PyObject* THCPModule_memorySnapshot(PyObject* _unused, PyObject* noargs) {
std::vector<py::dict> to_gather_dest;

auto add_frame_key = [&](const py::dict& d,
- const std::shared_ptr<c10::GatheredContext> ctx) {
+ const std::shared_ptr<c10::GatheredContext>& ctx) {
if (ctx) {
auto sc = getFromContext(ctx);
to_gather_frames.emplace_back(sc);
@@ -1352,13 +1352,13 @@ PyObject* THCPModule_setBenchmarkLimitCuDNN(PyObject* _unused, PyObject* arg) {
"set_benchmark_limit_cudnn expects an int, "
"but got %s",
THPUtils_typename(arg));
- auto benchmark_limit = static_cast<int>(THPUtils_unpackLong(arg));
#if defined(USE_ROCM)
TORCH_WARN_ONCE(
"cuDNN Benchmark limit is not supported in MIOpen and will have no effect.");
#endif
#if AT_CUDNN_ENABLED()
#if HAS_CUDNN_V8()
+ auto benchmark_limit = static_cast<int>(THPUtils_unpackLong(arg));
at::globalContext().setBenchmarkLimitCuDNN(benchmark_limit);
#else
TORCH_WARN_ONCE(
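Module.cpp sees two fixes: the lambda now takes the shared_ptr by const reference (plausibly performance-unnecessary-value-param, avoiding an atomic ref-count bump per call), and benchmark_limit moves inside the only #if branch that uses it, sidestepping an unused-variable warning on ROCm and non-v8 builds. A sketch of the shared_ptr point, with a hypothetical Ctx type:

    #include <memory>

    struct Ctx { int frames = 0; };

    // By value, every call copies the shared_ptr and touches its atomic
    // ref-count; a const& suffices when the callee only reads through it.
    int read_frames(const std::shared_ptr<Ctx>& ctx) {
      return ctx ? ctx->frames : 0;
    }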
4 changes: 3 additions & 1 deletion torch/csrc/jit/frontend/source_range.h
@@ -23,11 +23,13 @@ struct SourceRange;
struct TORCH_API StringCordView {
StringCordView();
StringCordView(const StringCordView&) = default;
+ StringCordView(StringCordView&&) noexcept = default;
StringCordView(
std::vector<c10::string_view> inputs,
std::vector<std::shared_ptr<std::string>> ownerships);

StringCordView& operator=(const StringCordView&) = default;
+ StringCordView& operator=(StringCordView&&) noexcept = default;

size_t size() const {
return accumulated_sizes_.back();
@@ -212,7 +214,7 @@ struct TORCH_API Source {
c10::optional<std::string> filename = c10::nullopt,
size_t starting_line_no = 0,
std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
- : text_view_(str),
+ : text_view_(std::move(str)),
filename_(std::move(filename)),
starting_line_no_(starting_line_no),
gen_ranges_(std::move(gen_ranges)) {
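source_range.h gains explicitly defaulted noexcept move operations for StringCordView, in the spirit of performance-noexcept-move-constructor: standard containers only move their elements on reallocation when the move constructor is noexcept. The Source constructor also now moves its by-value str parameter into the member instead of copying it. A sketch of the move-operations point, using a hypothetical Cord type:

    #include <string>
    #include <vector>

    struct Cord {
      std::vector<std::string> pieces;
      Cord() = default;
      Cord(const Cord&) = default;
      Cord& operator=(const Cord&) = default;
      // Without noexcept here, std::vector<Cord> must copy elements on
      // reallocation to preserve its strong exception guarantee.
      Cord(Cord&&) noexcept = default;
      Cord& operator=(Cord&&) noexcept = default;
    };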
8 changes: 4 additions & 4 deletions torch/csrc/jit/serialization/storage_context.h
@@ -15,15 +15,15 @@ class TORCH_API SerializationStorageContext {
delete;
SerializationStorageContext(const SerializationStorageContext&) = delete;

- uint64_t getOrAddStorage(c10::Storage storage) {
+ uint64_t getOrAddStorage(const c10::Storage& storage) {
if (!hasStorage(storage)) {
uint64_t size = storage_id_map_.size();
storage_id_map_[storage] = size;
}
return storage_id_map_[storage];
}

- bool hasStorage(c10::Storage storage) {
+ bool hasStorage(const c10::Storage& storage) {
return storage_id_map_.find(storage) != storage_id_map_.end();
}

@@ -62,9 +62,9 @@ class TORCH_API DeserializationStorageContext {
const DeserializationStorageContext&) = delete;
DeserializationStorageContext(const DeserializationStorageContext&) = delete;

- void addStorage(const std::string& name, c10::Storage storage) {
+ void addStorage(std::string name, c10::Storage storage) {
TORCH_INTERNAL_ASSERT(!hasStorage(name));
- name_storage_map_.insert({name, storage});
+ name_storage_map_.emplace(std::move(name), std::move(storage));
}

bool hasStorage(const std::string& name) {
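The storage_context.h signatures split into two idioms: read-only operations (hasStorage, getOrAddStorage) now take const references instead of copying a Storage per call, while addStorage keeps by-value sink parameters and moves them into the map, so an rvalue argument costs one move instead of a copy. A sketch with hypothetical Registry/Blob names:

    #include <string>
    #include <unordered_map>
    #include <utility>

    struct Blob { int bytes = 0; };

    class Registry {
     public:
      // Sink parameters: take by value, then move into the container.
      void add(std::string name, Blob blob) {
        map_.emplace(std::move(name), std::move(blob));
      }
      // Lookups only read, so const& avoids a std::string copy per query.
      bool has(const std::string& name) const {
        return map_.find(name) != map_.end();
      }

     private:
      std::unordered_map<std::string, Blob> map_;
    };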
