[libc][CPP] clean up and generalize atomic implementation #118996
Merged: SchrodingerZhu merged 1 commit into llvm:main from SchrodingerZhu:libc/generalize-atomic on Dec 6, 2024
Conversation
@llvm/pr-subscribers-libc
Author: Schrodinger ZHU Yifan (SchrodingerZhu)
Changes
Full diff: https://github.com/llvm/llvm-project/pull/118996.diff (8 files affected)
diff --git a/libc/src/__support/CPP/atomic.h b/libc/src/__support/CPP/atomic.h
index 72e7f2adde6a43..c67e4e9b6f1cbc 100644
--- a/libc/src/__support/CPP/atomic.h
+++ b/libc/src/__support/CPP/atomic.h
@@ -40,8 +40,12 @@ enum class MemoryScope : int {
};
template <typename T> struct Atomic {
- // For now, we will restrict to only arithmetic types.
- static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");
+ static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
+ is_move_constructible_v<T> && is_copy_assignable_v<T> &&
+ is_move_assignable_v<T>,
+ "atomic<T> requires T to be trivially copyable, copy "
+ "constructible, move constructible, copy assignable, "
+ "and move assignable.");
private:
// The value stored should be appropriately aligned so that
@@ -49,6 +53,14 @@ template <typename T> struct Atomic {
// correctly.
static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
: alignof(T);
+ // type conversion helper to avoid long c++ style casts
+ LIBC_INLINE static int order(MemoryOrder mem_ord) {
+ return static_cast<int>(mem_ord);
+ }
+
+ LIBC_INLINE static int scope(MemoryScope mem_scope) {
+ return static_cast<int>(mem_scope);
+ }
public:
using value_type = T;
@@ -59,131 +71,146 @@ template <typename T> struct Atomic {
// operations should be performed using the atomic methods however.
alignas(ALIGNMENT) value_type val;
- constexpr Atomic() = default;
+ LIBC_INLINE constexpr Atomic() = default;
// Intializes the value without using atomic operations.
- constexpr Atomic(value_type v) : val(v) {}
+ LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}
- Atomic(const Atomic &) = delete;
- Atomic &operator=(const Atomic &) = delete;
+ LIBC_INLINE Atomic(const Atomic &) = delete;
+ LIBC_INLINE Atomic &operator=(const Atomic &) = delete;
// Atomic load.
- operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }
-
- T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_load_n)
- return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
+ LIBC_INLINE operator T() { return load(); }
+
+ LIBC_INLINE T
+ load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ T res;
+#if __has_builtin(__scoped_atomic_load)
+ __scoped_atomic_load(&val, &res, order(mem_ord), scope(mem_scope));
#else
- return __atomic_load_n(&val, int(mem_ord));
+ __atomic_load(&val, &res, order(mem_ord));
#endif
+ return res;
}
// Atomic store.
- T operator=(T rhs) {
- __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
+ LIBC_INLINE T operator=(T rhs) {
+ store(rhs);
return rhs;
}
- void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_store_n)
- __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
+ LIBC_INLINE void
+ store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+#if __has_builtin(__scoped_atomic_store)
+ __scoped_atomic_store(&val, &rhs, order(mem_ord), scope(mem_scope));
#else
- __atomic_store_n(&val, rhs, int(mem_ord));
+ __atomic_store(&val, &rhs, order(mem_ord));
#endif
}
// Atomic compare exchange
- bool compare_exchange_strong(
+ LIBC_INLINE bool compare_exchange_strong(
T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- return __atomic_compare_exchange_n(&val, &expected, desired, false,
- int(mem_ord), int(mem_ord));
+ return __atomic_compare_exchange(&val, &expected, &desired, false,
+ order(mem_ord), order(mem_ord));
}
// Atomic compare exchange (separate success and failure memory orders)
- bool compare_exchange_strong(
+ LIBC_INLINE bool compare_exchange_strong(
T &expected, T desired, MemoryOrder success_order,
MemoryOrder failure_order,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- return __atomic_compare_exchange_n(&val, &expected, desired, false,
- static_cast<int>(success_order),
- static_cast<int>(failure_order));
+ return __atomic_compare_exchange(&val, &expected, &desired, false,
+ order(success_order),
+ order(failure_order));
}
// Atomic compare exchange (weak version)
- bool compare_exchange_weak(
+ LIBC_INLINE bool compare_exchange_weak(
T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- return __atomic_compare_exchange_n(&val, &expected, desired, true,
- static_cast<int>(mem_ord),
- static_cast<int>(mem_ord));
+ return __atomic_compare_exchange(&val, &expected, &desired, true,
+ order(mem_ord), order(mem_ord));
}
// Atomic compare exchange (weak version with separate success and failure
// memory orders)
- bool compare_exchange_weak(
+ LIBC_INLINE bool compare_exchange_weak(
T &expected, T desired, MemoryOrder success_order,
MemoryOrder failure_order,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- return __atomic_compare_exchange_n(&val, &expected, desired, true,
- static_cast<int>(success_order),
- static_cast<int>(failure_order));
+ return __atomic_compare_exchange(&val, &expected, &desired, true,
+ order(success_order),
+ order(failure_order));
}
- T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_exchange_n)
- return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
- (int)(mem_scope));
+ LIBC_INLINE T
+ exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ T ret;
+#if __has_builtin(__scoped_atomic_exchange)
+ __scoped_atomic_exchange(&val, &desired, &ret, order(mem_ord),
+ scope(mem_scope));
#else
- return __atomic_exchange_n(&val, desired, int(mem_ord));
+ __atomic_exchange(&val, &desired, &ret, order(mem_ord));
#endif
+ return ret;
}
- T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ LIBC_INLINE T
+ fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
- return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
- (int)(mem_scope));
+ return __scoped_atomic_fetch_add(&val, increment, order(mem_ord),
+ scope(mem_scope));
#else
- return __atomic_fetch_add(&val, increment, int(mem_ord));
+ return __atomic_fetch_add(&val, increment, order(mem_ord));
#endif
}
- T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ LIBC_INLINE T
+ fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
- return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
+ return __scoped_atomic_fetch_or(&val, mask, order(mem_ord),
+ scope(mem_scope));
#else
- return __atomic_fetch_or(&val, mask, int(mem_ord));
+ return __atomic_fetch_or(&val, mask, order(mem_ord));
#endif
}
- T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ LIBC_INLINE T
+ fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
- return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
- (int)(mem_scope));
+ return __scoped_atomic_fetch_and(&val, mask, order(mem_ord),
+ scope(mem_scope));
#else
- return __atomic_fetch_and(&val, mask, int(mem_ord));
+ return __atomic_fetch_and(&val, mask, order(mem_ord));
#endif
}
- T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
- [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ LIBC_INLINE T
+ fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+ [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+ static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
- return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
- (int)(mem_scope));
+ return __scoped_atomic_fetch_sub(&val, decrement, order(mem_ord),
+ scope(mem_scope));
#else
- return __atomic_fetch_sub(&val, decrement, int(mem_ord));
+ return __atomic_fetch_sub(&val, decrement, order(mem_ord));
#endif
}
// Set the value without using an atomic operation. This is useful
// in initializing atomic values without a constructor.
- void set(T rhs) { val = rhs; }
+ LIBC_INLINE void set(T rhs) { val = rhs; }
};
// Issue a thread fence with the given memory ordering.
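To make the atomic.h change above concrete, here is a minimal usage sketch assuming this patch is applied; the Pair struct, its values, and the example() function are illustrative only and not part of the diff:

// Minimal sketch (not part of the patch): a trivially copyable struct can now
// be wrapped in cpp::Atomic, while the arithmetic fetch_* operations remain
// restricted to integral types via their new static_asserts.
#include "src/__support/CPP/atomic.h"

struct Pair {
  int lo;
  int hi;
}; // trivially copyable, copy/move constructible and assignable

void example() { // hypothetical function, for illustration only
  LIBC_NAMESPACE::cpp::Atomic<Pair> a({1, 2}); // rejected before this patch
  Pair old = a.exchange({3, 4}); // lowers to the pointer-based __atomic_exchange
  Pair cur = a.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED);
  (void)old;
  (void)cur;

  LIBC_NAMESPACE::cpp::Atomic<int> counter(0);
  counter.fetch_add(1); // fetch_add still static_asserts that T is integral
}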
diff --git a/libc/src/__support/CPP/type_traits.h b/libc/src/__support/CPP/type_traits.h
index d50b6612656dbb..b9bc5b85684415 100644
--- a/libc/src/__support/CPP/type_traits.h
+++ b/libc/src/__support/CPP/type_traits.h
@@ -28,6 +28,8 @@
#include "src/__support/CPP/type_traits/is_const.h"
#include "src/__support/CPP/type_traits/is_constant_evaluated.h"
#include "src/__support/CPP/type_traits/is_convertible.h"
+#include "src/__support/CPP/type_traits/is_copy_assignable.h"
+#include "src/__support/CPP/type_traits/is_copy_constructible.h"
#include "src/__support/CPP/type_traits/is_destructible.h"
#include "src/__support/CPP/type_traits/is_enum.h"
#include "src/__support/CPP/type_traits/is_fixed_point.h"
@@ -36,6 +38,8 @@
#include "src/__support/CPP/type_traits/is_integral.h"
#include "src/__support/CPP/type_traits/is_lvalue_reference.h"
#include "src/__support/CPP/type_traits/is_member_pointer.h"
+#include "src/__support/CPP/type_traits/is_move_assignable.h"
+#include "src/__support/CPP/type_traits/is_move_constructible.h"
#include "src/__support/CPP/type_traits/is_null_pointer.h"
#include "src/__support/CPP/type_traits/is_object.h"
#include "src/__support/CPP/type_traits/is_pointer.h"
diff --git a/libc/src/__support/CPP/type_traits/is_copy_assignable.h b/libc/src/__support/CPP/type_traits/is_copy_assignable.h
new file mode 100644
index 00000000000000..9beb93d14668d9
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/is_copy_assignable.h
@@ -0,0 +1,32 @@
+//===-- is_copy_assignable type_traits --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_COPY_ASSIGNABLE_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_COPY_ASSIGNABLE_H
+
+#include "src/__support/CPP/type_traits/add_lvalue_reference.h"
+#include "src/__support/CPP/type_traits/integral_constant.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace cpp {
+
+// is copy assignable
+template <class T>
+struct is_copy_assignable
+ : public integral_constant<
+ bool, __is_assignable(cpp::add_lvalue_reference_t<T>,
+ cpp::add_lvalue_reference_t<const T>)> {};
+
+template <class T>
+LIBC_INLINE_VAR constexpr bool is_copy_assignable_v =
+ is_copy_assignable<T>::value;
+
+} // namespace cpp
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_COPY_ASSIGNABLE_H
diff --git a/libc/src/__support/CPP/type_traits/is_copy_constructible.h b/libc/src/__support/CPP/type_traits/is_copy_constructible.h
new file mode 100644
index 00000000000000..d8eb9ad3507eec
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/is_copy_constructible.h
@@ -0,0 +1,31 @@
+//===-- is_copy_constructible type_traits -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_COPY_CONSTRUCTIBLE_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_COPY_CONSTRUCTIBLE_H
+
+#include "src/__support/CPP/type_traits/add_lvalue_reference.h"
+#include "src/__support/CPP/type_traits/integral_constant.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace cpp {
+
+// is copy constructible
+template <class T>
+struct is_copy_constructible
+ : public integral_constant<
+ bool, __is_constructible(T, cpp::add_lvalue_reference_t<const T>)> {};
+
+template <class T>
+LIBC_INLINE_VAR constexpr bool is_copy_constructible_v =
+ is_copy_constructible<T>::value;
+
+} // namespace cpp
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_COPY_CONSTRUCTIBLE_H
diff --git a/libc/src/__support/CPP/type_traits/is_move_assignable.h b/libc/src/__support/CPP/type_traits/is_move_assignable.h
new file mode 100644
index 00000000000000..a788bd9074e32a
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/is_move_assignable.h
@@ -0,0 +1,33 @@
+//===-- is_move_assignable type_traits --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_MOVE_ASSIGNABLE_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_MOVE_ASSIGNABLE_H
+
+#include "src/__support/CPP/type_traits/add_lvalue_reference.h"
+#include "src/__support/CPP/type_traits/add_rvalue_reference.h"
+#include "src/__support/CPP/type_traits/integral_constant.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace cpp {
+
+// is move assignable
+template <class T>
+struct is_move_assignable
+ : public integral_constant<bool, __is_assignable(
+ cpp::add_lvalue_reference_t<T>,
+ cpp::add_rvalue_reference_t<T>)> {};
+
+template <class T>
+LIBC_INLINE_VAR constexpr bool is_move_assignable_v =
+ is_move_assignable<T>::value;
+
+} // namespace cpp
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_MOVE_ASSIGNABLE_H
diff --git a/libc/src/__support/CPP/type_traits/is_move_constructible.h b/libc/src/__support/CPP/type_traits/is_move_constructible.h
new file mode 100644
index 00000000000000..c8989605462584
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/is_move_constructible.h
@@ -0,0 +1,31 @@
+//===-- is_move_constructible type_traits ------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_MOVE_CONSTRUCTIBLE_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_MOVE_CONSTRUCTIBLE_H
+
+#include "src/__support/CPP/type_traits/add_rvalue_reference.h"
+#include "src/__support/CPP/type_traits/integral_constant.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace cpp {
+
+// is move constructible
+template <class T>
+struct is_move_constructible
+ : public integral_constant<bool, __is_constructible(
+ T, cpp::add_rvalue_reference_t<T>)> {};
+
+template <class T>
+LIBC_INLINE_VAR constexpr bool is_move_constructible_v =
+ is_move_constructible<T>::value;
+
+} // namespace cpp
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_MOVE_CONSTRUCTIBLE_H
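The four new traits above follow the standard library definitions, built from the compiler builtins __is_constructible/__is_assignable combined with the add_lvalue_reference/add_rvalue_reference helpers. A quick consistency sketch, assuming a hosted toolchain where <type_traits> is available (the library itself does not depend on it) and using a made-up struct S:

#include <type_traits>
#include "src/__support/CPP/type_traits.h"

struct S {
  int v;
};

// Each libc trait should agree with its std counterpart for ordinary types.
static_assert(LIBC_NAMESPACE::cpp::is_copy_constructible_v<S> ==
              std::is_copy_constructible_v<S>);
static_assert(LIBC_NAMESPACE::cpp::is_move_constructible_v<S> ==
              std::is_move_constructible_v<S>);
static_assert(LIBC_NAMESPACE::cpp::is_copy_assignable_v<S> ==
              std::is_copy_assignable_v<S>);
static_assert(LIBC_NAMESPACE::cpp::is_move_assignable_v<S> ==
              std::is_move_assignable_v<S>);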
diff --git a/libc/src/__support/CPP/type_traits/is_trivially_copyable.h b/libc/src/__support/CPP/type_traits/is_trivially_copyable.h
index 68e56c8547834f..a3e786fe1d141d 100644
--- a/libc/src/__support/CPP/type_traits/is_trivially_copyable.h
+++ b/libc/src/__support/CPP/type_traits/is_trivially_copyable.h
@@ -19,6 +19,10 @@ template <class T>
struct is_trivially_copyable
: public integral_constant<bool, __is_trivially_copyable(T)> {};
+template <class T>
+LIBC_INLINE_VAR constexpr bool is_trivially_copyable_v =
+ is_trivially_copyable<T>::value;
+
} // namespace cpp
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/test/src/__support/CPP/atomic_test.cpp b/libc/test/src/__support/CPP/atomic_test.cpp
index 5b105c8eb3d56c..8772ad05f49ff7 100644
--- a/libc/test/src/__support/CPP/atomic_test.cpp
+++ b/libc/test/src/__support/CPP/atomic_test.cpp
@@ -32,3 +32,20 @@ TEST(LlvmLibcAtomicTest, CompareExchangeStrong) {
ASSERT_FALSE(aint.compare_exchange_strong(desired, 100));
ASSERT_EQ(aint.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 100);
}
+
+struct TrivialData {
+ int a;
+ int b;
+};
+
+TEST(LlvmLibcAtomicTest, TrivialCompositeData) {
+ LIBC_NAMESPACE::cpp::Atomic<TrivialData> data({1, 2});
+ ASSERT_EQ(data.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED).a, 1);
+ ASSERT_EQ(data.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED).b, 2);
+
+ auto old = data.exchange({3, 4});
+ ASSERT_EQ(data.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED).a, 3);
+ ASSERT_EQ(data.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED).b, 4);
+ ASSERT_EQ(old.a, 1);
+ ASSERT_EQ(old.b, 2);
+}
jhuber6 approved these changes on Dec 6, 2024.
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/196/builds/2011. Here is the relevant piece of the build log for reference.
Allow Atomic to wrap any trivial data and mark its member functions with LIBC_INLINE. This may be useful when pushing forward the Windows implementation.
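A rough sketch of the compile-time gate the new static_assert in Atomic enforces; the Plain and Tracked types below are made up for illustration and are not part of the patch:

#include "src/__support/CPP/type_traits.h"

struct Plain {
  int x;
  long y;
}; // trivially copyable: Atomic<Plain> is accepted after this patch

struct Tracked {
  int x;
  Tracked(const Tracked &other) : x(other.x) {} // user-provided copy constructor
}; // not trivially copyable: Atomic<Tracked> still fails the static_assert

static_assert(LIBC_NAMESPACE::cpp::is_trivially_copyable_v<Plain> &&
                  LIBC_NAMESPACE::cpp::is_copy_constructible_v<Plain> &&
                  LIBC_NAMESPACE::cpp::is_move_constructible_v<Plain> &&
                  LIBC_NAMESPACE::cpp::is_copy_assignable_v<Plain> &&
                  LIBC_NAMESPACE::cpp::is_move_assignable_v<Plain>,
              "Plain satisfies the Atomic<T> requirements");
static_assert(!LIBC_NAMESPACE::cpp::is_trivially_copyable_v<Tracked>,
              "a user-provided copy constructor makes Tracked non-trivially-copyable");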