Skip to content
This repository has been archived by the owner on Mar 21, 2024. It is now read-only.

Tweak atomic and atomic_ref to match specification's constructor requirements #219

Merged
merged 2 commits into from
Nov 3, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// UNSUPPORTED: libcpp-has-no-threads, pre-sm-60
// UNSUPPORTED: windows && pre-sm-70

// NOTE: atomic<> of a TriviallyCopyable class is wrongly rejected by older
// clang versions. It was fixed right before the llvm 3.5 release. See PR18097.
// XFAIL: apple-clang-6.0, clang-3.4, clang-3.3

// <cuda/std/atomic>

#include <cuda/std/atomic>
#include <cuda/std/utility>
#include <cuda/std/cassert>
// #include <cuda/std/thread> // for thread_id
// #include <cuda/std/chrono> // for nanoseconds

#include "test_macros.h"

// Verify that T is neither constructible nor assignable from another T,
// via either an rvalue or a const lvalue source. Compile-time only.
template <class T>
__host__ __device__
void test_not_copy_constructible() {
  static_assert(!cuda::std::is_constructible<T, T&&>::value, "");
  static_assert(!cuda::std::is_constructible<T, const T&>::value, "");
  static_assert(!cuda::std::is_assignable<T, T&&>::value, "");
  static_assert(!cuda::std::is_assignable<T, const T&>::value, "");
}

// Verify that T is copy/move constructible but NOT copy/move assignable
// (the atomic_ref contract: copies rebind-free, assignment is deleted).
// Compile-time only.
template <class T>
__host__ __device__
void test_copy_constructible() {
  static_assert(cuda::std::is_constructible<T, T&&>::value, "");
  static_assert(cuda::std::is_constructible<T, const T&>::value, "");
  static_assert(!cuda::std::is_assignable<T, T&&>::value, "");
  static_assert(!cuda::std::is_assignable<T, const T&>::value, "");
}

// Runtime check: a copy-constructed atomic reference must alias the same
// underlying object, so one increment through each reference yields 2.
// T is the atomic_ref-like type under test; A is the referenced value type.
template <class T, class A>
__host__ __device__
void test_atomic_ref_copy_ctor() {
  A underlying = 0;

  T original(underlying);
  T copy(original);

  ++original;
  ++copy;

  assert(copy.load() == 2);
}

// Runtime check: a move-constructed atomic reference must keep referring to
// the same underlying object, so an increment before the move and one after
// it accumulate to 2. T is the type under test; A is the referenced type.
template <class T, class A>
__host__ __device__
void test_atomic_ref_move_ctor() {
  A underlying = 0;

  T source(underlying);
  ++source;

  T moved(cuda::std::move(source));
  ++moved;

  assert(moved.load() == 2);
}

int main(int, char**)
{
  // atomic must be neither copy/move constructible nor assignable,
  // in both the cuda::std:: and scope-parameterized cuda:: flavors.
  test_not_copy_constructible<cuda::std::atomic<int>>();
  test_not_copy_constructible<cuda::atomic<int>>();

  // atomic_ref must be copy/move constructible but not assignable.
  test_copy_constructible<cuda::std::atomic_ref<int>>();
  test_copy_constructible<cuda::atomic_ref<int>>();

  // Copies of an atomic_ref (const-qualified ones included) must alias
  // the original referenced int at runtime.
  test_atomic_ref_copy_ctor<cuda::std::atomic_ref<int>, int>();
  test_atomic_ref_copy_ctor<cuda::atomic_ref<int>, int>();
  test_atomic_ref_copy_ctor<const cuda::std::atomic_ref<int>, int>();
  test_atomic_ref_copy_ctor<const cuda::atomic_ref<int>, int>();

  // Same aliasing requirement for move construction.
  test_atomic_ref_move_ctor<cuda::std::atomic_ref<int>, int>();
  test_atomic_ref_move_ctor<cuda::atomic_ref<int>, int>();
  test_atomic_ref_move_ctor<const cuda::std::atomic_ref<int>, int>();
  test_atomic_ref_move_ctor<const cuda::atomic_ref<int>, int>();

  // Kept disabled, matching the commented-out <cuda/std/thread> and
  // <cuda/std/chrono> includes above — presumably pending their support.
  // test(cuda::std::this_thread::get_id());
  // test(cuda::std::chrono::nanoseconds(2));

  return 0;
}
29 changes: 22 additions & 7 deletions include/cuda/std/detail/libcxx/include/atomic
Original file line number Diff line number Diff line change
Expand Up @@ -1245,6 +1245,12 @@ struct __atomic_base {
mutable __cxx_atomic_impl<_Tp, _Sco> __a_;

__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
Copy link
Collaborator

@jrhemstad jrhemstad Oct 28, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What about copy assignment? We should probably just rule of 0 it here and be explicit about the move ctor/assignment too?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch, seems like we've also lost that at some point (probably also in the refactor?).

@wmaxey seems like upstream deletes assignments directly in atomic and we don't, but I guess doing it in the base class is fine too?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

My thought process was: __atomic_base implements the behavior that atomic should inherit. This way if we 'rule of zero' atomic we should get the correct behavior.

We just don't have tests enforcing that bit of the standard so it was missed. 👎

__atomic_base(__atomic_base&&) = delete;

__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(__atomic_base&&) = delete;

_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
__atomic_base(const _Tp& __a) _NOEXCEPT : __a_(__a) {}

Expand Down Expand Up @@ -1364,14 +1370,19 @@ struct __atomic_base_ref {
mutable __cxx_atomic_ref_impl<_Tp, _Sco> __a_;

__atomic_base_ref() = default;
__atomic_base_ref(const __atomic_base_ref&) = default;
__atomic_base_ref(__atomic_base_ref&&) = default;

__atomic_base_ref& operator=(const __atomic_base_ref&) = delete;
__atomic_base_ref& operator=(__atomic_base_ref&&) = delete;

_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
__atomic_base_ref(_Tp& __a) _NOEXCEPT : __a_(__a) {}

#if defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE)
static _LIBCUDACXX_CONSTEXPR bool is_always_lock_free = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(decltype(__a_)), 0);
#endif // defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE)


_LIBCUDACXX_INLINE_VISIBILITY
bool is_lock_free() const volatile _NOEXCEPT
{return __cxx_atomic_is_lock_free(sizeof(_Tp));}
Expand Down Expand Up @@ -1483,6 +1494,11 @@ struct __atomic_base_ref {
template <class _Tp, int _Sco>
struct __atomic_base<_Tp, _Sco, true> : public __atomic_base<_Tp, _Sco, false> {
__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base(__atomic_base&&) = delete;

__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(__atomic_base&&) = delete;

_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
__atomic_base(const _Tp& __a) _NOEXCEPT : __atomic_base<_Tp, _Sco, false>(__a) {}
Expand Down Expand Up @@ -1559,6 +1575,11 @@ struct __atomic_base<_Tp, _Sco, true> : public __atomic_base<_Tp, _Sco, false> {
template <class _Tp, int _Sco>
struct __atomic_base_ref<_Tp, _Sco, true> : public __atomic_base_ref<_Tp, _Sco, false> {
__atomic_base_ref() = default;
__atomic_base_ref(const __atomic_base_ref&) = default;
__atomic_base_ref(__atomic_base_ref&&) = default;

__atomic_base_ref& operator=(const __atomic_base_ref&) = delete;
__atomic_base_ref& operator=(__atomic_base_ref&&) = delete;

_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
__atomic_base_ref(_Tp& __a) _NOEXCEPT : __atomic_base_ref<_Tp, _Sco, false>(__a) {}
Expand Down Expand Up @@ -1744,9 +1765,6 @@ template <class _Tp>
_LIBCUDACXX_INLINE_VISIBILITY
explicit atomic_ref(_Tp& __ref) : __base(__ref) {}

atomic_ref(const atomic_ref&) noexcept = default;
atomic_ref& operator=(const atomic_ref&) = delete;

_LIBCUDACXX_INLINE_VISIBILITY
_Tp operator=(_Tp __v) const noexcept {__base::store(__v); return __v;}
_LIBCUDACXX_INLINE_VISIBILITY
Expand All @@ -1769,9 +1787,6 @@ template <class _Tp>
_LIBCUDACXX_INLINE_VISIBILITY
explicit atomic_ref(_Tp*& __ref) : __base(__ref) {}

atomic_ref(const atomic_ref&) noexcept = default;
atomic_ref& operator=(const atomic_ref&) = delete;

_LIBCUDACXX_INLINE_VISIBILITY
_Tp* operator=(_Tp* __v) const noexcept {__base::store(__v); return __v;}
_LIBCUDACXX_INLINE_VISIBILITY
Expand Down