
8293117: Add atomic bitset functions
Backport-of: 8a70664e5248cd6b9d63951729e93bf73eff004c
shipilev committed Dec 19, 2023
1 parent a032136 commit 3809b71
Showing 2 changed files with 304 additions and 1 deletion.
207 changes: 207 additions & 0 deletions src/hotspot/share/runtime/atomic.hpp
@@ -165,6 +165,82 @@ class Atomic : AllStatic {
inline static bool replace_if_null(D* volatile* dest, T* value,
atomic_memory_order order = memory_order_conservative);

// Bitwise logical operations (and, or, xor)
//
// Each function atomically combines the value in *dest with bits using the
// corresponding bitwise operation and stores the result in *dest. They
// return either the old value (fetch_then_BITOP) or the newly updated
// value (BITOP_then_fetch).
//
// Requirements:
// - T is an integral type
// - sizeof(T) == sizeof(int) || sizeof(T) == sizeof(void*)

// Performs atomic bitwise-and of *dest and bits, storing the result in
// *dest. Returns the prior value of *dest. That is, atomically performs
// this sequence of operations:
// { tmp = *dest; *dest &= bits; return tmp; }
template<typename T>
static T fetch_then_and(volatile T* dest, T bits,
atomic_memory_order order = memory_order_conservative) {
static_assert(std::is_integral<T>::value, "bitop with non-integral type");
return PlatformBitops<sizeof(T)>().fetch_then_and(dest, bits, order);
}

// Performs atomic bitwise-or of *dest and bits, storing the result in
// *dest. Returns the prior value of *dest. That is, atomically performs
// this sequence of operations:
// { tmp = *dest; *dest |= bits; return tmp; }
template<typename T>
static T fetch_then_or(volatile T* dest, T bits,
atomic_memory_order order = memory_order_conservative) {
static_assert(std::is_integral<T>::value, "bitop with non-integral type");
return PlatformBitops<sizeof(T)>().fetch_then_or(dest, bits, order);
}

// Performs atomic bitwise-xor of *dest and bits, storing the result in
// *dest. Returns the prior value of *dest. That is, atomically performs
// this sequence of operations:
// { tmp = *dest; *dest ^= bits; return tmp; }
template<typename T>
static T fetch_then_xor(volatile T* dest, T bits,
atomic_memory_order order = memory_order_conservative) {
static_assert(std::is_integral<T>::value, "bitop with non-integral type");
return PlatformBitops<sizeof(T)>().fetch_then_xor(dest, bits, order);
}

// Performs atomic bitwise-and of *dest and bits, storing the result in
// *dest. Returns the new value of *dest. That is, atomically performs
// this operation:
// { return *dest &= bits; }
template<typename T>
static T and_then_fetch(volatile T* dest, T bits,
atomic_memory_order order = memory_order_conservative) {
static_assert(std::is_integral<T>::value, "bitop with non-integral type");
return PlatformBitops<sizeof(T)>().and_then_fetch(dest, bits, order);
}

// Performs atomic bitwise-or of *dest and bits, storing the result in
// *dest. Returns the new value of *dest. That is, atomically performs
// this operation:
// { return *dest |= bits; }
template<typename T>
static T or_then_fetch(volatile T* dest, T bits,
atomic_memory_order order = memory_order_conservative) {
static_assert(std::is_integral<T>::value, "bitop with non-integral type");
return PlatformBitops<sizeof(T)>().or_then_fetch(dest, bits, order);
}

// Performs atomic bitwise-xor of *dest and bits, storing the result in
// *dest. Returns the new value of *dest. That is, atomically performs
// this operation:
// { return *dest ^= bits; }
template<typename T>
static T xor_then_fetch(volatile T* dest, T bits,
atomic_memory_order order = memory_order_conservative) {
static_assert(std::is_integral<T>::value, "bitop with non-integral type");
return PlatformBitops<sizeof(T)>().xor_then_fetch(dest, bits, order);
}

private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
// Test whether From is implicitly convertible to To.
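For orientation, a minimal usage sketch of the new public operations; the flag word and bit mask here are illustrative, not part of this change:

#include "runtime/atomic.hpp"

static volatile uint32_t _flags = 0;
static const uint32_t STARTED_BIT = 1u << 0;

void mark_started() {
  // Atomically set the bit; the return value is the prior contents of *dest.
  uint32_t prior = Atomic::fetch_then_or(&_flags, STARTED_BIT);
  if ((prior & STARTED_BIT) == 0) {
    // This thread performed the 0 -> 1 transition.
  }
}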
@@ -373,6 +449,44 @@ WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
static T xchg_using_helper(Fn fn,
T volatile* dest,
T exchange_value);

// Platform-specific implementation of the bitops (and, or, xor). Support
// for sizes of 4 bytes and (if different) pointer-size bytes is required.
// The class is a function object that must be default-constructible, with
// these requirements:
//
// - T is an integral type.
// - dest is of type T*.
// - bits is of type T.
// - order is of type atomic_memory_order.
// - platform_bitops is an object of type PlatformBitops<sizeof(T)>.
//
// Then
// platform_bitops.fetch_then_and(dest, bits, order)
// platform_bitops.fetch_then_or(dest, bits, order)
// platform_bitops.fetch_then_xor(dest, bits, order)
// platform_bitops.and_then_fetch(dest, bits, order)
// platform_bitops.or_then_fetch(dest, bits, order)
// platform_bitops.xor_then_fetch(dest, bits, order)
// must all be valid expressions, returning a result convertible to T.
//
// A default definition is provided, which implements all of the operations
// using cmpxchg.
//
// For each required size, a platform must either use the default or
// entirely specialize the class for that size by providing all of the
// required operations.
//
// The second (bool) template parameter allows platforms to provide a
// partial specialization with a parameterized size, and is otherwise
// unused. The default value for that bool parameter means specializations
// don't need to mention it.
template<size_t size, bool = true> class PlatformBitops;

// Helper base classes that may be used to implement PlatformBitops.
class PrefetchBitopsUsingCmpxchg;
class PostfetchBitopsUsingCmpxchg;
class PostfetchBitopsUsingPrefetch;
};

template<typename From, typename To>
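To make the PlatformBitops contract concrete, here is a hypothetical full specialization for the 4-byte size, as a platform's atomic header might write it. The my_cpu_fetch_then_* primitives are stand-ins invented for illustration, not part of this change:

// Hypothetical native primitives a port might supply (assumptions):
template<typename T>
T my_cpu_fetch_then_and(T volatile* dest, T bits, atomic_memory_order order);
template<typename T>
T my_cpu_fetch_then_or(T volatile* dest, T bits, atomic_memory_order order);
template<typename T>
T my_cpu_fetch_then_xor(T volatile* dest, T bits, atomic_memory_order order);

template<>
class Atomic::PlatformBitops<4> : public PostfetchBitopsUsingPrefetch {
public:
  template<typename T>
  T fetch_then_and(T volatile* dest, T bits, atomic_memory_order order) const {
    return my_cpu_fetch_then_and(dest, bits, order);
  }
  template<typename T>
  T fetch_then_or(T volatile* dest, T bits, atomic_memory_order order) const {
    return my_cpu_fetch_then_or(dest, bits, order);
  }
  template<typename T>
  T fetch_then_xor(T volatile* dest, T bits, atomic_memory_order order) const {
    return my_cpu_fetch_then_xor(dest, bits, order);
  }
};

The three bitop_then_fetch forms come from the PostfetchBitopsUsingPrefetch helper base, which derives each new value from the corresponding fetch_then_bitop, so all six required operations are provided.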
@@ -582,6 +696,99 @@ struct Atomic::PlatformXchg {
atomic_memory_order order) const;
};

// Implement fetch_then_bitop operations using a CAS loop.
class Atomic::PrefetchBitopsUsingCmpxchg {
template<typename T, typename Op>
T bitop(T volatile* dest, atomic_memory_order order, Op operation) const {
T old_value;
T new_value;
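// Classic CAS loop: read the current value, compute a candidate new value,
// and let cmpxchg both attempt the update and return what it actually
// found; repeat until no concurrent update intervened.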
T fetched_value = Atomic::load(dest);
do {
old_value = fetched_value;
new_value = operation(old_value);
fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order);
} while (old_value != fetched_value);
return fetched_value;
}

public:
template<typename T>
T fetch_then_and(T volatile* dest, T bits, atomic_memory_order order) const {
return bitop(dest, order, [&](T value) -> T { return value & bits; });
}

template<typename T>
T fetch_then_or(T volatile* dest, T bits, atomic_memory_order order) const {
return bitop(dest, order, [&](T value) -> T { return value | bits; });
}

template<typename T>
T fetch_then_xor(T volatile* dest, T bits, atomic_memory_order order) const {
return bitop(dest, order, [&](T value) -> T { return value ^ bits; });
}
};

// Implement bitop_then_fetch operations using a CAS loop.
class Atomic::PostfetchBitopsUsingCmpxchg {
template<typename T, typename Op>
T bitop(T volatile* dest, atomic_memory_order order, Op operation) const {
T old_value;
T new_value;
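// Same CAS loop as PrefetchBitopsUsingCmpxchg::bitop, except that it
// returns the newly stored value instead of the prior one.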
T fetched_value = Atomic::load(dest);
do {
old_value = fetched_value;
new_value = operation(old_value);
fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order);
} while (old_value != fetched_value);
return new_value;
}

public:
template<typename T>
T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
return bitop(dest, order, [&](T value) -> T { return value & bits; });
}

template<typename T>
T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
return bitop(dest, order, [&](T value) -> T { return value | bits; });
}

template<typename T>
T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
return bitop(dest, order, [&](T value) -> T { return value ^ bits; });
}
};

// Implement bitop_then_fetch operations by calling fetch_then_bitop and
// applying the operation to the result and the bits argument. This works
// because all three operations are commutative: bits OP old equals
// old OP bits, which is exactly the new value fetch_then_bitop stored.
class Atomic::PostfetchBitopsUsingPrefetch {
public:
template<typename T>
T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
return bits & Atomic::fetch_then_and(dest, bits, order);
}

template<typename T>
T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
return bits | Atomic::fetch_then_or(dest, bits, order);
}

template<typename T>
T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const {
return bits ^ Atomic::fetch_then_xor(dest, bits, order);
}
};

// The default definition uses cmpxchg. Platforms can override by defining a
// partial specialization providing size, either as a template parameter or as
// a specific value.
template<size_t size, bool>
class Atomic::PlatformBitops
: public PrefetchBitopsUsingCmpxchg,
public PostfetchBitopsUsingCmpxchg
{};
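
As a sketch of the parameterized-size form that the bool parameter enables (hypothetical; a real port would normally pair this with native primitives):

// Hypothetical platform override covering every size with one definition,
// swapping in the prefetch-derived postfetch helpers.
template<size_t byte_size>
class Atomic::PlatformBitops<byte_size, true>
  : public PrefetchBitopsUsingCmpxchg,
    public PostfetchBitopsUsingPrefetch
{};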

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
public:
98 changes: 97 additions & 1 deletion test/hotspot/gtest/runtime/test_atomic.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -199,3 +199,99 @@ TEST(AtomicEnumTest, scoped_enum) {
Support().test_cmpxchg(B, C);
Support().test_xchg(B, C);
}

template<typename T>
struct AtomicBitopsTestSupport {
volatile T _test_value;

// At least one byte differs between _old_value and _old_value op _change_value.
static const T _old_value = static_cast<T>(UCONST64(0x7f5300007f530000));
static const T _change_value = static_cast<T>(UCONST64(0x3800530038005300));

AtomicBitopsTestSupport() : _test_value(0) {}

void fetch_then_and() {
Atomic::store(&_test_value, _old_value);
T expected = _old_value & _change_value;
T result = Atomic::fetch_then_and(&_test_value, _change_value);
EXPECT_EQ(_old_value, result);
EXPECT_EQ(expected, Atomic::load(&_test_value));
}

void fetch_then_or() {
Atomic::store(&_test_value, _old_value);
T expected = _old_value | _change_value;
T result = Atomic::fetch_then_or(&_test_value, _change_value);
EXPECT_EQ(_old_value, result);
EXPECT_EQ(expected, Atomic::load(&_test_value));
}

void fetch_then_xor() {
Atomic::store(&_test_value, _old_value);
T expected = _old_value ^ _change_value;
T result = Atomic::fetch_then_xor(&_test_value, _change_value);
EXPECT_EQ(_old_value, result);
EXPECT_EQ(expected, Atomic::load(&_test_value));
}

void and_then_fetch() {
Atomic::store(&_test_value, _old_value);
T expected = _old_value & _change_value;
T result = Atomic::and_then_fetch(&_test_value, _change_value);
EXPECT_EQ(expected, result);
EXPECT_EQ(expected, Atomic::load(&_test_value));
}

void or_then_fetch() {
Atomic::store(&_test_value, _old_value);
T expected = _old_value | _change_value;
T result = Atomic::or_then_fetch(&_test_value, _change_value);
EXPECT_EQ(expected, result);
EXPECT_EQ(expected, Atomic::load(&_test_value));
}

void xor_then_fetch() {
Atomic::store(&_test_value, _old_value);
T expected = _old_value ^ _change_value;
T result = Atomic::xor_then_fetch(&_test_value, _change_value);
EXPECT_EQ(expected, result);
EXPECT_EQ(expected, Atomic::load(&_test_value));
}

#define TEST_BITOP(name) { SCOPED_TRACE(XSTR(name)); name(); }

void operator()() {
TEST_BITOP(fetch_then_and)
TEST_BITOP(fetch_then_or)
TEST_BITOP(fetch_then_xor)
TEST_BITOP(and_then_fetch)
TEST_BITOP(or_then_fetch)
TEST_BITOP(xor_then_fetch)
}

#undef TEST_BITOP
};

template<typename T>
const T AtomicBitopsTestSupport<T>::_old_value;

template<typename T>
const T AtomicBitopsTestSupport<T>::_change_value;
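
As a cross-check of the test constants, these compile-time assertions (not part of the change) reproduce the expected values for the 64-bit case; the 32-bit case uses the low half of each constant:

#include <cstdint>

static_assert((UINT64_C(0x7f5300007f530000) & UINT64_C(0x3800530038005300))
              == UINT64_C(0x3800000038000000), "and");
static_assert((UINT64_C(0x7f5300007f530000) | UINT64_C(0x3800530038005300))
              == UINT64_C(0x7f5353007f535300), "or");
static_assert((UINT64_C(0x7f5300007f530000) ^ UINT64_C(0x3800530038005300))
              == UINT64_C(0x4753530047535300), "xor");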

TEST(AtomicBitopsTest, int32) {
AtomicBitopsTestSupport<int32_t>()();
}

TEST(AtomicBitopsTest, uint32) {
AtomicBitopsTestSupport<uint32_t>()();
}

#ifdef _LP64
TEST(AtomicBitopsTest, int64) {
AtomicBitopsTestSupport<int64_t>()();
}

TEST(AtomicBitopsTest, uint64) {
AtomicBitopsTestSupport<uint64_t>()();
}
#endif // _LP64
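
In a JDK build tree these gtests can typically be selected by name, e.g. make test TEST=gtest:AtomicBitops (exact selection syntax varies across JDK versions).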
