8236778: Add Atomic::fetch_and_add
Reviewed-by: kbarrett, dholmes
stefank committed Jan 24, 2020
1 parent 5013cf6 commit 17106c9
Showing 32 changed files with 151 additions and 145 deletions.
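The point of the new primitive: Atomic::fetch_and_add returns the value the destination held before the addition, so call sites that previously had to compute Atomic::add(&x, n) - n to recover their claimed index can now ask for it directly. A minimal standalone sketch of the two idioms, using std::atomic as a stand-in for HotSpot's Atomic class (illustration only, not code from this commit):

#include <atomic>
#include <cstdio>

std::atomic<unsigned> counter{0};

// Old idiom: an add-and-fetch style update followed by subtracting the
// operand again to recover the pre-add value (the claimed index).
unsigned claim_old_style() {
  unsigned new_value = counter.fetch_add(1u) + 1u;  // value after the add
  return new_value - 1u;                            // value before the add
}

// New idiom: fetch-and-add returns the pre-add value directly, which is
// what Atomic::fetch_and_add(&_counter, 1u) provides after this commit.
unsigned claim_new_style() {
  return counter.fetch_add(1u);
}

int main() {
  unsigned first  = claim_old_style();   // claims index 0
  unsigned second = claim_new_style();   // claims index 1
  std::printf("%u %u\n", first, second);
  return 0;
}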
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp
@@ -93,11 +93,14 @@ inline void post_membar(atomic_memory_order order) {
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
@@ -160,11 +160,14 @@ static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
 #endif // ARM
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
@@ -33,15 +33,18 @@
 // See https://patchwork.kernel.org/patch/3575821/
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
   }
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<size_t byte_size>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp
@@ -67,11 +67,14 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
 // For ARMv7 we add explicit barriers in the stubs.
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp
@@ -93,11 +93,14 @@ inline void post_membar(atomic_memory_order order) {
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
@@ -75,11 +75,14 @@ inline void z196_fast_sync() {
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
@@ -31,11 +31,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
7 changes: 6 additions & 1 deletion src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp
@@ -31,7 +31,7 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
-  inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  inline D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
@@ -41,6 +41,11 @@ struct Atomic::PlatformAdd {
     }
     return old_value + add_value;
   }
+
+  template<typename D, typename I>
+  inline D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp
@@ -41,11 +41,14 @@ extern "C" {
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 // Not using add_using_helper; see comment for cmpxchg.
9 changes: 6 additions & 3 deletions src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
@@ -54,11 +54,14 @@ template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fe
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 #ifdef AMD64
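All of the platform headers above now follow the same shape: each defines whichever flavor maps onto its native primitive (fetch_and_add on x86, add_and_fetch on the others) and derives the remaining flavor by adjusting the result with add_value, instead of inheriting that glue from the removed Atomic::FetchAndAdd / Atomic::AddAndFetch CRTP bases. A rough standalone sketch of the pattern, using the GCC/Clang __atomic builtins rather than any real HotSpot platform code (names are illustrative only):

#include <cstddef>

struct PlatformAddSketch {
  // Primitive flavor: returns the value held *before* the addition.
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value) const {
    return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
  }

  // Derived flavor: returns the value *after* the addition.
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value) const {
    return fetch_and_add(dest, add_value) + add_value;
  }
};

int main() {
  volatile size_t top = 0;
  PlatformAddSketch platform_add;
  size_t before = platform_add.fetch_and_add(&top, size_t(8));  // 0, top is now 8
  size_t after  = platform_add.add_and_fetch(&top, size_t(8));  // 16, top is now 16
  return (before == 0 && after == 16) ? 0 : 1;
}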
6 changes: 3 additions & 3 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -209,7 +209,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
+  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
@@ -282,7 +282,7 @@ void G1CMRootMemRegions::reset() {
 
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
-  size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
+  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -310,7 +310,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() {
     return NULL;
   }
 
-  size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
+  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
   if (claimed_index < _num_root_regions) {
     return &_root_regions[claimed_index];
   }
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1HotCardCache.cpp
@@ -70,7 +70,7 @@ CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
+  size_t index = Atomic::fetch_and_add(&_hot_cache_idx, 1u);
   if (index == _hot_cache_size) {
     // Can use relaxed store because all racing threads are writing the same
     // value and there aren't any concurrent readers.
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
@@ -261,7 +261,7 @@ class G1PretouchTask : public AbstractGangTask {
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
+      char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
6 changes: 3 additions & 3 deletions src/hotspot/share/gc/g1/g1RemSet.cpp
@@ -180,7 +180,7 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
 
       bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
       if (marked_as_dirty) {
-        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
+        uint allocated = Atomic::fetch_and_add(&_cur_idx, 1u);
         _buffer[allocated] = region;
       }
     }
@@ -232,7 +232,7 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
 
     void work(uint worker_id) {
      while (_cur_dirty_regions < _regions->size()) {
-        uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
+        uint next = Atomic::fetch_and_add(&_cur_dirty_regions, _chunk_length);
        uint max = MIN2(next + _chunk_length, _regions->size());
 
        for (uint i = next; i < max; i++) {
@@ -429,7 +429,7 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
 
   uint claim_cards_to_scan(uint region, uint increment) {
     assert(region < _max_regions, "Tried to access invalid region %u", region);
-    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
+    return Atomic::fetch_and_add(&_card_table_scan_state[region], increment);
   }
 
   void add_dirty_region(uint const region) {
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -2452,7 +2452,7 @@ class TaskQueue : StackObj {
   }
 
   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
+    uint claimed = Atomic::fetch_and_add(&_counter, 1u);
     if (claimed < _insert_index) {
       reference = _backing_array[claimed];
       return true;
@@ -32,7 +32,7 @@ StringDedupQueue* StringDedupQueue::_queue = NULL;
 volatile size_t StringDedupQueue::_claimed_index = 0;
 
 size_t StringDedupQueue::claim() {
-  return Atomic::add(&_claimed_index, size_t(1)) - 1;
+  return Atomic::fetch_and_add(&_claimed_index, 1u);
 }
 
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
@@ -592,7 +592,7 @@ void StringDedupTable::finish_rehash(StringDedupTable* rehashed_table) {
 }
 
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-  return Atomic::add(&_claimed_index, partition_size) - partition_size;
+  return Atomic::fetch_and_add(&_claimed_index, partition_size);
 }
 
 void StringDedupTable::verify() {
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1362,7 +1362,7 @@ class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
 
     size_t max = _heap->num_regions();
     while (_index < max) {
-      size_t cur = Atomic::add(&_index, stride) - stride;
+      size_t cur = Atomic::fetch_and_add(&_index, stride);
       size_t start = cur;
       size_t end = MIN2(cur + stride, max);
       if (start >= max) break;
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp
@@ -484,7 +484,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
   ShenandoahNMethod** list = _array;
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
@@ -90,7 +90,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
 
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -522,7 +522,7 @@ class ShenandoahVerifierMarkedRegionTask : public AbstractGangTask {
                                   _options);
 
     while (true) {
-      size_t v = Atomic::add(&_claimed, 1u) - 1;
+      size_t v = Atomic::fetch_and_add(&_claimed, 1u);
       if (v < _heap->num_regions()) {
         ShenandoahHeapRegion* r = _heap->get_region(v);
         if (!r->is_humongous() && !r->is_trash()) {
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/z/zArray.inline.hpp
@@ -101,7 +101,7 @@ inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
   if (parallel) {
-    const size_t next = Atomic::add(&_next, 1u) - 1u;
+    const size_t next = Atomic::fetch_and_add(&_next, 1u);
     if (next < _array->size()) {
       *elem = _array->at(next);
       return true;
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/z/zMarkStackAllocator.cpp
@@ -110,7 +110,7 @@ uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
 
   // Increment top before end to make sure another
   // thread can't steal out newly expanded space.
-  addr = Atomic::add(&_top, size) - size;
+  addr = Atomic::fetch_and_add(&_top, size);
   Atomic::add(&_end, expand_size);
 
   return addr;
