Skip to content
Permalink
Browse files

8233073: Make BitMap accessors more memory ordering friendly

Reviewed-by: kbarrett, pliden
  • Loading branch information
fisk committed Nov 7, 2019
1 parent afd91fd commit f07d377d6f25242793c087094a500ac3b4bae476
@@ -29,6 +29,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "utilities/bitMap.inline.hpp"


// Implementation of Instruction
@@ -43,6 +43,7 @@
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
@@ -41,6 +41,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"

// Static array so we can figure out which bytecodes stop us from compiling
@@ -26,6 +26,7 @@
#define SHARE_UTILITIES_BITMAP_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"

// Forward declaration.
@@ -105,6 +106,8 @@ class BitMap {
// Sets every bit in the given bitmap word (delegates to the masked
// overload with an all-ones mask).
void set_word (idx_t word) { set_word(word, ~(bm_word_t)0); }
// Clears every bit in the given bitmap word with a plain (non-atomic) store.
void clear_word(idx_t word) { _map[word] = 0; }

static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);

// Utilities for ranges of bits. Ranges are half-open [beg, end).

// Ranges within a single word.
@@ -204,6 +207,9 @@ class BitMap {
return (*word_addr(index) & bit_mask(index)) != 0;
}

// memory_order must be memory_order_relaxed or memory_order_acquire.
bool par_at(idx_t index, atomic_memory_order memory_order = memory_order_acquire) const;

// Align bit index up or down to the next bitmap word boundary, or check
// alignment.
static idx_t word_align_up(idx_t bit) {
@@ -220,9 +226,14 @@ class BitMap {
inline void set_bit(idx_t bit);
inline void clear_bit(idx_t bit);

// Atomically set or clear the specified bit.
inline bool par_set_bit(idx_t bit);
inline bool par_clear_bit(idx_t bit);
// Attempts to change a bit to a desired value. The operation returns true if
// this thread changed the value of the bit, using an RMW operation with the
// specified memory_order. It returns false if the change could not be made
// because the bit was already observed in the desired state.
// The atomic access that observed the bit in the desired state has acquire
// semantics, unless memory_order is memory_order_relaxed or memory_order_release.
inline bool par_set_bit(idx_t bit, atomic_memory_order memory_order = memory_order_conservative);
inline bool par_clear_bit(idx_t bit, atomic_memory_order memory_order = memory_order_conservative);

// Put the given value at the given offset. The parallel version
// will CAS the value into the bitmap and is quite a bit slower.
@@ -26,6 +26,7 @@
#define SHARE_UTILITIES_BITMAP_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/count_trailing_zeros.hpp"

@@ -39,37 +40,58 @@ inline void BitMap::clear_bit(idx_t bit) {
*word_addr(bit) &= ~bit_mask(bit);
}

inline bool BitMap::par_set_bit(idx_t bit) {
// Loads the bitmap word at addr with the load-side ordering implied by
// memory_order. Relaxed and release orderings place no constraint on the
// load itself, so a plain atomic load suffices; every other accepted
// ordering (acquire, acq_rel, conservative) is satisfied on the load side
// by a load-acquire.
// NOTE(review): the top-level const on the by-value return type has no
// effect for callers — presumably unintentional; if removed, the in-class
// declaration must be changed to match.
inline const BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
  if (memory_order == memory_order_relaxed || memory_order == memory_order_release) {
    return Atomic::load(addr);
  } else {
    // Reject orderings this helper does not implement.
    assert(memory_order == memory_order_acq_rel ||
           memory_order == memory_order_acquire ||
           memory_order == memory_order_conservative,
           "unexpected memory ordering");
    return OrderAccess::load_acquire(addr);
  }
}

// Reads the bit at index via an atomic load of its containing word.
// Only relaxed and acquire orderings are meaningful for a pure read,
// and only those are accepted.
inline bool BitMap::par_at(idx_t index, atomic_memory_order memory_order) const {
  assert(memory_order == memory_order_relaxed ||
         memory_order == memory_order_acquire,
         "unexpected memory ordering");
  verify_index(index);
  const bm_word_t word = load_word_ordered(word_addr(index), memory_order);
  return (word & bit_mask(index)) != 0;
}

// Atomically sets the bit at the given index via a CAS loop.
// Returns true if this thread changed the bit from 0 to 1; the winning
// compare-and-swap uses the requested memory_order. Returns false if the
// bit was already observed set; per the header contract, that observation
// has acquire semantics unless memory_order is memory_order_relaxed or
// memory_order_release.
// Fix: dropped a stray duplicate declaration "bm_word_t old_val = *addr;"
// (diff residue from the pre-memory-order version) that redefined old_val.
inline bool BitMap::par_set_bit(idx_t bit, atomic_memory_order memory_order) {
  verify_index(bit);
  volatile bm_word_t* const addr = word_addr(bit);
  const bm_word_t mask = bit_mask(bit);
  // Seed the loop with a load ordered according to memory_order, so an
  // early "already set" return still observed the word with that ordering.
  bm_word_t old_val = load_word_ordered(addr, memory_order);

  do {
    const bm_word_t new_val = old_val | mask;
    if (new_val == old_val) {
      return false;     // Someone else beat us to it.
    }
    const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
    if (cur_val == old_val) {
      return true;      // Success.
    }
    old_val = cur_val;  // The value changed, try again.
  } while (true);
}

inline bool BitMap::par_clear_bit(idx_t bit) {
inline bool BitMap::par_clear_bit(idx_t bit, atomic_memory_order memory_order) {
verify_index(bit);
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask = ~bit_mask(bit);
bm_word_t old_val = *addr;
bm_word_t old_val = load_word_ordered(addr, memory_order);

do {
const bm_word_t new_val = old_val & mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
if (cur_val == old_val) {
return true; // Success.
}

0 comments on commit f07d377

Please sign in to comment.