Commit 24f4f14 (merge)

Daniel D. Daugherty committed Jul 15, 2020
2 parents 8cfc7e3 + f636b84

Showing 3 changed files with 91 additions and 16 deletions.
src/hotspot/share/runtime/objectMonitor.hpp (11 additions & 0 deletions)
@@ -27,6 +27,7 @@
 #include "memory/allocation.hpp"
 #include "memory/padded.hpp"
+#include "metaprogramming/isRegisteredEnum.hpp"
 #include "oops/markWord.hpp"
 #include "runtime/os.hpp"
 #include "runtime/park.hpp"
@@ -277,9 +278,14 @@ class ObjectMonitor {
   // _owner field. Returns the prior value of the _owner field.
   void* try_set_owner_from(void* old_value, void* new_value);

+  // Simply get _next_om field.
   ObjectMonitor* next_om() const;
+  // Get _next_om field with acquire semantics.
+  ObjectMonitor* next_om_acquire() const;
   // Simply set _next_om field to new_value.
   void set_next_om(ObjectMonitor* new_value);
+  // Set _next_om field to new_value with release semantics.
+  void release_set_next_om(ObjectMonitor* new_value);
   // Try to set _next_om field to new_value if the current value matches
   // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
   // _next_om field. Returns the prior value of the _next_om field.
@@ -330,8 +336,10 @@ class ObjectMonitor {
   void* object() const;
   void* object_addr();
   void set_object(void* obj);
+  void release_set_allocation_state(AllocationState s);
   void set_allocation_state(AllocationState s);
   AllocationState allocation_state() const;
+  AllocationState allocation_state_acquire() const;
   bool is_free() const;
   bool is_old() const;
   bool is_new() const;
@@ -374,6 +382,9 @@
   void install_displaced_markword_in_object(const oop obj);
 };

+// Register for atomic operations.
+template<> struct IsRegisteredEnum<ObjectMonitor::AllocationState> : public TrueType {};
+
 // Macro to use guarantee() for more strict AsyncDeflateIdleMonitors
 // checks and assert() otherwise.
 #define ADIM_guarantee(p, ...) \
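The pattern behind the new accessor pairs above is release-publish/acquire-consume. Here is a minimal sketch in standard C++ (std::atomic standing in for HotSpot's Atomic class, and Node as an illustrative stand-in for ObjectMonitor): the writer fills in plain fields and then publishes the link with a release store; a reader that acquire-loads the link is guaranteed to observe those earlier writes.

#include <atomic>
#include <cassert>
#include <thread>

// Illustrative stand-in for ObjectMonitor: 'payload' is a plain field,
// 'next' is the link published with release/acquire semantics.
struct Node {
  int payload = 0;
  std::atomic<Node*> next{nullptr};
};

int main() {
  Node head, fresh;
  std::thread writer([&] {
    fresh.payload = 42;                                  // initialize first
    head.next.store(&fresh, std::memory_order_release);  // then publish
  });
  std::thread reader([&] {
    Node* n = head.next.load(std::memory_order_acquire); // pairs with release
    if (n != nullptr) {
      assert(n->payload == 42);  // init happens-before this read
    }
  });
  writer.join();
  reader.join();
  return 0;
}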
src/hotspot/share/runtime/objectMonitor.inline.hpp (20 additions & 1 deletion)
@@ -191,6 +191,10 @@ inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value)
   return prev;
 }

+inline void ObjectMonitor::release_set_allocation_state(ObjectMonitor::AllocationState s) {
+  Atomic::release_store(&_allocation_state, s);
+}
+
 inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
   _allocation_state = s;
 }
@@ -199,12 +203,16 @@ inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
   return _allocation_state;
 }

+inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state_acquire() const {
+  return Atomic::load_acquire(&_allocation_state);
+}
+
 inline bool ObjectMonitor::is_free() const {
   return _allocation_state == Free;
 }

 inline bool ObjectMonitor::is_old() const {
-  return _allocation_state == Old;
+  return allocation_state_acquire() == Old;
 }

 inline bool ObjectMonitor::is_new() const {
@@ -215,15 +223,26 @@ inline bool ObjectMonitor::is_new() const {
 // use Atomic operations to disable compiler optimizations that
 // might try to elide loading and/or storing this field.

+// Simply get _next_om field.
 inline ObjectMonitor* ObjectMonitor::next_om() const {
   return Atomic::load(&_next_om);
 }

+// Get _next_om field with acquire semantics.
+inline ObjectMonitor* ObjectMonitor::next_om_acquire() const {
+  return Atomic::load_acquire(&_next_om);
+}
+
 // Simply set _next_om field to new_value.
 inline void ObjectMonitor::set_next_om(ObjectMonitor* new_value) {
   Atomic::store(&_next_om, new_value);
 }

+// Set _next_om field to new_value with release semantics.
+inline void ObjectMonitor::release_set_next_om(ObjectMonitor* new_value) {
+  Atomic::release_store(&_next_om, new_value);
+}
+
 // Try to set _next_om field to new_value if the current value matches
 // old_value. Otherwise, does not change the _next_om field. Returns
 // the prior value of the _next_om field.
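As the comment block above notes, even the "simple" accessors go through Atomic::load()/Atomic::store() so the compiler cannot elide or tear the access; the new acquire/release variants add ordering on top. A rough analogy in standard C++, with an illustrative Monitor type (not HotSpot's implementation):

#include <atomic>

// Illustrative analogy only: HotSpot's Atomic::load()/store() behave
// like relaxed atomics, while load_acquire()/release_store() add the
// ordering used by the new accessors.
struct Monitor {
  std::atomic<Monitor*> _next_om{nullptr};

  Monitor* next_om() const {                 // like Atomic::load()
    return _next_om.load(std::memory_order_relaxed);
  }
  Monitor* next_om_acquire() const {         // like Atomic::load_acquire()
    return _next_om.load(std::memory_order_acquire);
  }
  void set_next_om(Monitor* v) {             // like Atomic::store()
    _next_om.store(v, std::memory_order_relaxed);
  }
  void release_set_next_om(Monitor* v) {     // like Atomic::release_store()
    _next_om.store(v, std::memory_order_release);
  }
};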
src/hotspot/share/runtime/synchronizer.cpp (60 additions & 15 deletions)
@@ -174,7 +174,7 @@ static ObjectMonitorListGlobals om_list_globals;
 // Return true if the ObjectMonitor is locked.
 // Otherwise returns false.
 static bool is_locked(ObjectMonitor* om) {
-  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
+  return ((intptr_t)om->next_om_acquire() & OM_LOCK_BIT) == OM_LOCK_BIT;
 }

 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
@@ -215,18 +215,23 @@ static void om_unlock(ObjectMonitor* om) {
                  " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

   next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
-  om->set_next_om(next);
+  om->release_set_next_om(next);
 }

 // Get the list head after locking it. Returns the list head or NULL
 // if the list is empty.
 static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
   while (true) {
+    // Acquire semantics not needed on this list load since we're
+    // checking for NULL here or following up with a cmpxchg() via
+    // try_om_lock() below and we retry on cmpxchg() failure.
     ObjectMonitor* mid = Atomic::load(list_p);
     if (mid == NULL) {
       return NULL;  // The list is empty.
     }
     if (try_om_lock(mid)) {
+      // Acquire semantics not needed on this list load since memory is
+      // already consistent due to the cmpxchg() via try_om_lock() above.
       if (Atomic::load(list_p) != mid) {
         // The list head changed before we could lock it so we have to retry.
         om_unlock(mid);
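The locking protocol that om_unlock() and get_list_head_locked() implement keeps the lock in the low bit of the _next_om pointer: try_om_lock() cmpxchg()es OM_LOCK_BIT in, and the unlock now clears it with a release store. A minimal sketch of that bit-stealing protocol under illustrative names (the real code encodes the bit in an ObjectMonitor*):

#include <atomic>
#include <cstdint>

constexpr uintptr_t LOCK_BIT = 1;  // low bit is free due to alignment

struct Node {
  std::atomic<uintptr_t> next{0};  // encodes a Node* plus the lock bit
};

// Lock the node by setting the bit with a CAS; a successful CAS also
// synchronizes with the previous owner's release in unlock_node().
bool try_lock_node(Node* n) {
  uintptr_t cur = n->next.load(std::memory_order_relaxed);
  if (cur & LOCK_BIT) return false;  // somebody else holds it
  return n->next.compare_exchange_strong(cur, cur | LOCK_BIT,
                                         std::memory_order_acq_rel);
}

// Unlock with a release store so list edits made while holding the
// lock are visible to the next thread that locks (or reads) the node.
void unlock_node(Node* n) {
  uintptr_t cur = n->next.load(std::memory_order_relaxed);
  n->next.store(cur & ~LOCK_BIT, std::memory_order_release);
}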
@@ -249,12 +254,17 @@ static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                    int count, ObjectMonitor** list_p,
                                    int* count_p) {
   while (true) {
+    // Acquire semantics not needed on this list load since we're
+    // following up with a cmpxchg() via try_om_lock() below and we
+    // retry on cmpxchg() failure.
     ObjectMonitor* cur = Atomic::load(list_p);
     // Prepend list to *list_p.
     if (!try_om_lock(tail)) {
       // Failed to lock tail due to a list walker so try it all again.
       continue;
     }
+    // Release semantics not needed on this "unlock" since memory is
+    // already consistent due to the cmpxchg() via try_om_lock() above.
     tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
     if (cur == NULL) {
       // No potential race with takers or other prependers since
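In the cur == NULL arm of this loop, the publish is a cmpxchg() on the list head itself, which is why no extra release is needed. A compressed sketch of splicing a privately owned chain onto a shared head (illustrative names; the OM_LOCK_BIT handling is omitted for clarity):

#include <atomic>

struct Node { std::atomic<Node*> next{nullptr}; };

std::atomic<Node*> shared_head{nullptr};

// Prepend the privately owned chain list..tail onto shared_head. A
// successful release CAS publishes the whole chain at once; on failure
// another prepender won the race and we retry against the new head.
void prepend_chain(Node* list, Node* tail) {
  Node* cur = shared_head.load(std::memory_order_relaxed);
  do {
    tail->next.store(cur, std::memory_order_relaxed);  // tail -> old head
  } while (!shared_head.compare_exchange_weak(cur, list,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
}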
@@ -342,14 +352,19 @@ static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
     // Lock the list head to guard against races with a list walker
     // or async deflater thread (which only races in om_in_use_list):
     if ((cur = get_list_head_locked(list_p)) != NULL) {
-      // List head is now locked so we can safely switch it.
+      // List head is now locked so we can safely switch it. Release
+      // semantics not needed on this "unlock" since memory is already
+      // consistent due to the cmpxchg() via get_list_head_locked() above.
       m->set_next_om(cur);  // m now points to cur (and unlocks m)
+      OrderAccess::storestore();  // Make sure set_next_om() is seen first.
       Atomic::store(list_p, m);  // Switch list head to unlocked m.
       om_unlock(cur);
       break;
     }
     // The list is empty so try to set the list head.
     assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
+    // Release semantics not needed on this "unlock" since memory
+    // is already consistent.
     m->set_next_om(cur);  // m now points to NULL (and unlocks m)
     if (Atomic::cmpxchg(list_p, cur, m) == cur) {
       // List head is now unlocked m.
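The OrderAccess::storestore() added above is the interesting change: the head switch is a plain Atomic::store() rather than a cmpxchg(), so without a barrier the m->set_next_om(cur) write could become visible after the new head. A portable sketch of the same ordering (illustrative names; the real code also holds the list-head lock):

#include <atomic>

struct Node { std::atomic<Node*> next{nullptr}; };

std::atomic<Node*> list_head{nullptr};

void prepend(Node* m) {
  Node* cur = list_head.load(std::memory_order_relaxed);
  m->next.store(cur, std::memory_order_relaxed);        // link m -> cur
  std::atomic_thread_fence(std::memory_order_release);  // the storestore()
  list_head.store(m, std::memory_order_relaxed);        // publish new head
}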
@@ -384,7 +399,9 @@ static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
   }
   ObjectMonitor* next = unmarked_next(take);
   // Switch locked list head to next (which unlocks the list head, but
-  // leaves take locked):
+  // leaves take locked). Release semantics not needed on this "unlock"
+  // since memory is already consistent due to the cmpxchg() via
+  // get_list_head_locked() above.
   Atomic::store(list_p, next);
   Atomic::dec(count_p);
   // Unlock take, but leave the next value for any lagging list
@@ -1343,6 +1360,7 @@ void ObjectSynchronizer::oops_do(OopClosure* f) {

 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  // Acquire semantics not needed since we're at a safepoint.
   list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
 }
@@ -1408,6 +1426,9 @@ ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
     // 2: try to allocate from the global om_list_globals._free_list
     // If we're using thread-local free lists then try
     // to reprovision the caller's free list.
+    // Acquire semantics not needed on this list load since memory
+    // is already consistent due to the cmpxchg() via
+    // take_from_start_of_om_free_list() above.
     if (Atomic::load(&om_list_globals._free_list) != NULL) {
       // Reprovision the thread's om_free_list.
       // Use bulk transfers to reduce the allocation rate and heat
@@ -1536,7 +1557,9 @@ void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
       // First special case:
       // 'm' matches mid, is the list head and is locked. Switch the list
       // head to next which unlocks the list head, but leaves the extracted
-      // mid locked:
+      // mid locked. Release semantics not needed on this "unlock" since
+      // memory is already consistent due to the get_list_head_locked()
+      // above.
       Atomic::store(&self->om_in_use_list, next);
     } else if (m == next) {
       // Second special case:
@@ -1550,7 +1573,9 @@ void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
       // Update next to what follows mid (if anything):
       next = unmarked_next(mid);
       // Switch next after the list head to new next which unlocks the
-      // list head, but leaves the extracted mid locked:
+      // list head, but leaves the extracted mid locked. Release semantics
+      // not needed on this "unlock" since memory is already consistent
+      // due to the get_list_head_locked() above.
       self->om_in_use_list->set_next_om(next);
     } else {
       // We have to search the list to find 'm'.
@@ -1570,7 +1595,10 @@ void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
         // Update next to what follows mid (if anything):
         next = unmarked_next(mid);
         // Switch next after the anchor to new next which unlocks the
-        // anchor, but leaves the extracted mid locked:
+        // anchor, but leaves the extracted mid locked. Release semantics
+        // not needed on this "unlock" since memory is already consistent
+        // due to the om_unlock() above before entering the loop or the
+        // om_unlock() below before looping again.
         anchor->set_next_om(next);
         break;
       } else {
@@ -1676,6 +1704,7 @@ void ObjectSynchronizer::om_flush(Thread* self) {
     ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
                    "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
     Atomic::store(&self->om_in_use_count, 0);
+    OrderAccess::storestore();  // Make sure counter update is seen first.
     // Clear the in-use list head (which also unlocks it):
     Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
     om_unlock(in_use_list);
@@ -1719,6 +1748,7 @@ void ObjectSynchronizer::om_flush(Thread* self) {
     ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
                    "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
     Atomic::store(&self->om_free_count, 0);
+    OrderAccess::storestore();  // Make sure counter update is seen first.
     Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
     om_unlock(free_list);
   }
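Both om_flush() barriers follow the same recipe: zero the counter, then force that store to be visible before the list head is detached. A sketch of why this matters (illustrative globals; the observer needs acquire ordering on the head load for the guarantee to hold):

#include <atomic>

std::atomic<int>   om_free_count{42};
std::atomic<void*> om_free_list{reinterpret_cast<void*>(0x1)};

// Writer: zero the counter, fence, then detach the list head. The
// fence plays the role of OrderAccess::storestore().
void flush_free_list() {
  om_free_count.store(0, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_release);
  om_free_list.store(nullptr, std::memory_order_relaxed);
}

// Observer: anyone who acquire-loads the detached (NULL) head is
// guaranteed to also see the zeroed counter, keeping the count/list
// pair consistent for audit code.
bool sees_consistent_state() {
  if (om_free_list.load(std::memory_order_acquire) == nullptr) {
    return om_free_count.load(std::memory_order_relaxed) == 0;
  }
  return true;
}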
@@ -1903,12 +1933,14 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
       // Must preserve store ordering. The monitor state must
       // be stable at the time of publishing the monitor address.
       guarantee(object->mark() == markWord::INFLATING(), "invariant");
+      // Release semantics so that above set_object() is seen first.
       object->release_set_mark(markWord::encode(m));

       // Once ObjectMonitor is configured and the object is associated
       // with the ObjectMonitor, it is safe to allow async deflation:
       assert(m->is_new(), "freshly allocated monitor must be new");
-      m->set_allocation_state(ObjectMonitor::Old);
+      // Release semantics needed to keep allocation_state from floating up.
+      m->release_set_allocation_state(ObjectMonitor::Old);

       // Hopefully the performance counters are allocated on distinct cache lines
       // to avoid false sharing on MP systems ...
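Two publishes get release semantics here: release_set_mark() exposes the configured monitor through the object's mark word, and release_set_allocation_state() keeps the Old transition from floating above that setup, since the async deflater treats Old as permission to examine the monitor. A condensed sketch of the second publish (illustrative types, not HotSpot's):

#include <atomic>

enum class AllocationState { Free, New, Old };

struct Monitor {
  void* object = nullptr;  // plain field, set while the monitor is New
  std::atomic<AllocationState> state{AllocationState::New};
};

// Inflater: finish configuration, then allow async deflation.
void publish_as_old(Monitor* m, void* obj) {
  m->object = obj;
  m->state.store(AllocationState::Old, std::memory_order_release);
}

// Deflater: only a monitor observed as Old is examined further; the
// acquire pairs with the release so 'object' is seen initialized.
bool safe_to_examine(const Monitor* m) {
  return m->state.load(std::memory_order_acquire) == AllocationState::Old;
}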
@@ -1965,6 +1997,8 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
     // Once the ObjectMonitor is configured and object is associated
     // with the ObjectMonitor, it is safe to allow async deflation:
     assert(m->is_new(), "freshly allocated monitor must be new");
+    // Release semantics are not needed to keep allocation_state from
+    // floating up since cas_set_mark() takes care of it.
     m->set_allocation_state(ObjectMonitor::Old);

     // Hopefully the performance counters are allocated on distinct
@@ -2073,6 +2107,7 @@ bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
     }

     // Restore the header back to obj
+    // XXX - I have no rationale for this "release", but it's been here forever.
     obj->release_set_mark(dmw);
     if (AsyncDeflateIdleMonitors) {
       // clear() expects the owner field to be NULL.
@@ -2275,7 +2310,8 @@ int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
       // At this point mid is disconnected from the in-use list.
       deflated_count++;
       Atomic::dec(count_p);
-      // mid is current tail in the free_head_p list so NULL terminate it:
+      // mid is current tail in the free_head_p list so NULL terminate it.
+      // No release semantics needed since Atomic::dec() already provides it.
       mid->set_next_om(NULL);
     } else {
       cur_mid_in_use = mid;
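The "Atomic::dec() already provides it" note leans on HotSpot's default conservative ordering for read-modify-write atomics, which is fenced on both sides of the update, so the plain NULL store that follows is already ordered after the preceding list surgery. A rough portable approximation (an analogy under that assumption, not HotSpot's implementation):

#include <atomic>

std::atomic<int>   count{1};
std::atomic<void*> tail_next{reinterpret_cast<void*>(0x1)};

void null_terminate_tail() {
  count.fetch_sub(1, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // "conservative" fence
  tail_next.store(nullptr, std::memory_order_relaxed);  // no extra release needed
}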
@@ -2356,20 +2392,24 @@ int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
       if (cur_mid_in_use == NULL) {
         // mid is the list head and it is locked. Switch the list head
         // to next which is also locked (if not NULL) and also leave
-        // mid locked:
-        Atomic::store(list_p, next);
+        // mid locked. Release semantics needed since not all code paths
+        // in deflate_monitor_using_JT() ensure memory consistency.
+        Atomic::release_store(list_p, next);
       } else {
         ObjectMonitor* locked_next = mark_om_ptr(next);
         // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
-        // next field to locked_next and also leave mid locked:
-        cur_mid_in_use->set_next_om(locked_next);
+        // next field to locked_next and also leave mid locked.
+        // Release semantics needed since not all code paths in
+        // deflate_monitor_using_JT() ensure memory consistency.
+        cur_mid_in_use->release_set_next_om(locked_next);
       }
       // At this point mid is disconnected from the in-use list so
       // its lock no longer has any effects on the in-use list.
       deflated_count++;
       Atomic::dec(count_p);
-      // mid is current tail in the free_head_p list so NULL terminate it
-      // (which also unlocks it):
+      // mid is current tail in the free_head_p list so NULL terminate
+      // it (which also unlocks it). No release semantics needed since
+      // Atomic::dec() already provides it.
       mid->set_next_om(NULL);

       // All the list management is done so move on to the next one:
@@ -2397,6 +2437,9 @@ int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
       next = next_next;

       if (SafepointMechanism::should_block(self) &&
+          // Acquire semantics are not needed on this list load since
+          // it is not dependent on the following load which does have
+          // acquire semantics.
           cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
         // If a safepoint has started and cur_mid_in_use is not the list
         // head and is old, then it is safe to use as saved state. Return
@@ -2459,6 +2502,7 @@ void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters)

   // For moribund threads, scan om_list_globals._in_use_list
   int deflated_count = 0;
+  // Acquire semantics not needed since we are at a safepoint.
   if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
     // Update n_in_circulation before om_list_globals._in_use_count is
     // updated by deflation.
@@ -2544,6 +2588,7 @@ void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
   ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
   int count = Atomic::load(&om_list_globals._wait_count);
   Atomic::store(&om_list_globals._wait_count, 0);
+  OrderAccess::storestore();  // Make sure counter update is seen first.
   Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);

   // Find the tail for prepend_list_to_common(). No need to mark
