8319773: Avoid inflating monitors when installing hash codes for LM_LIGHTWEIGHT

Reviewed-by: shade
Backport-of: 65a0672791f868556776fc435b37319ed69f7c84
rkennke committed Apr 9, 2024
1 parent 879f3e3 commit 11a3cf9
Showing 4 changed files with 53 additions and 47 deletions.
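
The substantive change lives in ObjectSynchronizer::FastHashCode (third file below): under LM_LIGHTWEIGHT the identity-hash bits stay in the mark word whether the object is unlocked or fast-locked, so the hash can be installed with a plain CAS on the mark and retried on collision, instead of inflating an ObjectMonitor just to have somewhere to keep the hash. The snippet below is a minimal standalone sketch of that retry loop, not HotSpot code: the bit-layout constants, the mark word modelled as a bare std::atomic<uint64_t>, and next_hash() are simplified assumptions.

```cpp
#include <atomic>
#include <cstdint>
#include <random>

namespace sketch {

// Assumed (simplified) mark word layout: lock state in the low two bits,
// a 31-bit identity hash at bit 8. The real markWord layout differs in detail.
constexpr uint64_t lock_mask      = 0b11;
constexpr uint64_t unlocked_value = 0b01;   // neutral object
constexpr uint64_t fast_locked    = 0b00;   // LM_LIGHTWEIGHT fast-locked
constexpr uint64_t monitor_value  = 0b10;   // mark points to an ObjectMonitor
constexpr int      hash_shift     = 8;
constexpr uint64_t hash_mask      = (uint64_t(1) << 31) - 1;

inline uint64_t hash_of(uint64_t mark) { return (mark >> hash_shift) & hash_mask; }

inline uint64_t with_hash(uint64_t mark, uint64_t hash) {
  return mark | (hash << hash_shift);       // only used when the hash bits are 0
}

uint64_t next_hash() {                      // stand-in for get_next_hash(); illustration only
  static std::mt19937_64 rng{42};
  const uint64_t h = rng() & hash_mask;
  return h == 0 ? 1 : h;                    // 0 means "no hash installed yet"
}

// Install (or read) the identity hash without inflating, as long as the mark
// is neutral or fast-locked. Returns 0 for a monitor-carrying mark, where the
// real FastHashCode falls through to the inflation path.
uint64_t fast_hash(std::atomic<uint64_t>& mark_word) {
  uint64_t mark = mark_word.load(std::memory_order_acquire);
  while ((mark & lock_mask) == unlocked_value ||
         (mark & lock_mask) == fast_locked) {
    const uint64_t existing = hash_of(mark);
    if (existing != 0) {
      return existing;                      // another thread already installed it
    }
    const uint64_t h = next_hash();
    // The CAS can fail because the object was locked, unlocked, inflated, or
    // got its hash installed concurrently; re-examine the mark and retry.
    if (mark_word.compare_exchange_strong(mark, with_hash(mark, h))) {
      return h;
    }
  }
  return 0;  // monitor installed: slow path in HotSpot
}

}  // namespace sketch

int main() {
  std::atomic<uint64_t> mark{sketch::unlocked_value};
  const uint64_t first  = sketch::fast_hash(mark);   // installs a hash via CAS
  const uint64_t second = sketch::fast_hash(mark);   // must observe the same hash
  return (first != 0 && first == second) ? 0 : 1;
}
```
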
14 changes: 11 additions & 3 deletions src/hotspot/cpu/x86/sharedRuntime_x86.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
@@ -58,9 +59,16 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas

__ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

// check if locked
__ testptr(result, markWord::unlocked_value);
__ jcc(Assembler::zero, slowCase);

if (LockingMode == LM_LIGHTWEIGHT) {
// check if monitor
__ testptr(result, markWord::monitor_value);
__ jcc(Assembler::notZero, slowCase);
} else {
// check if locked
__ testptr(result, markWord::unlocked_value);
__ jcc(Assembler::zero, slowCase);
}

if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
18 changes: 13 additions & 5 deletions src/hotspot/share/opto/library_call.cpp
@@ -3843,14 +3843,22 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
Node* no_ctrl = nullptr;
Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);

// Test the header to see if it is unlocked.
// Test the header to see if it is safe to read w.r.t. locking.
Node *lock_mask = _gvn.MakeConX(markWord::biased_lock_mask_in_place);
Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
if (LockingMode == LM_LIGHTWEIGHT) {
Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));

generate_slow_guard(test_unlocked, slow_region);
generate_slow_guard(test_monitor, slow_region);
} else {
Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));

generate_slow_guard(test_not_unlocked, slow_region);
}

// Get the hash value and check to see that it has been properly assigned.
// We depend on hash_mask being at most 32 bits and avoid the use of
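
The two guards above (the x86 stub in sharedRuntime_x86.cpp and the C2 intrinsic in library_call.cpp) encode the same decision: under LM_LIGHTWEIGHT the hash-code fast path only bails out when the mark word already points to an ObjectMonitor, while the other locking modes keep bailing out for anything that is not plainly unlocked. A hedged restatement of that predicate, with the lock-bit values written out as commonly documented for markWord (simplified; not the HotSpot API):

```cpp
#include <cstdint>

// Assumed low-bit encodings of the mark word (the mask covers the two lock
// bits plus the biased-lock bit); simplified for illustration.
constexpr uint64_t lock_mask_in_place = 0b111;
constexpr uint64_t unlocked_value     = 0b001;
constexpr uint64_t monitor_value      = 0b010;

enum class Mode { LM_LEGACY, LM_LIGHTWEIGHT };

// True when the identity hash can be read straight from the mark word, i.e.
// when the fast path does not need to jump to slowCase / slow_region.
bool hash_readable_from_mark(uint64_t mark, Mode mode) {
  const uint64_t bits = mark & lock_mask_in_place;
  if (mode == Mode::LM_LIGHTWEIGHT) {
    // Unlocked (001) and fast-locked (000) marks still carry the hash bits;
    // only a mark that points to an ObjectMonitor (010) hides them.
    return bits != monitor_value;
  }
  // Stack locking: the real header is displaced onto the stack, so only a
  // plainly unlocked mark is safe to read.
  return bits == unlocked_value;
}

int main() {
  const uint64_t fast_locked_mark = 0b000;  // lock bits 00 under fast locking
  const bool ok = hash_readable_from_mark(fast_locked_mark, Mode::LM_LIGHTWEIGHT) &&
                  !hash_readable_from_mark(fast_locked_mark, Mode::LM_LEGACY);
  return ok ? 0 : 1;
}
```
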
61 changes: 26 additions & 35 deletions src/hotspot/share/runtime/synchronizer.cpp
@@ -462,16 +462,18 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
LockStack& lock_stack = current->lock_stack();
if (lock_stack.can_push()) {
markWord mark = obj()->mark_acquire();
if (mark.is_neutral()) {
assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
while (mark.is_neutral()) {
// Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
// Try to swing into 'fast-locked' state.
markWord locked_mark = mark.set_fast_locked();
markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
const markWord locked_mark = mark.set_fast_locked();
const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
if (old_mark == mark) {
// Successfully fast-locked, push object to lock-stack and return.
lock_stack.push(obj());
return;
}
mark = old_mark;
}
}
// All other paths fall-through to inflate-enter.
@@ -521,23 +523,15 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current)
markWord mark = object->mark();
if (LockingMode == LM_LIGHTWEIGHT) {
// Fast-locking does not use the 'lock' argument.
if (mark.is_fast_locked()) {
markWord unlocked_mark = mark.set_unlocked();
markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
if (old_mark != mark) {
// Another thread won the CAS, it must have inflated the monitor.
// It can only have installed an anonymously locked monitor at this point.
// Fetch that monitor, set owner correctly to this thread, and
// exit it (allowing waiting threads to enter).
assert(old_mark.has_monitor(), "must have monitor");
ObjectMonitor* monitor = old_mark.monitor();
assert(monitor->is_owner_anonymous(), "must be anonymous owner");
monitor->set_owner_from_anonymous(current);
monitor->exit(current);
while (mark.is_fast_locked()) {
// Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
const markWord unlocked_mark = mark.set_unlocked();
const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
if (old_mark == mark) {
current->lock_stack().remove(object);
return;
}
LockStack& lock_stack = current->lock_stack();
lock_stack.remove(object);
return;
mark = old_mark;
}
} else if (LockingMode == LM_LEGACY) {
markWord dhw = lock->displaced_header();
@@ -927,13 +921,6 @@ static inline intptr_t get_next_hash(Thread* current, oop obj) {
return value;
}

// Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
// calculations as part of JVM/TI tagging.
static bool is_lock_owned(Thread* thread, oop obj) {
assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
return thread->is_Java_thread() ? reinterpret_cast<JavaThread*>(thread)->lock_stack().contains(obj) : false;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
if (UseBiasedLocking) {
// NOTE: many places throughout the JVM do not expect a safepoint
@@ -964,7 +951,7 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
// object should remain ineligible for biased locking
assert(!mark.has_bias_pattern(), "invariant");

if (mark.is_neutral()) { // if this is a normal header
if (mark.is_neutral() || (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked())) {
hash = mark.hash();
if (hash != 0) { // if it has a hash, just return it
return hash;
@@ -976,6 +963,10 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
if (test == mark) { // if the hash was installed, return it
return hash;
}
if (LockingMode == LM_LIGHTWEIGHT) {
// CAS failed, retry
continue;
}
// Failed to install the hash. It could be that another thread
// installed the hash just before our attempt or inflation has
// occurred or... so we fall thru to inflate the monitor for
@@ -1007,13 +998,6 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
}
// Fall thru so we only have one place that installs the hash in
// the ObjectMonitor.
} else if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked() && is_lock_owned(current, obj)) {
// This is a fast lock owned by the calling thread so use the
// markWord from the object.
hash = mark.hash();
if (hash != 0) { // if it has a hash, just return it
return hash;
}
} else if (LockingMode == LM_LEGACY && mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// This is a stack lock owned by the calling thread so fetch the
// displaced markWord from the BasicLock on the stack.
@@ -1340,6 +1324,13 @@ void ObjectSynchronizer::inflate_helper(oop obj) {
(void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

// Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
// calculations as part of JVM/TI tagging.
static bool is_lock_owned(Thread* thread, oop obj) {
assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
return thread->is_Java_thread() ? reinterpret_cast<JavaThread*>(thread)->lock_stack().contains(obj) : false;
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
const InflateCause cause) {
EventJavaMonitorInflate event;
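
The enter() and exit() hunks above are the other half of the trade-off: once installing a hash is just a CAS on the mark of a neutral or fast-locked object, the fast-lock and fast-unlock CASes can now fail even though the lock state did not change (only the non-lock bits did). Both paths therefore retry in a loop until they succeed or observe a genuine lock-state change, instead of treating a single failed CAS as proof that another thread locked or inflated the object. is_lock_owned() itself is unchanged; it is only moved further down in the file now that FastHashCode no longer calls it.
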
7 changes: 3 additions & 4 deletions
@@ -61,12 +61,11 @@ public static class InflateMonitorsTest {
static WhiteBox wb = WhiteBox.getWhiteBox();
public static Object obj;

public static void main(String args[]) {
public static void main(String args[]) throws Exception {
obj = new Object();
synchronized (obj) {
// HotSpot implementation detail: asking for the hash code
// when the object is locked causes monitor inflation.
if (obj.hashCode() == 0xBAD) System.out.println("!");
// The current implementation of notify-wait requires inflation.
obj.wait(1);
Asserts.assertEQ(wb.isMonitorInflated(obj), true,
"Monitor should be inflated.");
}
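
The test tweak follows directly from the main change: asking for the hash code while the object is locked no longer inflates the monitor under LM_LIGHTWEIGHT, so the test switches to a timed Object.wait(), which still requires an inflated monitor in the current implementation (hence the added throws Exception on main for the InterruptedException).
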
