Skip to content
This repository was archived by the owner on Aug 16, 2023. It is now read-only.

Commit 3aa6c78

Browse files
[GR-8510] Fixes for PrintBiasedLockingStatistics on x86.
PullRequest: graal-jvmci-8/36
2 parents 334f029 + 9251b47 commit 3aa6c78

File tree

7 files changed

+42
-2
lines changed

7 files changed

+42
-2
lines changed

make/linux/makefiles/gcc.make

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,6 +264,7 @@ else
264264
ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1)
265265
OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT)
266266
endif
267+
OPT_CFLAGS/jvmciCompilerToVM.o += -fno-var-tracking-assignments
267268
endif
268269

269270
# Flags for generating make dependency flags.

src/cpu/x86/vm/c1_MacroAssembler_x86.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,10 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
4545

4646
verify_oop(obj);
4747

48+
if (PrintBiasedLockingStatistics) {
49+
atomic_incl(ExternalAddress((address)BiasedLocking::total_entry_count_addr()), disp_hdr);
50+
}
51+
4852
// save object being locked into the BasicObjectLock
4953
movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
5054

@@ -90,6 +94,10 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
9094
// for recursive locking, the result is zero => save it in the displaced header
9195
// location (NULL in the displaced hdr location indicates recursive locking)
9296
movptr(Address(disp_hdr, 0), hdr);
97+
if (PrintBiasedLockingStatistics) {
98+
cond_inc32(Assembler::zero,
99+
ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
100+
}
93101
// otherwise we don't care about the result and handle locking via runtime call
94102
jcc(Assembler::notZero, slow_case);
95103
// done

src/cpu/x86/vm/interp_masm_x86_64.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -708,6 +708,10 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
708708

709709
Label slow_case;
710710

711+
if (PrintBiasedLockingStatistics) {
712+
atomic_incl(ExternalAddress((address)BiasedLocking::total_entry_count_addr()), obj_reg);
713+
}
714+
711715
// Load object pointer into obj_reg %c_rarg3
712716
movptr(obj_reg, Address(lock_reg, obj_offset));
713717

@@ -758,6 +762,10 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
758762

759763
bind(slow_case);
760764

765+
if (PrintBiasedLockingStatistics) {
766+
atomic_incl(ExternalAddress((address) BiasedLocking::slow_path_entry_count_addr()), obj_reg);
767+
}
768+
761769
// Call the runtime routine for slow case
762770
call_VM(noreg,
763771
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),

src/cpu/x86/vm/macroAssembler_x86.cpp

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1137,7 +1137,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
11371137
pop(tmp_reg);
11381138
}
11391139
if (counters != NULL) {
1140-
cond_inc32(Assembler::zero,
1140+
cond_inc32(Assembler::equal,
11411141
ExternalAddress((address) counters->biased_lock_entry_count_addr()));
11421142
}
11431143
jcc(Assembler::equal, done);
@@ -1710,6 +1710,10 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
17101710
assert_different_registers(objReg, boxReg, tmpReg, scrReg);
17111711
}
17121712

1713+
if (PrintBiasedLockingStatistics && counters == NULL) {
1714+
counters = BiasedLocking::counters();
1715+
}
1716+
17131717
if (counters != NULL) {
17141718
atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
17151719
}
@@ -1958,6 +1962,10 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
19581962
lock();
19591963
}
19601964
cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1965+
if (counters != NULL) {
1966+
cond_inc32(Assembler::equal,
1967+
ExternalAddress((address)counters->fast_path_entry_count_addr()));
1968+
}
19611969
// Intentional fall-through into DONE_LABEL ...
19621970
#endif // _LP64
19631971

src/cpu/x86/vm/sharedRuntime_x86_64.cpp

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#include "oops/compiledICHolder.hpp"
3636
#include "prims/jvmtiRedefineClassesTrace.hpp"
3737
#include "runtime/sharedRuntime.hpp"
38+
#include "runtime/biasedLocking.hpp"
3839
#include "runtime/vframeArray.hpp"
3940
#include "vmreg_x86.inline.hpp"
4041
#ifdef COMPILER1
@@ -2326,6 +2327,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
23262327

23272328
__ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
23282329

2330+
if (PrintBiasedLockingStatistics) {
2331+
__ atomic_incl(ExternalAddress((address)BiasedLocking::total_entry_count_addr()), obj_reg);
2332+
}
2333+
23292334
// Load the oop from the handle
23302335
__ movptr(obj_reg, Address(oop_handle_reg, 0));
23312336

@@ -2348,6 +2353,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
23482353

23492354
// src -> dest iff dest == rax else rax <- dest
23502355
__ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2356+
if (PrintBiasedLockingStatistics) {
2357+
__ cond_inc32(Assembler::equal,
2358+
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
2359+
}
23512360
__ jcc(Assembler::equal, lock_done);
23522361

23532362
// Hmm should this move to the slow path code area???
@@ -2366,6 +2375,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
23662375

23672376
// Save the test result, for recursive case, the result is zero
23682377
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2378+
if (PrintBiasedLockingStatistics) {
2379+
__ cond_inc32(Assembler::zero,
2380+
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
2381+
}
23692382
__ jcc(Assembler::notEqual, slow_path_lock);
23702383

23712384
// Slow path will re-enter here

src/share/vm/interpreter/interpreterRuntime.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -619,7 +619,8 @@ IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, Ba
619619
thread->last_frame().interpreter_frame_verify_monitor(elem);
620620
#endif
621621
if (PrintBiasedLockingStatistics) {
622-
Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
622+
// Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
623+
// Done in InterpreterMacroAssembler::lock_object. For some mysterious reason, some increments made here are missed.
623624
}
624625
Handle h_obj(thread, elem->obj());
625626
assert(Universe::heap()->is_in_reserved_or_null(h_obj()),

src/share/vm/runtime/biasedLocking.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -771,5 +771,6 @@ void BiasedLockingCounters::print_on(outputStream* st) {
771771
tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
772772
tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
773773
tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
774+
tty->print_cr("# raw slow path lock entries: %d", _slow_path_entry_count);
774775
tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
775776
}

0 commit comments

Comments (0)