 #include "oops/compiledICHolder.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/biasedLocking.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
@@ -2326,6 +2327,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
 
+    if (PrintBiasedLockingStatistics) {
+      __ atomic_incl(ExternalAddress((address)BiasedLocking::total_entry_count_addr()), obj_reg);
+    }
+
     // Load the oop from the handle
     __ movptr(obj_reg, Address(oop_handle_reg, 0));
 
@@ -2348,6 +2353,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
     // src -> dest iff dest == rax else rax <- dest
     __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
+    if (PrintBiasedLockingStatistics) {
+      __ cond_inc32(Assembler::equal,
+                    ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
+    }
     __ jcc(Assembler::equal, lock_done);
 
     // Hmm should this move to the slow path code area???
@@ -2366,6 +2375,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
     // Save the test result, for recursive case, the result is zero
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
+    if (PrintBiasedLockingStatistics) {
+      __ cond_inc32(Assembler::zero,
+                    ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
+    }
     __ jcc(Assembler::notEqual, slow_path_lock);
 
     // Slow path will re-enter here
0 commit comments