Automatic merge of jdk:master into master
duke committed Jun 24, 2021
2 parents 4b0f80e + 2fd7943 · commit cea925744e5d22b2dbb86726c2032118999cb9d1
Showing 165 changed files with 294 additions and 5,262 deletions.
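
This merge pulls in the upstream removal of biased locking from HotSpot: every hunk below deletes a UseBiasedLocking fast path or its bookkeeping from the aarch64 port. For orientation only, here is a hypothetical C++ model of the fast path being deleted; names and bit layout are simplified and are not HotSpot's real markWord:

#include <atomic>
#include <cstdint>

// Hypothetical, simplified mark-word model (NOT HotSpot's real layout).
constexpr uintptr_t lock_mask_in_place  = 0x7; // low three bits of the header
constexpr uintptr_t biased_lock_pattern = 0x5; // ...101 = "biased" (removed here)

struct Object {
    std::atomic<uintptr_t> mark;
};

// The fast path this commit deletes: if the header already encodes a bias
// toward the current thread, lock acquisition needs no atomic update at all.
bool biased_toward(const Object& o, uintptr_t thread_id) {
    uintptr_t m = o.mark.load(std::memory_order_relaxed);
    return (m & lock_mask_in_place) == biased_lock_pattern
        && (m & ~lock_mask_in_place) == thread_id;
}

The point of the optimization was that re-entry by the bias owner needed no atomic instruction; its removal makes every lock acquisition go through the CAS-based stack lock seen in the hunks below.
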
@@ -183,7 +183,6 @@ ifeq ($(call check-jvm-feature, opt-size), true)
assembler.cpp \
barrierSet.cpp \
basicLock.cpp \
-biasedLocking.cpp \
bytecode.cpp \
bytecodeInterpreter.cpp \
c1_Compilation.cpp \
@@ -3788,10 +3788,6 @@ encode %{
__ br(Assembler::NE, cont);
}

-if (UseBiasedLocking && !UseOptoBiasInlining) {
-__ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
-}
-
// Check for existing monitor
__ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);

@@ -3862,10 +3858,6 @@ encode %{

assert_different_registers(oop, box, tmp, disp_hdr);

-if (UseBiasedLocking && !UseOptoBiasInlining) {
-__ biased_locking_exit(oop, tmp, cont);
-}
-
// Find the lock address and load the displaced header from the stack.
__ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

@@ -8887,11 +8879,6 @@ instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFl
ins_pipe(pipe_serial);
%}

-
-// storeLConditional is used by PhaseMacroExpand::expand_lock_node
-// when attempting to rebias a lock towards the current thread. We
-// must use the acquire form of cmpxchg in order to guarantee acquire
-// semantics in this case.
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
%{
match(Set cr (StoreLConditional mem (Binary oldval newval)));
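
The deleted comment above records why the rebias path needed the acquire form of cmpxchg: the thread that wins the exchange must observe every write made by the previous lock holder. A hedged illustration in portable C++ (std::atomic, not HotSpot's Assembler API):

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> lock_word{0};

// Acquire-ordered CAS: on success, no later load or store in this thread can
// be reordered before the exchange, which is exactly the guarantee the
// deleted comment asks of storeLConditional.
bool try_lock(uint64_t expected, uint64_t desired) {
    return lock_word.compare_exchange_strong(
        expected, desired,
        std::memory_order_acquire,    // ordering on success
        std::memory_order_relaxed);   // ordering on failure
}
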
@@ -3275,8 +3275,6 @@ inline const Assembler::Condition operator~(const Assembler::Condition cond) {
return Assembler::Condition(int(cond) ^ 1);
}

-class BiasedLockingCounters;
-
extern "C" void das(uint64_t start, int len);

#endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP
@@ -2577,13 +2577,9 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
if (!UseFastLocking) {
__ b(*op->stub()->entry());
} else if (op->code() == lir_lock) {
-Register scratch = noreg;
-if (UseBiasedLocking) {
-scratch = op->scratch_opr()->as_register();
-}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
-int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
+int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
if (op->info() != NULL) {
add_debug_info_for_null_check(null_check_offset, op->info());
}
@@ -331,11 +331,6 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {

// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
-// Need a scratch register for biased locking
-LIR_Opr scratch = LIR_OprFact::illegalOpr;
-if (UseBiasedLocking) {
-scratch = new_register(T_INT);
-}

CodeEmitInfo* info_for_exception = NULL;
if (x->needs_null_check()) {
@@ -344,7 +339,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
-monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
+monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
x->monitor_no(), info_for_exception, info);
}

@@ -33,7 +33,6 @@
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
-#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
@@ -61,7 +60,7 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
}
}

-int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
+int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
@@ -82,11 +81,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
br(Assembler::NE, slow_case);
}

-if (UseBiasedLocking) {
-assert(scratch != noreg, "should have scratch register at this point");
-biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
-}
-
// Load object header
ldr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
@@ -122,10 +116,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
cbnz(hdr, slow_case);
// done
bind(done);
-if (PrintBiasedLockingStatistics) {
-lea(rscratch2, ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
-addmw(Address(rscratch2, 0), 1, rscratch1);
-}
return null_check_offset;
}
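
With the biased and statistics branches gone, lock_object is a plain stack lock: load the header, force the unlocked bit, save it as the displaced header, and CAS the BasicLock address into the object. Roughly, in portable C++ — a sketch of the protocol only; the real code also recognizes a recursive enter by checking whether the old header points into the current stack:

#include <atomic>
#include <cstdint>

struct BasicLock { uintptr_t displaced_header; };

// Stack-locking fast path, mirroring lock_object above.
bool fast_lock(std::atomic<uintptr_t>& mark, BasicLock* lock) {
    // Load the header and mark it as unlocked (low bit set).
    uintptr_t hdr = mark.load(std::memory_order_relaxed) | 1;
    // Stash it as the displaced header in the on-stack BasicLock.
    lock->displaced_header = hdr;
    // Try to install a pointer to the BasicLock as the new header.
    return mark.compare_exchange_strong(
        hdr, reinterpret_cast<uintptr_t>(lock), std::memory_order_acquire);
    // On failure the assembly falls through to cbnz(hdr, slow_case).
}

The removed PrintBiasedLockingStatistics block only counted successful fast-path entries; dropping it leaves the protocol itself unchanged.
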

@@ -136,21 +126,13 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
Label done;

-if (UseBiasedLocking) {
-// load object
-ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
-biased_locking_exit(obj, hdr, done);
-}
-
// load displaced header
ldr(hdr, Address(disp_hdr, 0));
// if the loaded hdr is NULL we had recursive locking
// if we had recursive locking, we are done
cbz(hdr, done);
-if (!UseBiasedLocking) {
-// load object
-ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
-}
+// load object
+ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
verify_oop(obj);
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
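
The unlock side, simplified by this change, is the mirror image: a NULL displaced header marks a recursive enter (nothing to restore), otherwise the displaced header is CASed back into the object, releasing the lock. A minimal sketch under the same assumptions as above:

#include <atomic>
#include <cstdint>

struct BasicLock { uintptr_t displaced_header; };

// Stack-unlock fast path, mirroring unlock_object above.
bool fast_unlock(std::atomic<uintptr_t>& mark, BasicLock* lock) {
    uintptr_t hdr = lock->displaced_header;
    if (hdr == 0) {
        return true;  // recursive enter: header was never displaced
    }
    // Restore the displaced header only if the object still points at us.
    uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
    return mark.compare_exchange_strong(expected, hdr,
                                        std::memory_order_release);
    // Failure means the lock was inflated meanwhile: go to the slow case.
}
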
@@ -179,13 +161,8 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len);
-if (UseBiasedLocking && !len->is_valid()) {
-assert_different_registers(obj, klass, len, t1, t2);
-ldr(t1, Address(klass, Klass::prototype_header_offset()));
-} else {
-// This assumes that all prototype bits fit in an int32_t
-mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
-}
+// This assumes that all prototype bits fit in an int32_t
+mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

if (UseCompressedClassPointers) { // Take care not to kill klass
@@ -58,9 +58,8 @@ using MacroAssembler::null_check;
// hdr : must be r0, contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must point to the displaced header location, contents preserved
-// scratch : scratch register, contents destroyed
// returns code offset at which to add null check debug information
-int lock_object (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
+int lock_object (Register swap, Register obj, Register disp_hdr, Label& slow_case);

// unlocking
// hdr : contents destroyed
@@ -39,7 +39,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
-#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -754,10 +753,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
br(Assembler::NE, slow_case);
}

-if (UseBiasedLocking) {
-biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
-}
-
// Load (object->mark() | 1) into swap_reg
ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
orr(swap_reg, rscratch1, 1);
@@ -769,17 +764,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
"displached header must be first word in BasicObjectLock");

Label fail;
-if (PrintBiasedLockingStatistics) {
-Label fast;
-cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
-bind(fast);
-atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
-rscratch2, rscratch1, tmp);
-b(done);
-bind(fail);
-} else {
-cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
-}
+cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);

// Fast check for recursive lock.
//
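
The recursive check the comment refers to: if the CAS failed because the header already points into the current thread's stack, an enclosing frame owns the lock, and a zero displaced header is recorded instead. A hedged sketch of that address test — the mask below is illustrative; HotSpot derives it from the page size and alignment mask:

#include <cstdint>

// Illustrative recursive-enter test: treat the lock as recursively held if
// the old header points into (approximately) the current stack page.
bool is_recursive_enter(uintptr_t old_header, uintptr_t sp,
                        uintptr_t page_size /* assumed power of two */) {
    return ((old_header - sp) & ~(page_size - 1)) == 0;
}
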
@@ -816,12 +801,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)

// Save the test result, for recursive case, the result is zero
str(swap_reg, Address(lock_reg, mark_offset));
-
-if (PrintBiasedLockingStatistics) {
-br(Assembler::NE, slow_case);
-atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
-rscratch2, rscratch1, tmp);
-}
br(Assembler::EQ, done);

bind(slow_case);
@@ -872,10 +851,6 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
// Free entry
str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

-if (UseBiasedLocking) {
-biased_locking_exit(obj_reg, header_reg, done);
-}
-
// Load the old header from BasicLock structure
ldr(header_reg, Address(swap_reg,
BasicLock::displaced_header_offset_in_bytes()));
