
8235753: [lworld] Handle deoptimization when buffering scalarized inline type args in C1

Tobias Hartmann committed Mar 31, 2020
1 parent ece2a2d commit 0595ad08114f86b1916a16351e57c9edc94e2945
@@ -283,8 +283,7 @@ void LIR_Assembler::osr_entry() {

   // build frame
   ciMethod* m = compilation()->method();
-  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
-                 needs_stack_repair(), NULL);
+  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

   // OSR buffer is
   //
@@ -4269,6 +4268,9 @@ void LIR_Assembler::get_thread(LIR_Opr result_reg) {
 #endif // _LP64
 }

+void LIR_Assembler::check_orig_pc() {
+  __ cmpptr(frame_map()->address_for_orig_pc_addr(), (int32_t)NULL_WORD);
+}
+
 void LIR_Assembler::peephole(LIR_List*) {
   // do nothing for now
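
In isolation, check_orig_pc is just a null test on the frame's orig_pc slot. A minimal standalone sketch of the protocol it supports (invented names, not HotSpot code): the entry point zeroes the slot, the deoptimization machinery writes the original return pc into it if the method is deoptimized while the runtime call is buffering value args, and a non-zero value afterwards means the frame must take the deoptimization path.

    #include <cassert>
    #include <cstdint>

    // Hypothetical sketch of the orig_pc protocol; names are made up.
    struct Frame {
      intptr_t orig_pc = 0;  // zeroed by the entry point (NULL_WORD)
    };

    // What the deopt handler conceptually does if the method is
    // deoptimized while the runtime call is buffering value args.
    void deoptimize(Frame& f, intptr_t original_return_pc) {
      f.orig_pc = original_return_pc;
    }

    // What lir_check_orig_pc computes: "did a deopt happen in between?"
    bool deoptimized_during_buffering(const Frame& f) {
      return f.orig_pc != 0;
    }

    int main() {
      Frame f;
      assert(!deoptimized_during_buffering(f));
      deoptimize(f, 0x7fff1234);
      assert(deoptimized_during_buffering(f));
    }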
@@ -318,18 +318,21 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
 }


-void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes, bool needs_stack_repair, Label* verified_value_entry_label) {
-  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
+void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes, int sp_offset_for_orig_pc, bool needs_stack_repair, bool has_scalarized_args, Label* verified_value_entry_label) {
+  if (has_scalarized_args) {
+    // Initialize orig_pc to detect deoptimization during buffering in the entry points
+    movptr(Address(rsp, sp_offset_for_orig_pc - frame_size_in_bytes - wordSize), 0);
+  }
+  if (!needs_stack_repair && verified_value_entry_label != NULL) {
+    bind(*verified_value_entry_label);
+  }
   // Make sure there is enough stack space for this method's activation.
   // Note that we do this before doing an enter(). This matches the
   // ordering of C2's stack overflow check / rsp decrement and allows
   // the SharedRuntime stack overflow handling to be consistent
   // between the two compilers.
+  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
   generate_stack_overflow_check(bang_size_in_bytes);

-  if (!needs_stack_repair && verified_value_entry_label != NULL) {
-    bind(*verified_value_entry_label);
-  }
   push(rbp);
   if (PreserveFramePointer) {
     mov(rbp, rsp);
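
The early store above runs before push(rbp) and the frame allocation, so the slot's offset in the finished frame has to be rebased onto the current rsp; that is what the sp_offset_for_orig_pc - frame_size_in_bytes - wordSize expression does. A small sketch of that arithmetic, with made-up numbers:

    #include <cstdio>

    // Sketch of the offset rebasing in the early orig_pc store; all
    // numbers are invented for illustration.
    int main() {
      const int wordSize = 8;          // x86_64
      int frame_size_in_bytes   = 96;  // hypothetical C1 frame size
      int sp_offset_for_orig_pc = 64;  // slot offset once the frame exists

      // Per the expression in build_frame(): the current rsp sits
      // frame_size_in_bytes + wordSize above the finished frame's rsp,
      // so the slot currently lies below rsp (the stack bang has not
      // run yet, hence the store precedes frame allocation).
      int offset_from_current_rsp = sp_offset_for_orig_pc - frame_size_in_bytes - wordSize;
      printf("movptr [rsp%+d], 0\n", offset_from_current_rsp);
      return 0;
    }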
@@ -363,13 +366,13 @@ void C1_MacroAssembler::remove_frame(int frame_size_in_bytes, bool needs_stack_r
   } else {
     movq(r13, Address(rsp, frame_size_in_bytes + wordSize)); // return address
     movq(rbp, Address(rsp, frame_size_in_bytes));
-    addq(rsp, Address(rsp, frame_size_in_bytes - wordSize)); // now we are back to caller frame, without the outgoing returned address
-    push(r13); // restore the returned address, as pushed by caller
+    addq(rsp, Address(rsp, frame_size_in_bytes - wordSize)); // now we are back to caller frame, without the outgoing return address
+    push(r13); // restore the return address, as pushed by caller
   }
 }


-void C1_MacroAssembler::verified_value_entry() {
+void C1_MacroAssembler::verified_entry() {
   if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
     // Verified Entry first instruction should be 5 bytes long for correct
     // patching by patch_verified_entry().
@@ -385,22 +388,13 @@ void C1_MacroAssembler::verified_entry() {
   IA32_ONLY( verify_FPU(0, "method_entry"); )
 }

-int C1_MacroAssembler::scalarized_entry(const CompiledEntrySignature *ces, int frame_size_in_bytes, int bang_size_in_bytes, Label& verified_value_entry_label, bool is_value_ro_entry) {
-  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
-    // Verified Entry first instruction should be 5 bytes long for correct
-    // patching by patch_verified_entry().
-    //
-    // C1Breakpoint and VerifyFPU have one byte first instruction.
-    // Also first instruction will be one byte "push(rbp)" if stack banging
-    // code is not generated (see build_frame() above).
-    // For all these cases generate long instruction first.
-    fat_nop();
-  }
-  if (C1Breakpoint) int3();
-  IA32_ONLY( verify_FPU(0, "method_entry"); )
-
+int C1_MacroAssembler::scalarized_entry(const CompiledEntrySignature *ces, int frame_size_in_bytes, int bang_size_in_bytes, int sp_offset_for_orig_pc, Label& verified_value_entry_label, bool is_value_ro_entry) {
   assert(ValueTypePassFieldsAsArgs, "sanity");
-  GrowableArray<SigEntry>* sig = &ces->sig();
   // Make sure there is enough stack space for this method's activation.
   assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
   generate_stack_overflow_check(bang_size_in_bytes);

+  GrowableArray<SigEntry>* sig = &ces->sig();
   GrowableArray<SigEntry>* sig_cc = is_value_ro_entry ? &ces->sig_cc_ro() : &ces->sig_cc();
   VMRegPair* regs = ces->regs();
   VMRegPair* regs_cc = is_value_ro_entry ? ces->regs_cc_ro() : ces->regs_cc();
@@ -411,10 +405,8 @@ int C1_MacroAssembler::scalarized_entry(const CompiledEntrySignature *ces, int f
   BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig_cc->length());
   int args_passed = sig->length();
   int args_passed_cc = SigEntry::fill_sig_bt(sig_cc, sig_bt);

-  int extra_stack_offset = wordSize; // tos is return address.

-  // Create a temp frame so we can call into runtime. It must be properly set up to accommodate GC.
   int sp_inc = (args_on_stack - args_on_stack_cc) * VMRegImpl::stack_slot_size;
   if (sp_inc > 0) {
     pop(r13);
@@ -424,6 +416,8 @@ int C1_MacroAssembler::scalarized_entry(const CompiledEntrySignature *ces, int f
   } else {
     sp_inc = 0;
   }
+
+  // Create a temp frame so we can call into runtime. It must be properly set up to accommodate GC.
   push(rbp);
   if (PreserveFramePointer) {
     mov(rbp, rsp);
@@ -432,11 +426,14 @@ int C1_MacroAssembler::scalarized_entry(const CompiledEntrySignature *ces, int f
   if (sp_inc > 0) {
     int real_frame_size = frame_size_in_bytes +
       + wordSize  // pushed rbp
-      + wordSize  // returned address pushed by the stack extension code
+      + wordSize  // return address pushed by the stack extension code
       + sp_inc;   // stack extension
     movptr(Address(rsp, frame_size_in_bytes - wordSize), real_frame_size);
   }

+  // Initialize orig_pc to detect deoptimization during buffering in below runtime call
+  movptr(Address(rsp, sp_offset_for_orig_pc), 0);
+
   // FIXME -- call runtime only if we cannot in-line allocate all the incoming value args.
   movptr(rbx, (intptr_t)(ces->method()));
   if (is_value_ro_entry) {
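
A worked example of the real_frame_size bookkeeping above, with invented numbers; when the scalarized and non-scalarized argument layouts need different amounts of argument stack, the entry extends the frame by sp_inc and records the true size for the stack-repair logic:

    #include <cstdio>

    // Illustration only: how real_frame_size is composed when the entry
    // point extends the stack. All values are hypothetical.
    int main() {
      const int wordSize = 8;                       // x86_64
      const int stack_slot_size = 4;                // VMRegImpl::stack_slot_size
      int args_on_stack = 6, args_on_stack_cc = 2;  // hypothetical slot counts
      int frame_size_in_bytes = 96;                 // hypothetical frame size

      int sp_inc = (args_on_stack - args_on_stack_cc) * stack_slot_size;
      int real_frame_size = frame_size_in_bytes
                          + wordSize   // pushed rbp
                          + wordSize   // return address pushed by the stack extension code
                          + sp_inc;    // stack extension
      printf("sp_inc=%d real_frame_size=%d\n", sp_inc, real_frame_size);
      return 0;
    }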
@@ -456,10 +453,9 @@ int C1_MacroAssembler::scalarized_entry(const CompiledEntrySignature *ces, int f
   assert(sp_inc == n, "must be");

   if (sp_inc != 0) {
-    // Do the stack banging here, and skip over the stack repair code in the
+    // Skip over the stack banging and frame setup code in the
     // verified_value_entry (which has a different real_frame_size).
     assert(sp_inc > 0, "stack should not shrink");
-    generate_stack_overflow_check(bang_size_in_bytes);
     push(rbp);
     if (PreserveFramePointer) {
       mov(rbp, rsp);
@@ -472,22 +472,24 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
   // For C1, the runtime stub might not have oop maps, so set this flag
   // outside of update_register_map.
   bool caller_args = _cb->caller_must_gc_arguments(map->thread());
 #ifdef COMPILER1
   if (!caller_args) {
     nmethod* nm = _cb->as_nmethod_or_null();
-    if (nm != NULL && nm->is_compiled_by_c1() &&
-        nm->method()->has_scalarized_args() &&
+    if (nm != NULL && nm->is_compiled_by_c1() && nm->method()->has_scalarized_args() &&
         pc() < nm->verified_value_entry_point()) {
+      // The VEP and VVEP(RO) of C1-compiled methods call buffer_value_args_xxx
+      // before doing any argument shuffling, so we need to scan the oops
+      // as the caller passes them.
+      caller_args = true;
+#ifdef ASSERT
       NativeCall* call = nativeCall_before(pc());
       address dest = call->destination();
-      if (dest == Runtime1::entry_for(Runtime1::buffer_value_args_no_receiver_id) ||
-          dest == Runtime1::entry_for(Runtime1::buffer_value_args_id)) {
-        caller_args = true;
-      }
+      assert(dest == Runtime1::entry_for(Runtime1::buffer_value_args_no_receiver_id) ||
+             dest == Runtime1::entry_for(Runtime1::buffer_value_args_id), "unexpected safepoint in entry point");
+#endif
     }
   }
 #endif
   map->set_include_argument_oops(caller_args);
   if (_cb->oop_maps() != NULL) {
     OopMapSet::update_register_map(this, map);
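
A toy model of the pc-range test above (not HotSpot code): any pc below the verified value entry point lies in the VEP/VVEP(RO) packing prologue, where the incoming arguments are still laid out as the caller passed them, so the register map must include the caller's argument oops:

    #include <cassert>
    #include <cstdint>

    // Hypothetical model of the decision added to sender_for_compiled_frame().
    struct NMethodModel {
      uintptr_t code_begin;
      uintptr_t verified_value_entry_point;  // start of the refs-only entry/body
    };

    // True while executing the packing prologue, i.e. before any
    // argument shuffling has happened.
    bool must_scan_caller_args(const NMethodModel& nm, uintptr_t pc) {
      return pc >= nm.code_begin && pc < nm.verified_value_entry_point;
    }

    int main() {
      NMethodModel nm{0x1000, 0x1080};                // invented addresses
      assert(must_scan_caller_args(nm, 0x1040));      // safepoint while buffering args
      assert(!must_scan_caller_args(nm, 0x10c0));     // safepoint in the method body
    }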
@@ -216,6 +216,9 @@ class FrameMap : public CompilationResourceObj {
   Address address_for_monitor_object(int monitor_index) const {
     return make_new_address(sp_offset_for_monitor_object(monitor_index));
   }
+  Address address_for_orig_pc_addr() const {
+    return make_new_address(sp_offset_for_monitor_base(_num_monitors));
+  }

   // Creates Location describing desired slot and returns it via pointer
   // to Location object. Returns true if the stack frame offset was legal
@@ -468,6 +468,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
     case lir_membar_storestore:        // result and info always invalid
     case lir_membar_loadstore:         // result and info always invalid
     case lir_membar_storeload:         // result and info always invalid
+    case lir_check_orig_pc:            // result and info always invalid
     case lir_on_spin_wait:
     {
       assert(op->as_Op0() != NULL, "must be");
@@ -1817,6 +1818,7 @@ const char * LIR_Op::name() const {
      case lir_fpop_raw:              s = "fpop_raw";      break;
      case lir_breakpoint:            s = "breakpoint";    break;
      case lir_get_thread:            s = "get_thread";    break;
+     case lir_check_orig_pc:         s = "check_orig_pc"; break;
      // LIR_Op1
      case lir_fxch:                  s = "fxch";          break;
      case lir_fld:                   s = "fld";           break;
@@ -906,6 +906,7 @@ enum LIR_Code {
       , lir_membar_storeload
       , lir_get_thread
       , lir_on_spin_wait
+      , lir_check_orig_pc
       , end_op0
       , begin_op1
       , lir_fxch
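
The three hunks above, together with the emit_op0 change further down, follow the usual recipe for introducing a zero-operand LIR op: add the opcode before end_op0, list it in the visitor with the other operand-less cases, give it a printable name, and dispatch it in the emitter. A toy illustration of that shape (not HotSpot code):

    #include <cassert>
    #include <cstdio>

    // Miniature stand-in for the LIR opcode plumbing; names mirror the
    // patch but nothing here is real HotSpot code.
    enum LirCode { lir_get_thread, lir_on_spin_wait, lir_check_orig_pc, end_op0 };

    const char* name(LirCode c) {
      switch (c) {
        case lir_get_thread:    return "get_thread";
        case lir_on_spin_wait:  return "on_spin_wait";
        case lir_check_orig_pc: return "check_orig_pc";
        default:                return "unknown";
      }
    }

    void emit_op0(LirCode c) {
      switch (c) {
        case lir_check_orig_pc:
          puts("cmpptr [orig_pc_slot], 0");  // stands in for the real emitter
          break;
        default:
          assert(false && "unhandled op0");
      }
    }

    int main() {
      printf("emitting %s:\n", name(lir_check_orig_pc));
      emit_op0(lir_check_orig_pc);
    }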
@@ -635,58 +635,55 @@ void LIR_Assembler::add_scalarized_entry_info(int pc_offset) {
 //          (1)               (2)                 (3)                 (4)
 // UEP/UVEP:                VEP:                UEP:                UEP:
 //   check_icache             pack receiver       check_icache        check_icache
-// VEP/VVEP/VVEP_RO         UEP/UVEP:           VEP/VVEP_RO:        VVEP_RO:
-//   body                     check_icache        pack value args     pack value args (except receiver)
+// VEP/VVEP/VVEP_RO           jump to VVEP      VEP/VVEP_RO:        VVEP_RO:
+//   body                   UEP/UVEP:             pack value args     pack value args (except receiver)
+//                            check_icache        jump to VVEP        jump to VVEP
 //                          VVEP/VVEP_RO        UVEP:               VEP:
 //                            body                check_icache        pack all value args
-//                                              VVEP:               UVEP:
-//                                                body                check_icache
+//                                              VVEP:                 jump to VVEP
+//                                                body              UVEP:
+//                                                                    check_icache
 //                                                                  VVEP:
 //                                                                    body
 //
 // Note: after packing, we jump to the method body.
 void LIR_Assembler::emit_std_entries() {
   offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

-  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
-
   _masm->align(CodeEntryAlignment);

+  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
   if (ces->has_scalarized_args()) {
     assert(ValueTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");

     CodeOffsets::Entries ro_entry_type = ces->c1_value_ro_entry_type();

+    // UEP: check icache and fall-through
     if (ro_entry_type != CodeOffsets::Verified_Value_Entry) {
-      // This is the UEP. It will fall-through to VEP or VVEP(RO)
       offsets()->set_value(CodeOffsets::Entry, _masm->offset());
-      if (needs_icache(compilation()->method())) {
+      if (needs_icache(method())) {
         check_icache();
       }
     }

+    // VVEP_RO: pack all value parameters, except the receiver
     if (ro_entry_type == CodeOffsets::Verified_Value_Entry_RO) {
-      // VVEP(RO) = pack all value parameters, except the <this> object.
-      add_scalarized_entry_info(emit_std_entry(CodeOffsets::Verified_Value_Entry_RO, ces));
+      emit_std_entry(CodeOffsets::Verified_Value_Entry_RO, ces);
     }

-    // VEP = pack all value parameters
+    // VEP: pack all value parameters
     _masm->align(CodeEntryAlignment);
-    add_scalarized_entry_info(emit_std_entry(CodeOffsets::Verified_Entry, ces));
+    emit_std_entry(CodeOffsets::Verified_Entry, ces);

+    // UVEP: check icache and fall-through
     _masm->align(CodeEntryAlignment);
-    // This is the UVEP. It will fall-through to VVEP.
     offsets()->set_value(CodeOffsets::Value_Entry, _masm->offset());
     if (ro_entry_type == CodeOffsets::Verified_Value_Entry) {
       // Special case if we have VVEP == VVEP(RO):
       // this means UVEP (called by C1) == UEP (called by C2).
       offsets()->set_value(CodeOffsets::Entry, _masm->offset());
     }
-
-    if (needs_icache(compilation()->method())) {
+    if (needs_icache(method())) {
       check_icache();
     }
-    // VVEP = all value parameters are passed as refs - no packing.
+
+    // VVEP: all value parameters are passed as refs - no packing.
     emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);

     if (ro_entry_type != CodeOffsets::Verified_Value_Entry_RO) {
@@ -700,42 +697,44 @@ void LIR_Assembler::emit_std_entries() {
     // All 3 entries are the same (no value-type packing)
     offsets()->set_value(CodeOffsets::Entry, _masm->offset());
     offsets()->set_value(CodeOffsets::Value_Entry, _masm->offset());
-    if (needs_icache(compilation()->method())) {
+    if (needs_icache(method())) {
       check_icache();
     }
-    int offset = emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
-    offsets()->set_value(CodeOffsets::Verified_Entry, offset);
-    offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO, offset);
+    emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
+    offsets()->set_value(CodeOffsets::Verified_Entry, offsets()->value(CodeOffsets::Verified_Value_Entry));
+    offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO, offsets()->value(CodeOffsets::Verified_Value_Entry));
   }
 }

-int LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
+void LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
   offsets()->set_value(entry, _masm->offset());
-  int offset = _masm->offset();
+  _masm->verified_entry();
   switch (entry) {
-  case CodeOffsets::Verified_Entry:
-    offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
-    if (needs_clinit_barrier_on_entry(compilation()->method())) {
-      clinit_barrier(compilation()->method());
-    }
-    return offset;
-  case CodeOffsets::Verified_Value_Entry_RO:
-    offset = _masm->verified_value_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
-    if (needs_clinit_barrier_on_entry(compilation()->method())) {
-      clinit_barrier(compilation()->method());
-    }
-    return offset;
-  default:
-  {
-    assert(entry == CodeOffsets::Verified_Value_Entry, "must be");
-    _masm->verified_value_entry();
-    if (needs_clinit_barrier_on_entry(compilation()->method())) {
-      clinit_barrier(compilation()->method());
-    }
-    build_frame();
-    offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
-    return offset;
-  }
+  case CodeOffsets::Verified_Entry: {
+    if (needs_clinit_barrier_on_entry(method())) {
+      clinit_barrier(method());
+    }
+    int rt_call_offset = _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_value_entry);
+    add_scalarized_entry_info(rt_call_offset);
+    break;
+  }
+  case CodeOffsets::Verified_Value_Entry_RO: {
+    assert(!needs_clinit_barrier_on_entry(method()), "can't be static");
+    int rt_call_offset = _masm->verified_value_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()), _verified_value_entry);
+    add_scalarized_entry_info(rt_call_offset);
+    break;
+  }
+  case CodeOffsets::Verified_Value_Entry: {
+    if (needs_clinit_barrier_on_entry(method())) {
+      clinit_barrier(method());
+    }
+    build_frame();
+    offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
+    break;
+  }
+  default:
+    ShouldNotReachHere();
+    break;
   }
 }
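
A toy model of the entry layout the comment table describes (plain functions standing in for code offsets; not HotSpot code): the icache-checking entries fall through, the packing entries do their work and then jump to VVEP, and VVEP is the shared body that takes value arguments as references:

    #include <cstdio>

    // Hypothetical stand-ins for the entry points of one compiled method.
    void vvep()    { puts("body"); }                 // refs only, no packing
    void vvep_ro() { puts("pack value args (except receiver)"); vvep(); }
    void vep()     { puts("pack all value args"); vvep(); }
    void uep()     { puts("check_icache"); vep(); }  // falls through to VEP
    void uvep()    { puts("check_icache"); vvep(); } // falls through to VVEP

    int main() {
      uep();      // a caller arriving with scalarized args
      uvep();     // a C1 caller passing value args as refs
      vvep_ro();  // a caller whose receiver is already buffered
      return 0;
    }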

@@ -814,6 +813,10 @@ void LIR_Assembler::emit_op0(LIR_Op0* op) {
       on_spin_wait();
       break;

+    case lir_check_orig_pc:
+      check_orig_pc();
+      break;
+
     default:
       ShouldNotReachHere();
       break;
@@ -907,8 +910,8 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {


 void LIR_Assembler::build_frame() {
-  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
-                     compilation()->needs_stack_repair(),
+  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), in_bytes(frame_map()->sp_offset_for_orig_pc()),
+                     compilation()->needs_stack_repair(), method()->has_scalarized_args(),
                      &_verified_value_entry);
 }

@@ -212,7 +212,7 @@ class LIR_Assembler: public CompilationResourceObj {
   void emit_profile_type(LIR_OpProfileType* op);
   void emit_delay(LIR_OpDelay* op);
   void emit_std_entries();
-  int emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces);
+  void emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces);
   void add_scalarized_entry_info(int call_offset);

   void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
@@ -262,6 +262,7 @@ class LIR_Assembler: public CompilationResourceObj {
   void membar_storeload();
   void on_spin_wait();
   void get_thread(LIR_Opr result);
+  void check_orig_pc();

   void verify_oop_map(CodeEmitInfo* info);
