Skip to content
Permalink
Browse files
8259937: guarantee(loc != NULL) failed: missing saved register with native invoker

Reviewed-by: kvn, jvernee, vlivanov
  • Loading branch information
rwestrel committed Mar 1, 2021
1 parent c569f1d commit 6baecf39d5cd449624f6e8153f54acf334cf3d59
Showing 28 changed files with 384 additions and 182 deletions.
@@ -355,10 +355,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

if (jfa->saved_fp_address()) {
update_map_with_saved_link(map, jfa->saved_fp_address());
}

return fr;
}

@@ -31,9 +31,6 @@
// FP value associated with _last_Java_sp:
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to

// (Optional) location of saved FP register, which GCs want to inspect
intptr_t** volatile _saved_fp_address;

public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
@@ -47,7 +44,6 @@
OrderAccess::release();
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_saved_fp_address = NULL;
}

void copy(JavaFrameAnchor* src) {
@@ -66,8 +62,6 @@
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;

_saved_fp_address = src->_saved_fp_address;
}

bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
@@ -78,12 +72,9 @@

address last_Java_pc(void) { return _last_Java_pc; }

intptr_t** saved_fp_address(void) const { return _saved_fp_address; }

private:

static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
static ByteSize saved_fp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_fp_address); }

public:

@@ -320,8 +320,6 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) {

// Always clear the pc because it could have been set by make_walkable()
str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));

str(zr, Address(rthread, JavaThread::saved_fp_address_offset()));
}

// Calls to C land
@@ -3072,7 +3072,6 @@ void OptoRuntime::generate_exception_blob() {
// Set exception blob
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2

// ---------------------------------------------------------------

@@ -3082,6 +3081,10 @@ class NativeInvokerGenerator : public StubCodeGenerator {

const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;

int _frame_complete;
int _framesize;
OopMapSet* _oop_maps;
public:
NativeInvokerGenerator(CodeBuffer* buffer,
address call_target,
@@ -3092,9 +3095,90 @@ class NativeInvokerGenerator : public StubCodeGenerator {
_call_target(call_target),
_shadow_space_bytes(shadow_space_bytes),
_input_registers(input_registers),
_output_registers(output_registers) {}
_output_registers(output_registers),
_frame_complete(0),
_framesize(0),
_oop_maps(NULL) {
assert(_output_registers.length() <= 1
|| (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
}

void generate();

// Number of stack bytes needed to preserve the (single) native return
// value across a runtime call; 0 when there is no return register.
int spill_size_in_bytes() const {
  if (_output_registers.length() == 0) {
    return 0;
  }
  VMReg reg = _output_registers.at(0);
  assert(reg->is_reg(), "must be a register");
  if (reg->is_Register()) {
    // Integer return value: one 64-bit slot.
    return 8;
  }
  if (reg->is_FloatRegister()) {
    // With SVE the whole scalable vector register is preserved;
    // otherwise a full 128-bit Q register.
    return Matcher::supports_scalable_vector()
         ? Matcher::scalable_vector_reg_size(T_BYTE)
         : 16;
  }
  ShouldNotReachHere();
  return 0;
}

// Emit code that stores the native return register (if any) to the
// spill area at SP offset 0, so it survives a runtime call.
void spill_output_registers() {
  if (_output_registers.length() == 0) {
    return;
  }
  VMReg reg = _output_registers.at(0);
  assert(reg->is_reg(), "must be a register");
  MacroAssembler* masm = _masm;  // name required by the __ macro
  if (reg->is_Register()) {
    __ spill(reg->as_Register(), true, 0);
  } else if (reg->is_FloatRegister()) {
    if (Matcher::supports_scalable_vector()) {
      // Preserve the full scalable vector register.
      __ spill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
    } else {
      __ spill(reg->as_FloatRegister(), __ Q, 0);
    }
  } else {
    ShouldNotReachHere();
  }
}

// Emit code that reloads the native return register (if any) from the
// spill area at SP offset 0; mirror image of spill_output_registers().
void fill_output_registers() {
  if (_output_registers.length() == 0) {
    return;
  }
  VMReg reg = _output_registers.at(0);
  assert(reg->is_reg(), "must be a register");
  MacroAssembler* masm = _masm;  // name required by the __ macro
  if (reg->is_Register()) {
    __ unspill(reg->as_Register(), true, 0);
  } else if (reg->is_FloatRegister()) {
    if (Matcher::supports_scalable_vector()) {
      // Restore the full scalable vector register.
      __ unspill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
    } else {
      __ unspill(reg->as_FloatRegister(), __ Q, 0);
    }
  } else {
    ShouldNotReachHere();
  }
}

// Byte offset from the stub start at which the frame is fully set up;
// handed to RuntimeStub::new_runtime_stub by make_native_invoker.
int frame_complete() const { return _frame_complete; }

// Frame size in words: _framesize is kept in 32-bit slots, so shift by
// the slot-to-word ratio (LogBytesPerWord - LogBytesPerInt).
int framesize() const {
  return _framesize >> (LogBytesPerWord - LogBytesPerInt);
}

// Oop maps recorded while generating the stub (GC safepoint info).
OopMapSet* oop_maps() const { return _oop_maps; }

private:
#ifdef ASSERT
bool target_uses_register(VMReg reg) {
@@ -3105,21 +3189,23 @@ class NativeInvokerGenerator : public StubCodeGenerator {

static const int native_invoker_code_size = 1024;

BufferBlob* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
BufferBlob* _invoke_native_blob =
BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
if (_invoke_native_blob == NULL)
return NULL; // allocation failure

CodeBuffer code(_invoke_native_blob);
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
int locs_size = 64;
CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
g.generate();
code.log_section_sizes("nep_invoker_blob");

return _invoke_native_blob;
RuntimeStub* stub =
RuntimeStub::new_runtime_stub("nep_invoker_blob",
&code,
g.frame_complete(),
g.framesize(),
g.oop_maps(), false);
return stub;
}

void NativeInvokerGenerator::generate() {
@@ -3128,26 +3214,40 @@ void NativeInvokerGenerator::generate() {
|| target_uses_register(rthread->as_VMReg())),
"Register conflict");

enum layout {
rbp_off,
rbp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};

assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
_framesize = align_up(framesize + (spill_size_in_bytes() >> LogBytesPerInt), 4);
assert(is_even(_framesize/2), "sp not 16-byte aligned");

_oop_maps = new OopMapSet();
MacroAssembler* masm = _masm;

__ set_last_Java_frame(sp, noreg, lr, rscratch1);
address start = __ pc();

__ enter();

// Store a pointer to the previous R29 (RFP) saved on the stack as it
// may contain an oop if PreserveFramePointer is off. This value is
// retrieved later by frame::sender_for_entry_frame() when the stack
// is walked.
__ mov(rscratch1, sp);
__ str(rscratch1, Address(rthread, JavaThread::saved_fp_address_offset()));
// lr and fp are already in place
__ sub(sp, rfp, ((unsigned)_framesize-4) << LogBytesPerInt); // prolog

_frame_complete = __ pc() - start;

address the_pc = __ pc();
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
OopMap* map = new OopMap(_framesize, 0);
_oop_maps->add_gc_map(the_pc - start, map);

// State transition
__ mov(rscratch1, _thread_in_native);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);

assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");

rt_call(masm, _call_target);

__ mov(rscratch1, _thread_in_native_trans);
@@ -3193,27 +3293,14 @@ void NativeInvokerGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);

// Need to save the native result registers around any runtime calls.
RegSet spills;
FloatRegSet fp_spills;
for (int i = 0; i < _output_registers.length(); i++) {
VMReg output = _output_registers.at(i);
if (output->is_Register()) {
spills += RegSet::of(output->as_Register());
} else if (output->is_FloatRegister()) {
fp_spills += FloatRegSet::of(output->as_FloatRegister());
}
}

__ push(spills, sp);
__ push_fp(fp_spills, sp);
spill_output_registers();

__ mov(c_rarg0, rthread);
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ blr(rscratch1);

__ pop_fp(fp_spills, sp);
__ pop(spills, sp);
fill_output_registers();

__ b(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@@ -3223,13 +3310,11 @@ void NativeInvokerGenerator::generate() {
__ block_comment("{ L_reguard");
__ bind(L_reguard);

__ push(spills, sp);
__ push_fp(fp_spills, sp);
spill_output_registers();

rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));

__ pop_fp(fp_spills, sp);
__ pop(spills, sp);
fill_output_registers();

__ b(L_after_reguard);

@@ -3239,3 +3324,4 @@ void NativeInvokerGenerator::generate() {

__ flush();
}
#endif // COMPILER2
@@ -1898,10 +1898,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}

#ifdef COMPILER2
// Native invoker stubs are not implemented on this platform yet.
// (The flattened diff had retained the old BufferBlob* signature in front
// of this definition, producing two signatures; reconstructed here.)
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
                                                int shadow_space_bytes,
                                                const GrowableArray<VMReg>& input_registers,
                                                const GrowableArray<VMReg>& output_registers) {
  Unimplemented();
  return nullptr;
}
#endif
@@ -3442,10 +3442,12 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
reverse_words(m, (unsigned long *)m_ints, longwords);
}

#ifdef COMPILER2
// Native invoker stubs are not implemented on this platform yet.
// (The flattened diff had retained the old BufferBlob* signature in front
// of this definition, producing two signatures; reconstructed here.)
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
                                                int shadow_space_bytes,
                                                const GrowableArray<VMReg>& input_registers,
                                                const GrowableArray<VMReg>& output_registers) {
  Unimplemented();
  return nullptr;
}
#endif
@@ -3468,10 +3468,12 @@ int SpinPause() {
return 0;
}

#ifdef COMPILER2
// Native invoker stubs are not implemented on this platform yet.
// (The flattened diff had retained the old BufferBlob* signature in front
// of this definition, producing two signatures; reconstructed here.)
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
                                                int shadow_space_bytes,
                                                const GrowableArray<VMReg>& input_registers,
                                                const GrowableArray<VMReg>& output_registers) {
  Unimplemented();
  return nullptr;
}
#endif
@@ -346,10 +346,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());

if (jfa->saved_rbp_address()) {
update_map_with_saved_link(map, jfa->saved_rbp_address());
}

return fr;
}

1 comment on commit 6baecf3

@openjdk-notifier
Copy link

@openjdk-notifier openjdk-notifier bot commented on 6baecf3 Mar 1, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.