Skip to content
Permalink
Browse files
8266257: Fix foreign linker build issues for ppc and s390
Reviewed-by: jvernee, vlivanov
  • Loading branch information
mcimadamore committed Jun 3, 2021
1 parent c8f4c02 commit 29ab16284a4f1ac7ed691fd12cb622b0440c04be
Showing 15 changed files with 311 additions and 243 deletions.
@@ -878,7 +878,7 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// 64 bits items (Aarch64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void SharedRuntime::move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -979,7 +979,7 @@ static void object_move(MacroAssembler* masm,
}

// A float arg may have to do float reg int reg conversion
void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(src.first()->is_stack() && dst.first()->is_stack() ||
src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
if (src.first()->is_stack()) {
@@ -998,7 +998,7 @@ void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair ds
}

// A long move
void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -1022,7 +1022,7 @@ void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst


// A double move
void SharedRuntime::double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(src.first()->is_stack() && dst.first()->is_stack() ||
src.first()->is_reg() && dst.first()->is_reg(), "Unexpected error");
if (src.first()->is_stack()) {
@@ -34,3 +34,8 @@ const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) con
Unimplemented();
return {};
}

// Foreign-linker call-register parsing is not implemented on this platform;
// reaching this is a hard error (Unimplemented() does not return normally).
const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
  Unimplemented();
  return {};
}
@@ -29,3 +29,12 @@ address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jab
Unimplemented();
return nullptr;
}

// Optimized upcall stubs are unsupported here; supports_optimized_upcalls()
// returns false, so this must never be reached.
address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv) {
  ShouldNotCallThis();
  return nullptr;
}

// This platform provides no optimized upcall path.
bool ProgrammableUpcallHandler::supports_optimized_upcalls() {
  return false;
}
@@ -36,3 +36,8 @@ const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) con
Unimplemented();
return {};
}

// Call-register descriptor parsing for the foreign linker is not supported
// on this platform; calling this aborts via Unimplemented().
const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
  Unimplemented();
  return {};
}
@@ -30,3 +30,12 @@ address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jab
Unimplemented();
return nullptr;
}

// No optimized upcall stubs on this platform — guarded by
// supports_optimized_upcalls() returning false, so this is unreachable.
address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv) {
  ShouldNotCallThis();
  return nullptr;
}

// Optimized upcalls are not available on this platform.
bool ProgrammableUpcallHandler::supports_optimized_upcalls() {
  return false;
}
@@ -34,3 +34,8 @@ const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) con
Unimplemented();
return {};
}

// Unsupported on this platform: parsing a call-register convention object
// aborts the VM via Unimplemented().
const CallRegs ForeignGlobals::parse_call_regs_impl(jobject jconv) const {
  Unimplemented();
  return {};
}
@@ -29,3 +29,12 @@ address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jab
Unimplemented();
return nullptr;
}

// Unreachable on this platform: optimized upcalls are reported unsupported,
// so no caller should request a stub.
address ProgrammableUpcallHandler::generate_optimized_upcall_stub(jobject mh, Method* entry, jobject jabi, jobject jconv) {
  ShouldNotCallThis();
  return nullptr;
}

// Report that this platform cannot generate optimized upcall stubs.
bool ProgrammableUpcallHandler::supports_optimized_upcalls() {
  return false;
}
@@ -913,6 +913,221 @@ void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Bias past the saved rbp and the return address: 2 words = 4 stack
  // slots. This should really be in_preserve_stack_slots.
  const int bias_slots = 4;
  return (r->reg2stack() + bias_slots) * VMRegImpl::stack_slot_size;
}

// Byte offset of an outgoing stack slot, biased past the ABI's reserved
// out-preserve area.
static int reg2offset_out(VMReg r) {
  const int bias_slots = SharedRuntime::out_preserve_stack_slots();
  return (r->reg2stack() + bias_slots) * VMRegImpl::stack_slot_size;
}

// A long move
//
// Shuffle a 64-bit integer argument from src to dst. Incoming stack args
// are read rbp-relative, outgoing stack args are written rsp-relative.
// Clobbers rax as a scratch register in the stack-to-stack case.
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst) {

// The calling conventions assures us that each VMregpair is either
// all really one physical register or adjacent stack slots.

if (src.is_single_phys_reg() ) {
if (dst.is_single_phys_reg()) {
// reg to reg: skip the move if the value is already in place
if (dst.first() != src.first()) {
mov(dst.first()->as_Register(), src.first()->as_Register());
}
} else {
// reg to stack (outgoing area, rsp-relative)
assert(dst.is_single_reg(), "not a stack pair");
movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
}
} else if (dst.is_single_phys_reg()) {
// stack to reg
// NOTE(review): offset computed with reg2offset_out but read off rbp --
// mirrors the SharedRuntime copy of this routine; confirm intentional.
assert(src.is_single_reg(), "not a stack pair");
movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
} else {
// stack to stack, staged through scratch register rax
assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
movq(rax, Address(rbp, reg2offset_in(src.first())));
movq(Address(rsp, reg2offset_out(dst.first())), rax);
}
}

// A double move
//
// Shuffle a 64-bit floating-point argument from src to dst. Incoming stack
// args are read rbp-relative, outgoing stack args are written rsp-relative.
// Clobbers rax as a scratch register in the stack-to-stack case.
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst) {

// The calling conventions assures us that each VMregpair is either
// all really one physical register or adjacent stack slots.

if (src.is_single_phys_reg() ) {
if (dst.is_single_phys_reg()) {
// In theory these overlap but the ordering is such that this is likely a nop
if ( src.first() != dst.first()) {
movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
}
} else {
// xmm reg to outgoing stack slot
assert(dst.is_single_reg(), "not a stack pair");
movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
}
} else if (dst.is_single_phys_reg()) {
// stack to xmm reg
// NOTE(review): offset computed with reg2offset_out but read off rbp --
// mirrors the SharedRuntime copy of this routine; confirm intentional.
assert(src.is_single_reg(), "not a stack pair");
movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
} else {
// stack to stack, staged through scratch register rax (integer move of
// the raw 64-bit pattern -- no FP conversion involved)
assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
movq(rax, Address(rbp, reg2offset_in(src.first())));
movq(Address(rsp, reg2offset_out(dst.first())), rax);
}
}


// A float arg may have to do float reg int reg conversion
//
// Shuffle a 32-bit floating-point argument from src to dst. Both halves of
// each pair must be invalid (single-slot value); incoming stack args are
// read rbp-relative, outgoing ones written rsp-relative. Clobbers rax in
// the stack-to-stack case.
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

// The calling conventions assures us that each VMregpair is either
// all really one physical register or adjacent stack slots.

if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack: 32-bit load, pointer-width store through scratch rax
movl(rax, Address(rbp, reg2offset_in(src.first())));
movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// stack to reg
assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
// reg to stack
assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
} else {
// reg to reg
// In theory these overlap but the ordering is such that this is likely a nop
// NOTE(review): movdbl copies 64 bits for a float reg-reg move; the upper
// half is don't-care for a float arg -- matches the SharedRuntime version,
// but confirm movflt wasn't intended.
if ( src.first() != dst.first()) {
movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
}
}
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
//
// Sign-extends (movslq) when the source is an incoming stack slot;
// clobbers rax as a scratch register in the stack-to-stack case.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
movslq(rax, Address(rbp, reg2offset_in(src.first())));
movq(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// stack to reg
movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
// reg to stack
// Do we really have to sign extend???
// __ movslq(src.first()->as_Register(), src.first()->as_Register());
movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
// reg to reg: full 64-bit copy, no extension performed
// Do we really have to sign extend???
// __ movslq(dst.first()->as_Register(), src.first()->as_Register());
if (dst.first() != src.first()) {
movq(dst.first()->as_Register(), src.first()->as_Register());
}
}
}

// Move a pointer-sized (64-bit) value from src to dst. Incoming stack args
// are read rbp-relative, outgoing stack args written rsp-relative; the
// stack-to-stack case clobbers rax as a scratch register.
void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
movq(rax, Address(rbp, reg2offset_in(src.first())));
movq(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// stack to reg
movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
// reg to stack
movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
// reg to reg: skip the move if already in place
if (dst.first() != src.first()) {
movq(dst.first()->as_Register(), src.first()->as_Register());
}
}
}

// An oop arg. Must pass a handle not the oop itself
//
// Converts an oop argument into a handle: the address of the stack slot
// holding the oop (or NULL if the oop itself is NULL), records the oop's
// location in 'map', and -- when 'is_receiver' -- stores the receiver's
// frame offset through 'receiver_offset'. Uses rax as the handle register
// when the destination is a stack slot.
void MacroAssembler::object_move(OopMap* map,
int oop_handle_offset,
int framesize_in_slots,
VMRegPair src,
VMRegPair dst,
bool is_receiver,
int* receiver_offset) {

// must pass a handle. First figure out the location we use as a handle

Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

// See if oop is NULL if it is we need no handle

if (src.first()->is_stack()) {

// Oop is already on the stack as an argument
int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
if (is_receiver) {
*receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
}

// Handle = slot address, unless the oop stored there is NULL
cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
lea(rHandle, Address(rbp, reg2offset_in(src.first())));
// conditionally move a NULL
cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
} else {

// Oop is in an a register we must store it to the space we reserve
// on the stack for oop_handles and pass a handle if oop is non-NULL

// Map the java argument register back to its reserved handle slot index
const Register rOop = src.first()->as_Register();
int oop_slot;
if (rOop == j_rarg0)
oop_slot = 0;
else if (rOop == j_rarg1)
oop_slot = 1;
else if (rOop == j_rarg2)
oop_slot = 2;
else if (rOop == j_rarg3)
oop_slot = 3;
else if (rOop == j_rarg4)
oop_slot = 4;
else {
assert(rOop == j_rarg5, "wrong register");
oop_slot = 5;
}

oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
int offset = oop_slot*VMRegImpl::stack_slot_size;

map->set_oop(VMRegImpl::stack2reg(oop_slot));
// Store oop in handle area, may be NULL
movptr(Address(rsp, offset), rOop);
if (is_receiver) {
*receiver_offset = offset;
}

cmpptr(rOop, (int32_t)NULL_WORD);
lea(rHandle, Address(rsp, offset));
// conditionally move a NULL from the handle area where it was just stored
cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
}

// If arg is on the stack then place it otherwise it is already in correct reg.
if (dst.first()->is_stack()) {
movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
}
}

#endif // _LP64

// Now versions that are common to 32/64 bit
@@ -26,6 +26,8 @@
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"
@@ -206,6 +208,22 @@ class MacroAssembler: public Assembler {
// The pointer will be loaded into the thread register.
void get_thread(Register thread);

#ifdef _LP64
// Support for argument shuffling

void move32_64(VMRegPair src, VMRegPair dst);
void long_move(VMRegPair src, VMRegPair dst);
void float_move(VMRegPair src, VMRegPair dst);
void double_move(VMRegPair src, VMRegPair dst);
void move_ptr(VMRegPair src, VMRegPair dst);
void object_move(OopMap* map,
int oop_handle_offset,
int framesize_in_slots,
VMRegPair src,
VMRegPair dst,
bool is_receiver,
int* receiver_offset);
#endif // _LP64

// Support for VM calls
//
@@ -1124,7 +1124,7 @@ static void object_move(MacroAssembler* masm,
}

// A float arg may have to do float reg int reg conversion
void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

// Because of the calling convention we know that src is either a stack location
@@ -1142,7 +1142,7 @@ void SharedRuntime::float_move(MacroAssembler* masm, VMRegPair src, VMRegPair ds
}

// A long move
void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

// The only legal possibility for a long_move VMRegPair is:
// 1: two stack slots (possibly unaligned)
@@ -1161,7 +1161,7 @@ void SharedRuntime::long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst
}

// A double move
void SharedRuntime::double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

// The only legal possibilities for a double_move VMRegPair are:
// The painful thing here is that like long_move a VMRegPair might be

1 comment on commit 29ab162

@openjdk-notifier
Copy link

@openjdk-notifier openjdk-notifier bot commented on 29ab162 Jun 3, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.