Skip to content

Commit

Permalink
8292638: x86: Improve scratch register handling in VM stubs
Browse files Browse the repository at this point in the history
Co-authored-by: Aleksey Shipilev <shade@openjdk.org>
Reviewed-by: kvn, shade
  • Loading branch information
Vladimir Ivanov and shipilev committed Aug 23, 2022
1 parent d24b7b7 commit f3be673
Show file tree
Hide file tree
Showing 14 changed files with 371 additions and 507 deletions.
134 changes: 73 additions & 61 deletions src/hotspot/cpu/x86/assembler_x86.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12165,81 +12165,93 @@ void Assembler::set_byte_if_not_zero(Register dst) {

#else // LP64

// 64bit only pieces of the assembler

// Sets the low byte of dst to 1 if ZF == 0 and to 0 otherwise,
// by emitting SETNE dst8 (opcode 0F 95 /r, ModRM in register-direct form 0xC0|reg).
void Assembler::set_byte_if_not_zero(Register dst) {
  // NOTE(review): the 'true' argument presumably marks a byte-sized register
  // operand so a REX prefix is emitted where required (spl/bpl/sil/dil,
  // r8b-r15b) — confirm against prefix_and_encode.
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x95, (0xC0 | enc));
}

// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.

bool Assembler::reachable(AddressLiteral adr) {
int64_t disp;
relocInfo::relocType relocType = adr.reloc();

// None will force a 64bit literal to the code stream. Likely a placeholder
// for something that will be patched later and we need to certain it will
// always be reachable.
if (relocType == relocInfo::none) {
return false;
}
if (relocType == relocInfo::internal_word_type) {
// This should be rip relative and easily reachable.
return true;
}
if (relocType == relocInfo::virtual_call_type ||
relocType == relocInfo::opt_virtual_call_type ||
relocType == relocInfo::static_call_type ||
relocType == relocInfo::static_stub_type ) {
// This should be rip relative within the code cache and easily
// Determine whether an address is always reachable in rip-relative addressing mode
// when accessed from the code cache. "Always" means independent of the current
// emission point, so no stress-mode or boundary correction is needed.
static bool is_always_reachable(address target, relocInfo::relocType reloc_type) {
  switch (reloc_type) {
    // This should be rip-relative and easily reachable.
    case relocInfo::internal_word_type: {
      return true;
    }
    // This should be rip-relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // IC code is going to have issues).
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type:
    case relocInfo::static_call_type:
    case relocInfo::static_stub_type: {
      return true;
    }
    case relocInfo::runtime_call_type:
    case relocInfo::external_word_type:
    case relocInfo::poll_return_type: // these are really external_word but need special
    case relocInfo::poll_type: {      // relocs to identify them
      // Reachable for sure only when the target itself lives in the code cache.
      return CodeCache::contains(target);
    }
    default: {
      return false;
    }
  }
}

// Stress the correction code
if (ForceUnreachable) {
// Must be runtimecall reloc, see if it is in the codecache
// Flipping stuff in the codecache to be unreachable causes issues
// with things like inline caches where the additional instructions
// are not handled.
if (CodeCache::find_blob(adr._target) == NULL) {
// Determine whether an address is reachable in rip-relative addressing mode from the code cache.
static bool is_reachable(address target, relocInfo::relocType reloc_type) {
if (is_always_reachable(target, reloc_type)) {
return true;
}
switch (reloc_type) {
// None will force a 64bit literal to the code stream. Likely a placeholder
// for something that will be patched later and we need to certain it will
// always be reachable.
case relocInfo::none: {
return false;
}
case relocInfo::runtime_call_type:
case relocInfo::external_word_type:
case relocInfo::poll_return_type: // these are really external_word but need special
case relocInfo::poll_type: { // relocs to identify them
assert(!CodeCache::contains(target), "always reachable");
if (ForceUnreachable) {
return false; // stress the correction code
}
// For external_word_type/runtime_call_type if it is reachable from where we
// are now (possibly a temp buffer) and where we might end up
// anywhere in the code cache then we are always reachable.
// This would have to change if we ever save/restore shared code to be more pessimistic.
// Code buffer has to be allocated in the code cache, so check against
// code cache boundaries cover that case.
//
// In rip-relative addressing mode, an effective address is formed by adding displacement
// to the 64-bit RIP of the next instruction which is not known yet. Considering target address
// is guaranteed to be outside of the code cache, checking against code cache boundaries is enough
// to account for that.
return Assembler::is_simm32(target - CodeCache::low_bound()) &&
Assembler::is_simm32(target - CodeCache::high_bound());
}
default: {
return false;
}
}
// For external_word_type/runtime_call_type if it is reachable from where we
// are now (possibly a temp buffer) and where we might end up
// anywhere in the codeCache then we are always reachable.
// This would have to change if we ever save/restore shared code
// to be more pessimistic.
disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
if (!is_simm32(disp)) return false;
disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
if (!is_simm32(disp)) return false;

disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

// Because rip relative is a disp + address_of_next_instruction and we
// don't know the value of address_of_next_instruction we apply a fudge factor
// to make sure we will be ok no matter the size of the instruction we get placed into.
// We don't have to fudge the checks above here because they are already worst case.

// 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
// + 4 because better safe than sorry.
const int fudge = 12 + 4;
if (disp < 0) {
disp -= fudge;
} else {
disp += fudge;
}
return is_simm32(disp);
}

// Returns true if adr can be addressed rip-relatively from the current emission
// point. Only meaningful while emitting into the code cache (asserted), since
// the displacement check is made against code cache boundaries.
bool Assembler::reachable(AddressLiteral adr) {
  assert(CodeCache::contains(pc()), "required");
  return is_reachable(adr.target(), adr.reloc());
}

// Returns true if adr is rip-relatively reachable from anywhere in the code
// cache, regardless of the eventual emission point (asserted to be in the
// code cache). Callers use this to decide whether a scratch register is needed.
bool Assembler::always_reachable(AddressLiteral adr) {
  assert(CodeCache::contains(pc()), "required");
  return is_always_reachable(adr.target(), adr.reloc());
}

void Assembler::emit_data64(jlong data,
Expand Down
8 changes: 5 additions & 3 deletions src/hotspot/cpu/x86/assembler_x86.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -802,16 +802,18 @@ class Assembler : public AbstractAssembler {
void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);

protected:
#ifdef ASSERT
#ifdef ASSERT
void check_relocation(RelocationHolder const& rspec, int format);
#endif
#endif

void emit_data(jint data, relocInfo::relocType rtype, int format);
void emit_data(jint data, RelocationHolder const& rspec, int format);
void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
bool always_reachable(AddressLiteral adr) NOT_LP64( { return true; } );
bool reachable(AddressLiteral adr) NOT_LP64( { return true; } );


// These are all easily abused and hence protected

Expand Down
80 changes: 50 additions & 30 deletions src/hotspot/cpu/x86/macroAssembler_x86.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -520,14 +520,15 @@ void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {

}

// 64-bit compare of src1 against the value stored at address literal src2.
// rscratch may be noreg only when src2 is always rip-relatively reachable;
// otherwise it is used to materialize the address.
void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

Expand Down Expand Up @@ -1122,30 +1123,36 @@ void MacroAssembler::addptr(Address dst, Register src) {
LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
Assembler::addsd(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::addsd(dst, Address(rscratch1, 0));
lea(rscratch, src);
Assembler::addsd(dst, Address(rscratch, 0));
}
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
addss(dst, as_Address(src));
} else {
lea(rscratch1, src);
addss(dst, Address(rscratch1, 0));
lea(rscratch, src);
addss(dst, Address(rscratch, 0));
}
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
Assembler::addpd(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::addpd(dst, Address(rscratch1, 0));
lea(rscratch, src);
Assembler::addpd(dst, Address(rscratch, 0));
}
}

Expand Down Expand Up @@ -2124,12 +2131,13 @@ void MacroAssembler::empty_FPU_stack() {
}
#endif // !LP64

void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
Assembler::mulpd(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::mulpd(dst, Address(rscratch1, 0));
lea(rscratch, src);
Assembler::mulpd(dst, Address(rscratch, 0));
}
}

Expand Down Expand Up @@ -2469,21 +2477,23 @@ void MacroAssembler::movbyte(ArrayAddress dst, int src) {
movb(as_Address(dst), src);
}

void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
movdl(dst, as_Address(src));
} else {
lea(rscratch1, src);
movdl(dst, Address(rscratch1, 0));
lea(rscratch, src);
movdl(dst, Address(rscratch, 0));
}
}

void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");
if (reachable(src)) {
movq(dst, as_Address(src));
} else {
lea(rscratch1, src);
movq(dst, Address(rscratch1, 0));
lea(rscratch, src);
movq(dst, Address(rscratch, 0));
}
}

Expand Down Expand Up @@ -2683,16 +2693,20 @@ void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral s
}

// Masked evmovdquq dst{mask}, [src]; rscratch may be noreg only when src is
// always reachable rip-relatively, otherwise it holds the materialized address.
void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge,
                               int vector_len, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len);
  }
}

void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
Assembler::evmovdquq(dst, as_Address(src), vector_len);
} else {
Expand All @@ -2710,12 +2724,14 @@ void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
}
}

void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
Assembler::movsd(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::movsd(dst, Address(rscratch1, 0));
lea(rscratch, src);
Assembler::movsd(dst, Address(rscratch, 0));
}
}

Expand Down Expand Up @@ -2746,12 +2762,14 @@ void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_le
}
}

void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
Assembler::mulsd(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::mulsd(dst, Address(rscratch1, 0));
lea(rscratch, src);
Assembler::mulsd(dst, Address(rscratch, 0));
}
}

Expand Down Expand Up @@ -3246,6 +3264,8 @@ void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src

void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
assert(UseAVX > 0, "requires some form of AVX");
assert(rscratch != noreg || always_reachable(src), "missing");

if (reachable(src)) {
Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
} else {
Expand Down

1 comment on commit f3be673

@openjdk-notifier
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.