60 changes: 1 addition & 59 deletions src/hotspot/cpu/x86/nativeInst_x86.cpp
@@ -67,9 +67,7 @@ void NativeCall::print() {
// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
*code_pos = instruction_code;
*((int32_t *)(code_pos+1)) = (int32_t) disp;
ICache::invalidate_range(code_pos, instruction_size);
@@ -157,7 +155,6 @@ void NativeCall::set_destination_mt_safe(address dest) {


void NativeMovConstReg::verify() {
#ifdef AMD64
// make sure code pattern is actually a mov reg64, imm64 instruction
bool valid_rex_prefix = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
@@ -169,12 +166,6 @@ void NativeMovConstReg::verify() {
print();
fatal("not a REX.W[B] mov reg64, imm64");
}
#else
// make sure code pattern is actually a mov reg, imm32 instruction
u_char test_byte = *(u_char*)instruction_address();
u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


@@ -192,12 +183,10 @@ int NativeMovRegMem::instruction_start() const {
// See comment in Assembler::locate_operand() about VEX prefixes.
if (instr_0 == instruction_VEX_prefix_2bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 2;
}
if (instr_0 == instruction_VEX_prefix_3bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 3;
}
if (instr_0 == instruction_EVEX_prefix_4bytes) {
@@ -313,8 +302,7 @@ void NativeMovRegMem::print() {
void NativeLoadAddress::verify() {
// make sure code pattern is actually a mov [reg+offset], reg instruction
u_char test_byte = *(u_char*)instruction_address();
if ( ! ((test_byte == lea_instruction_code)
LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
if ((test_byte != lea_instruction_code) && (test_byte != mov64_instruction_code)) {
fatal ("not a lea reg, [reg+offs] instruction");
}
}
@@ -340,9 +328,7 @@ void NativeJump::verify() {

void NativeJump::insert(address code_pos, address entry) {
intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

*code_pos = instruction_code;
*((int32_t*)(code_pos + 1)) = (int32_t)disp;
@@ -355,11 +341,7 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
// in use. The patching in that instance must happen only when certain
// alignment restrictions are true. These guarantees check those
// conditions.
#ifdef AMD64
const int linesize = 64;
#else
const int linesize = 32;
#endif // AMD64

// Must be wordSize aligned
guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
@@ -386,7 +368,6 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
// complete jump instruction (to be inserted) is in code_buffer;
#ifdef _LP64
union {
jlong cb_long;
unsigned char code_buffer[8];
@@ -402,43 +383,6 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add

Atomic::store((jlong *) verified_entry, u.cb_long);
ICache::invalidate_range(verified_entry, 8);

#else
unsigned char code_buffer[5];
code_buffer[0] = instruction_code;
intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
*(int32_t*)(code_buffer + 1) = (int32_t)disp;

check_verified_entry_alignment(entry, verified_entry);

// Can't call nativeJump_at() because it's asserts jump exists
NativeJump* n_jump = (NativeJump*) verified_entry;

//First patch dummy jmp in place

unsigned char patch[4];
assert(sizeof(patch)==sizeof(int32_t), "sanity check");
patch[0] = 0xEB; // jmp rel8
patch[1] = 0xFE; // jmp to self
patch[2] = 0xEB;
patch[3] = 0xFE;

// First patch dummy jmp in place
*(int32_t*)verified_entry = *(int32_t *)patch;

n_jump->wrote(0);

// Patch 5th byte (from jump instruction)
verified_entry[4] = code_buffer[4];

n_jump->wrote(4);

// Patch bytes 0-3 (from jump instruction)
*(int32_t*)verified_entry = *(int32_t *)code_buffer;
// Invalidate. Opteron requires a flush after every write.
n_jump->wrote(0);
#endif // _LP64

}

void NativeIllegalInstruction::insert(address code_pos) {
@@ -455,9 +399,7 @@ void NativeGeneralJump::verify() {

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

*code_pos = unconditional_long_jump;
*((int32_t *)(code_pos+1)) = (int32_t) disp;
23 changes: 0 additions & 23 deletions src/hotspot/cpu/x86/nativeInst_x86.hpp
@@ -126,10 +126,8 @@ class NativeCall: public NativeInstruction {
address return_address() const { return addr_at(return_address_offset); }
address destination() const;
void set_destination(address dest) {
#ifdef AMD64
intptr_t disp = dest - return_address();
guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
set_int_at(displacement_offset, (int)(dest - return_address()));
}
// Returns whether the 4-byte displacement operand is 4-byte aligned.
@@ -211,15 +209,9 @@ class NativeCallReg: public NativeInstruction {
// Instruction format for implied addressing mode immediate operand move to register instruction:
// [REX/REX2] [OPCODE] [IMM32]
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
static const bool has_rex = true;
static const int rex_size = 1;
static const int rex2_size = 2;
#else
static const bool has_rex = false;
static const int rex_size = 0;
static const int rex2_size = 0;
#endif // AMD64
public:
enum Intel_specific_constants {
instruction_code = 0xB8,
@@ -390,13 +382,8 @@ inline NativeMovRegMem* nativeMovRegMem_at (address address) {
// leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
static const bool has_rex = true;
static const int rex_size = 1;
#else
static const bool has_rex = false;
static const int rex_size = 0;
#endif // AMD64
public:
enum Intel_specific_constants {
instruction_prefix_wide = Assembler::REX_W,
@@ -447,9 +434,7 @@ class NativeJump: public NativeInstruction {
if (dest == (address) -1) {
val = -5; // jump to self
}
#ifdef AMD64
assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
set_int_at(data_offset, (jint)val);
}

@@ -572,19 +557,14 @@ inline bool NativeInstruction::is_jump_reg() {
inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
(ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
const int test_offset = has_rex2_prefix() ? 2 : (has_rex_prefix ? 1 : 0);
#else
const int test_offset = 0;
#endif
const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
return is_test_opcode && is_rax_target;
}

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
bool valid_rex_prefix = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
(ubyte_at(1) == Assembler::REX2BIT_W ||
@@ -593,9 +573,6 @@ inline bool NativeInstruction::is_mov_literal64() {

int opcode = has_rex2_prefix() ? ubyte_at(2) : ubyte_at(1);
return ((valid_rex_prefix || valid_rex2_prefix) && (opcode & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
return false;
#endif // AMD64
}

class NativePostCallNop: public NativeInstruction {