From 4bbfc50172de0f38cfdfa732d41c71578f2fd43c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20H=C3=A4ssig?=
Date: Thu, 24 Apr 2025 18:36:53 +0200
Subject: [PATCH 1/2] Clean up 32-bit x86 code in nativeInst_x86.*

---
 src/hotspot/cpu/x86/nativeInst_x86.cpp | 60 +-------------------------
 src/hotspot/cpu/x86/nativeInst_x86.hpp | 23 ----------
 2 files changed, 1 insertion(+), 82 deletions(-)

diff --git a/src/hotspot/cpu/x86/nativeInst_x86.cpp b/src/hotspot/cpu/x86/nativeInst_x86.cpp
index 4ee741077dc06..9392c3d463c62 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp
@@ -67,9 +67,7 @@ void NativeCall::print() {
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
   *code_pos = instruction_code;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;
   ICache::invalidate_range(code_pos, instruction_size);
@@ -157,7 +155,6 @@ void NativeCall::set_destination_mt_safe(address dest) {
 
 
 void NativeMovConstReg::verify() {
-#ifdef AMD64
   // make sure code pattern is actually a mov reg64, imm64 instruction
   bool valid_rex_prefix = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
   bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
@@ -169,12 +166,6 @@ void NativeMovConstReg::verify() {
     print();
     fatal("not a REX.W[B] mov reg64, imm64");
   }
-#else
-  // make sure code pattern is actually a mov reg, imm32 instruction
-  u_char test_byte = *(u_char*)instruction_address();
-  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
-  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
-#endif // AMD64
 }
 
 
@@ -192,12 +183,10 @@ int NativeMovRegMem::instruction_start() const {
   // See comment in Assembler::locate_operand() about VEX prefixes.
   if (instr_0 == instruction_VEX_prefix_2bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 2;
   }
   if (instr_0 == instruction_VEX_prefix_3bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 3;
   }
   if (instr_0 == instruction_EVEX_prefix_4bytes) {
@@ -313,8 +302,7 @@ void NativeMovRegMem::print() {
 void NativeLoadAddress::verify() {
   // make sure code pattern is actually a mov [reg+offset], reg instruction
   u_char test_byte = *(u_char*)instruction_address();
-  if ( ! ((test_byte == lea_instruction_code)
-          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
+  if (!((test_byte == lea_instruction_code) || (test_byte == mov64_instruction_code))) {
     fatal ("not a lea reg, [reg+offs] instruction");
   }
 }
@@ -340,9 +328,7 @@ void NativeJump::verify() {
 
 void NativeJump::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64
 
   *code_pos = instruction_code;
   *((int32_t*)(code_pos + 1)) = (int32_t)disp;
@@ -355,11 +341,7 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
   // in use. The patching in that instance must happen only when certain
   // alignment restrictions are true. These guarantees check those
   // conditions.
-#ifdef AMD64
   const int linesize = 64;
-#else
-  const int linesize = 32;
-#endif // AMD64
 
   // Must be wordSize aligned
   guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
@@ -386,7 +368,6 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
 //
 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   // complete jump instruction (to be inserted) is in code_buffer;
-#ifdef _LP64
   union {
     jlong cb_long;
     unsigned char code_buffer[8];
@@ -402,43 +383,6 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
 
   Atomic::store((jlong *) verified_entry, u.cb_long);
   ICache::invalidate_range(verified_entry, 8);
-
-#else
-  unsigned char code_buffer[5];
-  code_buffer[0] = instruction_code;
-  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
-  *(int32_t*)(code_buffer + 1) = (int32_t)disp;
-
-  check_verified_entry_alignment(entry, verified_entry);
-
-  // Can't call nativeJump_at() because it's asserts jump exists
-  NativeJump* n_jump = (NativeJump*) verified_entry;
-
-  //First patch dummy jmp in place
-
-  unsigned char patch[4];
-  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
-  patch[0] = 0xEB;  // jmp rel8
-  patch[1] = 0xFE;  // jmp to self
-  patch[2] = 0xEB;
-  patch[3] = 0xFE;
-
-  // First patch dummy jmp in place
-  *(int32_t*)verified_entry = *(int32_t *)patch;
-
-  n_jump->wrote(0);
-
-  // Patch 5th byte (from jump instruction)
-  verified_entry[4] = code_buffer[4];
-
-  n_jump->wrote(4);
-
-  // Patch bytes 0-3 (from jump instruction)
-  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
-  // Invalidate. Opteron requires a flush after every write.
-  n_jump->wrote(0);
-#endif // _LP64
-
 }
 
 void NativeIllegalInstruction::insert(address code_pos) {
@@ -455,9 +399,7 @@ void NativeGeneralJump::verify() {
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64
 
   *code_pos = unconditional_long_jump;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp
index d02387aa9ffbb..a8fde8fdfd5f5 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp
@@ -126,10 +126,8 @@ class NativeCall: public NativeInstruction {
   address return_address() const { return addr_at(return_address_offset); }
   address destination() const;
   void set_destination(address dest) {
-#ifdef AMD64
     intptr_t disp = dest - return_address();
     guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
     set_int_at(displacement_offset, (int)(dest - return_address()));
   }
   // Returns whether the 4-byte displacement operand is 4-byte aligned.
@@ -211,15 +209,9 @@ class NativeCallReg: public NativeInstruction {
 // Instruction format for implied addressing mode immediate operand move to register instruction:
 // [REX/REX2] [OPCODE] [IMM32]
 class NativeMovConstReg: public NativeInstruction {
-#ifdef AMD64
   static const bool has_rex = true;
   static const int rex_size = 1;
   static const int rex2_size = 2;
-#else
-  static const bool has_rex = false;
-  static const int rex_size = 0;
-  static const int rex2_size = 0;
-#endif // AMD64
  public:
   enum Intel_specific_constants {
     instruction_code = 0xB8,
@@ -390,13 +382,8 @@ inline NativeMovRegMem* nativeMovRegMem_at (address address) {
 
 // leal reg, [reg + offset]
 class NativeLoadAddress: public NativeMovRegMem {
-#ifdef AMD64
   static const bool has_rex = true;
   static const int rex_size = 1;
-#else
-  static const bool has_rex = false;
-  static const int rex_size = 0;
-#endif // AMD64
  public:
   enum Intel_specific_constants {
     instruction_prefix_wide = Assembler::REX_W,
@@ -447,9 +434,7 @@ class NativeJump: public NativeInstruction {
     if (dest == (address) -1) {
       val = -5; // jump to self
     }
-#ifdef AMD64
     assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
-#endif // AMD64
     set_int_at(data_offset, (jint)val);
   }
 
@@ -572,19 +557,14 @@ inline bool NativeInstruction::is_jump_reg() {
 inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                        (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
-#ifdef AMD64
   const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
   const int test_offset = has_rex2_prefix() ? 2 : (has_rex_prefix ? 1 : 0);
-#else
-  const int test_offset = 0;
-#endif
   const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
   const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
   return is_test_opcode && is_rax_target;
 }
 
 inline bool NativeInstruction::is_mov_literal64() {
-#ifdef AMD64
   bool valid_rex_prefix = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
   bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
                            (ubyte_at(1) == Assembler::REX2BIT_W ||
@@ -593,9 +573,6 @@ inline bool NativeInstruction::is_mov_literal64() {
   int opcode = has_rex2_prefix() ? ubyte_at(2) : ubyte_at(1);
   return ((valid_rex_prefix || valid_rex2_prefix) &&
           (opcode & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
-#else
-  return false;
-#endif // AMD64
 }
 
 class NativePostCallNop: public NativeInstruction {

From f2382dce7ccc30fc32138d5362ad92c6e9c51407 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20H=C3=A4ssig?=
Date: Mon, 28 Apr 2025 14:11:02 +0200
Subject: [PATCH 2/2] Apply readability suggestion
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Aleksey Shipilëv
---
 src/hotspot/cpu/x86/nativeInst_x86.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/hotspot/cpu/x86/nativeInst_x86.cpp b/src/hotspot/cpu/x86/nativeInst_x86.cpp
index 9392c3d463c62..a3fe22c0ec2b8 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp
@@ -302,7 +302,7 @@ void NativeMovRegMem::print() {
 void NativeLoadAddress::verify() {
   // make sure code pattern is actually a mov [reg+offset], reg instruction
   u_char test_byte = *(u_char*)instruction_address();
-  if (!((test_byte == lea_instruction_code) || (test_byte == mov64_instruction_code))) {
+  if ((test_byte != lea_instruction_code) && (test_byte != mov64_instruction_code)) {
     fatal ("not a lea reg, [reg+offs] instruction");
   }
 }
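
Note: the range check that PATCH 1/2 makes unconditional, guarantee(disp == (intptr_t)(jint)disp, ...), is the usual truncate-and-sign-extend idiom: the 5-byte E8/E9 call/jmp encodings hold a signed 32-bit displacement, so a 64-bit displacement is encodable only if casting it down to 32 bits and back reproduces the original value. A minimal standalone sketch of that idiom follows; it is illustrative only (fits_in_rel32 is not a HotSpot function) and assumes a 64-bit build, consistent with the 32-bit cleanup above.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the guarantee() above: a displacement fits a
// signed rel32 field exactly when truncating to 32 bits and sign-extending
// back yields the original value.
static bool fits_in_rel32(intptr_t disp) {
  return disp == (intptr_t)(int32_t)disp;
}

int main() {
  // disp is computed as in NativeCall::insert: target address minus the
  // address of the byte after the 5-byte instruction (code_pos + 1 + 4).
  assert(fits_in_rel32(0));
  assert(fits_in_rel32(INT32_MAX));
  assert(fits_in_rel32(INT32_MIN));
  assert(!fits_in_rel32((intptr_t)INT32_MAX + 1));  // just out of rel32 range
  printf("rel32 range checks passed\n");
  return 0;
}

PATCH 2/2 itself is just De Morgan's law applied for readability: !(a || b) becomes (!a && !b), pushing the negation inward so each comparison reads directly.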