From a66b84950c0841966b5b1336a8ea691d40798eea Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Fri, 28 Mar 2025 17:30:56 +0100 Subject: [PATCH 1/4] Fix --- src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp | 14 - src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp | 119 +----- src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp | 21 +- src/hotspot/cpu/x86/matcher_x86.hpp | 31 -- src/hotspot/cpu/x86/x86.ad | 395 +++--------------- 5 files changed, 73 insertions(+), 507 deletions(-) diff --git a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp index 83ecdee52199b..b4f8e9d95147d 100644 --- a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp +++ b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp @@ -43,22 +43,8 @@ void C2SafepointPollStub::emit(C2_MacroAssembler& masm) { __ bind(entry()); InternalAddress safepoint_pc(masm.pc() - masm.offset() + _safepoint_offset); -#ifdef _LP64 __ lea(rscratch1, safepoint_pc); __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1); -#else - const Register tmp1 = rcx; - const Register tmp2 = rdx; - __ push(tmp1); - __ push(tmp2); - - __ lea(tmp1, safepoint_pc); - __ get_thread(tmp2); - __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1); - - __ pop(tmp2); - __ pop(tmp1); -#endif __ jump(callback_addr); } diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index 8cf721f5b203c..dbba337b03712 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -107,16 +107,6 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool movptr(Address(rsp, framesize), (int32_t)0xbadb100d); } -#ifndef _LP64 - // If method sets FPU control word do it now - if (fp_mode_24b) { - fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_24())); - } - if (UseSSE >= 2 && VerifyFPU) { - verify_FPU(0, "FPU stack must be clean on entry"); - } -#endif - #ifdef ASSERT if (VerifyStackAtCalls) { Label L; @@ -133,7 +123,6 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool if (!is_stub) { BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - #ifdef _LP64 // We put the non-hot code of the nmethod entry barrier out-of-line in a stub. Label dummy_slow_path; Label dummy_continuation; @@ -147,10 +136,6 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool continuation = &stub->continuation(); } bs->nmethod_entry_barrier(this, slow_path, continuation); -#else - // Don't bother with out-of-line nmethod entry barrier stub for x86_32. - bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */); -#endif } } @@ -299,7 +284,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp // Locked by current thread if difference with current SP is less than one page. subptr(tmpReg, rsp); // Next instruction set ZFlag == 1 (Success) if difference is less then one page. - andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) ); + andptr(tmpReg, (int32_t) (7 - (int)os::vm_page_size()) ); movptr(Address(boxReg, 0), tmpReg); } jmp(DONE_LABEL); @@ -307,10 +292,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp bind(IsInflated); // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markWord::monitor_value -#ifndef _LP64 - // Just take slow path to avoid dealing with 64 bit atomic instructions here. 
- orl(boxReg, 1); // set ICC.ZF=0 to indicate failure -#else // Unconditionally set box->_displaced_header = markWord::unused_mark(). // Without cast to int32_t this style of movptr will destroy r10 which is typically obj. movptr(Address(boxReg, 0), checked_cast(markWord::unused_mark().value())); @@ -329,7 +310,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp jccb(Assembler::notEqual, NO_COUNT); // If not recursive, ZF = 0 at this point (fail) incq(Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); xorq(rax, rax); // Set ZF = 1 (success) for recursive lock, denoting locking success -#endif // _LP64 bind(DONE_LABEL); // ZFlag == 1 count in fast path @@ -338,10 +318,8 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp bind(COUNT); if (LockingMode == LM_LEGACY) { -#ifdef _LP64 // Count monitors in fast path increment(Address(thread, JavaThread::held_monitor_count_offset())); -#endif } xorl(tmpReg, tmpReg); // Set ZF == 1 @@ -404,11 +382,6 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t // It's inflated. -#ifndef _LP64 - // Just take slow path to avoid dealing with 64 bit atomic instructions here. - orl(boxReg, 1); // set ICC.ZF=0 to indicate failure - jmpb(DONE_LABEL); -#else // Despite our balanced locking property we still check that m->_owner == Self // as java routines or native JNI code called by this thread might // have released the lock. @@ -462,7 +435,6 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t bind (LSuccess); testl (boxReg, 0); // set ICC.ZF=1 to indicate success jmpb (DONE_LABEL); -#endif // _LP64 if (LockingMode == LM_LEGACY) { bind (Stacked); @@ -482,9 +454,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t if (LockingMode == LM_LEGACY) { // Count monitors in fast path -#ifdef _LP64 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); -#endif } xorl(tmpReg, tmpReg); // Set ZF == 1 @@ -563,11 +533,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist { // Handle inflated monitor. bind(inflated); -#ifndef _LP64 - // Just take slow path to avoid dealing with 64 bit atomic instructions here. - orl(box, 1); // set ICC.ZF=0 to indicate failure - jmpb(slow_path); -#else const Register monitor = t; if (!UseObjectMonitorTable) { @@ -633,7 +598,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist increment(recursions_address); bind(monitor_locked); -#endif // _LP64 } bind(locked); @@ -746,11 +710,6 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, bind(inflated); -#ifndef _LP64 - // Just take slow path to avoid dealing with 64 bit atomic instructions here. - orl(t, 1); // set ICC.ZF=0 to indicate failure - jmpb(slow_path); -#else if (!UseObjectMonitorTable) { assert(mark == monitor, "should be the same here"); } else { @@ -800,7 +759,6 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, // Recursive unlock. 
bind(recursive); decrement(recursions_address); -#endif // _LP64 } bind(unlocked); @@ -1522,7 +1480,6 @@ void C2_MacroAssembler::vinsert(BasicType typ, XMMRegister dst, XMMRegister src, } } -#ifdef _LP64 void C2_MacroAssembler::vgather8b_masked_offset(BasicType elem_bt, XMMRegister dst, Register base, Register idx_base, @@ -1561,7 +1518,6 @@ void C2_MacroAssembler::vgather8b_masked_offset(BasicType elem_bt, } } } -#endif // _LP64 void C2_MacroAssembler::vgather8b_offset(BasicType elem_bt, XMMRegister dst, Register base, Register idx_base, @@ -1633,7 +1589,7 @@ void C2_MacroAssembler::vgather_subword(BasicType elem_ty, XMMRegister dst, if (mask == noreg) { vgather8b_offset(elem_ty, temp_dst, base, idx_base, offset, rtmp, vlen_enc); } else { - LP64_ONLY(vgather8b_masked_offset(elem_ty, temp_dst, base, idx_base, offset, mask, mask_idx, rtmp, vlen_enc)); + vgather8b_masked_offset(elem_ty, temp_dst, base, idx_base, offset, mask, mask_idx, rtmp, vlen_enc); } // TEMP_PERM_VEC(temp_dst) = PERMUTE TMP_VEC_64(temp_dst) PERM_INDEX(xtmp1) vpermd(temp_dst, xtmp1, temp_dst, vlen_enc == Assembler::AVX_512bit ? vlen_enc : Assembler::AVX_256bit); @@ -2037,7 +1993,6 @@ void C2_MacroAssembler::reduceI(int opcode, int vlen, } } -#ifdef _LP64 void C2_MacroAssembler::reduceL(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2) { @@ -2049,7 +2004,6 @@ void C2_MacroAssembler::reduceL(int opcode, int vlen, default: assert(false, "wrong vector length"); } } -#endif // _LP64 void C2_MacroAssembler::reduceF(int opcode, int vlen, XMMRegister dst, XMMRegister src, XMMRegister vtmp1, XMMRegister vtmp2) { switch (vlen) { @@ -2299,7 +2253,6 @@ void C2_MacroAssembler::reduce32S(int opcode, Register dst, Register src1, XMMRe reduce16S(opcode, dst, src1, vtmp1, vtmp1, vtmp2); } -#ifdef _LP64 void C2_MacroAssembler::reduce2L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2) { pshufd(vtmp2, src2, 0xE); reduce_operation_128(T_LONG, opcode, vtmp2, src2); @@ -2325,7 +2278,6 @@ void C2_MacroAssembler::genmask(KRegister dst, Register len, Register temp) { bzhiq(temp, temp, len); kmovql(dst, temp); } -#endif // _LP64 void C2_MacroAssembler::reduce2F(int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp) { reduce_operation_128(T_FLOAT, opcode, dst, src); @@ -2741,7 +2693,6 @@ void C2_MacroAssembler::vpadd(BasicType elem_bt, XMMRegister dst, XMMRegister sr } } -#ifdef _LP64 void C2_MacroAssembler::vpbroadcast(BasicType elem_bt, XMMRegister dst, Register src, int vlen_enc) { assert(UseAVX >= 2, "required"); bool is_bw = ((elem_bt == T_BYTE) || (elem_bt == T_SHORT)); @@ -2770,7 +2721,6 @@ void C2_MacroAssembler::vpbroadcast(BasicType elem_bt, XMMRegister dst, Register } } } -#endif void C2_MacroAssembler::vconvert_b2x(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vlen_enc) { switch (to_elem_bt) { @@ -3698,7 +3648,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, XMMRegister vec1, int ae, KRegister mask) { ShortBranchVerifier sbv(this); Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL; - Label COMPARE_WIDE_VECTORS_LOOP_FAILED; // used only _LP64 && AVX3 + Label COMPARE_WIDE_VECTORS_LOOP_FAILED; // used only AVX3 int stride, stride2, adr_stride, adr_stride1, adr_stride2; int stride2x2 = 0x40; Address::ScaleFactor scale = Address::no_scale; @@ -3768,7 +3718,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, Label COMPARE_WIDE_VECTORS_LOOP, 
COMPARE_16_CHARS, COMPARE_INDEX_CHAR; Label COMPARE_WIDE_VECTORS_LOOP_AVX2; Label COMPARE_TAIL_LONG; - Label COMPARE_WIDE_VECTORS_LOOP_AVX3; // used only _LP64 && AVX3 + Label COMPARE_WIDE_VECTORS_LOOP_AVX3; // used only AVX3 int pcmpmask = 0x19; if (ae == StrIntrinsicNode::LL) { @@ -3838,7 +3788,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, // In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest) bind(COMPARE_WIDE_VECTORS_LOOP); -#ifdef _LP64 if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop cmpl(cnt2, stride2x2); jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2); @@ -3862,8 +3811,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, vpxor(vec1, vec1); jmpb(COMPARE_WIDE_TAIL); }//if (VM_Version::supports_avx512vlbw()) -#endif // _LP64 - bind(COMPARE_WIDE_VECTORS_LOOP_AVX2); if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { @@ -4032,7 +3979,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, } jmpb(DONE_LABEL); -#ifdef _LP64 if (VM_Version::supports_avx512vlbw()) { bind(COMPARE_WIDE_VECTORS_LOOP_FAILED); @@ -4058,7 +4004,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2, subl(result, cnt1); jmpb(POP_LABEL); }//if (VM_Version::supports_avx512vlbw()) -#endif // _LP64 // Discard the stored length difference bind(POP_LABEL); @@ -4133,7 +4078,6 @@ void C2_MacroAssembler::count_positives(Register ary1, Register len, // check the tail for absense of negatives // ~(~0 << len) applied up to two times (for 32-bit scenario) -#ifdef _LP64 { Register tmp3_aliased = len; mov64(tmp3_aliased, 0xFFFFFFFFFFFFFFFF); @@ -4141,33 +4085,7 @@ void C2_MacroAssembler::count_positives(Register ary1, Register len, notq(tmp3_aliased); kmovql(mask2, tmp3_aliased); } -#else - Label k_init; - jmp(k_init); - - // We could not read 64-bits from a general purpose register thus we move - // data required to compose 64 1's to the instruction stream - // We emit 64 byte wide series of elements from 0..63 which later on would - // be used as a compare targets with tail count contained in tmp1 register. - // Result would be a k register having tmp1 consecutive number or 1 - // counting from least significant bit. 
- address tmp = pc(); - emit_int64(0x0706050403020100); - emit_int64(0x0F0E0D0C0B0A0908); - emit_int64(0x1716151413121110); - emit_int64(0x1F1E1D1C1B1A1918); - emit_int64(0x2726252423222120); - emit_int64(0x2F2E2D2C2B2A2928); - emit_int64(0x3736353433323130); - emit_int64(0x3F3E3D3C3B3A3938); - - bind(k_init); - lea(len, InternalAddress(tmp)); - // create mask to test for negative byte inside a vector - evpbroadcastb(vec1, tmp1, Assembler::AVX_512bit); - evpcmpgtb(mask2, vec1, Address(len, 0), Assembler::AVX_512bit); -#endif evpcmpgtb(mask1, mask2, vec2, Address(ary1, 0), Assembler::AVX_512bit); ktestq(mask1, mask2); jcc(Assembler::zero, DONE); @@ -4414,7 +4332,6 @@ void C2_MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register lea(ary2, Address(ary2, limit, Address::times_1)); negptr(limit); -#ifdef _LP64 if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3; @@ -4451,7 +4368,7 @@ void C2_MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register bind(COMPARE_WIDE_VECTORS_LOOP_AVX2); }//if (VM_Version::supports_avx512vlbw()) -#endif //_LP64 + bind(COMPARE_WIDE_VECTORS); vmovdqu(vec1, Address(ary1, limit, scaleFactor)); if (expand_ary2) { @@ -4618,8 +4535,6 @@ void C2_MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register } } -#ifdef _LP64 - static void convertF2I_slowpath(C2_MacroAssembler& masm, C2GeneralStub& stub) { #define __ masm. Register dst = stub.data<0>(); @@ -4666,8 +4581,6 @@ void C2_MacroAssembler::convertF2I(BasicType dst_bt, BasicType src_bt, Register bind(stub->continuation()); } -#endif // _LP64 - void C2_MacroAssembler::evmasked_op(int ideal_opc, BasicType eType, KRegister mask, XMMRegister dst, XMMRegister src1, int imm8, bool merge, int vlen_enc) { switch(ideal_opc) { @@ -5327,7 +5240,6 @@ void C2_MacroAssembler::vector_castD2X_evex(BasicType to_elem_bt, XMMRegister ds } } -#ifdef _LP64 void C2_MacroAssembler::vector_round_double_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc, Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2) { @@ -5379,7 +5291,6 @@ void C2_MacroAssembler::vector_round_float_avx(XMMRegister dst, XMMRegister src, ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), tmp /*rscratch*/); } -#endif // _LP64 void C2_MacroAssembler::vector_unsigned_cast(XMMRegister dst, XMMRegister src, int vlen_enc, BasicType from_elem_bt, BasicType to_elem_bt) { @@ -5510,7 +5421,6 @@ void C2_MacroAssembler::evpternlog(XMMRegister dst, int func, KRegister mask, XM } } -#ifdef _LP64 void C2_MacroAssembler::vector_long_to_maskvec(XMMRegister dst, Register src, Register rtmp1, Register rtmp2, XMMRegister xtmp, int mask_len, int vec_enc) { @@ -5768,7 +5678,6 @@ void C2_MacroAssembler::vector_compress_expand(int opcode, XMMRegister dst, XMMR } } } -#endif void C2_MacroAssembler::vector_signum_evex(int opcode, XMMRegister dst, XMMRegister src, XMMRegister zero, XMMRegister one, KRegister ktmp1, int vec_enc) { @@ -5833,10 +5742,8 @@ void C2_MacroAssembler::vector_maskall_operation(KRegister dst, Register src, in void C2_MacroAssembler::vbroadcast(BasicType bt, XMMRegister dst, int imm32, Register rtmp, int vec_enc) { int lane_size = type2aelembytes(bt); - bool is_LP64 = LP64_ONLY(true) NOT_LP64(false); - if ((is_LP64 || lane_size < 8) && - ((is_non_subword_integral_type(bt) && VM_Version::supports_avx512vl()) || - 
(is_subword_type(bt) && VM_Version::supports_avx512vlbw()))) { + if ((is_non_subword_integral_type(bt) && VM_Version::supports_avx512vl()) || + (is_subword_type(bt) && VM_Version::supports_avx512vlbw())) { movptr(rtmp, imm32); switch(lane_size) { case 1 : evpbroadcastb(dst, rtmp, vec_enc); break; @@ -5848,7 +5755,7 @@ void C2_MacroAssembler::vbroadcast(BasicType bt, XMMRegister dst, int imm32, Reg } } else { movptr(rtmp, imm32); - LP64_ONLY(movq(dst, rtmp)) NOT_LP64(movdl(dst, rtmp)); + movq(dst, rtmp); switch(lane_size) { case 1 : vpbroadcastb(dst, dst, vec_enc); break; case 2 : vpbroadcastw(dst, dst, vec_enc); break; @@ -5983,14 +5890,6 @@ void C2_MacroAssembler::vector_popcount_integral_evex(BasicType bt, XMMRegister } } -#ifndef _LP64 -void C2_MacroAssembler::vector_maskall_operation32(KRegister dst, Register src, KRegister tmp, int mask_len) { - assert(VM_Version::supports_avx512bw(), ""); - kmovdl(tmp, src); - kunpckdql(dst, tmp, tmp); -} -#endif - // Bit reversal algorithm first reverses the bits of each byte followed by // a byte level reversal for multi-byte primitive types (short/int/long). // Algorithm performs a lookup table access to get reverse bit sequence @@ -6450,7 +6349,6 @@ void C2_MacroAssembler::udivmodI(Register rax, Register divisor, Register rdx, R bind(done); } -#ifdef _LP64 void C2_MacroAssembler::reverseI(Register dst, Register src, XMMRegister xtmp1, XMMRegister xtmp2, Register rtmp) { if(VM_Version::supports_gfni()) { @@ -6614,7 +6512,6 @@ void C2_MacroAssembler::udivmodL(Register rax, Register divisor, Register rdx, R subq(rdx, tmp); // remainder bind(done); } -#endif void C2_MacroAssembler::rearrange_bytes(XMMRegister dst, XMMRegister shuffle, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, KRegister ktmp, diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp index 4fe2cc397b5ae..8ef2827258890 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp @@ -130,9 +130,7 @@ // Covert B2X void vconvert_b2x(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vlen_enc); -#ifdef _LP64 void vpbroadcast(BasicType elem_bt, XMMRegister dst, Register src, int vlen_enc); -#endif // blend void evpcmp(BasicType typ, KRegister kdmask, KRegister ksmask, XMMRegister src1, XMMRegister src2, int comparison, int vector_len); @@ -152,10 +150,8 @@ // dst = src1 reduce(op, src2) using vtmp as temps void reduceI(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2); -#ifdef _LP64 void reduceL(int opcode, int vlen, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2); void genmask(KRegister dst, Register len, Register temp); -#endif // _LP64 // dst = reduce(op, src2) using vtmp as temps void reduce_fp(int opcode, int vlen, @@ -202,11 +198,9 @@ void reduce32S(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2); // Long Reduction -#ifdef _LP64 void reduce2L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2); void reduce4L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2); void reduce8L(int opcode, Register dst, Register src1, XMMRegister src2, XMMRegister vtmp1, XMMRegister vtmp2); -#endif // _LP64 // Float Reduction void reduce2F (int opcode, XMMRegister dst, XMMRegister src, XMMRegister vtmp); @@ -237,7 +231,6 @@ 
void unordered_reduce_operation_256(BasicType typ, int opcode, XMMRegister dst, XMMRegister src1, XMMRegister src2); public: -#ifdef _LP64 void vector_mask_operation_helper(int opc, Register dst, Register tmp, int masklen); void vector_mask_operation(int opc, Register dst, KRegister mask, Register tmp, int masklen, int masksize, int vec_enc); @@ -246,14 +239,9 @@ Register tmp, int masklen, BasicType bt, int vec_enc); void vector_long_to_maskvec(XMMRegister dst, Register src, Register rtmp1, Register rtmp2, XMMRegister xtmp, int mask_len, int vec_enc); -#endif void vector_maskall_operation(KRegister dst, Register src, int mask_len); -#ifndef _LP64 - void vector_maskall_operation32(KRegister dst, Register src, KRegister ktmp, int mask_len); -#endif - void string_indexof_char(Register str1, Register cnt1, Register ch, Register result, XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp); @@ -313,9 +301,7 @@ void arrays_hashcode_elvload(XMMRegister dst, AddressLiteral src, BasicType eltype); void arrays_hashcode_elvcast(XMMRegister dst, BasicType eltype); -#ifdef _LP64 void convertF2I(BasicType dst_bt, BasicType src_bt, Register dst, XMMRegister src); -#endif void evmasked_op(int ideal_opc, BasicType eType, KRegister mask, XMMRegister dst, XMMRegister src1, XMMRegister src2, @@ -390,7 +376,6 @@ void vector_mask_cast(XMMRegister dst, XMMRegister src, BasicType dst_bt, BasicType src_bt, int vlen); -#ifdef _LP64 void vector_round_double_evex(XMMRegister dst, XMMRegister src, AddressLiteral double_sign_flip, AddressLiteral new_mxcsr, int vec_enc, Register tmp, XMMRegister xtmp1, XMMRegister xtmp2, KRegister ktmp1, KRegister ktmp2); @@ -403,13 +388,11 @@ void vector_compress_expand_avx2(int opcode, XMMRegister dst, XMMRegister src, XMMRegister mask, Register rtmp, Register rscratch, XMMRegister permv, XMMRegister xtmp, BasicType bt, int vec_enc); -#endif // _LP64 void udivI(Register rax, Register divisor, Register rdx); void umodI(Register rax, Register divisor, Register rdx); void udivmodI(Register rax, Register divisor, Register rdx, Register tmp); -#ifdef _LP64 void reverseI(Register dst, Register src, XMMRegister xtmp1, XMMRegister xtmp2, Register rtmp); void reverseL(Register dst, Register src, XMMRegister xtmp1, @@ -417,7 +400,6 @@ void udivL(Register rax, Register divisor, Register rdx); void umodL(Register rax, Register divisor, Register rdx); void udivmodL(Register rax, Register divisor, Register rdx, Register tmp); -#endif void evpternlog(XMMRegister dst, int func, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, BasicType bt, int vlen_enc); @@ -511,10 +493,9 @@ Register mask, XMMRegister xtmp1, XMMRegister xtmp2, XMMRegister xtmp3, Register rtmp, Register midx, Register length, int vector_len, int vlen_enc); -#ifdef _LP64 void vgather8b_masked_offset(BasicType elem_bt, XMMRegister dst, Register base, Register idx_base, Register offset, Register mask, Register midx, Register rtmp, int vlen_enc); -#endif + void vgather8b_offset(BasicType elem_bt, XMMRegister dst, Register base, Register idx_base, Register offset, Register rtmp, int vlen_enc); diff --git a/src/hotspot/cpu/x86/matcher_x86.hpp b/src/hotspot/cpu/x86/matcher_x86.hpp index 51756903792d6..8ca16e545894c 100644 --- a/src/hotspot/cpu/x86/matcher_x86.hpp +++ b/src/hotspot/cpu/x86/matcher_x86.hpp @@ -59,53 +59,34 @@ static constexpr bool isSimpleConstant64(jlong value) { // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?. //return value == (int) value; // Cf. storeImmL and immL32. 
- // Probably always true, even if a temp register is required. -#ifdef _LP64 return true; -#else - return false; -#endif } -#ifdef _LP64 // No additional cost for CMOVL. static constexpr int long_cmove_cost() { return 0; } -#else - // Needs 2 CMOV's for longs. - static constexpr int long_cmove_cost() { return 1; } -#endif -#ifdef _LP64 // No CMOVF/CMOVD with SSE2 static int float_cmove_cost() { return ConditionalMoveLimit; } -#else - // No CMOVF/CMOVD with SSE/SSE2 - static int float_cmove_cost() { return (UseSSE>=1) ? ConditionalMoveLimit : 0; } -#endif static bool narrow_oop_use_complex_address() { - NOT_LP64(ShouldNotCallThis();) assert(UseCompressedOops, "only for compressed oops code"); return (LogMinObjAlignmentInBytes <= 3); } static bool narrow_klass_use_complex_address() { - NOT_LP64(ShouldNotCallThis();) assert(UseCompressedClassPointers, "only for compressed klass code"); return (CompressedKlassPointers::shift() <= 3); } // Prefer ConN+DecodeN over ConP. static bool const_oop_prefer_decode() { - NOT_LP64(ShouldNotCallThis();) // Prefer ConN+DecodeN over ConP. return true; } // Prefer ConP over ConNKlass+DecodeNKlass. static bool const_klass_prefer_decode() { - NOT_LP64(ShouldNotCallThis();) return false; } @@ -123,24 +104,12 @@ // Are floats converted to double when stored to stack during deoptimization? // On x64 it is stored without conversion so we can use normal access. - // On x32 it is stored with conversion only when FPU is used for floats. -#ifdef _LP64 static constexpr bool float_in_double() { return false; } -#else - static bool float_in_double() { - return (UseSSE == 0); - } -#endif // Do ints take an entire long register or just half? -#ifdef _LP64 static const bool int_in_long = true; -#else - static const bool int_in_long = false; -#endif - // Does the CPU supports vector variable shift instructions? static bool supports_vector_variable_shifts(void) { diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad index 8b2c583554470..762c640dd0965 100644 --- a/src/hotspot/cpu/x86/x86.ad +++ b/src/hotspot/cpu/x86/x86.ad @@ -210,8 +210,6 @@ reg_def XMM7n( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(13)); reg_def XMM7o( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(14)); reg_def XMM7p( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(15)); -#ifdef _LP64 - reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()); reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1)); reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2)); @@ -620,13 +618,7 @@ reg_def XMM31n( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(13)); reg_def XMM31o( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(14)); reg_def XMM31p( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(15)); -#endif // _LP64 - -#ifdef _LP64 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad()); -#else -reg_def RFLAGS(SOC, SOC, 0, 8, VMRegImpl::Bad()); -#endif // _LP64 // AVX3 Mask Registers. 
reg_def K1 (SOC, SOC, Op_RegI, 1, k1->as_VMReg()); @@ -658,17 +650,16 @@ alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p, XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p, XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p, - XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, + XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p, + XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p, XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p, XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p, XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p, XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p, XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p, - XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p - ,XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p, + XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p, + XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p, XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p, XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p, XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p, @@ -683,9 +674,7 @@ alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p, XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p, XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p, - XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p -#endif - ); + XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p); alloc_class chunk2(K7, K7_H, K6, K6_H, @@ -726,18 +715,15 @@ reg_class float_reg_legacy(XMM0, XMM4, XMM5, XMM6, - XMM7 -#ifdef _LP64 - ,XMM8, + XMM7, + XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, - XMM15 -#endif - ); 
+ XMM15); // Class for evex float registers reg_class float_reg_evex(XMM0, @@ -747,9 +733,8 @@ reg_class float_reg_evex(XMM0, XMM4, XMM5, XMM6, - XMM7 -#ifdef _LP64 - ,XMM8, + XMM7, + XMM8, XMM9, XMM10, XMM11, @@ -772,9 +757,7 @@ reg_class float_reg_evex(XMM0, XMM28, XMM29, XMM30, - XMM31 -#endif - ); + XMM31); reg_class_dynamic float_reg(float_reg_evex, float_reg_legacy, %{ VM_Version::supports_evex() %} ); reg_class_dynamic float_reg_vl(float_reg_evex, float_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} ); @@ -787,18 +770,15 @@ reg_class double_reg_legacy(XMM0, XMM0b, XMM4, XMM4b, XMM5, XMM5b, XMM6, XMM6b, - XMM7, XMM7b -#ifdef _LP64 - ,XMM8, XMM8b, + XMM7, XMM7b, + XMM8, XMM8b, XMM9, XMM9b, XMM10, XMM10b, XMM11, XMM11b, XMM12, XMM12b, XMM13, XMM13b, XMM14, XMM14b, - XMM15, XMM15b -#endif - ); + XMM15, XMM15b); // Class for evex double registers reg_class double_reg_evex(XMM0, XMM0b, @@ -808,9 +788,8 @@ reg_class double_reg_evex(XMM0, XMM0b, XMM4, XMM4b, XMM5, XMM5b, XMM6, XMM6b, - XMM7, XMM7b -#ifdef _LP64 - ,XMM8, XMM8b, + XMM7, XMM7b, + XMM8, XMM8b, XMM9, XMM9b, XMM10, XMM10b, XMM11, XMM11b, @@ -833,9 +812,7 @@ reg_class double_reg_evex(XMM0, XMM0b, XMM28, XMM28b, XMM29, XMM29b, XMM30, XMM30b, - XMM31, XMM31b -#endif - ); + XMM31, XMM31b); reg_class_dynamic double_reg(double_reg_evex, double_reg_legacy, %{ VM_Version::supports_evex() %} ); reg_class_dynamic double_reg_vl(double_reg_evex, double_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} ); @@ -848,18 +825,15 @@ reg_class vectors_reg_legacy(XMM0, XMM4, XMM5, XMM6, - XMM7 -#ifdef _LP64 - ,XMM8, + XMM7, + XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, - XMM15 -#endif - ); + XMM15); // Class for evex 32bit vector registers reg_class vectors_reg_evex(XMM0, @@ -869,9 +843,8 @@ reg_class vectors_reg_evex(XMM0, XMM4, XMM5, XMM6, - XMM7 -#ifdef _LP64 - ,XMM8, + XMM7, + XMM8, XMM9, XMM10, XMM11, @@ -894,9 +867,7 @@ reg_class vectors_reg_evex(XMM0, XMM28, XMM29, XMM30, - XMM31 -#endif - ); + XMM31); reg_class_dynamic vectors_reg(vectors_reg_evex, vectors_reg_legacy, %{ VM_Version::supports_evex() %} ); reg_class_dynamic vectors_reg_vlbwdq(vectors_reg_evex, vectors_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); @@ -909,18 +880,15 @@ reg_class vectord_reg_legacy(XMM0, XMM0b, XMM4, XMM4b, XMM5, XMM5b, XMM6, XMM6b, - XMM7, XMM7b -#ifdef _LP64 - ,XMM8, XMM8b, + XMM7, XMM7b, + XMM8, XMM8b, XMM9, XMM9b, XMM10, XMM10b, XMM11, XMM11b, XMM12, XMM12b, XMM13, XMM13b, XMM14, XMM14b, - XMM15, XMM15b -#endif - ); + XMM15, XMM15b); // Class for all 64bit vector registers reg_class vectord_reg_evex(XMM0, XMM0b, @@ -930,9 +898,8 @@ reg_class vectord_reg_evex(XMM0, XMM0b, XMM4, XMM4b, XMM5, XMM5b, XMM6, XMM6b, - XMM7, XMM7b -#ifdef _LP64 - ,XMM8, XMM8b, + XMM7, XMM7b, + XMM8, XMM8b, XMM9, XMM9b, XMM10, XMM10b, XMM11, XMM11b, @@ -955,9 +922,7 @@ reg_class vectord_reg_evex(XMM0, XMM0b, XMM28, XMM28b, XMM29, XMM29b, XMM30, XMM30b, - XMM31, XMM31b -#endif - ); + XMM31, XMM31b); reg_class_dynamic vectord_reg(vectord_reg_evex, vectord_reg_legacy, %{ VM_Version::supports_evex() %} ); reg_class_dynamic vectord_reg_vlbwdq(vectord_reg_evex, vectord_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); @@ -970,18 +935,15 @@ reg_class vectorx_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM4, XMM4b, XMM4c, XMM4d, XMM5, XMM5b, XMM5c, XMM5d, XMM6, XMM6b, XMM6c, XMM6d, - XMM7, XMM7b, XMM7c, XMM7d -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, + XMM7, XMM7b, XMM7c, XMM7d, + XMM8, XMM8b, XMM8c, XMM8d, 
XMM9, XMM9b, XMM9c, XMM9d, XMM10, XMM10b, XMM10c, XMM10d, XMM11, XMM11b, XMM11c, XMM11d, XMM12, XMM12b, XMM12c, XMM12d, XMM13, XMM13b, XMM13c, XMM13d, XMM14, XMM14b, XMM14c, XMM14d, - XMM15, XMM15b, XMM15c, XMM15d -#endif - ); + XMM15, XMM15b, XMM15c, XMM15d); // Class for all 128bit vector registers reg_class vectorx_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, @@ -991,9 +953,8 @@ reg_class vectorx_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM4, XMM4b, XMM4c, XMM4d, XMM5, XMM5b, XMM5c, XMM5d, XMM6, XMM6b, XMM6c, XMM6d, - XMM7, XMM7b, XMM7c, XMM7d -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, + XMM7, XMM7b, XMM7c, XMM7d, + XMM8, XMM8b, XMM8c, XMM8d, XMM9, XMM9b, XMM9c, XMM9d, XMM10, XMM10b, XMM10c, XMM10d, XMM11, XMM11b, XMM11c, XMM11d, @@ -1016,9 +977,7 @@ reg_class vectorx_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM28, XMM28b, XMM28c, XMM28d, XMM29, XMM29b, XMM29c, XMM29d, XMM30, XMM30b, XMM30c, XMM30d, - XMM31, XMM31b, XMM31c, XMM31d -#endif - ); + XMM31, XMM31b, XMM31c, XMM31d); reg_class_dynamic vectorx_reg(vectorx_reg_evex, vectorx_reg_legacy, %{ VM_Version::supports_evex() %} ); reg_class_dynamic vectorx_reg_vlbwdq(vectorx_reg_evex, vectorx_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); @@ -1031,18 +990,15 @@ reg_class vectory_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0 XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, - XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, + XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, + XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, - XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h -#endif - ); + XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h); // Class for all 256bit vector registers reg_class vectory_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, @@ -1052,9 +1008,8 @@ reg_class vectory_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, - XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, + XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, + XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, @@ -1077,9 +1032,7 @@ reg_class vectory_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, - XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h -#endif - ); + XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h); reg_class_dynamic vectory_reg(vectory_reg_evex, vectory_reg_legacy, %{ 
VM_Version::supports_evex() %} ); reg_class_dynamic vectory_reg_vlbwdq(vectory_reg_evex, vectory_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); @@ -1092,17 +1045,16 @@ reg_class vectorz_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p, XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p, XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p, - XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, + XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p, + XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p, XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p, XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p, XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p, XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p, XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p, - XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p - ,XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p, + XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p, + XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p, XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p, XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p, XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p, @@ -1117,9 +1069,7 @@ reg_class vectorz_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p, XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p, XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p, - XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p -#endif - ); + XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p); // Class for restricted 512bit vector registers reg_class vectorz_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, 
XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p, @@ -1129,18 +1079,15 @@ reg_class vectorz_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0 XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p, XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p, XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p, - XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p -#ifdef _LP64 - ,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, + XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p, + XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p, XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p, XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p, XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p, XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p, XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p, - XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p -#endif - ); + XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p); reg_class_dynamic vectorz_reg (vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() %} ); reg_class_dynamic vectorz_reg_vl(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} ); @@ -1199,21 +1146,10 @@ class HandlerImpl { return NativeJump::instruction_size; } -#ifdef _LP64 static uint size_deopt_handler() { // three 5 byte instructions plus one move for unreachable address. return 15+3; } -#else - static uint size_deopt_handler() { - // NativeCall instruction size is the same as NativeJump. - // exception handler starts out as jump and can be patched to - // a call be deoptimization. (4932387) - // Note that this value is also credited (in output.cpp) to - // the size of the code section. 
- return 5 + NativeJump::instruction_size; // pushl(); jmp; - } -#endif }; inline Assembler::AvxVectorLen vector_length_encoding(int bytes) { @@ -1334,7 +1270,6 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) { } int offset = __ offset(); -#ifdef _LP64 address the_pc = (address) __ pc(); Label next; // push a "the_pc" on the stack without destroying any registers @@ -1345,10 +1280,6 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) { __ bind(next); // adjust it so it matches "the_pc" __ subptr(Address(rsp, 0), __ offset() - offset); -#else - InternalAddress here(__ pc()); - __ pushptr(here.addr(), noreg); -#endif __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow %d", (__ offset() - offset)); @@ -1372,17 +1303,10 @@ static Assembler::Width widthForType(BasicType bt) { //============================================================================= // Float masks come from different places depending on platform. -#ifdef _LP64 static address float_signmask() { return StubRoutines::x86::float_sign_mask(); } static address float_signflip() { return StubRoutines::x86::float_sign_flip(); } static address double_signmask() { return StubRoutines::x86::double_sign_mask(); } static address double_signflip() { return StubRoutines::x86::double_sign_flip(); } -#else - static address float_signmask() { return (address)float_signmask_pool; } - static address float_signflip() { return (address)float_signflip_pool; } - static address double_signmask() { return (address)double_signmask_pool; } - static address double_signflip() { return (address)double_signflip_pool; } -#endif static address vector_short_to_byte_mask() { return StubRoutines::x86::vector_short_to_byte_mask(); } static address vector_int_to_byte_mask() { return StubRoutines::x86::vector_int_to_byte_mask(); } static address vector_byte_perm_mask() { return StubRoutines::x86::vector_byte_perm_mask(); } @@ -1404,7 +1328,6 @@ bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) { return false; // no match rule present } - const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false); switch (opcode) { case Op_AbsVL: case Op_StoreVectorScatter: @@ -1506,7 +1429,7 @@ bool Matcher::match_rule_supported(int opcode) { } break; case Op_PopulateIndex: - if (!is_LP64 || (UseAVX < 2)) { + if (UseAVX < 2) { return false; } break; @@ -1521,9 +1444,7 @@ bool Matcher::match_rule_supported(int opcode) { } break; case Op_CompareAndSwapL: -#ifdef _LP64 case Op_CompareAndSwapP: -#endif break; case Op_StrIndexOf: if (!UseSSE42Intrinsics) { @@ -1552,7 +1473,6 @@ bool Matcher::match_rule_supported(int opcode) { return false; } break; -#ifdef _LP64 case Op_MaxD: case Op_MaxF: case Op_MinD: @@ -1561,7 +1481,6 @@ bool Matcher::match_rule_supported(int opcode) { return false; } break; -#endif case Op_CacheWB: case Op_CacheWBPreSync: case Op_CacheWBPostSync: @@ -1604,7 +1523,7 @@ bool Matcher::match_rule_supported(int opcode) { case Op_VectorCmpMasked: case Op_VectorMaskGen: - if (!is_LP64 || UseAVX < 3 || !VM_Version::supports_bmi2()) { + if (UseAVX < 3 || !VM_Version::supports_bmi2()) { return false; } break; @@ -1612,50 +1531,29 @@ bool Matcher::match_rule_supported(int opcode) { case Op_VectorMaskLastTrue: case Op_VectorMaskTrueCount: case Op_VectorMaskToLong: - if (!is_LP64 || UseAVX < 1) { + if (UseAVX < 1) { return false; } break; case Op_RoundF: case Op_RoundD: - if (!is_LP64) { - return false; - } break; case Op_CopySignD: case 
Op_CopySignF: - if (UseAVX < 3 || !is_LP64) { + if (UseAVX < 3) { return false; } if (!VM_Version::supports_avx512vl()) { return false; } break; -#ifndef _LP64 - case Op_AddReductionVF: - case Op_AddReductionVD: - case Op_MulReductionVF: - case Op_MulReductionVD: - if (UseSSE < 1) { // requires at least SSE - return false; - } - break; - case Op_MulAddVS2VI: - case Op_RShiftVL: - case Op_AbsVD: - case Op_NegVD: - if (UseSSE < 2) { - return false; - } - break; -#endif // !LP64 case Op_CompressBits: - if (!VM_Version::supports_bmi2() || (!is_LP64 && UseSSE < 2)) { + if (!VM_Version::supports_bmi2() || (UseSSE < 2)) { return false; } break; case Op_ExpandBits: - if (!VM_Version::supports_bmi2() || (!is_LP64 && (UseSSE < 2 || !VM_Version::supports_bmi1()))) { + if (!VM_Version::supports_bmi2() || ((UseSSE < 2 || !VM_Version::supports_bmi1()))) { return false; } break; @@ -1680,14 +1578,9 @@ bool Matcher::match_rule_supported(int opcode) { } break; case Op_SqrtD: -#ifdef _LP64 if (UseSSE < 2) { return false; } -#else - // x86_32.ad has a special match rule for SqrtD. - // Together with common x86 rules, this handles all UseSSE cases. -#endif break; case Op_ConvF2HF: case Op_ConvHF2F: @@ -1719,7 +1612,6 @@ bool Matcher::match_rule_supported_auto_vectorization(int opcode, int vlen, Basi // Identify extra cases that we might want to provide match rules for vector nodes and // other intrinsics guarded with vector length (vlen) and element type (bt). bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { - const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false); if (!match_rule_supported(opcode)) { return false; } @@ -1766,7 +1658,7 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { case Op_ClearArray: case Op_VectorMaskGen: case Op_VectorCmpMasked: - if (!is_LP64 || !VM_Version::supports_avx512bw()) { + if (!VM_Version::supports_avx512bw()) { return false; } if ((size_in_bits != 512) && !VM_Version::supports_avx512vl()) { @@ -1816,19 +1708,7 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (is_subword_type(bt) && (UseSSE < 4)) { return false; } -#ifndef _LP64 - if (bt == T_BYTE || bt == T_LONG) { - return false; - } -#endif - break; -#ifndef _LP64 - case Op_VectorInsert: - if (bt == T_LONG || bt == T_DOUBLE) { - return false; - } break; -#endif case Op_MinReductionV: case Op_MaxReductionV: if ((bt == T_INT || is_subword_type(bt)) && UseSSE < 4) { @@ -1843,11 +1723,6 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (UseAVX > 2 && (!VM_Version::supports_avx512dq() && size_in_bits == 512)) { return false; } -#ifndef _LP64 - if (bt == T_BYTE || bt == T_LONG) { - return false; - } -#endif break; case Op_VectorTest: if (UseSSE < 4) { @@ -1932,10 +1807,9 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { return false; } if (is_subword_type(bt) && - (!is_LP64 || - (size_in_bits > 256 && !VM_Version::supports_avx512bw()) || - (size_in_bits < 64) || - (bt == T_SHORT && !VM_Version::supports_bmi2()))) { + ((size_in_bits > 256 && !VM_Version::supports_avx512bw()) || + (size_in_bits < 64) || + (bt == T_SHORT && !VM_Version::supports_bmi2()))) { return false; } break; @@ -2004,14 +1878,11 @@ bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (is_subword_type(bt) && !VM_Version::supports_avx512_vbmi2()) { return false; } - if (!is_LP64 && !VM_Version::supports_avx512vl() && size_in_bits < 512) { - return false; - } if 
(size_in_bits < 128 ) { return false; } case Op_VectorLongToMask: - if (UseAVX < 1 || !is_LP64) { + if (UseAVX < 1) { return false; } if (UseAVX < 3 && !VM_Version::supports_bmi2()) { @@ -2059,7 +1930,6 @@ bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType return false; } - const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false); int size_in_bits = vlen * type2aelembytes(bt) * BitsPerByte; if (size_in_bits != 512 && !VM_Version::supports_avx512vl()) { return false; @@ -2395,7 +2265,6 @@ static bool clone_shift(Node* shift, Matcher* matcher, Matcher::MStack& mstack, address_visited.set(shift->_idx); // Flag as address_visited mstack.push(shift->in(2), Matcher::Visit); Node *conv = shift->in(1); -#ifdef _LP64 // Allow Matcher to match the rule which bypass // ConvI2L operation for an array index on LP64 // if the index value is positive. @@ -2405,9 +2274,9 @@ static bool clone_shift(Node* shift, Matcher* matcher, Matcher::MStack& mstack, !matcher->is_visited(conv)) { address_visited.set(conv->_idx); // Flag as address_visited mstack.push(conv->in(1), Matcher::Pre_Visit); - } else -#endif + } else { mstack.push(conv, Matcher::Pre_Visit); + } return true; } return false; @@ -2545,7 +2414,7 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, if (adr->is_AddP() && !adr->in(AddPNode::Base)->is_top() && !adr->in(AddPNode::Offset)->is_Con() && - LP64_ONLY( off->get_long() == (int) (off->get_long()) && ) // immL32 + off->get_long() == (int) (off->get_long()) && // immL32 // Are there other uses besides address expressions? !is_visited(adr)) { address_visited.set(adr->_idx); // Flag as address_visited @@ -2619,26 +2488,18 @@ static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo, case Op_VecS: // copy whole register case Op_VecD: case Op_VecX: -#ifndef _LP64 - __ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); -#else if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { __ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); } else { __ vextractf32x4(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 0x0); } -#endif break; case Op_VecY: -#ifndef _LP64 - __ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); -#else if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { __ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); } else { __ vextractf64x4(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 0x0); } -#endif break; case Op_VecZ: __ evmovdquq(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 2); @@ -2677,28 +2538,10 @@ void vec_spill_helper(C2_MacroAssembler *masm, bool is_load, __ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); break; case Op_VecX: -#ifndef _LP64 __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); -#else - if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { - __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); - } else { - __ vpxor(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), 2); - __ vinsertf32x4(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset),0x0); - } -#endif break; 
     case Op_VecY:
-#ifndef _LP64
       __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
-#else
-      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
-        __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
-      } else {
-        __ vpxor(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), 2);
-        __ vinsertf64x4(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset),0x0);
-      }
-#endif
       break;
     case Op_VecZ:
       __ evmovdquq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset), 2);
@@ -2715,28 +2558,10 @@ void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
       __ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
       break;
     case Op_VecX:
-#ifndef _LP64
       __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
-#else
-      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
-        __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
-      }
-      else {
-        __ vextractf32x4(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 0x0);
-      }
-#endif
       break;
     case Op_VecY:
-#ifndef _LP64
       __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
-#else
-      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
-        __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
-      }
-      else {
-        __ vextractf64x4(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 0x0);
-      }
-#endif
       break;
     case Op_VecZ:
       __ evmovdquq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 2);
@@ -3967,7 +3792,6 @@ instruct reinterpret_shrink(vec dst, legVec src) %{
 
 // ----------------------------------------------------------------------------------------------------
 
-#ifdef _LP64
 instruct roundD_reg(legRegD dst, legRegD src, immU8 rmode) %{
   match(Set dst (RoundDoubleMode src rmode));
   format %{ "roundsd $dst,$src" %}
@@ -4038,7 +3862,6 @@ instruct vround8D_mem(vec dst, memory mem, immU8 rmode) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif // _LP64
 
 instruct onspinwait() %{
   match(OnSpinWait);
@@ -4256,7 +4079,6 @@ instruct vgather_subwordGT8B_off(vec dst, memory mem, rRegP idx_base, rRegI offs
 %}
 
-#ifdef _LP64
 instruct vgather_masked_subwordLE8B_avx3(vec dst, memory mem, rRegP idx_base, immI_0 offset, kReg mask, rRegL mask_idx, rRegP tmp, rRegI rtmp, rRegL rtmp2, rFlagsReg cr) %{
   predicate(VM_Version::supports_avx512bw() && is_subword_type(Matcher::vector_element_basic_type(n)) && Matcher::vector_length_in_bytes(n) <= 8);
   match(Set dst (LoadVectorGatherMasked mem (Binary idx_base (Binary mask offset))));
@@ -4419,7 +4241,6 @@ instruct vgather_masked_subwordGT8B_off_avx2(vec dst, memory mem, rRegP idx_base
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 // ====================Scatter=======================================
 
@@ -4535,7 +4356,6 @@ instruct vReplS_reg(vec dst, rRegI src) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct ReplHF_imm(vec dst, immH con, rRegI rtmp) %{
   match(Set dst (Replicate con));
   effect(TEMP rtmp);
@@ -4562,7 +4382,6 @@ instruct ReplHF_reg(vec dst, regF src, rRegI rtmp) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 instruct ReplS_mem(vec dst, memory mem) %{
   predicate(UseAVX >= 2 && Matcher::vector_element_basic_type(n) == T_SHORT);
@@ -4659,7 +4478,6 @@ instruct ReplI_M1(vec dst, immI_M1 con) %{
 
 // ====================ReplicateL=======================================
 
-#ifdef _LP64
 // Replicate long (8 byte) scalar to be vector
 instruct ReplL_reg(vec dst, rRegL src) %{
   predicate(Matcher::vector_element_basic_type(n) == T_LONG);
@@ -4680,61 +4498,6 @@ instruct ReplL_reg(vec dst, rRegL src) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#else // _LP64
-// Replicate long (8 byte) scalar to be vector
-instruct ReplL_reg(vec dst, eRegL src, vec tmp) %{
-  predicate(Matcher::vector_length(n) <= 4 && Matcher::vector_element_basic_type(n) == T_LONG);
-  match(Set dst (Replicate src));
-  effect(TEMP dst, USE src, TEMP tmp);
-  format %{ "replicateL $dst,$src" %}
-  ins_encode %{
-    uint vlen = Matcher::vector_length(this);
-    if (vlen == 2) {
-      __ movdl($dst$$XMMRegister, $src$$Register);
-      __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-      __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-      __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
-      int vlen_enc = Assembler::AVX_256bit;
-      __ movdl($dst$$XMMRegister, $src$$Register);
-      __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-      __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-      __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
-    } else {
-      __ movdl($dst$$XMMRegister, $src$$Register);
-      __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-      __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-      __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-      __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct ReplL_reg_leg(legVec dst, eRegL src, legVec tmp) %{
-  predicate(Matcher::vector_length(n) == 8 && Matcher::vector_element_basic_type(n) == T_LONG);
-  match(Set dst (Replicate src));
-  effect(TEMP dst, USE src, TEMP tmp);
-  format %{ "replicateL $dst,$src" %}
-  ins_encode %{
-    if (VM_Version::supports_avx512vl()) {
-      __ movdl($dst$$XMMRegister, $src$$Register);
-      __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-      __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-      __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-      __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-      __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-    } else {
-      int vlen_enc = Assembler::AVX_512bit;
-      __ movdl($dst$$XMMRegister, $src$$Register);
-      __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-      __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-      __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-#endif // _LP64
 
 instruct ReplL_mem(vec dst, memory mem) %{
   predicate(Matcher::vector_element_basic_type(n) == T_LONG);
@@ -5008,7 +4771,6 @@ instruct insert64(vec dst, vec src, rRegI val, immU8 idx, legVec vtmp) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct insert2L(vec dst, rRegL val, immU8 idx) %{
   predicate(Matcher::vector_length(n) == 2);
   match(Set dst (VectorInsert (Binary dst val) idx));
@@ -5059,7 +4821,6 @@ instruct insert8L(vec dst, vec src, rRegL val, immU8 idx, legVec vtmp) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 instruct insertF(vec dst, regF val, immU8 idx) %{
   predicate(Matcher::vector_length(n) < 8);
@@ -5105,7 +4866,6 @@ instruct vinsertF(vec dst, vec src, regF val, immU8 idx, vec vtmp) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct insert2D(vec dst, regD val, immU8 idx, rRegL tmp) %{
   predicate(Matcher::vector_length(n) == 2);
   match(Set dst (VectorInsert (Binary dst val) idx));
@@ -5160,7 +4920,6 @@ instruct insert8D(vec dst, vec src, regD val, immI idx, rRegL tmp, legVec vtmp) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 // ====================REDUCTION ARITHMETIC=======================================
 
@@ -5187,7 +4946,6 @@ instruct reductionI(rRegI dst, rRegI src1, legVec src2, legVec vtmp1, legVec vtm
 
 // =======================Long Reduction==========================================
 
-#ifdef _LP64
 instruct reductionL(rRegL dst, rRegL src1, legVec src2, legVec vtmp1, legVec vtmp2) %{
   predicate(Matcher::vector_element_basic_type(n->in(2)) == T_LONG && !VM_Version::supports_avx512dq());
   match(Set dst (AddReductionVL src1 src2));
@@ -5225,7 +4983,6 @@ instruct reductionL_avx512dq(rRegL dst, rRegL src1, vec src2, vec vtmp1, vec vtm
   %}
   ins_pipe( pipe_slow );
 %}
-#endif // _LP64
 
 // =======================Float Reduction==========================================
 
@@ -5437,7 +5194,6 @@ instruct unordered_reduction8D(regD dst, regD src1, legVec src2, legVec vtmp1, l
 
 // =======================Byte Reduction==========================================
 
-#ifdef _LP64
 instruct reductionB(rRegI dst, rRegI src1, legVec src2, legVec vtmp1, legVec vtmp2) %{
   predicate(Matcher::vector_element_basic_type(n->in(2)) == T_BYTE && !VM_Version::supports_avx512bw());
   match(Set dst (AddReductionVI src1 src2));
@@ -5473,7 +5229,6 @@ instruct reductionB_avx512bw(rRegI dst, rRegI src1, vec src2, vec vtmp1, vec vtm
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 // =======================Short Reduction==========================================
 
@@ -6774,7 +6529,6 @@ instruct signumV_reg_evex(vec dst, vec src, vec zero, vec one, kReg ktmp1) %{
 // Result going from high bit to low bit is 0x11100100 = 0xe4
 // ---------------------------------------
 
-#ifdef _LP64
 instruct copySignF_reg(regF dst, regF src, regF tmp1, rRegI tmp2) %{
   match(Set dst (CopySignF dst src));
   effect(TEMP tmp1, TEMP tmp2);
@@ -6800,8 +6554,6 @@ instruct copySignD_imm(regD dst, regD src, regD tmp1, rRegL tmp2, immD zero) %{
   ins_pipe( pipe_slow );
 %}
 
-#endif // _LP64
-
 //----------------------------- CompressBits/ExpandBits ------------------------
 
 instruct compressBitsI_reg(rRegI dst, rRegI src, rRegI mask) %{
@@ -7974,7 +7726,6 @@ instruct vucast(vec dst, vec src) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct vround_float_avx(vec dst, vec src, rRegP tmp, vec xtmp1, vec xtmp2, vec xtmp3, vec xtmp4, rFlagsReg cr) %{
   predicate(!VM_Version::supports_avx512vl() &&
             Matcher::vector_length_in_bytes(n) < 64 &&
@@ -8024,8 +7775,6 @@ instruct vround_reg_evex(vec dst, vec src, rRegP tmp, vec xtmp1, vec xtmp2, kReg
   ins_pipe( pipe_slow );
 %}
 
-#endif // _LP64
-
 // --------------------------------- VectorMaskCmp --------------------------------------
 
 instruct vcmpFD(legVec dst, legVec src1, legVec src2, immI8 cond) %{
@@ -8235,9 +7984,7 @@ instruct extractI(rRegI dst, legVec src, immU8 idx) %{
   predicate(Matcher::vector_length_in_bytes(n->in(1)) <= 16); // src
   match(Set dst (ExtractI src idx));
   match(Set dst (ExtractS src idx));
-#ifdef _LP64
   match(Set dst (ExtractB src idx));
-#endif
   format %{ "extractI $dst,$src,$idx\t!" %}
   ins_encode %{
     assert($idx$$constant < (int)Matcher::vector_length(this, $src), "out of bounds");
@@ -8253,9 +8000,7 @@ instruct vextractI(rRegI dst, legVec src, immI idx, legVec vtmp) %{
             Matcher::vector_length_in_bytes(n->in(1)) == 64); // src
   match(Set dst (ExtractI src idx));
   match(Set dst (ExtractS src idx));
-#ifdef _LP64
   match(Set dst (ExtractB src idx));
-#endif
   effect(TEMP vtmp);
   format %{ "vextractI $dst,$src,$idx\t! using $vtmp as TEMP" %}
   ins_encode %{
@@ -8268,7 +8013,6 @@ instruct vextractI(rRegI dst, legVec src, immI idx, legVec vtmp) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct extractL(rRegL dst, legVec src, immU8 idx) %{
   predicate(Matcher::vector_length(n->in(1)) <= 2); // src
   match(Set dst (ExtractL src idx));
@@ -8296,7 +8040,6 @@ instruct vextractL(rRegL dst, legVec src, immU8 idx, legVec vtmp) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 instruct extractF(legRegF dst, legVec src, immU8 idx, legVec vtmp) %{
   predicate(Matcher::vector_length(n->in(1)) <= 4);
@@ -8561,7 +8304,6 @@ instruct vabsnegD(vec dst, vec src) %{
 
 //------------------------------------- VectorTest --------------------------------------------
 
-#ifdef _LP64
 instruct vptest_lt16(rFlagsRegU cr, legVec src1, legVec src2, legVec vtmp) %{
   predicate(Matcher::vector_length_in_bytes(n->in(1)) < 16);
   match(Set cr (VectorTest src1 src2));
@@ -8629,7 +8371,6 @@ instruct ktest_ge8(rFlagsRegU cr, kReg src1, kReg src2) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 //------------------------------------- LoadMask --------------------------------------------
 
@@ -8880,7 +8621,6 @@ instruct loadIotaIndices(vec dst, immI_0 src) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct VectorPopulateIndex(vec dst, rRegI src1, immI_1 src2, vec vtmp) %{
   match(Set dst (PopulateIndex src1 src2));
   effect(TEMP dst, TEMP vtmp);
@@ -8912,7 +8652,7 @@ instruct VectorPopulateLIndex(vec dst, rRegL src1, immI_1 src2, vec vtmp) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
+
 //-------------------------------- Rearrange ----------------------------------
 
 // LoadShuffle/Rearrange for Byte
@@ -9493,7 +9233,6 @@ instruct vmasked_store_evex(memory mem, vec src, kReg mask) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct verify_vector_alignment(rRegP addr, immL32 mask, rFlagsReg cr) %{
   match(Set addr (VerifyVectorAlignment addr mask));
   effect(KILL cr);
@@ -9707,7 +9446,6 @@ instruct vmask_first_or_last_true_avx(rRegI dst, vec mask, immI size, rRegL tmp,
 %}
 
 // --------------------------------- Compress/Expand Operations ---------------------------
-#ifdef _LP64
 instruct vcompress_reg_avx(vec dst, vec src, vec mask, rRegI rtmp, rRegL rscratch, vec perm, vec xtmp, rFlagsReg cr) %{
   predicate(!VM_Version::supports_avx512vl() && Matcher::vector_length_in_bytes(n) <= 32);
   match(Set dst (CompressV src mask));
@@ -9723,7 +9461,6 @@ instruct vcompress_reg_avx(vec dst, vec src, vec mask, rRegI rtmp, rRegL rscratc
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 instruct vcompress_expand_reg_evex(vec dst, vec src, kReg mask) %{
   predicate(VM_Version::supports_avx512vl() || Matcher::vector_length_in_bytes(n) == 64);
@@ -9751,8 +9488,6 @@ instruct vcompress_mask_reg_evex(kReg dst, kReg mask, rRegL rtmp1, rRegL rtmp2,
   ins_pipe( pipe_slow );
 %}
 
-#endif // _LP64
-
 // -------------------------------- Bit and Byte Reversal Vector Operations ------------------------
 
 instruct vreverse_reg(vec dst, vec src, vec xtmp1, vec xtmp2, rRegI rtmp) %{
@@ -10473,7 +10208,6 @@ instruct mask_all_evexI_LE32(kReg dst, rRegI src) %{
   ins_pipe( pipe_slow );
 %}
 
-#ifdef _LP64
 instruct mask_not_immLT8(kReg dst, kReg src, rRegI rtmp, kReg ktmp, immI_M1 cnt) %{
   predicate(Matcher::vector_length(n) < 8 && VM_Version::supports_avx512dq());
   match(Set dst (XorVMask src (MaskAll cnt)));
@@ -10538,7 +10272,6 @@ instruct long_to_mask_evex(kReg dst, rRegL src) %{
   %}
   ins_pipe( pipe_slow );
 %}
-#endif
 
 instruct mask_opers_evex(kReg dst, kReg src1, kReg src2, kReg kscratch) %{
   match(Set dst (AndVMask src1 src2));

From 6edebc7b836c37e43f028b01820314163f14b256 Mon Sep 17 00:00:00 2001
From: Aleksey Shipilev
Date: Fri, 28 Mar 2025 21:25:57 +0100
Subject: [PATCH 2/4] Touchup

---
 src/hotspot/cpu/x86/x86.ad | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 762c640dd0965..155d65e531e7e 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1548,12 +1548,8 @@ bool Matcher::match_rule_supported(int opcode) {
       }
       break;
     case Op_CompressBits:
-      if (!VM_Version::supports_bmi2() || (UseSSE < 2)) {
-        return false;
-      }
-      break;
    case Op_ExpandBits:
-      if (!VM_Version::supports_bmi2() || ((UseSSE < 2 || !VM_Version::supports_bmi1()))) {
+      if (!VM_Version::supports_bmi2()) {
        return false;
      }
      break;

From c5cacd6e6ebe70ccc1441f43730433afd98f0271 Mon Sep 17 00:00:00 2001
From: Aleksey Shipilev
Date: Mon, 7 Apr 2025 10:25:27 +0200
Subject: [PATCH 3/4] Revert some accidental removals

---
 src/hotspot/cpu/x86/x86.ad | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index db543df604957..2d7548c37a87e 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -2537,10 +2537,20 @@ void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
       __ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
       break;
     case Op_VecX:
-      __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
+      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
+        __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
+      } else {
+        __ vpxor(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), 2);
+        __ vinsertf32x4(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset),0x0);
+      }
       break;
     case Op_VecY:
-      __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
+      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
+        __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
+      } else {
+        __ vpxor(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), 2);
+        __ vinsertf64x4(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset),0x0);
+      }
       break;
     case Op_VecZ:
       __ evmovdquq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset), 2);
@@ -2557,10 +2567,20 @@ void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
       __ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
       break;
     case Op_VecX:
-      __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
+      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
+        __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
+      }
+      else {
+        __ vextractf32x4(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 0x0);
+      }
       break;
     case Op_VecY:
-      __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
+      if ((UseAVX < 3) || VM_Version::supports_avx512vl()) {
+        __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
+      }
+      else {
+        __ vextractf64x4(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 0x0);
+      }
       break;
     case Op_VecZ:
       __ evmovdquq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 2);

From 5d0c852ea738bf07f2274e8bce600a9f0b8228a0 Mon Sep 17 00:00:00 2001
From: Aleksey Shipilev
Date: Mon, 7 Apr 2025 10:43:50 +0200
Subject: [PATCH 4/4] Cleanup ADLC as well

---
 src/hotspot/share/adlc/archDesc.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/hotspot/share/adlc/archDesc.cpp b/src/hotspot/share/adlc/archDesc.cpp
index 8a7d0ee40c40f..263752c521d6f 100644
--- a/src/hotspot/share/adlc/archDesc.cpp
+++ b/src/hotspot/share/adlc/archDesc.cpp
@@ -751,12 +751,11 @@ bool ArchDesc::check_usage() {
   callback.do_form_by_name("sRegL");
 
   // special generic vector operands only used in Matcher::pd_specialize_generic_vector_operand
-  // x86_32 combine x86.ad and x86_32.ad, the vec*/legVec* can not be cleaned from IA32
 #if defined(AARCH64)
   callback.do_form_by_name("vecA");
   callback.do_form_by_name("vecD");
   callback.do_form_by_name("vecX");
-#elif defined(IA32) || defined(AMD64)
+#elif defined(AMD64)
   callback.do_form_by_name("vecS");
   callback.do_form_by_name("vecD");
   callback.do_form_by_name("vecX");