diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
index 154b62db47fdf..66da765c7b629 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -502,7 +502,7 @@ void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1,
 
   bind(DO_LONG);
   mv(orig_cnt, cnt1);
-  if (AvoidUnalignedAccesses) {
+  if (!UseUnalignedAccesses) {
     Label ALIGNED;
     andi(unaligned_elems, str1, 0x7);
     beqz(unaligned_elems, ALIGNED);
@@ -1012,7 +1012,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
     slli(tmp3, result_tmp, haystack_chr_shift); // result as tmp
     add(haystack, haystack, tmp3);
     neg(hlen_neg, tmp3);
-    if (AvoidUnalignedAccesses) {
+    if (!UseUnalignedAccesses) {
       // preload first value, then we will read by 1 character per loop, instead of four
       // just shifting previous ch2 right by size of character in bits
       add(tmp3, haystack, hlen_neg);
@@ -1028,7 +1028,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
 
     bind(CH1_LOOP);
     add(tmp3, haystack, hlen_neg);
-    if (AvoidUnalignedAccesses) {
+    if (!UseUnalignedAccesses) {
       srli(ch2, ch2, isLL ? 8 : 16);
       (this->*haystack_load_1chr)(tmp3, Address(tmp3, isLL ? 3 : 6), noreg);
       slli(tmp3, tmp3, isLL ? 24 : 48);
@@ -1053,7 +1053,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
     slli(tmp3, result_tmp, haystack_chr_shift);
     add(haystack, haystack, tmp3);
     neg(hlen_neg, tmp3);
-    if (AvoidUnalignedAccesses) {
+    if (!UseUnalignedAccesses) {
       // preload first value, then we will read by 1 character per loop, instead of two
       // just shifting previous ch2 right by size of character in bits
       add(tmp3, haystack, hlen_neg);
@@ -1062,7 +1062,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
     }
     bind(CH1_LOOP);
     add(tmp3, haystack, hlen_neg);
-    if (AvoidUnalignedAccesses) {
+    if (!UseUnalignedAccesses) {
       srli(ch2, ch2, isLL ? 8 : 16);
       (this->*haystack_load_1chr)(tmp3, Address(tmp3, isLL ? 1 : 2), noreg);
       slli(tmp3, tmp3, isLL ? 8 : 16);
@@ -1093,7 +1093,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
 
     bind(FIRST_LOOP);
     add(ch2, haystack, hlen_neg);
-    if (AvoidUnalignedAccesses) {
+    if (!UseUnalignedAccesses) {
      (this->*haystack_load_1chr)(tmp2, Address(ch2, isLL ? 1 : 2), noreg); // we need a temp register, we can safely use hlen_tmp here, which is a synonym for tmp2
      (this->*haystack_load_1chr)(ch2, Address(ch2), noreg);
      slli(tmp2, tmp2, isLL ? 8 : 16);
@@ -1164,7 +1164,7 @@ void C2_MacroAssembler::string_compare_long_same_encoding(Register result, Regis
   // load first parts of strings and finish initialization while loading
   beq(str1, str2, *DONE);
   // Alignment
-  if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
+  if (!UseUnalignedAccesses && (base_offset % 8) != 0) {
     lwu(tmp1, Address(str1));
     lwu(tmp2, Address(str2));
     bne(tmp1, tmp2, DIFFERENCE);
@@ -1177,7 +1177,7 @@ void C2_MacroAssembler::string_compare_long_same_encoding(Register result, Regis
     ble(cnt2, t0, *SHORT_STRING);
   }
 #ifdef ASSERT
-  if (AvoidUnalignedAccesses) {
+  if (!UseUnalignedAccesses) {
     Label align_ok;
     orr(t0, str1, str2);
     andi(t0, t0, 0x7);
@@ -1206,7 +1206,7 @@ void C2_MacroAssembler::string_compare_long_same_encoding(Register result, Regis
 
   // main loop
   bind(NEXT_WORD);
-  // 8-byte aligned loads when AvoidUnalignedAccesses is enabled
+  // 8-byte aligned loads when UseUnalignedAccesses is not enabled
   add(t0, str1, cnt2);
   ld(tmp1, Address(t0));
   add(t0, str2, cnt2);
@@ -1494,7 +1494,7 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
   la(a2, Address(a2, base_offset));
 
   // Load 4 bytes once to compare for alignment before main loop.
-  if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
+  if (!UseUnalignedAccesses && (base_offset % 8) != 0) {
     subi(cnt1, cnt1, elem_per_word / 2);
     bltz(cnt1, TAIL03);
     lwu(tmp1, Address(a1));
@@ -1509,7 +1509,7 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
   bltz(cnt1, SHORT);
 
 #ifdef ASSERT
-  if (AvoidUnalignedAccesses) {
+  if (!UseUnalignedAccesses) {
     Label align_ok;
     orr(t0, a1, a2);
     andi(t0, t0, 0x7);
@@ -1600,7 +1600,7 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2,
   mv(result, false);
 
   // Load 4 bytes once to compare for alignment before main loop.
-  if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
+  if (!UseUnalignedAccesses && (base_offset % 8) != 0) {
     subi(cnt1, cnt1, 4);
     bltz(cnt1, TAIL03);
     lwu(tmp1, Address(a1));
@@ -1615,7 +1615,7 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2,
   bltz(cnt1, SHORT);
 
 #ifdef ASSERT
-  if (AvoidUnalignedAccesses) {
+  if (!UseUnalignedAccesses) {
     Label align_ok;
     orr(t0, a1, a2);
     andi(t0, t0, 0x7);
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 115b90a0087d7..bdeb62e487820 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -2894,7 +2894,7 @@ void MacroAssembler::load_short_misaligned(Register dst, Address src, Register t
   if (granularity != 1 && granularity != 2) {
     ShouldNotReachHere();
   }
-  if (AvoidUnalignedAccesses && (granularity != 2)) {
+  if (!UseUnalignedAccesses && (granularity != 2)) {
     assert_different_registers(dst, tmp);
     assert_different_registers(tmp, src.base());
     is_signed ? lb(tmp, Address(src.base(), src.offset() + 1)) : lbu(tmp, Address(src.base(), src.offset() + 1));
@@ -2908,7 +2908,7 @@ void MacroAssembler::load_short_misaligned(Register dst, Address src, Register t
 
 // granularity is 1, 2 OR 4 bytes per load, if granularity 2 or 4 then dst and src.base() allowed to be the same register
 void MacroAssembler::load_int_misaligned(Register dst, Address src, Register tmp, bool is_signed, int granularity) {
-  if (AvoidUnalignedAccesses && (granularity != 4)) {
+  if (!UseUnalignedAccesses && (granularity != 4)) {
     switch(granularity) {
       case 1:
         assert_different_registers(dst, tmp, src.base());
@@ -2941,7 +2941,7 @@ void MacroAssembler::load_int_misaligned(Register dst, Address src, Register tmp
 
 // granularity is 1, 2, 4 or 8 bytes per load, if granularity 4 or 8 then dst and src.base() allowed to be same register
 void MacroAssembler::load_long_misaligned(Register dst, Address src, Register tmp, int granularity) {
-  if (AvoidUnalignedAccesses && (granularity != 8)) {
+  if (!UseUnalignedAccesses && (granularity != 8)) {
     switch(granularity){
       case 1:
         assert_different_registers(dst, tmp, src.base());
diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index ec268d9bb65e7..30a4b9549ed5c 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -2617,7 +2617,7 @@ class StubGenerator: public StubCodeGenerator {
            (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
 
 #ifdef ASSERT
-    if (AvoidUnalignedAccesses) {
+    if (!UseUnalignedAccesses) {
       Label align_ok;
       __ andi(t0, strL, 0x7);
       __ beqz(t0, align_ok);
@@ -2679,7 +2679,7 @@ class StubGenerator: public StubCodeGenerator {
              tmpU = isLU ? tmp2 : tmp1, // where to keep U for comparison
              tmpL = isLU ? tmp1 : tmp2; // where to keep L for comparison
 
-    if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
+    if (!UseUnalignedAccesses && (base_offset % 8) != 0) {
       // Load 4 bytes from strL to make sure main loop is 8-byte aligned
       // cnt2 is >= 68 here, no need to check it for >= 0
       __ lwu(tmpL, Address(strL));
@@ -2693,7 +2693,7 @@ class StubGenerator: public StubCodeGenerator {
       __ subi(cnt2, cnt2, wordSize / 2);
     }
 
-    // we are now 8-bytes aligned on strL when AvoidUnalignedAccesses is true
+    // we are now 8-bytes aligned on strL when UseUnalignedAccesses is false
     __ subi(cnt2, cnt2, wordSize * 2);
     __ bltz(cnt2, TAIL);
     __ bind(SMALL_LOOP); // smaller loop
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index 53f7ff9bc66f8..dcda78a25c92e 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -158,12 +158,7 @@ void VM_Version::common_initialize() {
     }
   }
 
-  if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
-    FLAG_SET_DEFAULT(AvoidUnalignedAccesses,
-                     unaligned_scalar.value() != MISALIGNED_SCALAR_FAST);
-  }
-
-  if (!AvoidUnalignedAccesses) {
+  if (UseUnalignedAccesses) {
     if (FLAG_IS_DEFAULT(UsePoly1305Intrinsics)) {
       FLAG_SET_DEFAULT(UsePoly1305Intrinsics, true);
     }
@@ -219,7 +214,7 @@ void VM_Version::common_initialize() {
 
   // Misc Intrinsics that could depend on RVV.
 
-  if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
+  if (UseUnalignedAccesses && (UseZba || UseRVV)) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
     }
@@ -309,7 +304,7 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseMulAddIntrinsic, true);
   }
 
-  if (!AvoidUnalignedAccesses) {
+  if (UseUnalignedAccesses) {
    if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
    }
@@ -318,7 +313,7 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
   }
 
-  if (!AvoidUnalignedAccesses) {
+  if (UseUnalignedAccesses) {
     if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
       FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
     }
@@ -327,7 +322,7 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
   }
 
-  if (!AvoidUnalignedAccesses) {
+  if (UseUnalignedAccesses) {
     if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
       FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
     }
@@ -336,7 +331,7 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
   }
 
-  if (!AvoidUnalignedAccesses) {
+  if (UseUnalignedAccesses) {
     if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
     }
@@ -371,7 +366,7 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
   }
 
-  if (!AvoidUnalignedAccesses) {
+  if (UseUnalignedAccesses) {
     if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) {
       FLAG_SET_DEFAULT(UseMD5Intrinsics, true);
     }
@@ -386,7 +381,7 @@ void VM_Version::c2_initialize() {
   }
 
   // SHA-1, no RVV required though.
-  if (UseSHA && !AvoidUnalignedAccesses) {
+  if (UseSHA && UseUnalignedAccesses) {
     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
     }