From dd0f5aaa73a07d49d7dcfbe4556d29968ecd1770 Mon Sep 17 00:00:00 2001
From: Paul Hohensee
Date: Fri, 13 Aug 2021 20:04:09 +0000
Subject: [PATCH] Backport 6a91c73dda1cb197d74f0cb3b491d9c47f4bb40c

---
 src/hotspot/cpu/aarch64/aarch64.ad            |  18 +--
 .../cpu/aarch64/c1_LIRAssembler_aarch64.cpp   |   2 +-
 src/hotspot/cpu/aarch64/immediate_aarch64.cpp | 105 +++++++++---------
 src/hotspot/cpu/aarch64/immediate_aarch64.hpp |   8 +-
 .../cpu/aarch64/macroAssembler_aarch64.cpp    |  54 ++++-----
 .../cpu/aarch64/macroAssembler_aarch64.hpp    |  16 +--
 .../cpu/aarch64/templateTable_aarch64.cpp     |   4 +-
 7 files changed, 104 insertions(+), 103 deletions(-)

diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 311a9ca046b..e9275ef7005 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -2968,7 +2968,7 @@ encode %{
 
   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
     MacroAssembler _masm(&cbuf);
-    u_int32_t con = (u_int32_t)$src$$constant;
+    uint32_t con = (uint32_t)$src$$constant;
     Register dst_reg = as_Register($dst$$reg);
     if (con == 0) {
       __ movw(dst_reg, zr);
@@ -2980,7 +2980,7 @@ encode %{
   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
     MacroAssembler _masm(&cbuf);
     Register dst_reg = as_Register($dst$$reg);
-    u_int64_t con = (u_int64_t)$src$$constant;
+    uint64_t con = (uint64_t)$src$$constant;
     if (con == 0) {
       __ mov(dst_reg, zr);
     } else {
@@ -3022,7 +3022,7 @@ encode %{
   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
     MacroAssembler _masm(&cbuf);
     Register dst_reg = as_Register($dst$$reg);
-    __ mov(dst_reg, (u_int64_t)1);
+    __ mov(dst_reg, (uint64_t)1);
   %}
 
   enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
@@ -3156,7 +3156,7 @@ encode %{
   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
     MacroAssembler _masm(&cbuf);
     Register reg1 = as_Register($src1$$reg);
-    u_int32_t val = (u_int32_t)$src2$$constant;
+    uint32_t val = (uint32_t)$src2$$constant;
     __ movw(rscratch1, val);
     __ cmpw(reg1, rscratch1);
   %}
@@ -3178,7 +3178,7 @@
       __ adds(zr, reg, -val);
     } else {
       // aargh, Long.MIN_VALUE is a special case
-      __ orr(rscratch1, zr, (u_int64_t)val);
+      __ orr(rscratch1, zr, (uint64_t)val);
       __ subs(zr, reg, rscratch1);
     }
   %}
@@ -3186,7 +3186,7 @@ encode %{
   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
     MacroAssembler _masm(&cbuf);
     Register reg1 = as_Register($src1$$reg);
-    u_int64_t val = (u_int64_t)$src2$$constant;
+    uint64_t val = (uint64_t)$src2$$constant;
     __ mov(rscratch1, val);
     __ cmp(reg1, rscratch1);
   %}
@@ -13444,8 +13444,8 @@ instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlag
 
 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
 %{
-  predicate((u_int64_t)n->in(2)->get_long()
-            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
+  predicate((uint64_t)n->in(2)->get_long()
+            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
   match(Set dummy (ClearArray cnt base));
   effect(USE_KILL base);
 
@@ -13453,7 +13453,7 @@ instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg
   format %{ "ClearArray $cnt, $base" %}
 
   ins_encode %{
-    __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
+    __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
   %}
 
   ins_pipe(pipe_class_memory);
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index b4229b2026b..670fb58121e 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -2042,7 +2042,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
   } else if (code == lir_cmp_l2i) {
     Label done;
     __ cmp(left->as_register_lo(), right->as_register_lo());
-    __ mov(dst->as_register(), (u_int64_t)-1L);
+    __ mov(dst->as_register(), (uint64_t)-1L);
     __ br(Assembler::LT, done);
     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
     __ bind(done);
diff --git a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
index 81a799587b9..28284a865ac 100644
--- a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
@@ -23,6 +23,7 @@
  */
 
 #include <stdlib.h>
+#include <stdint.h>
 #include "immediate_aarch64.hpp"
 
 // there are at most 2^13 possible logical immediate encodings
@@ -34,14 +35,14 @@ static int li_table_entry_count;
 // for forward lookup we just use a direct array lookup
 // and assume that the cient has supplied a valid encoding
 // table[encoding] = immediate
-static u_int64_t LITable[LI_TABLE_SIZE];
+static uint64_t LITable[LI_TABLE_SIZE];
 
 // for reverse lookup we need a sparse map so we store a table of
 // immediate and encoding pairs sorted by immediate value
 
 struct li_pair {
-  u_int64_t immediate;
-  u_int32_t encoding;
+  uint64_t immediate;
+  uint32_t encoding;
 };
 
 static struct li_pair InverseLITable[LI_TABLE_SIZE];
@@ -63,9 +64,9 @@ int compare_immediate_pair(const void *i1, const void *i2)
 // helper functions used by expandLogicalImmediate
 
 // for i = 1, ... N result = 1 other bits are zero
-static inline u_int64_t ones(int N)
+static inline uint64_t ones(int N)
 {
-  return (N == 64 ? (u_int64_t)-1UL : ((1UL << N) - 1));
+  return (N == 64 ? (uint64_t)-1UL : ((1UL << N) - 1));
 }
 
 /*
@@ -73,49 +74,49 @@
  */
 
 // 32 bit mask with bits [hi,...,lo] set
-static inline u_int32_t mask32(int hi = 31, int lo = 0)
+static inline uint32_t mask32(int hi = 31, int lo = 0)
 {
   int nbits = (hi + 1) - lo;
   return ((1 << nbits) - 1) << lo;
 }
 
-static inline u_int64_t mask64(int hi = 63, int lo = 0)
+static inline uint64_t mask64(int hi = 63, int lo = 0)
 {
   int nbits = (hi + 1) - lo;
   return ((1L << nbits) - 1) << lo;
 }
 
 // pick bits [hi,...,lo] from val
-static inline u_int32_t pick32(u_int32_t val, int hi = 31, int lo = 0)
+static inline uint32_t pick32(uint32_t val, int hi = 31, int lo = 0)
 {
   return (val & mask32(hi, lo));
 }
 
 // pick bits [hi,...,lo] from val
-static inline u_int64_t pick64(u_int64_t val, int hi = 31, int lo = 0)
+static inline uint64_t pick64(uint64_t val, int hi = 31, int lo = 0)
 {
   return (val & mask64(hi, lo));
 }
 
 // mask [hi,lo] and shift down to start at bit 0
-static inline u_int32_t pickbits32(u_int32_t val, int hi = 31, int lo = 0)
+static inline uint32_t pickbits32(uint32_t val, int hi = 31, int lo = 0)
 {
   return (pick32(val, hi, lo) >> lo);
 }
 
 // mask [hi,lo] and shift down to start at bit 0
-static inline u_int64_t pickbits64(u_int64_t val, int hi = 63, int lo = 0)
+static inline uint64_t pickbits64(uint64_t val, int hi = 63, int lo = 0)
 {
   return (pick64(val, hi, lo) >> lo);
 }
 
 // result<0> to val
-static inline u_int64_t pickbit(u_int64_t val, int N)
+static inline uint64_t pickbit(uint64_t val, int N)
 {
   return pickbits64(val, N, N);
 }
 
-static inline u_int32_t uimm(u_int32_t val, int hi, int lo)
+static inline uint32_t uimm(uint32_t val, int hi, int lo)
 {
   return pickbits32(val, hi, lo);
 }
@@ -123,11 +124,11 @@ static inline u_int32_t uimm(u_int32_t val, int hi, int lo)
 
 // SPEC bits(M*N) Replicate(bits(M) x, integer N);
 // this is just an educated guess
-u_int64_t replicate(u_int64_t bits, int nbits, int count)
+uint64_t replicate(uint64_t bits, int nbits, int count)
 {
-  u_int64_t result = 0;
+  uint64_t result = 0;
   // nbits may be 64 in which case we want mask to be -1
-  u_int64_t mask = ones(nbits);
+  uint64_t mask = ones(nbits);
   for (int i = 0; i < count ; i++) {
     result <<= nbits;
     result |= (bits & mask);
@@ -140,24 +141,24 @@
 // encoding must be treated as an UNALLOC instruction
 
 // construct a 32 bit immediate value for a logical immediate operation
-int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
-                           u_int32_t imms, u_int64_t &bimm)
+int expandLogicalImmediate(uint32_t immN, uint32_t immr,
+                           uint32_t imms, uint64_t &bimm)
 {
-  int len;                 // ought to be <= 6
-  u_int32_t levels;        // 6 bits
-  u_int32_t tmask_and;     // 6 bits
-  u_int32_t wmask_and;     // 6 bits
-  u_int32_t tmask_or;      // 6 bits
-  u_int32_t wmask_or;      // 6 bits
-  u_int64_t imm64;         // 64 bits
-  u_int64_t tmask, wmask;  // 64 bits
-  u_int32_t S, R, diff;    // 6 bits?
+  int len;                // ought to be <= 6
+  uint32_t levels;        // 6 bits
+  uint32_t tmask_and;     // 6 bits
+  uint32_t wmask_and;     // 6 bits
+  uint32_t tmask_or;      // 6 bits
+  uint32_t wmask_or;      // 6 bits
+  uint64_t imm64;         // 64 bits
+  uint64_t tmask, wmask;  // 64 bits
+  uint32_t S, R, diff;    // 6 bits?
 
   if (immN == 1) {
     len = 6; // looks like 7 given the spec above but this cannot be!
   } else {
     len = 0;
-    u_int32_t val = (~imms & 0x3f);
+    uint32_t val = (~imms & 0x3f);
     for (int i = 5; i > 0; i--) {
       if (val & (1 << i)) {
         len = i;
@@ -170,7 +171,7 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
   // for valid inputs leading 1s in immr must be less than leading
   // zeros in imms
   int len2 = 0;                   // ought to be < len
-  u_int32_t val2 = (~immr & 0x3f);
+  uint32_t val2 = (~immr & 0x3f);
   for (int i = 5; i > 0; i--) {
     if (!(val2 & (1 << i))) {
       len2 = i;
@@ -199,12 +200,12 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
 
   for (int i = 0; i < 6; i++) {
     int nbits = 1 << i;
-    u_int64_t and_bit = pickbit(tmask_and, i);
-    u_int64_t or_bit = pickbit(tmask_or, i);
-    u_int64_t and_bits_sub = replicate(and_bit, 1, nbits);
-    u_int64_t or_bits_sub = replicate(or_bit, 1, nbits);
-    u_int64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits);
-    u_int64_t or_bits_top = (0 << nbits) | or_bits_sub;
+    uint64_t and_bit = pickbit(tmask_and, i);
+    uint64_t or_bit = pickbit(tmask_or, i);
+    uint64_t and_bits_sub = replicate(and_bit, 1, nbits);
+    uint64_t or_bits_sub = replicate(or_bit, 1, nbits);
+    uint64_t and_bits_top = (and_bits_sub << nbits) | ones(nbits);
+    uint64_t or_bits_top = (0 << nbits) | or_bits_sub;
 
     tmask = ((tmask
               & (replicate(and_bits_top, 2 * nbits, 32 / nbits)))
@@ -218,12 +219,12 @@ int expandLogicalImmediate(u_int32_t immN, u_int32_t immr,
 
   for (int i = 0; i < 6; i++) {
     int nbits = 1 << i;
-    u_int64_t and_bit = pickbit(wmask_and, i);
-    u_int64_t or_bit = pickbit(wmask_or, i);
-    u_int64_t and_bits_sub = replicate(and_bit, 1, nbits);
-    u_int64_t or_bits_sub = replicate(or_bit, 1, nbits);
-    u_int64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub;
-    u_int64_t or_bits_top = (or_bits_sub << nbits) | 0;
+    uint64_t and_bit = pickbit(wmask_and, i);
+    uint64_t or_bit = pickbit(wmask_or, i);
+    uint64_t and_bits_sub = replicate(and_bit, 1, nbits);
+    uint64_t or_bits_sub = replicate(or_bit, 1, nbits);
+    uint64_t and_bits_top = (ones(nbits) << nbits) | and_bits_sub;
+    uint64_t or_bits_top = (or_bits_sub << nbits) | 0;
 
     wmask = ((wmask
              & (replicate(and_bits_top, 2 * nbits, 32 / nbits)))
@@ -248,9 +249,9 @@ static void initLITables()
 {
   li_table_entry_count = 0;
   for (unsigned index = 0; index < LI_TABLE_SIZE; index++) {
-    u_int32_t N = uimm(index, 12, 12);
-    u_int32_t immr = uimm(index, 11, 6);
-    u_int32_t imms = uimm(index, 5, 0);
+    uint32_t N = uimm(index, 12, 12);
+    uint32_t immr = uimm(index, 11, 6);
+    uint32_t imms = uimm(index, 5, 0);
     if (expandLogicalImmediate(N, immr, imms, LITable[index])) {
       InverseLITable[li_table_entry_count].immediate = LITable[index];
       InverseLITable[li_table_entry_count].encoding = index;
@@ -264,12 +265,12 @@ static void initLITables()
 
 // public APIs provided for logical immediate lookup and reverse lookup
 
-u_int64_t logical_immediate_for_encoding(u_int32_t encoding)
+uint64_t logical_immediate_for_encoding(uint32_t encoding)
 {
   return LITable[encoding];
 }
 
-u_int32_t encoding_for_logical_immediate(u_int64_t immediate)
+uint32_t encoding_for_logical_immediate(uint64_t immediate)
 {
   struct li_pair pair;
   struct li_pair *result;
@@ -293,15 +294,15 @@ u_int32_t encoding_for_logical_immediate(u_int64_t immediate)
 // fpimm[3:0] = fraction (assuming leading 1)
 // i.e. F = s * 1.f * 2^(e - b)
 
-u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp)
+uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp)
 {
   union {
     float fpval;
     double dpval;
-    u_int64_t val;
+    uint64_t val;
   };
-  u_int32_t s, e, f;
+  uint32_t s, e, f;
 
   s = (imm8 >> 7 ) & 0x1;
   e = (imm8 >> 4) & 0x7;
   f = imm8 & 0xf;
@@ -329,7 +330,7 @@ u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp)
   return val;
 }
 
-u_int32_t encoding_for_fp_immediate(float immediate)
+uint32_t encoding_for_fp_immediate(float immediate)
 {
   // given a float which is of the form
   //
@@ -341,10 +342,10 @@ u_int32_t encoding_for_fp_immediate(float immediate)
 
   union {
     float fpval;
-    u_int32_t val;
+    uint32_t val;
   };
   fpval = immediate;
-  u_int32_t s, r, f, res;
+  uint32_t s, r, f, res;
   // sign bit is 31
   s = (val >> 31) & 0x1;
   // exponent is bits 30-23 but we only want the bottom 3 bits
diff --git a/src/hotspot/cpu/aarch64/immediate_aarch64.hpp b/src/hotspot/cpu/aarch64/immediate_aarch64.hpp
index 66107722e56..0cbdb562088 100644
--- a/src/hotspot/cpu/aarch64/immediate_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/immediate_aarch64.hpp
@@ -46,9 +46,9 @@
  * encoding then a map lookup will return 0xffffffff.
  */
 
-u_int64_t logical_immediate_for_encoding(u_int32_t encoding);
-u_int32_t encoding_for_logical_immediate(u_int64_t immediate);
-u_int64_t fp_immediate_for_encoding(u_int32_t imm8, int is_dp);
-u_int32_t encoding_for_fp_immediate(float immediate);
+uint64_t logical_immediate_for_encoding(uint32_t encoding);
+uint32_t encoding_for_logical_immediate(uint64_t immediate);
+uint64_t fp_immediate_for_encoding(uint32_t imm8, int is_dp);
+uint32_t encoding_for_fp_immediate(float immediate);
 
 #endif // _IMMEDIATE_H
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 6429b9dde09..3075da1781a 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -93,7 +93,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
     offset = target-branch;
     int shift = Instruction_aarch64::extract(insn, 31, 31);
     if (shift) {
-      u_int64_t dest = (u_int64_t)target;
+      uint64_t dest = (uint64_t)target;
       uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
       unsigned offset_lo = dest & 0xfff;
@@ -146,7 +146,7 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
     Instruction_aarch64::spatch(branch, 23, 5, offset);
     Instruction_aarch64::patch(branch, 30, 29, offset_lo);
   } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
-    u_int64_t dest = (u_int64_t)target;
+    uint64_t dest = (uint64_t)target;
     // Move wide constant
     assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
     assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
@@ -272,13 +272,13 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
       ShouldNotReachHere();
     }
   } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
-    u_int32_t *insns = (u_int32_t *)insn_addr;
+    uint32_t *insns = (uint32_t *)insn_addr;
     // Move wide constant: movz, movk, movk. See movptr().
     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
-    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
-                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
-                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
+    return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+                   + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
+                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
   } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
              Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
     return 0;
@@ -1480,7 +1480,7 @@ void MacroAssembler::null_check(Register reg, int offset) {
 
 void MacroAssembler::mov(Register r, Address dest) {
   code_section()->relocate(pc(), dest.rspec());
-  u_int64_t imm64 = (u_int64_t)dest.target();
+  uint64_t imm64 = (uint64_t)dest.target();
   movptr(r, imm64);
 }
 
@@ -1513,20 +1513,20 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
 // imm32 == hex abcdefgh T2S: Vd = abcdefghabcdefgh
 // imm32 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
 // T1D/T2D: invalid
-void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
+void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32) {
   assert(T != T1D && T != T2D, "invalid arrangement");
   if (T == T8B || T == T16B) {
     assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
     movi(Vd, T, imm32 & 0xff, 0);
     return;
   }
-  u_int32_t nimm32 = ~imm32;
+  uint32_t nimm32 = ~imm32;
   if (T == T4H || T == T8H) {
     assert((imm32 & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
     imm32 &= 0xffff;
     nimm32 &= 0xffff;
   }
-  u_int32_t x = imm32;
+  uint32_t x = imm32;
   int movi_cnt = 0;
   int movn_cnt = 0;
   while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
@@ -1550,7 +1550,7 @@ void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32)
   }
 }
 
-void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
+void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
 {
 #ifndef PRODUCT
   {
@@ -1564,7 +1564,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
   } else {
     // we can use a combination of MOVZ or MOVN with
     // MOVK to build up the constant
-    u_int64_t imm_h[4];
+    uint64_t imm_h[4];
     int zero_count = 0;
     int neg_count = 0;
     int i;
@@ -1585,7 +1585,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
     } else if (zero_count == 3) {
       for (i = 0; i < 4; i++) {
         if (imm_h[i] != 0L) {
-          movz(dst, (u_int32_t)imm_h[i], (i << 4));
+          movz(dst, (uint32_t)imm_h[i], (i << 4));
           break;
         }
       }
@@ -1593,7 +1593,7 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
       // one MOVN will do
       for (int i = 0; i < 4; i++) {
         if (imm_h[i] != 0xffffL) {
-          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
+          movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
           break;
         }
       }
@@ -1601,69 +1601,69 @@ void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
       // one MOVZ and one MOVK will do
       for (i = 0; i < 3; i++) {
         if (imm_h[i] != 0L) {
-          movz(dst, (u_int32_t)imm_h[i], (i << 4));
+          movz(dst, (uint32_t)imm_h[i], (i << 4));
           i++;
           break;
         }
       }
       for (;i < 4; i++) {
         if (imm_h[i] != 0L) {
-          movk(dst, (u_int32_t)imm_h[i], (i << 4));
+          movk(dst, (uint32_t)imm_h[i], (i << 4));
         }
       }
     } else if (neg_count == 2) {
       // one MOVN and one MOVK will do
       for (i = 0; i < 4; i++) {
         if (imm_h[i] != 0xffffL) {
-          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
+          movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
           i++;
           break;
         }
       }
       for (;i < 4; i++) {
         if (imm_h[i] != 0xffffL) {
-          movk(dst, (u_int32_t)imm_h[i], (i << 4));
+          movk(dst, (uint32_t)imm_h[i], (i << 4));
         }
       }
     } else if (zero_count == 1) {
       // one MOVZ and two MOVKs will do
       for (i = 0; i < 4; i++) {
         if (imm_h[i] != 0L) {
-          movz(dst, (u_int32_t)imm_h[i], (i << 4));
+          movz(dst, (uint32_t)imm_h[i], (i << 4));
           i++;
           break;
         }
       }
       for (;i < 4; i++) {
         if (imm_h[i] != 0x0L) {
-          movk(dst, (u_int32_t)imm_h[i], (i << 4));
+          movk(dst, (uint32_t)imm_h[i], (i << 4));
         }
       }
     } else if (neg_count == 1) {
       // one MOVN and two MOVKs will do
       for (i = 0; i < 4; i++) {
         if (imm_h[i] != 0xffffL) {
-          movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
+          movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4));
           i++;
           break;
         }
       }
       for (;i < 4; i++) {
         if (imm_h[i] != 0xffffL) {
-          movk(dst, (u_int32_t)imm_h[i], (i << 4));
+          movk(dst, (uint32_t)imm_h[i], (i << 4));
         }
       }
     } else {
       // use a MOVZ and 3 MOVKs (makes it easier to debug)
-      movz(dst, (u_int32_t)imm_h[0], 0);
+      movz(dst, (uint32_t)imm_h[0], 0);
       for (i = 1; i < 4; i++) {
-        movk(dst, (u_int32_t)imm_h[i], (i << 4));
+        movk(dst, (uint32_t)imm_h[i], (i << 4));
       }
     }
   }
 }
 
-void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
+void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
 {
 #ifndef PRODUCT
   {
@@ -1677,7 +1677,7 @@ void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
   } else {
     // we can use MOVZ, MOVN or two calls to MOVK to build up the
     // constant
-    u_int32_t imm_h[2];
+    uint32_t imm_h[2];
     imm_h[0] = imm32 & 0xffff;
     imm_h[1] = ((imm32 >> 16) & 0xffff);
     if (imm_h[0] == 0) {
@@ -5441,7 +5441,7 @@ address MacroAssembler::zero_words(Register ptr, Register cnt)
 // base: Address of a buffer to be zeroed, 8 bytes aligned.
 // cnt: Immediate count in HeapWords.
 #define SmallArraySize (18 * BytesPerLong)
-void MacroAssembler::zero_words(Register base, u_int64_t cnt)
+void MacroAssembler::zero_words(Register base, uint64_t cnt)
 {
   BLOCK_COMMENT("zero_words {");
   int i = cnt & 1;  // store any odd word to start
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index d3d0340dab6..c3f4d16e066 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -448,8 +448,8 @@ class MacroAssembler: public Assembler {
 
   // first two private routines for loading 32 bit or 64 bit constants
 private:
-  void mov_immediate64(Register dst, u_int64_t imm64);
-  void mov_immediate32(Register dst, u_int32_t imm32);
+  void mov_immediate64(Register dst, uint64_t imm64);
+  void mov_immediate32(Register dst, uint32_t imm32);
 
   int push(unsigned int bitset, Register stack);
   int pop(unsigned int bitset, Register stack);
@@ -472,22 +472,22 @@ class MacroAssembler: public Assembler {
 
   inline void mov(Register dst, address addr)
   {
-    mov_immediate64(dst, (u_int64_t)addr);
+    mov_immediate64(dst, (uint64_t)addr);
   }
 
-  inline void mov(Register dst, u_int64_t imm64)
+  inline void mov(Register dst, uint64_t imm64)
   {
     mov_immediate64(dst, imm64);
   }
 
-  inline void movw(Register dst, u_int32_t imm32)
+  inline void movw(Register dst, uint32_t imm32)
   {
     mov_immediate32(dst, imm32);
   }
 
   inline void mov(Register dst, long l)
   {
-    mov(dst, (u_int64_t)l);
+    mov(dst, (uint64_t)l);
   }
 
   inline void mov(Register dst, int i)
@@ -504,7 +504,7 @@ class MacroAssembler: public Assembler {
 
   void movptr(Register r, uintptr_t imm64);
 
-  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);
+  void mov(FloatRegister Vd, SIMD_Arrangement T, uint32_t imm32);
 
   void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
     orr(Vd, T, Vn, Vn);
@@ -1232,7 +1232,7 @@ class MacroAssembler: public Assembler {
                      int elem_size);
 
   void fill_words(Register base, Register cnt, Register value);
-  void zero_words(Register base, u_int64_t cnt);
+  void zero_words(Register base, uint64_t cnt);
   address zero_words(Register ptr, Register cnt);
   void zero_dcache_blocks(Register base, Register cnt);
 
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index be6f3479083..dfd204085d9 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1705,7 +1705,7 @@ void TemplateTable::lcmp()
   Label done;
   __ pop_l(r1);
   __ cmp(r1, r0);
-  __ mov(r0, (u_int64_t)-1L);
+  __ mov(r0, (uint64_t)-1L);
   __ br(Assembler::LT, done);
   // __ mov(r0, 1UL);
   // __ csel(r0, r0, zr, Assembler::NE);
@@ -1729,7 +1729,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result)
   if (unordered_result < 0) {
     // we want -1 for unordered or less than, 0 for equal and 1 for
     // greater than.
-    __ mov(r0, (u_int64_t)-1L);
+    __ mov(r0, (uint64_t)-1L);
     // for FP LT tests less than or unordered
     __ br(Assembler::LT, done);
     // install 0 for EQ otherwise 1