diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 50f53bbb04b62..06fe716a22db0 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -5091,10 +5091,24 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, case ISD::BUILD_PAIR: return false; - case ISD::SETCC: + case ISD::SETCC: { // Integer setcc cannot create undef or poison. - // FIXME: Support FP. - return !Op.getOperand(0).getValueType().isInteger(); + if (Op.getOperand(0).getValueType().isInteger()) + return false; + + // FP compares are more complicated. They can create poison for NaN/infinity + // based on options and flags. The options and flags also cause special + // no-NaN condition codes to be used. Those condition codes may be preserved + // even if the no-NaN flag is dropped somewhere. + ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(2))->get(); + if (((unsigned)CCCode & 0x10U)) + return true; + + const TargetOptions &Options = getTarget().Options; + return Options.NoNaNsFPMath || Options.NoInfsFPMath || + (ConsiderFlags && + (Op->getFlags().hasNoNaNs() || Op->getFlags().hasNoInfs())); + } // Matches hasPoisonGeneratingFlags(). case ISD::ZERO_EXTEND: diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll index 0216d00be2185..d5041c2a7ca78 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll @@ -479,9 +479,8 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind { ; RV32IZFBFMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IZFBFMIN-NEXT: neg a2, a2 ; RV32IZFBFMIN-NEXT: lui a4, 524288 -; RV32IZFBFMIN-NEXT: li a5, 1 ; RV32IZFBFMIN-NEXT: lui a3, 524288 -; RV32IZFBFMIN-NEXT: bne s2, a5, .LBB10_2 +; RV32IZFBFMIN-NEXT: beqz s2, .LBB10_2 ; RV32IZFBFMIN-NEXT: # %bb.1: # %start ; RV32IZFBFMIN-NEXT: mv a3, a1 ; RV32IZFBFMIN-NEXT: .LBB10_2: # %start @@ -525,9 +524,8 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind { ; R32IDZFBFMIN-NEXT: feq.s a2, fs0, fs0 ; R32IDZFBFMIN-NEXT: neg a2, a2 ; R32IDZFBFMIN-NEXT: lui a4, 524288 -; R32IDZFBFMIN-NEXT: li a5, 1 ; R32IDZFBFMIN-NEXT: lui a3, 524288 -; R32IDZFBFMIN-NEXT: bne s2, a5, .LBB10_2 +; R32IDZFBFMIN-NEXT: beqz s2, .LBB10_2 ; R32IDZFBFMIN-NEXT: # %bb.1: # %start ; R32IDZFBFMIN-NEXT: mv a3, a1 ; R32IDZFBFMIN-NEXT: .LBB10_2: # %start @@ -548,50 +546,43 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind { ; ; RV32ID-LABEL: fcvt_l_bf16_sat: ; RV32ID: # %bb.0: # %start -; RV32ID-NEXT: addi sp, sp, -32 -; RV32ID-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; RV32ID-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; RV32ID-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; RV32ID-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; RV32ID-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32ID-NEXT: addi sp, sp, -16 +; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32ID-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill -; RV32ID-NEXT: lui a0, %hi(.LCPI10_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI10_0)(a0) ; RV32ID-NEXT: fmv.x.w a0, fa0 ; RV32ID-NEXT: slli a0, a0, 16 ; RV32ID-NEXT: fmv.w.x fs0, a0 -; RV32ID-NEXT: flt.s s0, fa5, fs0 -; RV32ID-NEXT: neg s1, s0 ; RV32ID-NEXT: lui a0, 913408 ; RV32ID-NEXT: fmv.w.x fa5, a0 -; RV32ID-NEXT: fle.s s2, fa5, fs0 -; RV32ID-NEXT: neg s3, s2 +; RV32ID-NEXT: fle.s s0, fa5, fs0 ; RV32ID-NEXT: fmv.s fa0, fs0 ; RV32ID-NEXT: call __fixsfdi -; RV32ID-NEXT: and a0, s3, a0 -; RV32ID-NEXT: or a0, s1, a0 -; RV32ID-NEXT: feq.s a2, 
fs0, fs0 -; RV32ID-NEXT: neg a2, a2 ; RV32ID-NEXT: lui a4, 524288 -; RV32ID-NEXT: li a5, 1 -; RV32ID-NEXT: lui a3, 524288 -; RV32ID-NEXT: bne s2, a5, .LBB10_2 +; RV32ID-NEXT: lui a2, 524288 +; RV32ID-NEXT: beqz s0, .LBB10_2 ; RV32ID-NEXT: # %bb.1: # %start -; RV32ID-NEXT: mv a3, a1 +; RV32ID-NEXT: mv a2, a1 ; RV32ID-NEXT: .LBB10_2: # %start -; RV32ID-NEXT: and a0, a2, a0 -; RV32ID-NEXT: beqz s0, .LBB10_4 +; RV32ID-NEXT: lui a1, %hi(.LCPI10_0) +; RV32ID-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32ID-NEXT: flt.s a3, fa5, fs0 +; RV32ID-NEXT: beqz a3, .LBB10_4 ; RV32ID-NEXT: # %bb.3: -; RV32ID-NEXT: addi a3, a4, -1 +; RV32ID-NEXT: addi a2, a4, -1 ; RV32ID-NEXT: .LBB10_4: # %start -; RV32ID-NEXT: and a1, a2, a3 -; RV32ID-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; RV32ID-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; RV32ID-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; RV32ID-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; RV32ID-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32ID-NEXT: feq.s a1, fs0, fs0 +; RV32ID-NEXT: neg a4, a1 +; RV32ID-NEXT: and a1, a4, a2 +; RV32ID-NEXT: neg a2, a3 +; RV32ID-NEXT: neg a3, s0 +; RV32ID-NEXT: and a0, a3, a0 +; RV32ID-NEXT: or a0, a2, a0 +; RV32ID-NEXT: and a0, a4, a0 +; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32ID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32ID-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload -; RV32ID-NEXT: addi sp, sp, 32 +; RV32ID-NEXT: addi sp, sp, 16 ; RV32ID-NEXT: ret ; ; CHECK64ZFBFMIN-LABEL: fcvt_l_bf16_sat: @@ -675,8 +666,7 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind { ; CHECK32ZFBFMIN-NEXT: neg s0, a0 ; CHECK32ZFBFMIN-NEXT: fmv.w.x fa5, zero ; CHECK32ZFBFMIN-NEXT: fle.s a0, fa5, fa0 -; CHECK32ZFBFMIN-NEXT: xori a0, a0, 1 -; CHECK32ZFBFMIN-NEXT: addi s1, a0, -1 +; CHECK32ZFBFMIN-NEXT: neg s1, a0 ; CHECK32ZFBFMIN-NEXT: call __fixunssfdi ; CHECK32ZFBFMIN-NEXT: and a0, s1, a0 ; CHECK32ZFBFMIN-NEXT: or a0, s0, a0 @@ -703,8 +693,7 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind { ; RV32ID-NEXT: neg s0, a0 ; RV32ID-NEXT: fmv.w.x fa5, zero ; RV32ID-NEXT: fle.s a0, fa5, fa0 -; RV32ID-NEXT: xori a0, a0, 1 -; RV32ID-NEXT: addi s1, a0, -1 +; RV32ID-NEXT: neg s1, a0 ; RV32ID-NEXT: call __fixunssfdi ; RV32ID-NEXT: and a0, s1, a0 ; RV32ID-NEXT: or a0, s0, a0 diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll index 93cc32e76af4a..3700a18bafc61 100644 --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -1034,8 +1034,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -1062,27 +1061,28 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; RV32IZFINXZDINX-NEXT: call __fixunsdfdi ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI14_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI14_0)(a3) -; 
RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI14_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI14_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI14_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI14_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll index ff2d8e0063007..7cdf18e2fea9c 100644 --- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll @@ -218,8 +218,7 @@ define i64 @test_floor_ui64(double %x) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -246,28 +245,33 @@ define i64 @test_floor_ui64(double %x) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: call floor ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI3_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI3_0)(a3) -; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI3_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI3_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI3_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; 
RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; @@ -494,8 +498,7 @@ define i64 @test_ceil_ui64(double %x) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -522,28 +525,33 @@ define i64 @test_ceil_ui64(double %x) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: call ceil ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI7_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI7_0)(a3) -; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI7_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI7_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI7_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; @@ -770,8 +778,7 @@ define i64 @test_trunc_ui64(double %x) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -798,28 +805,33 @@ define i64 @test_trunc_ui64(double %x) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: call trunc ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: sw s0, 
8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI11_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI11_0)(a3) -; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI11_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI11_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI11_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; @@ -1046,8 +1058,7 @@ define i64 @test_round_ui64(double %x) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -1074,28 +1085,33 @@ define i64 @test_round_ui64(double %x) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: call round ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI15_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI15_0)(a3) -; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI15_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI15_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI15_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: 
lw ra, 28(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; @@ -1322,8 +1338,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -1350,28 +1365,33 @@ define i64 @test_roundeven_ui64(double %x) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: call roundeven ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI19_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI19_0)(a3) -; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI19_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI19_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI19_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; @@ -1598,8 +1618,7 @@ define i64 @test_rint_ui64(double %x) nounwind { ; RV32IFD-NEXT: neg s0, a0 ; RV32IFD-NEXT: fcvt.d.w fa5, zero ; RV32IFD-NEXT: fle.d a0, fa5, fa0 -; RV32IFD-NEXT: xori a0, a0, 1 -; RV32IFD-NEXT: addi s1, a0, -1 +; RV32IFD-NEXT: neg s1, a0 ; RV32IFD-NEXT: call __fixunsdfdi ; RV32IFD-NEXT: and a0, s1, a0 ; RV32IFD-NEXT: or a0, s0, a0 @@ -1626,28 +1645,33 @@ define i64 @test_rint_ui64(double %x) nounwind { ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32IZFINXZDINX-NEXT: call rint ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) -; 
RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 -; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI23_0) -; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI23_0)(a3) -; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI23_0+4)(a3) -; RV32IZFINXZDINX-NEXT: xori a2, a2, 1 -; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 -; RV32IZFINXZDINX-NEXT: and a0, a2, a0 -; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 -; RV32IZFINXZDINX-NEXT: neg a3, a3 -; RV32IZFINXZDINX-NEXT: or a0, a3, a0 -; RV32IZFINXZDINX-NEXT: and a1, a2, a1 -; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI23_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI23_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI23_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 ; RV32IZFINXZDINX-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll index 1a0e4e1829115..9fb78d4c4d521 100644 --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -615,47 +615,42 @@ define i64 @fcvt_l_s(float %a) nounwind { define i64 @fcvt_l_s_sat(float %a) nounwind { ; RV32IF-LABEL: fcvt_l_s_sat: ; RV32IF: # %bb.0: # %start -; RV32IF-NEXT: addi sp, sp, -32 -; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill -; RV32IF-NEXT: sw s0, 24(sp) # 4-byte Folded Spill -; RV32IF-NEXT: sw s1, 20(sp) # 4-byte Folded Spill -; RV32IF-NEXT: sw s2, 16(sp) # 4-byte Folded Spill -; RV32IF-NEXT: sw s3, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI12_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a0) +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 0(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 -; RV32IF-NEXT: flt.s s0, fa5, fa0 -; RV32IF-NEXT: neg s1, s0 ; RV32IF-NEXT: lui a0, 913408 ; RV32IF-NEXT: fmv.w.x fa5, a0 -; RV32IF-NEXT: fle.s s2, fa5, fa0 -; RV32IF-NEXT: neg s3, s2 +; RV32IF-NEXT: fle.s s0, fa5, fa0 +; RV32IF-NEXT: neg s1, s0 ; RV32IF-NEXT: call __fixsfdi -; RV32IF-NEXT: and a0, s3, a0 -; RV32IF-NEXT: or a0, s1, a0 +; RV32IF-NEXT: lui a2, %hi(.LCPI12_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a2) +; RV32IF-NEXT: and a0, s1, a0 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 +; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 -; RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: li a5, 1 +; RV32IF-NEXT: lui a5, 524288 ; RV32IF-NEXT: lui a3, 524288 -; RV32IF-NEXT: bne s2, a5, .LBB12_2 +; RV32IF-NEXT: beqz s0, .LBB12_2 ; RV32IF-NEXT: # %bb.1: # %start ; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB12_2: # %start ; RV32IF-NEXT: and a0, a2, a0 -; 
RV32IF-NEXT: beqz s0, .LBB12_4 +; RV32IF-NEXT: beqz a4, .LBB12_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: addi a3, a4, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB12_4: # %start ; RV32IF-NEXT: and a1, a2, a3 -; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload -; RV32IF-NEXT: lw s0, 24(sp) # 4-byte Folded Reload -; RV32IF-NEXT: lw s1, 20(sp) # 4-byte Folded Reload -; RV32IF-NEXT: lw s2, 16(sp) # 4-byte Folded Reload -; RV32IF-NEXT: lw s3, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 32 +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_l_s_sat: @@ -683,24 +678,23 @@ define i64 @fcvt_l_s_sat(float %a) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI12_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI12_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB12_2 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB12_2 ; RV32IZFINX-NEXT: # %bb.1: # %start -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB12_2: # %start ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB12_4 +; RV32IZFINX-NEXT: beqz a4, .LBB12_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB12_4: # %start -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -882,8 +876,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.w.x fa5, zero ; RV32IF-NEXT: fle.s a0, fa5, fa0 -; RV32IF-NEXT: xori a0, a0, 1 -; RV32IF-NEXT: addi s1, a0, -1 +; RV32IF-NEXT: neg s1, a0 ; RV32IF-NEXT: call __fixunssfdi ; RV32IF-NEXT: and a0, s1, a0 ; RV32IF-NEXT: or a0, s0, a0 @@ -915,8 +908,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind { ; RV32IZFINX-NEXT: flt.s a1, a1, a0 ; RV32IZFINX-NEXT: neg s0, a1 ; RV32IZFINX-NEXT: fle.s a1, zero, a0 -; RV32IZFINX-NEXT: xori a1, a1, 1 -; RV32IZFINX-NEXT: addi s1, a1, -1 +; RV32IZFINX-NEXT: neg s1, a1 ; RV32IZFINX-NEXT: call __fixunssfdi ; RV32IZFINX-NEXT: and a0, s1, a0 ; RV32IZFINX-NEXT: or a0, s0, a0 diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll index f91aac11876d4..c72e69c92a132 100644 --- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll @@ -59,24 +59,23 @@ define i64 @test_floor_si64(float %x) nounwind { ; RV32IF-NEXT: lui a2, %hi(.LCPI1_0) ; RV32IF-NEXT: flw fa5, %lo(.LCPI1_0)(a2) ; RV32IF-NEXT: and a0, s1, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a2, a3 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 ; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: li a6, 1 -; 
RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: bne s0, a6, .LBB1_4 +; RV32IF-NEXT: lui a3, 524288 +; RV32IF-NEXT: beqz s0, .LBB1_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB1_4: ; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: beqz a3, .LBB1_6 +; RV32IF-NEXT: beqz a4, .LBB1_6 ; RV32IF-NEXT: # %bb.5: -; RV32IF-NEXT: addi a4, a5, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB1_6: -; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: and a1, a2, a3 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -118,24 +117,23 @@ define i64 @test_floor_si64(float %x) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI1_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB1_4 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB1_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB1_4: ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB1_6 +; RV32IZFINX-NEXT: beqz a4, .LBB1_6 ; RV32IZFINX-NEXT: # %bb.5: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB1_6: -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -184,7 +182,8 @@ define i64 @test_floor_ui64(float %x) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 ; RV32IF-NEXT: lui a0, 307200 ; RV32IF-NEXT: fmv.w.x fa5, a0 @@ -196,22 +195,22 @@ define i64 @test_floor_ui64(float %x) nounwind { ; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn ; RV32IF-NEXT: fsgnj.s fs0, fa5, fs0 ; RV32IF-NEXT: .LBB3_2: +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: fle.s a0, fa5, fs0 +; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: fmv.w.x fa5, zero -; RV32IF-NEXT: fle.s a2, fa5, fs0 -; RV32IF-NEXT: lui a3, %hi(.LCPI3_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI3_0)(a3) -; RV32IF-NEXT: xori a2, a2, 1 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: or a0, a3, a0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: or a1, a3, a1 +; RV32IF-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI3_0)(a2) +; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: flt.s a2, fa5, fs0 +; RV32IF-NEXT: neg a2, a2 +; RV32IF-NEXT: or a0, a2, a0 +; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: or a1, a2, a1 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; @@ 
-229,6 +228,7 @@ define i64 @test_floor_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, -16 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: mv s0, a0 ; RV32IZFINX-NEXT: lui a0, 307200 ; RV32IZFINX-NEXT: fabs.s a1, s0 @@ -239,21 +239,21 @@ define i64 @test_floor_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rdn ; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0 ; RV32IZFINX-NEXT: .LBB3_2: +; RV32IZFINX-NEXT: fle.s a0, zero, s0 +; RV32IZFINX-NEXT: neg s1, a0 ; RV32IZFINX-NEXT: mv a0, s0 ; RV32IZFINX-NEXT: call __fixunssfdi -; RV32IZFINX-NEXT: fle.s a2, zero, s0 -; RV32IZFINX-NEXT: lui a3, %hi(.LCPI3_0) -; RV32IZFINX-NEXT: lw a3, %lo(.LCPI3_0)(a3) -; RV32IZFINX-NEXT: xori a2, a2, 1 -; RV32IZFINX-NEXT: addi a2, a2, -1 -; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: flt.s a3, a3, s0 -; RV32IZFINX-NEXT: neg a3, a3 -; RV32IZFINX-NEXT: or a0, a3, a0 -; RV32IZFINX-NEXT: and a1, a2, a1 -; RV32IZFINX-NEXT: or a1, a3, a1 +; RV32IZFINX-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZFINX-NEXT: lw a2, %lo(.LCPI3_0)(a2) +; RV32IZFINX-NEXT: and a0, s1, a0 +; RV32IZFINX-NEXT: flt.s a2, a2, s0 +; RV32IZFINX-NEXT: neg a2, a2 +; RV32IZFINX-NEXT: or a0, a2, a0 +; RV32IZFINX-NEXT: and a1, s1, a1 +; RV32IZFINX-NEXT: or a1, a2, a1 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; @@ -321,24 +321,23 @@ define i64 @test_ceil_si64(float %x) nounwind { ; RV32IF-NEXT: lui a2, %hi(.LCPI5_0) ; RV32IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2) ; RV32IF-NEXT: and a0, s1, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a2, a3 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 ; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: li a6, 1 -; RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: bne s0, a6, .LBB5_4 +; RV32IF-NEXT: lui a3, 524288 +; RV32IF-NEXT: beqz s0, .LBB5_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB5_4: ; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: beqz a3, .LBB5_6 +; RV32IF-NEXT: beqz a4, .LBB5_6 ; RV32IF-NEXT: # %bb.5: -; RV32IF-NEXT: addi a4, a5, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB5_6: -; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: and a1, a2, a3 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -380,24 +379,23 @@ define i64 @test_ceil_si64(float %x) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI5_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI5_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB5_4 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB5_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB5_4: ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB5_6 +; RV32IZFINX-NEXT: beqz a4, 
.LBB5_6 ; RV32IZFINX-NEXT: # %bb.5: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB5_6: -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -446,7 +444,8 @@ define i64 @test_ceil_ui64(float %x) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 ; RV32IF-NEXT: lui a0, 307200 ; RV32IF-NEXT: fmv.w.x fa5, a0 @@ -458,22 +457,22 @@ define i64 @test_ceil_ui64(float %x) nounwind { ; RV32IF-NEXT: fcvt.s.w fa5, a0, rup ; RV32IF-NEXT: fsgnj.s fs0, fa5, fs0 ; RV32IF-NEXT: .LBB7_2: +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: fle.s a0, fa5, fs0 +; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: fmv.w.x fa5, zero -; RV32IF-NEXT: fle.s a2, fa5, fs0 -; RV32IF-NEXT: lui a3, %hi(.LCPI7_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI7_0)(a3) -; RV32IF-NEXT: xori a2, a2, 1 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: or a0, a3, a0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: or a1, a3, a1 +; RV32IF-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI7_0)(a2) +; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: flt.s a2, fa5, fs0 +; RV32IF-NEXT: neg a2, a2 +; RV32IF-NEXT: or a0, a2, a0 +; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: or a1, a2, a1 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; @@ -491,6 +490,7 @@ define i64 @test_ceil_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, -16 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: mv s0, a0 ; RV32IZFINX-NEXT: lui a0, 307200 ; RV32IZFINX-NEXT: fabs.s a1, s0 @@ -501,21 +501,21 @@ define i64 @test_ceil_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rup ; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0 ; RV32IZFINX-NEXT: .LBB7_2: +; RV32IZFINX-NEXT: fle.s a0, zero, s0 +; RV32IZFINX-NEXT: neg s1, a0 ; RV32IZFINX-NEXT: mv a0, s0 ; RV32IZFINX-NEXT: call __fixunssfdi -; RV32IZFINX-NEXT: fle.s a2, zero, s0 -; RV32IZFINX-NEXT: lui a3, %hi(.LCPI7_0) -; RV32IZFINX-NEXT: lw a3, %lo(.LCPI7_0)(a3) -; RV32IZFINX-NEXT: xori a2, a2, 1 -; RV32IZFINX-NEXT: addi a2, a2, -1 -; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: flt.s a3, a3, s0 -; RV32IZFINX-NEXT: neg a3, a3 -; RV32IZFINX-NEXT: or a0, a3, a0 -; RV32IZFINX-NEXT: and a1, a2, a1 -; RV32IZFINX-NEXT: or a1, a3, a1 +; RV32IZFINX-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZFINX-NEXT: lw a2, %lo(.LCPI7_0)(a2) +; RV32IZFINX-NEXT: and a0, s1, a0 +; RV32IZFINX-NEXT: flt.s a2, a2, s0 +; RV32IZFINX-NEXT: neg a2, a2 +; RV32IZFINX-NEXT: or a0, a2, a0 +; RV32IZFINX-NEXT: and a1, s1, a1 +; RV32IZFINX-NEXT: or a1, a2, a1 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: lw s1, 
4(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; @@ -583,24 +583,23 @@ define i64 @test_trunc_si64(float %x) nounwind { ; RV32IF-NEXT: lui a2, %hi(.LCPI9_0) ; RV32IF-NEXT: flw fa5, %lo(.LCPI9_0)(a2) ; RV32IF-NEXT: and a0, s1, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a2, a3 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 ; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: li a6, 1 -; RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: bne s0, a6, .LBB9_4 +; RV32IF-NEXT: lui a3, 524288 +; RV32IF-NEXT: beqz s0, .LBB9_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB9_4: ; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: beqz a3, .LBB9_6 +; RV32IF-NEXT: beqz a4, .LBB9_6 ; RV32IF-NEXT: # %bb.5: -; RV32IF-NEXT: addi a4, a5, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB9_6: -; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: and a1, a2, a3 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -642,24 +641,23 @@ define i64 @test_trunc_si64(float %x) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI9_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI9_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB9_4 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB9_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB9_4: ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB9_6 +; RV32IZFINX-NEXT: beqz a4, .LBB9_6 ; RV32IZFINX-NEXT: # %bb.5: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB9_6: -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -708,7 +706,8 @@ define i64 @test_trunc_ui64(float %x) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 ; RV32IF-NEXT: lui a0, 307200 ; RV32IF-NEXT: fmv.w.x fa5, a0 @@ -720,22 +719,22 @@ define i64 @test_trunc_ui64(float %x) nounwind { ; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz ; RV32IF-NEXT: fsgnj.s fs0, fa5, fs0 ; RV32IF-NEXT: .LBB11_2: +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: fle.s a0, fa5, fs0 +; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: fmv.w.x fa5, zero -; RV32IF-NEXT: fle.s a2, fa5, fs0 -; RV32IF-NEXT: lui a3, %hi(.LCPI11_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI11_0)(a3) -; RV32IF-NEXT: xori a2, a2, 1 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: or a0, a3, a0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: or a1, 
a3, a1 +; RV32IF-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI11_0)(a2) +; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: flt.s a2, fa5, fs0 +; RV32IF-NEXT: neg a2, a2 +; RV32IF-NEXT: or a0, a2, a0 +; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: or a1, a2, a1 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; @@ -753,6 +752,7 @@ define i64 @test_trunc_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, -16 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: mv s0, a0 ; RV32IZFINX-NEXT: lui a0, 307200 ; RV32IZFINX-NEXT: fabs.s a1, s0 @@ -763,21 +763,21 @@ define i64 @test_trunc_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rtz ; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0 ; RV32IZFINX-NEXT: .LBB11_2: +; RV32IZFINX-NEXT: fle.s a0, zero, s0 +; RV32IZFINX-NEXT: neg s1, a0 ; RV32IZFINX-NEXT: mv a0, s0 ; RV32IZFINX-NEXT: call __fixunssfdi -; RV32IZFINX-NEXT: fle.s a2, zero, s0 -; RV32IZFINX-NEXT: lui a3, %hi(.LCPI11_0) -; RV32IZFINX-NEXT: lw a3, %lo(.LCPI11_0)(a3) -; RV32IZFINX-NEXT: xori a2, a2, 1 -; RV32IZFINX-NEXT: addi a2, a2, -1 -; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: flt.s a3, a3, s0 -; RV32IZFINX-NEXT: neg a3, a3 -; RV32IZFINX-NEXT: or a0, a3, a0 -; RV32IZFINX-NEXT: and a1, a2, a1 -; RV32IZFINX-NEXT: or a1, a3, a1 +; RV32IZFINX-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZFINX-NEXT: lw a2, %lo(.LCPI11_0)(a2) +; RV32IZFINX-NEXT: and a0, s1, a0 +; RV32IZFINX-NEXT: flt.s a2, a2, s0 +; RV32IZFINX-NEXT: neg a2, a2 +; RV32IZFINX-NEXT: or a0, a2, a0 +; RV32IZFINX-NEXT: and a1, s1, a1 +; RV32IZFINX-NEXT: or a1, a2, a1 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; @@ -845,24 +845,23 @@ define i64 @test_round_si64(float %x) nounwind { ; RV32IF-NEXT: lui a2, %hi(.LCPI13_0) ; RV32IF-NEXT: flw fa5, %lo(.LCPI13_0)(a2) ; RV32IF-NEXT: and a0, s1, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a2, a3 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 ; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: li a6, 1 -; RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: bne s0, a6, .LBB13_4 +; RV32IF-NEXT: lui a3, 524288 +; RV32IF-NEXT: beqz s0, .LBB13_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB13_4: ; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: beqz a3, .LBB13_6 +; RV32IF-NEXT: beqz a4, .LBB13_6 ; RV32IF-NEXT: # %bb.5: -; RV32IF-NEXT: addi a4, a5, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB13_6: -; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: and a1, a2, a3 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -904,24 +903,23 @@ define i64 @test_round_si64(float %x) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI13_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI13_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; 
RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB13_4 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB13_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB13_4: ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB13_6 +; RV32IZFINX-NEXT: beqz a4, .LBB13_6 ; RV32IZFINX-NEXT: # %bb.5: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB13_6: -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -970,7 +968,8 @@ define i64 @test_round_ui64(float %x) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 ; RV32IF-NEXT: lui a0, 307200 ; RV32IF-NEXT: fmv.w.x fa5, a0 @@ -982,22 +981,22 @@ define i64 @test_round_ui64(float %x) nounwind { ; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm ; RV32IF-NEXT: fsgnj.s fs0, fa5, fs0 ; RV32IF-NEXT: .LBB15_2: +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: fle.s a0, fa5, fs0 +; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: fmv.w.x fa5, zero -; RV32IF-NEXT: fle.s a2, fa5, fs0 -; RV32IF-NEXT: lui a3, %hi(.LCPI15_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI15_0)(a3) -; RV32IF-NEXT: xori a2, a2, 1 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: or a0, a3, a0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: or a1, a3, a1 +; RV32IF-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI15_0)(a2) +; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: flt.s a2, fa5, fs0 +; RV32IF-NEXT: neg a2, a2 +; RV32IF-NEXT: or a0, a2, a0 +; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: or a1, a2, a1 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; @@ -1015,6 +1014,7 @@ define i64 @test_round_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, -16 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: mv s0, a0 ; RV32IZFINX-NEXT: lui a0, 307200 ; RV32IZFINX-NEXT: fabs.s a1, s0 @@ -1025,21 +1025,21 @@ define i64 @test_round_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rmm ; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0 ; RV32IZFINX-NEXT: .LBB15_2: +; RV32IZFINX-NEXT: fle.s a0, zero, s0 +; RV32IZFINX-NEXT: neg s1, a0 ; RV32IZFINX-NEXT: mv a0, s0 ; RV32IZFINX-NEXT: call __fixunssfdi -; RV32IZFINX-NEXT: fle.s a2, zero, s0 -; RV32IZFINX-NEXT: lui a3, %hi(.LCPI15_0) -; RV32IZFINX-NEXT: lw a3, %lo(.LCPI15_0)(a3) -; RV32IZFINX-NEXT: xori a2, a2, 1 -; RV32IZFINX-NEXT: addi a2, a2, -1 -; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: flt.s 
a3, a3, s0 -; RV32IZFINX-NEXT: neg a3, a3 -; RV32IZFINX-NEXT: or a0, a3, a0 -; RV32IZFINX-NEXT: and a1, a2, a1 -; RV32IZFINX-NEXT: or a1, a3, a1 +; RV32IZFINX-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IZFINX-NEXT: lw a2, %lo(.LCPI15_0)(a2) +; RV32IZFINX-NEXT: and a0, s1, a0 +; RV32IZFINX-NEXT: flt.s a2, a2, s0 +; RV32IZFINX-NEXT: neg a2, a2 +; RV32IZFINX-NEXT: or a0, a2, a0 +; RV32IZFINX-NEXT: and a1, s1, a1 +; RV32IZFINX-NEXT: or a1, a2, a1 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; @@ -1107,24 +1107,23 @@ define i64 @test_roundeven_si64(float %x) nounwind { ; RV32IF-NEXT: lui a2, %hi(.LCPI17_0) ; RV32IF-NEXT: flw fa5, %lo(.LCPI17_0)(a2) ; RV32IF-NEXT: and a0, s1, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a2, a3 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 ; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: li a6, 1 -; RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: bne s0, a6, .LBB17_4 +; RV32IF-NEXT: lui a3, 524288 +; RV32IF-NEXT: beqz s0, .LBB17_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB17_4: ; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: beqz a3, .LBB17_6 +; RV32IF-NEXT: beqz a4, .LBB17_6 ; RV32IF-NEXT: # %bb.5: -; RV32IF-NEXT: addi a4, a5, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB17_6: -; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: and a1, a2, a3 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -1166,24 +1165,23 @@ define i64 @test_roundeven_si64(float %x) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI17_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI17_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB17_4 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB17_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB17_4: ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB17_6 +; RV32IZFINX-NEXT: beqz a4, .LBB17_6 ; RV32IZFINX-NEXT: # %bb.5: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB17_6: -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -1232,7 +1230,8 @@ define i64 @test_roundeven_ui64(float %x) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 ; RV32IF-NEXT: lui a0, 307200 ; RV32IF-NEXT: fmv.w.x fa5, a0 @@ -1244,22 +1243,22 @@ define i64 @test_roundeven_ui64(float %x) nounwind { ; RV32IF-NEXT: fcvt.s.w fa5, 
a0, rne ; RV32IF-NEXT: fsgnj.s fs0, fa5, fs0 ; RV32IF-NEXT: .LBB19_2: +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: fle.s a0, fa5, fs0 +; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: fmv.w.x fa5, zero -; RV32IF-NEXT: fle.s a2, fa5, fs0 -; RV32IF-NEXT: lui a3, %hi(.LCPI19_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI19_0)(a3) -; RV32IF-NEXT: xori a2, a2, 1 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: or a0, a3, a0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: or a1, a3, a1 +; RV32IF-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI19_0)(a2) +; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: flt.s a2, fa5, fs0 +; RV32IF-NEXT: neg a2, a2 +; RV32IF-NEXT: or a0, a2, a0 +; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: or a1, a2, a1 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; @@ -1277,6 +1276,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, -16 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: mv s0, a0 ; RV32IZFINX-NEXT: lui a0, 307200 ; RV32IZFINX-NEXT: fabs.s a1, s0 @@ -1287,21 +1287,21 @@ define i64 @test_roundeven_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rne ; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0 ; RV32IZFINX-NEXT: .LBB19_2: +; RV32IZFINX-NEXT: fle.s a0, zero, s0 +; RV32IZFINX-NEXT: neg s1, a0 ; RV32IZFINX-NEXT: mv a0, s0 ; RV32IZFINX-NEXT: call __fixunssfdi -; RV32IZFINX-NEXT: fle.s a2, zero, s0 -; RV32IZFINX-NEXT: lui a3, %hi(.LCPI19_0) -; RV32IZFINX-NEXT: lw a3, %lo(.LCPI19_0)(a3) -; RV32IZFINX-NEXT: xori a2, a2, 1 -; RV32IZFINX-NEXT: addi a2, a2, -1 -; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: flt.s a3, a3, s0 -; RV32IZFINX-NEXT: neg a3, a3 -; RV32IZFINX-NEXT: or a0, a3, a0 -; RV32IZFINX-NEXT: and a1, a2, a1 -; RV32IZFINX-NEXT: or a1, a3, a1 +; RV32IZFINX-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IZFINX-NEXT: lw a2, %lo(.LCPI19_0)(a2) +; RV32IZFINX-NEXT: and a0, s1, a0 +; RV32IZFINX-NEXT: flt.s a2, a2, s0 +; RV32IZFINX-NEXT: neg a2, a2 +; RV32IZFINX-NEXT: or a0, a2, a0 +; RV32IZFINX-NEXT: and a1, s1, a1 +; RV32IZFINX-NEXT: or a1, a2, a1 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; @@ -1369,24 +1369,23 @@ define i64 @test_rint_si64(float %x) nounwind { ; RV32IF-NEXT: lui a2, %hi(.LCPI21_0) ; RV32IF-NEXT: flw fa5, %lo(.LCPI21_0)(a2) ; RV32IF-NEXT: and a0, s1, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a2, a3 +; RV32IF-NEXT: flt.s a4, fa5, fs0 +; RV32IF-NEXT: neg a2, a4 ; RV32IF-NEXT: or a0, a2, a0 ; RV32IF-NEXT: feq.s a2, fs0, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: li a6, 1 -; RV32IF-NEXT: lui a4, 524288 -; RV32IF-NEXT: bne s0, a6, .LBB21_4 +; RV32IF-NEXT: lui a3, 524288 +; RV32IF-NEXT: beqz s0, .LBB21_4 ; RV32IF-NEXT: # %bb.3: -; RV32IF-NEXT: mv a4, a1 +; RV32IF-NEXT: mv a3, a1 ; RV32IF-NEXT: .LBB21_4: ; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: beqz a3, .LBB21_6 +; RV32IF-NEXT: beqz a4, 
.LBB21_6 ; RV32IF-NEXT: # %bb.5: -; RV32IF-NEXT: addi a4, a5, -1 +; RV32IF-NEXT: addi a3, a5, -1 ; RV32IF-NEXT: .LBB21_6: -; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: and a1, a2, a3 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -1428,24 +1427,23 @@ define i64 @test_rint_si64(float %x) nounwind { ; RV32IZFINX-NEXT: lui a2, %hi(.LCPI21_0) ; RV32IZFINX-NEXT: lw a2, %lo(.LCPI21_0)(a2) ; RV32IZFINX-NEXT: and a0, s2, a0 -; RV32IZFINX-NEXT: flt.s a3, a2, s0 -; RV32IZFINX-NEXT: neg a2, a3 +; RV32IZFINX-NEXT: flt.s a4, a2, s0 +; RV32IZFINX-NEXT: neg a2, a4 ; RV32IZFINX-NEXT: or a0, a2, a0 ; RV32IZFINX-NEXT: feq.s a2, s0, s0 ; RV32IZFINX-NEXT: neg a2, a2 ; RV32IZFINX-NEXT: lui a5, 524288 -; RV32IZFINX-NEXT: li a6, 1 -; RV32IZFINX-NEXT: lui a4, 524288 -; RV32IZFINX-NEXT: bne s1, a6, .LBB21_4 +; RV32IZFINX-NEXT: lui a3, 524288 +; RV32IZFINX-NEXT: beqz s1, .LBB21_4 ; RV32IZFINX-NEXT: # %bb.3: -; RV32IZFINX-NEXT: mv a4, a1 +; RV32IZFINX-NEXT: mv a3, a1 ; RV32IZFINX-NEXT: .LBB21_4: ; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: beqz a3, .LBB21_6 +; RV32IZFINX-NEXT: beqz a4, .LBB21_6 ; RV32IZFINX-NEXT: # %bb.5: -; RV32IZFINX-NEXT: addi a4, a5, -1 +; RV32IZFINX-NEXT: addi a3, a5, -1 ; RV32IZFINX-NEXT: .LBB21_6: -; RV32IZFINX-NEXT: and a1, a2, a4 +; RV32IZFINX-NEXT: and a1, a2, a3 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload @@ -1494,7 +1492,8 @@ define i64 @test_rint_ui64(float %x) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.s fs0, fa0 ; RV32IF-NEXT: lui a0, 307200 ; RV32IF-NEXT: fmv.w.x fa5, a0 @@ -1506,22 +1505,22 @@ define i64 @test_rint_ui64(float %x) nounwind { ; RV32IF-NEXT: fcvt.s.w fa5, a0 ; RV32IF-NEXT: fsgnj.s fs0, fa5, fs0 ; RV32IF-NEXT: .LBB23_2: +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: fle.s a0, fa5, fs0 +; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: fmv.w.x fa5, zero -; RV32IF-NEXT: fle.s a2, fa5, fs0 -; RV32IF-NEXT: lui a3, %hi(.LCPI23_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI23_0)(a3) -; RV32IF-NEXT: xori a2, a2, 1 -; RV32IF-NEXT: addi a2, a2, -1 -; RV32IF-NEXT: and a0, a2, a0 -; RV32IF-NEXT: flt.s a3, fa5, fs0 -; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: or a0, a3, a0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: or a1, a3, a1 +; RV32IF-NEXT: lui a2, %hi(.LCPI23_0) +; RV32IF-NEXT: flw fa5, %lo(.LCPI23_0)(a2) +; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: flt.s a2, fa5, fs0 +; RV32IF-NEXT: neg a2, a2 +; RV32IF-NEXT: or a0, a2, a0 +; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: or a1, a2, a1 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; @@ -1539,6 +1538,7 @@ define i64 @test_rint_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, -16 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZFINX-NEXT: mv s0, a0 ; 
RV32IZFINX-NEXT: lui a0, 307200 ; RV32IZFINX-NEXT: fabs.s a1, s0 @@ -1549,21 +1549,21 @@ define i64 @test_rint_ui64(float %x) nounwind { ; RV32IZFINX-NEXT: fcvt.s.w a0, a0 ; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0 ; RV32IZFINX-NEXT: .LBB23_2: +; RV32IZFINX-NEXT: fle.s a0, zero, s0 +; RV32IZFINX-NEXT: neg s1, a0 ; RV32IZFINX-NEXT: mv a0, s0 ; RV32IZFINX-NEXT: call __fixunssfdi -; RV32IZFINX-NEXT: fle.s a2, zero, s0 -; RV32IZFINX-NEXT: lui a3, %hi(.LCPI23_0) -; RV32IZFINX-NEXT: lw a3, %lo(.LCPI23_0)(a3) -; RV32IZFINX-NEXT: xori a2, a2, 1 -; RV32IZFINX-NEXT: addi a2, a2, -1 -; RV32IZFINX-NEXT: and a0, a2, a0 -; RV32IZFINX-NEXT: flt.s a3, a3, s0 -; RV32IZFINX-NEXT: neg a3, a3 -; RV32IZFINX-NEXT: or a0, a3, a0 -; RV32IZFINX-NEXT: and a1, a2, a1 -; RV32IZFINX-NEXT: or a1, a3, a1 +; RV32IZFINX-NEXT: lui a2, %hi(.LCPI23_0) +; RV32IZFINX-NEXT: lw a2, %lo(.LCPI23_0)(a2) +; RV32IZFINX-NEXT: and a0, s1, a0 +; RV32IZFINX-NEXT: flt.s a2, a2, s0 +; RV32IZFINX-NEXT: neg a2, a2 +; RV32IZFINX-NEXT: or a0, a2, a0 +; RV32IZFINX-NEXT: and a1, s1, a1 +; RV32IZFINX-NEXT: or a1, a2, a1 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index 518cd7da2ab77..28ac6e272e11d 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -2168,9 +2168,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: lui a4, 524288 -; RV32IZFH-NEXT: li a5, 1 ; RV32IZFH-NEXT: lui a3, 524288 -; RV32IZFH-NEXT: bne s2, a5, .LBB10_2 +; RV32IZFH-NEXT: beqz s2, .LBB10_2 ; RV32IZFH-NEXT: # %bb.1: # %start ; RV32IZFH-NEXT: mv a3, a1 ; RV32IZFH-NEXT: .LBB10_2: # %start @@ -2223,9 +2222,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IDZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IDZFH-NEXT: neg a2, a2 ; RV32IDZFH-NEXT: lui a4, 524288 -; RV32IDZFH-NEXT: li a5, 1 ; RV32IDZFH-NEXT: lui a3, 524288 -; RV32IDZFH-NEXT: bne s2, a5, .LBB10_2 +; RV32IDZFH-NEXT: beqz s2, .LBB10_2 ; RV32IDZFH-NEXT: # %bb.1: # %start ; RV32IDZFH-NEXT: mv a3, a1 ; RV32IDZFH-NEXT: .LBB10_2: # %start @@ -2277,9 +2275,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZHINX-NEXT: feq.s a2, s0, s0 ; RV32IZHINX-NEXT: neg a2, a2 ; RV32IZHINX-NEXT: lui a4, 524288 -; RV32IZHINX-NEXT: li a5, 1 ; RV32IZHINX-NEXT: lui a3, 524288 -; RV32IZHINX-NEXT: bne s3, a5, .LBB10_2 +; RV32IZHINX-NEXT: beqz s3, .LBB10_2 ; RV32IZHINX-NEXT: # %bb.1: # %start ; RV32IZHINX-NEXT: mv a3, a1 ; RV32IZHINX-NEXT: .LBB10_2: # %start @@ -2331,9 +2328,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZDINXZHINX-NEXT: feq.s a2, s0, s0 ; RV32IZDINXZHINX-NEXT: neg a2, a2 ; RV32IZDINXZHINX-NEXT: lui a4, 524288 -; RV32IZDINXZHINX-NEXT: li a5, 1 ; RV32IZDINXZHINX-NEXT: lui a3, 524288 -; RV32IZDINXZHINX-NEXT: bne s3, a5, .LBB10_2 +; RV32IZDINXZHINX-NEXT: beqz s3, .LBB10_2 ; RV32IZDINXZHINX-NEXT: # %bb.1: # %start ; RV32IZDINXZHINX-NEXT: mv a3, a1 ; RV32IZDINXZHINX-NEXT: .LBB10_2: # %start @@ -2488,9 +2484,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32ID-ILP32-NEXT: feq.s a2, fa5, fa5 ; RV32ID-ILP32-NEXT: neg a2, a2 ; RV32ID-ILP32-NEXT: lui a4, 524288 -; RV32ID-ILP32-NEXT: li a5, 1 ; RV32ID-ILP32-NEXT: lui a3, 524288 -; RV32ID-ILP32-NEXT: bne s2, a5, .LBB10_2 +; RV32ID-ILP32-NEXT: beqz s2, .LBB10_2 ; RV32ID-ILP32-NEXT: # 
%bb.1: # %start ; RV32ID-ILP32-NEXT: mv a3, a1 ; RV32ID-ILP32-NEXT: .LBB10_2: # %start @@ -2548,9 +2543,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32ID-NEXT: feq.s a2, fs0, fs0 ; RV32ID-NEXT: neg a2, a2 ; RV32ID-NEXT: lui a4, 524288 -; RV32ID-NEXT: li a5, 1 ; RV32ID-NEXT: lui a3, 524288 -; RV32ID-NEXT: bne s2, a5, .LBB10_2 +; RV32ID-NEXT: beqz s2, .LBB10_2 ; RV32ID-NEXT: # %bb.1: # %start ; RV32ID-NEXT: mv a3, a1 ; RV32ID-NEXT: .LBB10_2: # %start @@ -2608,9 +2602,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IFZFHMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IFZFHMIN-NEXT: neg a2, a2 ; RV32IFZFHMIN-NEXT: lui a4, 524288 -; RV32IFZFHMIN-NEXT: li a5, 1 ; RV32IFZFHMIN-NEXT: lui a3, 524288 -; RV32IFZFHMIN-NEXT: bne s2, a5, .LBB10_2 +; RV32IFZFHMIN-NEXT: beqz s2, .LBB10_2 ; RV32IFZFHMIN-NEXT: # %bb.1: # %start ; RV32IFZFHMIN-NEXT: mv a3, a1 ; RV32IFZFHMIN-NEXT: .LBB10_2: # %start @@ -2664,9 +2657,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IDZFHMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IDZFHMIN-NEXT: neg a2, a2 ; RV32IDZFHMIN-NEXT: lui a4, 524288 -; RV32IDZFHMIN-NEXT: li a5, 1 ; RV32IDZFHMIN-NEXT: lui a3, 524288 -; RV32IDZFHMIN-NEXT: bne s2, a5, .LBB10_2 +; RV32IDZFHMIN-NEXT: beqz s2, .LBB10_2 ; RV32IDZFHMIN-NEXT: # %bb.1: # %start ; RV32IDZFHMIN-NEXT: mv a3, a1 ; RV32IDZFHMIN-NEXT: .LBB10_2: # %start @@ -2709,9 +2701,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; CHECK32-IZHINXMIN-NEXT: feq.s a2, s0, s0 ; CHECK32-IZHINXMIN-NEXT: neg a2, a2 ; CHECK32-IZHINXMIN-NEXT: lui a4, 524288 -; CHECK32-IZHINXMIN-NEXT: li a5, 1 ; CHECK32-IZHINXMIN-NEXT: lui a3, 524288 -; CHECK32-IZHINXMIN-NEXT: bne s3, a5, .LBB10_2 +; CHECK32-IZHINXMIN-NEXT: beqz s3, .LBB10_2 ; CHECK32-IZHINXMIN-NEXT: # %bb.1: # %start ; CHECK32-IZHINXMIN-NEXT: mv a3, a1 ; CHECK32-IZHINXMIN-NEXT: .LBB10_2: # %start @@ -2764,9 +2755,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a2, s0, s0 ; CHECK32-IZDINXZHINXMIN-NEXT: neg a2, a2 ; CHECK32-IZDINXZHINXMIN-NEXT: lui a4, 524288 -; CHECK32-IZDINXZHINXMIN-NEXT: li a5, 1 ; CHECK32-IZDINXZHINXMIN-NEXT: lui a3, 524288 -; CHECK32-IZDINXZHINXMIN-NEXT: bne s3, a5, .LBB10_2 +; CHECK32-IZDINXZHINXMIN-NEXT: beqz s3, .LBB10_2 ; CHECK32-IZDINXZHINXMIN-NEXT: # %bb.1: # %start ; CHECK32-IZDINXZHINXMIN-NEXT: mv a3, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_2: # %start @@ -2984,8 +2974,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.w.x fa5, zero ; RV32IZFH-NEXT: fle.s a0, fa5, fa0 -; RV32IZFH-NEXT: xori a0, a0, 1 -; RV32IZFH-NEXT: addi s1, a0, -1 +; RV32IZFH-NEXT: neg s1, a0 ; RV32IZFH-NEXT: call __fixunssfdi ; RV32IZFH-NEXT: and a0, s1, a0 ; RV32IZFH-NEXT: or a0, s0, a0 @@ -3019,8 +3008,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IDZFH-NEXT: neg s0, a0 ; RV32IDZFH-NEXT: fmv.w.x fa5, zero ; RV32IDZFH-NEXT: fle.s a0, fa5, fa0 -; RV32IDZFH-NEXT: xori a0, a0, 1 -; RV32IDZFH-NEXT: addi s1, a0, -1 +; RV32IDZFH-NEXT: neg s1, a0 ; RV32IDZFH-NEXT: call __fixunssfdi ; RV32IDZFH-NEXT: and a0, s1, a0 ; RV32IDZFH-NEXT: or a0, s0, a0 @@ -3053,8 +3041,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZHINX-NEXT: flt.s a1, a1, a0 ; RV32IZHINX-NEXT: neg s0, a1 ; RV32IZHINX-NEXT: fle.s a1, zero, a0 -; RV32IZHINX-NEXT: xori a1, a1, 1 -; RV32IZHINX-NEXT: addi s1, a1, -1 +; RV32IZHINX-NEXT: neg s1, a1 ; RV32IZHINX-NEXT: call __fixunssfdi ; RV32IZHINX-NEXT: and a0, s1, a0 ; RV32IZHINX-NEXT: or a0, s0, a0 @@ -3087,8 +3074,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZDINXZHINX-NEXT: flt.s 
a1, a1, a0 ; RV32IZDINXZHINX-NEXT: neg s0, a1 ; RV32IZDINXZHINX-NEXT: fle.s a1, zero, a0 -; RV32IZDINXZHINX-NEXT: xori a1, a1, 1 -; RV32IZDINXZHINX-NEXT: addi s1, a1, -1 +; RV32IZDINXZHINX-NEXT: neg s1, a1 ; RV32IZDINXZHINX-NEXT: call __fixunssfdi ; RV32IZDINXZHINX-NEXT: and a0, s1, a0 ; RV32IZDINXZHINX-NEXT: or a0, s0, a0 @@ -3187,8 +3173,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32ID-ILP32-NEXT: neg s0, a1 ; RV32ID-ILP32-NEXT: fmv.w.x fa5, zero ; RV32ID-ILP32-NEXT: fle.s a1, fa5, fa4 -; RV32ID-ILP32-NEXT: xori a1, a1, 1 -; RV32ID-ILP32-NEXT: addi s1, a1, -1 +; RV32ID-ILP32-NEXT: neg s1, a1 ; RV32ID-ILP32-NEXT: call __fixunssfdi ; RV32ID-ILP32-NEXT: and a0, s1, a0 ; RV32ID-ILP32-NEXT: or a0, s0, a0 @@ -3228,8 +3213,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32ID-NEXT: neg s0, a0 ; RV32ID-NEXT: fmv.w.x fa5, zero ; RV32ID-NEXT: fle.s a0, fa5, fa0 -; RV32ID-NEXT: xori a0, a0, 1 -; RV32ID-NEXT: addi s1, a0, -1 +; RV32ID-NEXT: neg s1, a0 ; RV32ID-NEXT: call __fixunssfdi ; RV32ID-NEXT: and a0, s1, a0 ; RV32ID-NEXT: or a0, s0, a0 @@ -3268,8 +3252,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; CHECK32-IZFHMIN-NEXT: neg s0, a0 ; CHECK32-IZFHMIN-NEXT: fmv.w.x fa5, zero ; CHECK32-IZFHMIN-NEXT: fle.s a0, fa5, fa0 -; CHECK32-IZFHMIN-NEXT: xori a0, a0, 1 -; CHECK32-IZFHMIN-NEXT: addi s1, a0, -1 +; CHECK32-IZFHMIN-NEXT: neg s1, a0 ; CHECK32-IZFHMIN-NEXT: call __fixunssfdi ; CHECK32-IZFHMIN-NEXT: and a0, s1, a0 ; CHECK32-IZFHMIN-NEXT: or a0, s0, a0 @@ -3303,8 +3286,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; CHECK32-IZHINXMIN-NEXT: flt.s a1, a1, a0 ; CHECK32-IZHINXMIN-NEXT: neg s0, a1 ; CHECK32-IZHINXMIN-NEXT: fle.s a1, zero, a0 -; CHECK32-IZHINXMIN-NEXT: xori a1, a1, 1 -; CHECK32-IZHINXMIN-NEXT: addi s1, a1, -1 +; CHECK32-IZHINXMIN-NEXT: neg s1, a1 ; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi ; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0 ; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0 @@ -3338,8 +3320,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a1, a1, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a1, zero, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: xori a1, a1, 1 -; CHECK32-IZDINXZHINXMIN-NEXT: addi s1, a1, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a1 ; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi ; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0 diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll index 647af5f5b8743..dd1115b20225b 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll @@ -120,17 +120,16 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZFH-NEXT: lui a2, %hi(.LCPI1_1) ; RV32IZFH-NEXT: flw fa5, %lo(.LCPI1_1)(a2) ; RV32IZFH-NEXT: and a0, s1, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a2, a3 +; RV32IZFH-NEXT: flt.s a4, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a4 ; RV32IZFH-NEXT: or a0, a2, a0 ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: lui a5, 524288 -; RV32IZFH-NEXT: li a6, 1 -; RV32IZFH-NEXT: lui a4, 524288 -; RV32IZFH-NEXT: bne s0, a6, .LBB1_4 +; RV32IZFH-NEXT: lui a3, 524288 +; RV32IZFH-NEXT: beqz s0, .LBB1_4 ; RV32IZFH-NEXT: # %bb.3: -; RV32IZFH-NEXT: mv a4, a1 +; RV32IZFH-NEXT: mv a3, a1 ; RV32IZFH-NEXT: .LBB1_4: ; RV32IZFH-NEXT: and a0, a2, a0 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -138,11 +137,11 @@ define i64 @test_floor_si64(half %x) nounwind { ; 
RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 -; RV32IZFH-NEXT: beqz a3, .LBB1_6 +; RV32IZFH-NEXT: beqz a4, .LBB1_6 ; RV32IZFH-NEXT: # %bb.5: -; RV32IZFH-NEXT: addi a4, a5, -1 +; RV32IZFH-NEXT: addi a3, a5, -1 ; RV32IZFH-NEXT: .LBB1_6: -; RV32IZFH-NEXT: and a1, a2, a4 +; RV32IZFH-NEXT: and a1, a2, a3 ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: test_floor_si64: @@ -180,17 +179,16 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_1) ; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_1)(a2) ; RV32IZHINX-NEXT: and a0, s2, a0 -; RV32IZHINX-NEXT: flt.s a3, a2, s0 -; RV32IZHINX-NEXT: neg a2, a3 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 ; RV32IZHINX-NEXT: or a0, a2, a0 ; RV32IZHINX-NEXT: feq.s a2, s0, s0 ; RV32IZHINX-NEXT: neg a2, a2 ; RV32IZHINX-NEXT: lui a5, 524288 -; RV32IZHINX-NEXT: li a6, 1 -; RV32IZHINX-NEXT: lui a4, 524288 -; RV32IZHINX-NEXT: bne s1, a6, .LBB1_4 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB1_4 ; RV32IZHINX-NEXT: # %bb.3: -; RV32IZHINX-NEXT: mv a4, a1 +; RV32IZHINX-NEXT: mv a3, a1 ; RV32IZHINX-NEXT: .LBB1_4: ; RV32IZHINX-NEXT: and a0, a2, a0 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -198,11 +196,11 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 -; RV32IZHINX-NEXT: beqz a3, .LBB1_6 +; RV32IZHINX-NEXT: beqz a4, .LBB1_6 ; RV32IZHINX-NEXT: # %bb.5: -; RV32IZHINX-NEXT: addi a4, a5, -1 +; RV32IZHINX-NEXT: addi a3, a5, -1 ; RV32IZHINX-NEXT: .LBB1_6: -; RV32IZHINX-NEXT: and a1, a2, a4 +; RV32IZHINX-NEXT: and a1, a2, a3 ; RV32IZHINX-NEXT: ret ; ; RV64IZHINX-LABEL: test_floor_si64: @@ -253,17 +251,16 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI1_0) ; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI1_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s1, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a2, a3 +; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a4 ; RV32IZFHMIN-NEXT: or a0, a2, a0 ; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: lui a5, 524288 -; RV32IZFHMIN-NEXT: li a6, 1 -; RV32IZFHMIN-NEXT: lui a4, 524288 -; RV32IZFHMIN-NEXT: bne s0, a6, .LBB1_4 +; RV32IZFHMIN-NEXT: lui a3, 524288 +; RV32IZFHMIN-NEXT: beqz s0, .LBB1_4 ; RV32IZFHMIN-NEXT: # %bb.3: -; RV32IZFHMIN-NEXT: mv a4, a1 +; RV32IZFHMIN-NEXT: mv a3, a1 ; RV32IZFHMIN-NEXT: .LBB1_4: ; RV32IZFHMIN-NEXT: and a0, a2, a0 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -271,11 +268,11 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 -; RV32IZFHMIN-NEXT: beqz a3, .LBB1_6 +; RV32IZFHMIN-NEXT: beqz a4, .LBB1_6 ; RV32IZFHMIN-NEXT: # %bb.5: -; RV32IZFHMIN-NEXT: addi a4, a5, -1 +; RV32IZFHMIN-NEXT: addi a3, a5, -1 ; RV32IZFHMIN-NEXT: .LBB1_6: -; RV32IZFHMIN-NEXT: and a1, a2, a4 +; RV32IZFHMIN-NEXT: and a1, a2, a3 ; RV32IZFHMIN-NEXT: ret ; ; RV64IZFHMIN-LABEL: test_floor_si64: @@ -327,17 +324,16 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0) ; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2) ; RV32IZHINXMIN-NEXT: and a0, s2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0 -; RV32IZHINXMIN-NEXT: neg a2, a3 +; 
RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 ; RV32IZHINXMIN-NEXT: or a0, a2, a0 ; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 ; RV32IZHINXMIN-NEXT: neg a2, a2 ; RV32IZHINXMIN-NEXT: lui a5, 524288 -; RV32IZHINXMIN-NEXT: li a6, 1 -; RV32IZHINXMIN-NEXT: lui a4, 524288 -; RV32IZHINXMIN-NEXT: bne s1, a6, .LBB1_4 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB1_4 ; RV32IZHINXMIN-NEXT: # %bb.3: -; RV32IZHINXMIN-NEXT: mv a4, a1 +; RV32IZHINXMIN-NEXT: mv a3, a1 ; RV32IZHINXMIN-NEXT: .LBB1_4: ; RV32IZHINXMIN-NEXT: and a0, a2, a0 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -345,11 +341,11 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 -; RV32IZHINXMIN-NEXT: beqz a3, .LBB1_6 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB1_6 ; RV32IZHINXMIN-NEXT: # %bb.5: -; RV32IZHINXMIN-NEXT: addi a4, a5, -1 +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 ; RV32IZHINXMIN-NEXT: .LBB1_6: -; RV32IZHINXMIN-NEXT: and a1, a2, a4 +; RV32IZHINXMIN-NEXT: and a1, a2, a3 ; RV32IZHINXMIN-NEXT: ret ; ; RV64IZHINXMIN-LABEL: test_floor_si64: @@ -530,24 +526,25 @@ define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZFH-NEXT: .LBB3_2: ; RV32IZFH-NEXT: addi sp, sp, -16 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: fmv.w.x fa5, zero +; RV32IZFH-NEXT: fle.s a0, fa5, fs0 +; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: fmv.w.x fa5, zero -; RV32IZFH-NEXT: fle.s a2, fa5, fs0 -; RV32IZFH-NEXT: lui a3, %hi(.LCPI3_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_1)(a3) -; RV32IZFH-NEXT: xori a2, a2, 1 -; RV32IZFH-NEXT: addi a2, a2, -1 -; RV32IZFH-NEXT: and a0, a2, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a3, a3 -; RV32IZFH-NEXT: or a0, a3, a0 -; RV32IZFH-NEXT: and a1, a2, a1 -; RV32IZFH-NEXT: or a1, a3, a1 +; RV32IZFH-NEXT: lui a2, %hi(.LCPI3_1) +; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_1)(a2) +; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: flt.s a2, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a2 +; RV32IZFH-NEXT: or a0, a2, a0 +; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: or a1, a2, a1 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; @@ -575,22 +572,23 @@ define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZHINX-NEXT: addi sp, sp, -16 ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 ; RV32IZHINX-NEXT: mv a0, s0 ; RV32IZHINX-NEXT: call __fixunssfdi -; RV32IZHINX-NEXT: fle.s a2, zero, s0 -; RV32IZHINX-NEXT: lui a3, %hi(.LCPI3_1) -; RV32IZHINX-NEXT: lw a3, %lo(.LCPI3_1)(a3) -; RV32IZHINX-NEXT: xori a2, a2, 1 -; RV32IZHINX-NEXT: addi a2, a2, -1 -; RV32IZHINX-NEXT: and a0, a2, a0 -; RV32IZHINX-NEXT: flt.s a3, a3, s0 -; RV32IZHINX-NEXT: neg a3, a3 -; RV32IZHINX-NEXT: or a0, a3, a0 -; RV32IZHINX-NEXT: and a1, a2, 
a1 -; RV32IZHINX-NEXT: or a1, a3, a1 +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI3_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI3_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 ; RV32IZHINX-NEXT: ret ; @@ -628,25 +626,26 @@ define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: .LBB3_2: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 ; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, fa5 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero +; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0 +; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero -; RV32IZFHMIN-NEXT: fle.s a2, fa5, fs0 -; RV32IZFHMIN-NEXT: lui a3, %hi(.LCPI3_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a3) -; RV32IZFHMIN-NEXT: xori a2, a2, 1 -; RV32IZFHMIN-NEXT: addi a2, a2, -1 -; RV32IZFHMIN-NEXT: and a0, a2, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a3, a3 -; RV32IZFHMIN-NEXT: or a0, a3, a0 -; RV32IZFHMIN-NEXT: and a1, a2, a1 -; RV32IZFHMIN-NEXT: or a1, a3, a1 +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a2) +; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a2 +; RV32IZFHMIN-NEXT: or a0, a2, a0 +; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: or a1, a2, a1 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFHMIN-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 ; RV32IZFHMIN-NEXT: ret ; @@ -687,23 +686,24 @@ define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: addi sp, sp, -16 ; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 ; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 ; RV32IZHINXMIN-NEXT: mv a0, s0 ; RV32IZHINXMIN-NEXT: call __fixunssfdi -; RV32IZHINXMIN-NEXT: fle.s a2, zero, s0 -; RV32IZHINXMIN-NEXT: lui a3, %hi(.LCPI3_0) -; RV32IZHINXMIN-NEXT: lw a3, %lo(.LCPI3_0)(a3) -; RV32IZHINXMIN-NEXT: xori a2, a2, 1 -; RV32IZHINXMIN-NEXT: addi a2, a2, -1 -; RV32IZHINXMIN-NEXT: and a0, a2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a3, s0 -; RV32IZHINXMIN-NEXT: neg a3, a3 -; RV32IZHINXMIN-NEXT: or a0, a3, a0 -; RV32IZHINXMIN-NEXT: and a1, a2, a1 -; RV32IZHINXMIN-NEXT: or a1, a3, a1 +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI3_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 
; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 ; RV32IZHINXMIN-NEXT: ret ; @@ -836,17 +836,16 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFH-NEXT: lui a2, %hi(.LCPI5_1) ; RV32IZFH-NEXT: flw fa5, %lo(.LCPI5_1)(a2) ; RV32IZFH-NEXT: and a0, s1, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a2, a3 +; RV32IZFH-NEXT: flt.s a4, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a4 ; RV32IZFH-NEXT: or a0, a2, a0 ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: lui a5, 524288 -; RV32IZFH-NEXT: li a6, 1 -; RV32IZFH-NEXT: lui a4, 524288 -; RV32IZFH-NEXT: bne s0, a6, .LBB5_4 +; RV32IZFH-NEXT: lui a3, 524288 +; RV32IZFH-NEXT: beqz s0, .LBB5_4 ; RV32IZFH-NEXT: # %bb.3: -; RV32IZFH-NEXT: mv a4, a1 +; RV32IZFH-NEXT: mv a3, a1 ; RV32IZFH-NEXT: .LBB5_4: ; RV32IZFH-NEXT: and a0, a2, a0 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -854,11 +853,11 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 -; RV32IZFH-NEXT: beqz a3, .LBB5_6 +; RV32IZFH-NEXT: beqz a4, .LBB5_6 ; RV32IZFH-NEXT: # %bb.5: -; RV32IZFH-NEXT: addi a4, a5, -1 +; RV32IZFH-NEXT: addi a3, a5, -1 ; RV32IZFH-NEXT: .LBB5_6: -; RV32IZFH-NEXT: and a1, a2, a4 +; RV32IZFH-NEXT: and a1, a2, a3 ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: test_ceil_si64: @@ -896,17 +895,16 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lui a2, %hi(.LCPI5_1) ; RV32IZHINX-NEXT: lw a2, %lo(.LCPI5_1)(a2) ; RV32IZHINX-NEXT: and a0, s2, a0 -; RV32IZHINX-NEXT: flt.s a3, a2, s0 -; RV32IZHINX-NEXT: neg a2, a3 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 ; RV32IZHINX-NEXT: or a0, a2, a0 ; RV32IZHINX-NEXT: feq.s a2, s0, s0 ; RV32IZHINX-NEXT: neg a2, a2 ; RV32IZHINX-NEXT: lui a5, 524288 -; RV32IZHINX-NEXT: li a6, 1 -; RV32IZHINX-NEXT: lui a4, 524288 -; RV32IZHINX-NEXT: bne s1, a6, .LBB5_4 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB5_4 ; RV32IZHINX-NEXT: # %bb.3: -; RV32IZHINX-NEXT: mv a4, a1 +; RV32IZHINX-NEXT: mv a3, a1 ; RV32IZHINX-NEXT: .LBB5_4: ; RV32IZHINX-NEXT: and a0, a2, a0 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -914,11 +912,11 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 -; RV32IZHINX-NEXT: beqz a3, .LBB5_6 +; RV32IZHINX-NEXT: beqz a4, .LBB5_6 ; RV32IZHINX-NEXT: # %bb.5: -; RV32IZHINX-NEXT: addi a4, a5, -1 +; RV32IZHINX-NEXT: addi a3, a5, -1 ; RV32IZHINX-NEXT: .LBB5_6: -; RV32IZHINX-NEXT: and a1, a2, a4 +; RV32IZHINX-NEXT: and a1, a2, a3 ; RV32IZHINX-NEXT: ret ; ; RV64IZHINX-LABEL: test_ceil_si64: @@ -969,17 +967,16 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI5_0) ; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI5_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s1, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a2, a3 +; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a4 ; RV32IZFHMIN-NEXT: or a0, a2, a0 ; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: lui a5, 524288 -; RV32IZFHMIN-NEXT: li a6, 1 -; RV32IZFHMIN-NEXT: lui a4, 524288 -; RV32IZFHMIN-NEXT: bne s0, a6, .LBB5_4 +; RV32IZFHMIN-NEXT: lui a3, 524288 +; RV32IZFHMIN-NEXT: beqz s0, .LBB5_4 ; 
RV32IZFHMIN-NEXT: # %bb.3: -; RV32IZFHMIN-NEXT: mv a4, a1 +; RV32IZFHMIN-NEXT: mv a3, a1 ; RV32IZFHMIN-NEXT: .LBB5_4: ; RV32IZFHMIN-NEXT: and a0, a2, a0 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -987,11 +984,11 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 -; RV32IZFHMIN-NEXT: beqz a3, .LBB5_6 +; RV32IZFHMIN-NEXT: beqz a4, .LBB5_6 ; RV32IZFHMIN-NEXT: # %bb.5: -; RV32IZFHMIN-NEXT: addi a4, a5, -1 +; RV32IZFHMIN-NEXT: addi a3, a5, -1 ; RV32IZFHMIN-NEXT: .LBB5_6: -; RV32IZFHMIN-NEXT: and a1, a2, a4 +; RV32IZFHMIN-NEXT: and a1, a2, a3 ; RV32IZFHMIN-NEXT: ret ; ; RV64IZFHMIN-LABEL: test_ceil_si64: @@ -1043,17 +1040,16 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI5_0) ; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI5_0)(a2) ; RV32IZHINXMIN-NEXT: and a0, s2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0 -; RV32IZHINXMIN-NEXT: neg a2, a3 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 ; RV32IZHINXMIN-NEXT: or a0, a2, a0 ; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 ; RV32IZHINXMIN-NEXT: neg a2, a2 ; RV32IZHINXMIN-NEXT: lui a5, 524288 -; RV32IZHINXMIN-NEXT: li a6, 1 -; RV32IZHINXMIN-NEXT: lui a4, 524288 -; RV32IZHINXMIN-NEXT: bne s1, a6, .LBB5_4 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB5_4 ; RV32IZHINXMIN-NEXT: # %bb.3: -; RV32IZHINXMIN-NEXT: mv a4, a1 +; RV32IZHINXMIN-NEXT: mv a3, a1 ; RV32IZHINXMIN-NEXT: .LBB5_4: ; RV32IZHINXMIN-NEXT: and a0, a2, a0 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -1061,11 +1057,11 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 -; RV32IZHINXMIN-NEXT: beqz a3, .LBB5_6 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB5_6 ; RV32IZHINXMIN-NEXT: # %bb.5: -; RV32IZHINXMIN-NEXT: addi a4, a5, -1 +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 ; RV32IZHINXMIN-NEXT: .LBB5_6: -; RV32IZHINXMIN-NEXT: and a1, a2, a4 +; RV32IZHINXMIN-NEXT: and a1, a2, a3 ; RV32IZHINXMIN-NEXT: ret ; ; RV64IZHINXMIN-LABEL: test_ceil_si64: @@ -1246,24 +1242,25 @@ define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZFH-NEXT: .LBB7_2: ; RV32IZFH-NEXT: addi sp, sp, -16 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: fmv.w.x fa5, zero +; RV32IZFH-NEXT: fle.s a0, fa5, fs0 +; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: fmv.w.x fa5, zero -; RV32IZFH-NEXT: fle.s a2, fa5, fs0 -; RV32IZFH-NEXT: lui a3, %hi(.LCPI7_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI7_1)(a3) -; RV32IZFH-NEXT: xori a2, a2, 1 -; RV32IZFH-NEXT: addi a2, a2, -1 -; RV32IZFH-NEXT: and a0, a2, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a3, a3 -; RV32IZFH-NEXT: or a0, a3, a0 -; RV32IZFH-NEXT: and a1, a2, a1 -; RV32IZFH-NEXT: or a1, a3, a1 +; RV32IZFH-NEXT: lui a2, %hi(.LCPI7_1) +; RV32IZFH-NEXT: flw fa5, %lo(.LCPI7_1)(a2) +; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: flt.s a2, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a2 +; RV32IZFH-NEXT: or a0, a2, a0 +; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: or a1, a2, a1 ; RV32IZFH-NEXT: lw ra, 12(sp) # 
4-byte Folded Reload -; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; @@ -1291,22 +1288,23 @@ define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZHINX-NEXT: addi sp, sp, -16 ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 ; RV32IZHINX-NEXT: mv a0, s0 ; RV32IZHINX-NEXT: call __fixunssfdi -; RV32IZHINX-NEXT: fle.s a2, zero, s0 -; RV32IZHINX-NEXT: lui a3, %hi(.LCPI7_1) -; RV32IZHINX-NEXT: lw a3, %lo(.LCPI7_1)(a3) -; RV32IZHINX-NEXT: xori a2, a2, 1 -; RV32IZHINX-NEXT: addi a2, a2, -1 -; RV32IZHINX-NEXT: and a0, a2, a0 -; RV32IZHINX-NEXT: flt.s a3, a3, s0 -; RV32IZHINX-NEXT: neg a3, a3 -; RV32IZHINX-NEXT: or a0, a3, a0 -; RV32IZHINX-NEXT: and a1, a2, a1 -; RV32IZHINX-NEXT: or a1, a3, a1 +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI7_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI7_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 ; RV32IZHINX-NEXT: ret ; @@ -1344,25 +1342,26 @@ define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: .LBB7_2: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 ; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, fa5 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero +; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0 +; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero -; RV32IZFHMIN-NEXT: fle.s a2, fa5, fs0 -; RV32IZFHMIN-NEXT: lui a3, %hi(.LCPI7_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI7_0)(a3) -; RV32IZFHMIN-NEXT: xori a2, a2, 1 -; RV32IZFHMIN-NEXT: addi a2, a2, -1 -; RV32IZFHMIN-NEXT: and a0, a2, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a3, a3 -; RV32IZFHMIN-NEXT: or a0, a3, a0 -; RV32IZFHMIN-NEXT: and a1, a2, a1 -; RV32IZFHMIN-NEXT: or a1, a3, a1 +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI7_0)(a2) +; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a2 +; RV32IZFHMIN-NEXT: or a0, a2, a0 +; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: or a1, a2, a1 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFHMIN-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 ; RV32IZFHMIN-NEXT: ret ; @@ -1403,23 +1402,24 @@ define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: addi sp, sp, -16 ; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 
4-byte Folded Spill ; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 ; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 ; RV32IZHINXMIN-NEXT: mv a0, s0 ; RV32IZHINXMIN-NEXT: call __fixunssfdi -; RV32IZHINXMIN-NEXT: fle.s a2, zero, s0 -; RV32IZHINXMIN-NEXT: lui a3, %hi(.LCPI7_0) -; RV32IZHINXMIN-NEXT: lw a3, %lo(.LCPI7_0)(a3) -; RV32IZHINXMIN-NEXT: xori a2, a2, 1 -; RV32IZHINXMIN-NEXT: addi a2, a2, -1 -; RV32IZHINXMIN-NEXT: and a0, a2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a3, s0 -; RV32IZHINXMIN-NEXT: neg a3, a3 -; RV32IZHINXMIN-NEXT: or a0, a3, a0 -; RV32IZHINXMIN-NEXT: and a1, a2, a1 -; RV32IZHINXMIN-NEXT: or a1, a3, a1 +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI7_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 ; RV32IZHINXMIN-NEXT: ret ; @@ -1552,17 +1552,16 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFH-NEXT: lui a2, %hi(.LCPI9_1) ; RV32IZFH-NEXT: flw fa5, %lo(.LCPI9_1)(a2) ; RV32IZFH-NEXT: and a0, s1, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a2, a3 +; RV32IZFH-NEXT: flt.s a4, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a4 ; RV32IZFH-NEXT: or a0, a2, a0 ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: lui a5, 524288 -; RV32IZFH-NEXT: li a6, 1 -; RV32IZFH-NEXT: lui a4, 524288 -; RV32IZFH-NEXT: bne s0, a6, .LBB9_4 +; RV32IZFH-NEXT: lui a3, 524288 +; RV32IZFH-NEXT: beqz s0, .LBB9_4 ; RV32IZFH-NEXT: # %bb.3: -; RV32IZFH-NEXT: mv a4, a1 +; RV32IZFH-NEXT: mv a3, a1 ; RV32IZFH-NEXT: .LBB9_4: ; RV32IZFH-NEXT: and a0, a2, a0 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -1570,11 +1569,11 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 -; RV32IZFH-NEXT: beqz a3, .LBB9_6 +; RV32IZFH-NEXT: beqz a4, .LBB9_6 ; RV32IZFH-NEXT: # %bb.5: -; RV32IZFH-NEXT: addi a4, a5, -1 +; RV32IZFH-NEXT: addi a3, a5, -1 ; RV32IZFH-NEXT: .LBB9_6: -; RV32IZFH-NEXT: and a1, a2, a4 +; RV32IZFH-NEXT: and a1, a2, a3 ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: test_trunc_si64: @@ -1612,17 +1611,16 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lui a2, %hi(.LCPI9_1) ; RV32IZHINX-NEXT: lw a2, %lo(.LCPI9_1)(a2) ; RV32IZHINX-NEXT: and a0, s2, a0 -; RV32IZHINX-NEXT: flt.s a3, a2, s0 -; RV32IZHINX-NEXT: neg a2, a3 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 ; RV32IZHINX-NEXT: or a0, a2, a0 ; RV32IZHINX-NEXT: feq.s a2, s0, s0 ; RV32IZHINX-NEXT: neg a2, a2 ; RV32IZHINX-NEXT: lui a5, 524288 -; RV32IZHINX-NEXT: li a6, 1 -; RV32IZHINX-NEXT: lui a4, 524288 -; RV32IZHINX-NEXT: bne s1, a6, .LBB9_4 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB9_4 ; RV32IZHINX-NEXT: # %bb.3: -; RV32IZHINX-NEXT: mv a4, a1 +; RV32IZHINX-NEXT: mv a3, a1 ; RV32IZHINX-NEXT: .LBB9_4: ; RV32IZHINX-NEXT: and a0, a2, a0 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -1630,11 +1628,11 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; 
RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 -; RV32IZHINX-NEXT: beqz a3, .LBB9_6 +; RV32IZHINX-NEXT: beqz a4, .LBB9_6 ; RV32IZHINX-NEXT: # %bb.5: -; RV32IZHINX-NEXT: addi a4, a5, -1 +; RV32IZHINX-NEXT: addi a3, a5, -1 ; RV32IZHINX-NEXT: .LBB9_6: -; RV32IZHINX-NEXT: and a1, a2, a4 +; RV32IZHINX-NEXT: and a1, a2, a3 ; RV32IZHINX-NEXT: ret ; ; RV64IZHINX-LABEL: test_trunc_si64: @@ -1685,17 +1683,16 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI9_0) ; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI9_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s1, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a2, a3 +; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a4 ; RV32IZFHMIN-NEXT: or a0, a2, a0 ; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: lui a5, 524288 -; RV32IZFHMIN-NEXT: li a6, 1 -; RV32IZFHMIN-NEXT: lui a4, 524288 -; RV32IZFHMIN-NEXT: bne s0, a6, .LBB9_4 +; RV32IZFHMIN-NEXT: lui a3, 524288 +; RV32IZFHMIN-NEXT: beqz s0, .LBB9_4 ; RV32IZFHMIN-NEXT: # %bb.3: -; RV32IZFHMIN-NEXT: mv a4, a1 +; RV32IZFHMIN-NEXT: mv a3, a1 ; RV32IZFHMIN-NEXT: .LBB9_4: ; RV32IZFHMIN-NEXT: and a0, a2, a0 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -1703,11 +1700,11 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 -; RV32IZFHMIN-NEXT: beqz a3, .LBB9_6 +; RV32IZFHMIN-NEXT: beqz a4, .LBB9_6 ; RV32IZFHMIN-NEXT: # %bb.5: -; RV32IZFHMIN-NEXT: addi a4, a5, -1 +; RV32IZFHMIN-NEXT: addi a3, a5, -1 ; RV32IZFHMIN-NEXT: .LBB9_6: -; RV32IZFHMIN-NEXT: and a1, a2, a4 +; RV32IZFHMIN-NEXT: and a1, a2, a3 ; RV32IZFHMIN-NEXT: ret ; ; RV64IZFHMIN-LABEL: test_trunc_si64: @@ -1759,17 +1756,16 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI9_0) ; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI9_0)(a2) ; RV32IZHINXMIN-NEXT: and a0, s2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0 -; RV32IZHINXMIN-NEXT: neg a2, a3 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 ; RV32IZHINXMIN-NEXT: or a0, a2, a0 ; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 ; RV32IZHINXMIN-NEXT: neg a2, a2 ; RV32IZHINXMIN-NEXT: lui a5, 524288 -; RV32IZHINXMIN-NEXT: li a6, 1 -; RV32IZHINXMIN-NEXT: lui a4, 524288 -; RV32IZHINXMIN-NEXT: bne s1, a6, .LBB9_4 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB9_4 ; RV32IZHINXMIN-NEXT: # %bb.3: -; RV32IZHINXMIN-NEXT: mv a4, a1 +; RV32IZHINXMIN-NEXT: mv a3, a1 ; RV32IZHINXMIN-NEXT: .LBB9_4: ; RV32IZHINXMIN-NEXT: and a0, a2, a0 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -1777,11 +1773,11 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 -; RV32IZHINXMIN-NEXT: beqz a3, .LBB9_6 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB9_6 ; RV32IZHINXMIN-NEXT: # %bb.5: -; RV32IZHINXMIN-NEXT: addi a4, a5, -1 +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 ; RV32IZHINXMIN-NEXT: .LBB9_6: -; RV32IZHINXMIN-NEXT: and a1, a2, a4 +; RV32IZHINXMIN-NEXT: and a1, a2, a3 ; RV32IZHINXMIN-NEXT: ret ; ; RV64IZHINXMIN-LABEL: test_trunc_si64: @@ -1962,24 +1958,25 @@ define i64 @test_trunc_ui64(half %x) nounwind { ; RV32IZFH-NEXT: .LBB11_2: ; RV32IZFH-NEXT: addi sp, sp, -16 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; 
RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: fmv.w.x fa5, zero +; RV32IZFH-NEXT: fle.s a0, fa5, fs0 +; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: fmv.w.x fa5, zero -; RV32IZFH-NEXT: fle.s a2, fa5, fs0 -; RV32IZFH-NEXT: lui a3, %hi(.LCPI11_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI11_1)(a3) -; RV32IZFH-NEXT: xori a2, a2, 1 -; RV32IZFH-NEXT: addi a2, a2, -1 -; RV32IZFH-NEXT: and a0, a2, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a3, a3 -; RV32IZFH-NEXT: or a0, a3, a0 -; RV32IZFH-NEXT: and a1, a2, a1 -; RV32IZFH-NEXT: or a1, a3, a1 +; RV32IZFH-NEXT: lui a2, %hi(.LCPI11_1) +; RV32IZFH-NEXT: flw fa5, %lo(.LCPI11_1)(a2) +; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: flt.s a2, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a2 +; RV32IZFH-NEXT: or a0, a2, a0 +; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: or a1, a2, a1 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; @@ -2007,22 +2004,23 @@ define i64 @test_trunc_ui64(half %x) nounwind { ; RV32IZHINX-NEXT: addi sp, sp, -16 ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 ; RV32IZHINX-NEXT: mv a0, s0 ; RV32IZHINX-NEXT: call __fixunssfdi -; RV32IZHINX-NEXT: fle.s a2, zero, s0 -; RV32IZHINX-NEXT: lui a3, %hi(.LCPI11_1) -; RV32IZHINX-NEXT: lw a3, %lo(.LCPI11_1)(a3) -; RV32IZHINX-NEXT: xori a2, a2, 1 -; RV32IZHINX-NEXT: addi a2, a2, -1 -; RV32IZHINX-NEXT: and a0, a2, a0 -; RV32IZHINX-NEXT: flt.s a3, a3, s0 -; RV32IZHINX-NEXT: neg a3, a3 -; RV32IZHINX-NEXT: or a0, a3, a0 -; RV32IZHINX-NEXT: and a1, a2, a1 -; RV32IZHINX-NEXT: or a1, a3, a1 +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI11_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI11_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 ; RV32IZHINX-NEXT: ret ; @@ -2060,25 +2058,26 @@ define i64 @test_trunc_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: .LBB11_2: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 ; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, fa5 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero +; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0 +; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero -; RV32IZFHMIN-NEXT: fle.s a2, fa5, fs0 -; RV32IZFHMIN-NEXT: lui a3, %hi(.LCPI11_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI11_0)(a3) -; RV32IZFHMIN-NEXT: xori a2, a2, 1 -; 
RV32IZFHMIN-NEXT: addi a2, a2, -1 -; RV32IZFHMIN-NEXT: and a0, a2, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a3, a3 -; RV32IZFHMIN-NEXT: or a0, a3, a0 -; RV32IZFHMIN-NEXT: and a1, a2, a1 -; RV32IZFHMIN-NEXT: or a1, a3, a1 +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI11_0)(a2) +; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a2 +; RV32IZFHMIN-NEXT: or a0, a2, a0 +; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: or a1, a2, a1 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFHMIN-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 ; RV32IZFHMIN-NEXT: ret ; @@ -2119,23 +2118,24 @@ define i64 @test_trunc_ui64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: addi sp, sp, -16 ; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 ; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 ; RV32IZHINXMIN-NEXT: mv a0, s0 ; RV32IZHINXMIN-NEXT: call __fixunssfdi -; RV32IZHINXMIN-NEXT: fle.s a2, zero, s0 -; RV32IZHINXMIN-NEXT: lui a3, %hi(.LCPI11_0) -; RV32IZHINXMIN-NEXT: lw a3, %lo(.LCPI11_0)(a3) -; RV32IZHINXMIN-NEXT: xori a2, a2, 1 -; RV32IZHINXMIN-NEXT: addi a2, a2, -1 -; RV32IZHINXMIN-NEXT: and a0, a2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a3, s0 -; RV32IZHINXMIN-NEXT: neg a3, a3 -; RV32IZHINXMIN-NEXT: or a0, a3, a0 -; RV32IZHINXMIN-NEXT: and a1, a2, a1 -; RV32IZHINXMIN-NEXT: or a1, a3, a1 +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI11_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 ; RV32IZHINXMIN-NEXT: ret ; @@ -2268,17 +2268,16 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZFH-NEXT: lui a2, %hi(.LCPI13_1) ; RV32IZFH-NEXT: flw fa5, %lo(.LCPI13_1)(a2) ; RV32IZFH-NEXT: and a0, s1, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a2, a3 +; RV32IZFH-NEXT: flt.s a4, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a4 ; RV32IZFH-NEXT: or a0, a2, a0 ; RV32IZFH-NEXT: feq.s a2, fs0, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: lui a5, 524288 -; RV32IZFH-NEXT: li a6, 1 -; RV32IZFH-NEXT: lui a4, 524288 -; RV32IZFH-NEXT: bne s0, a6, .LBB13_4 +; RV32IZFH-NEXT: lui a3, 524288 +; RV32IZFH-NEXT: beqz s0, .LBB13_4 ; RV32IZFH-NEXT: # %bb.3: -; RV32IZFH-NEXT: mv a4, a1 +; RV32IZFH-NEXT: mv a3, a1 ; RV32IZFH-NEXT: .LBB13_4: ; RV32IZFH-NEXT: and a0, a2, a0 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -2286,11 +2285,11 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 -; RV32IZFH-NEXT: beqz a3, .LBB13_6 +; RV32IZFH-NEXT: beqz a4, .LBB13_6 ; RV32IZFH-NEXT: # %bb.5: -; RV32IZFH-NEXT: addi a4, a5, -1 +; RV32IZFH-NEXT: addi a3, a5, -1 ; 
RV32IZFH-NEXT: .LBB13_6: -; RV32IZFH-NEXT: and a1, a2, a4 +; RV32IZFH-NEXT: and a1, a2, a3 ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: test_round_si64: @@ -2328,17 +2327,16 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lui a2, %hi(.LCPI13_1) ; RV32IZHINX-NEXT: lw a2, %lo(.LCPI13_1)(a2) ; RV32IZHINX-NEXT: and a0, s2, a0 -; RV32IZHINX-NEXT: flt.s a3, a2, s0 -; RV32IZHINX-NEXT: neg a2, a3 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 ; RV32IZHINX-NEXT: or a0, a2, a0 ; RV32IZHINX-NEXT: feq.s a2, s0, s0 ; RV32IZHINX-NEXT: neg a2, a2 ; RV32IZHINX-NEXT: lui a5, 524288 -; RV32IZHINX-NEXT: li a6, 1 -; RV32IZHINX-NEXT: lui a4, 524288 -; RV32IZHINX-NEXT: bne s1, a6, .LBB13_4 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB13_4 ; RV32IZHINX-NEXT: # %bb.3: -; RV32IZHINX-NEXT: mv a4, a1 +; RV32IZHINX-NEXT: mv a3, a1 ; RV32IZHINX-NEXT: .LBB13_4: ; RV32IZHINX-NEXT: and a0, a2, a0 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -2346,11 +2344,11 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINX-NEXT: addi sp, sp, 16 -; RV32IZHINX-NEXT: beqz a3, .LBB13_6 +; RV32IZHINX-NEXT: beqz a4, .LBB13_6 ; RV32IZHINX-NEXT: # %bb.5: -; RV32IZHINX-NEXT: addi a4, a5, -1 +; RV32IZHINX-NEXT: addi a3, a5, -1 ; RV32IZHINX-NEXT: .LBB13_6: -; RV32IZHINX-NEXT: and a1, a2, a4 +; RV32IZHINX-NEXT: and a1, a2, a3 ; RV32IZHINX-NEXT: ret ; ; RV64IZHINX-LABEL: test_round_si64: @@ -2401,17 +2399,16 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI13_0) ; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI13_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s1, a0 -; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0 -; RV32IZFHMIN-NEXT: neg a2, a3 +; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0 +; RV32IZFHMIN-NEXT: neg a2, a4 ; RV32IZFHMIN-NEXT: or a0, a2, a0 ; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: lui a5, 524288 -; RV32IZFHMIN-NEXT: li a6, 1 -; RV32IZFHMIN-NEXT: lui a4, 524288 -; RV32IZFHMIN-NEXT: bne s0, a6, .LBB13_4 +; RV32IZFHMIN-NEXT: lui a3, 524288 +; RV32IZFHMIN-NEXT: beqz s0, .LBB13_4 ; RV32IZFHMIN-NEXT: # %bb.3: -; RV32IZFHMIN-NEXT: mv a4, a1 +; RV32IZFHMIN-NEXT: mv a3, a1 ; RV32IZFHMIN-NEXT: .LBB13_4: ; RV32IZFHMIN-NEXT: and a0, a2, a0 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -2419,11 +2416,11 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload ; RV32IZFHMIN-NEXT: addi sp, sp, 16 -; RV32IZFHMIN-NEXT: beqz a3, .LBB13_6 +; RV32IZFHMIN-NEXT: beqz a4, .LBB13_6 ; RV32IZFHMIN-NEXT: # %bb.5: -; RV32IZFHMIN-NEXT: addi a4, a5, -1 +; RV32IZFHMIN-NEXT: addi a3, a5, -1 ; RV32IZFHMIN-NEXT: .LBB13_6: -; RV32IZFHMIN-NEXT: and a1, a2, a4 +; RV32IZFHMIN-NEXT: and a1, a2, a3 ; RV32IZFHMIN-NEXT: ret ; ; RV64IZFHMIN-LABEL: test_round_si64: @@ -2475,17 +2472,16 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI13_0) ; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI13_0)(a2) ; RV32IZHINXMIN-NEXT: and a0, s2, a0 -; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0 -; RV32IZHINXMIN-NEXT: neg a2, a3 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 ; RV32IZHINXMIN-NEXT: or a0, a2, a0 ; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 ; RV32IZHINXMIN-NEXT: neg a2, a2 ; RV32IZHINXMIN-NEXT: lui a5, 524288 -; RV32IZHINXMIN-NEXT: li a6, 1 -; RV32IZHINXMIN-NEXT: lui a4, 
524288 -; RV32IZHINXMIN-NEXT: bne s1, a6, .LBB13_4 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB13_4 ; RV32IZHINXMIN-NEXT: # %bb.3: -; RV32IZHINXMIN-NEXT: mv a4, a1 +; RV32IZHINXMIN-NEXT: mv a3, a1 ; RV32IZHINXMIN-NEXT: .LBB13_4: ; RV32IZHINXMIN-NEXT: and a0, a2, a0 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -2493,11 +2489,11 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload ; RV32IZHINXMIN-NEXT: addi sp, sp, 16 -; RV32IZHINXMIN-NEXT: beqz a3, .LBB13_6 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB13_6 ; RV32IZHINXMIN-NEXT: # %bb.5: -; RV32IZHINXMIN-NEXT: addi a4, a5, -1 +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 ; RV32IZHINXMIN-NEXT: .LBB13_6: -; RV32IZHINXMIN-NEXT: and a1, a2, a4 +; RV32IZHINXMIN-NEXT: and a1, a2, a3 ; RV32IZHINXMIN-NEXT: ret ; ; RV64IZHINXMIN-LABEL: test_round_si64: @@ -2678,24 +2674,25 @@ define i64 @test_round_ui64(half %x) nounwind { ; RV32IZFH-NEXT: .LBB15_2: ; RV32IZFH-NEXT: addi sp, sp, -16 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: fmv.w.x fa5, zero +; RV32IZFH-NEXT: fle.s a0, fa5, fs0 +; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: fmv.w.x fa5, zero -; RV32IZFH-NEXT: fle.s a2, fa5, fs0 -; RV32IZFH-NEXT: lui a3, %hi(.LCPI15_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI15_1)(a3) -; RV32IZFH-NEXT: xori a2, a2, 1 -; RV32IZFH-NEXT: addi a2, a2, -1 -; RV32IZFH-NEXT: and a0, a2, a0 -; RV32IZFH-NEXT: flt.s a3, fa5, fs0 -; RV32IZFH-NEXT: neg a3, a3 -; RV32IZFH-NEXT: or a0, a3, a0 -; RV32IZFH-NEXT: and a1, a2, a1 -; RV32IZFH-NEXT: or a1, a3, a1 +; RV32IZFH-NEXT: lui a2, %hi(.LCPI15_1) +; RV32IZFH-NEXT: flw fa5, %lo(.LCPI15_1)(a2) +; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: flt.s a2, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a2 +; RV32IZFH-NEXT: or a0, a2, a0 +; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: or a1, a2, a1 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; @@ -2723,22 +2720,23 @@ define i64 @test_round_ui64(half %x) nounwind { ; RV32IZHINX-NEXT: addi sp, sp, -16 ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 ; RV32IZHINX-NEXT: mv a0, s0 ; RV32IZHINX-NEXT: call __fixunssfdi -; RV32IZHINX-NEXT: fle.s a2, zero, s0 -; RV32IZHINX-NEXT: lui a3, %hi(.LCPI15_1) -; RV32IZHINX-NEXT: lw a3, %lo(.LCPI15_1)(a3) -; RV32IZHINX-NEXT: xori a2, a2, 1 -; RV32IZHINX-NEXT: addi a2, a2, -1 -; RV32IZHINX-NEXT: and a0, a2, a0 -; RV32IZHINX-NEXT: flt.s a3, a3, s0 -; RV32IZHINX-NEXT: neg a3, a3 -; RV32IZHINX-NEXT: or a0, a3, a0 -; RV32IZHINX-NEXT: and a1, a2, a1 -; RV32IZHINX-NEXT: or a1, a3, a1 +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI15_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI15_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, 
a0
+; RV32IZHINX-NEXT: and a1, s1, a1
+; RV32IZHINX-NEXT: or a1, a2, a1
 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: addi sp, sp, 16
 ; RV32IZHINX-NEXT: ret
 ;
@@ -2776,25 +2774,26 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: .LBB15_2:
 ; RV32IZFHMIN-NEXT: addi sp, sp, -16
 ; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5
 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
+; RV32IZFHMIN-NEXT: neg s0, a0
 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero
-; RV32IZFHMIN-NEXT: fle.s a2, fa5, fs0
-; RV32IZFHMIN-NEXT: lui a3, %hi(.LCPI15_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI15_0)(a3)
-; RV32IZFHMIN-NEXT: xori a2, a2, 1
-; RV32IZFHMIN-NEXT: addi a2, a2, -1
-; RV32IZFHMIN-NEXT: and a0, a2, a0
-; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a3, a3
-; RV32IZFHMIN-NEXT: or a0, a3, a0
-; RV32IZFHMIN-NEXT: and a1, a2, a1
-; RV32IZFHMIN-NEXT: or a1, a3, a1
+; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI15_0)
+; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI15_0)(a2)
+; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a2
+; RV32IZFHMIN-NEXT: or a0, a2, a0
+; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: or a1, a2, a1
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFHMIN-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: addi sp, sp, 16
 ; RV32IZFHMIN-NEXT: ret
 ;
@@ -2835,23 +2834,24 @@ define i64 @test_round_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
+; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
+; RV32IZHINXMIN-NEXT: neg s1, a0
 ; RV32IZHINXMIN-NEXT: mv a0, s0
 ; RV32IZHINXMIN-NEXT: call __fixunssfdi
-; RV32IZHINXMIN-NEXT: fle.s a2, zero, s0
-; RV32IZHINXMIN-NEXT: lui a3, %hi(.LCPI15_0)
-; RV32IZHINXMIN-NEXT: lw a3, %lo(.LCPI15_0)(a3)
-; RV32IZHINXMIN-NEXT: xori a2, a2, 1
-; RV32IZHINXMIN-NEXT: addi a2, a2, -1
-; RV32IZHINXMIN-NEXT: and a0, a2, a0
-; RV32IZHINXMIN-NEXT: flt.s a3, a3, s0
-; RV32IZHINXMIN-NEXT: neg a3, a3
-; RV32IZHINXMIN-NEXT: or a0, a3, a0
-; RV32IZHINXMIN-NEXT: and a1, a2, a1
-; RV32IZHINXMIN-NEXT: or a1, a3, a1
+; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI15_0)
+; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI15_0)(a2)
+; RV32IZHINXMIN-NEXT: and a0, s1, a0
+; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a2
+; RV32IZHINXMIN-NEXT: or a0, a2, a0
+; RV32IZHINXMIN-NEXT: and a1, s1, a1
+; RV32IZHINXMIN-NEXT: or a1, a2, a1
 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT: ret
 ;
@@ -2984,17 +2984,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFH-NEXT: lui a2, %hi(.LCPI17_1)
 ; RV32IZFH-NEXT: flw fa5, %lo(.LCPI17_1)(a2)
 ; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a3, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a3
+; RV32IZFH-NEXT: flt.s a4, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a4
 ; RV32IZFH-NEXT: or a0, a2, a0
 ; RV32IZFH-NEXT: feq.s a2, fs0, fs0
 ; RV32IZFH-NEXT: neg a2, a2
 ; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: li a6, 1
-; RV32IZFH-NEXT: lui a4, 524288
-; RV32IZFH-NEXT: bne s0, a6, .LBB17_4
+; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: beqz s0, .LBB17_4
 ; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a4, a1
+; RV32IZFH-NEXT: mv a3, a1
 ; RV32IZFH-NEXT: .LBB17_4:
 ; RV32IZFH-NEXT: and a0, a2, a0
 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3002,11 +3001,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a3, .LBB17_6
+; RV32IZFH-NEXT: beqz a4, .LBB17_6
 ; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a4, a5, -1
+; RV32IZFH-NEXT: addi a3, a5, -1
 ; RV32IZFH-NEXT: .LBB17_6:
-; RV32IZFH-NEXT: and a1, a2, a4
+; RV32IZFH-NEXT: and a1, a2, a3
 ; RV32IZFH-NEXT: ret
 ;
 ; RV64IZFH-LABEL: test_roundeven_si64:
@@ -3044,17 +3043,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT: lui a2, %hi(.LCPI17_1)
 ; RV32IZHINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
 ; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a3, a2, s0
-; RV32IZHINX-NEXT: neg a2, a3
+; RV32IZHINX-NEXT: flt.s a4, a2, s0
+; RV32IZHINX-NEXT: neg a2, a4
 ; RV32IZHINX-NEXT: or a0, a2, a0
 ; RV32IZHINX-NEXT: feq.s a2, s0, s0
 ; RV32IZHINX-NEXT: neg a2, a2
 ; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: li a6, 1
-; RV32IZHINX-NEXT: lui a4, 524288
-; RV32IZHINX-NEXT: bne s1, a6, .LBB17_4
+; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: beqz s1, .LBB17_4
 ; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a4, a1
+; RV32IZHINX-NEXT: mv a3, a1
 ; RV32IZHINX-NEXT: .LBB17_4:
 ; RV32IZHINX-NEXT: and a0, a2, a0
 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3062,11 +3060,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a3, .LBB17_6
+; RV32IZHINX-NEXT: beqz a4, .LBB17_6
 ; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a4, a5, -1
+; RV32IZHINX-NEXT: addi a3, a5, -1
 ; RV32IZHINX-NEXT: .LBB17_6:
-; RV32IZHINX-NEXT: and a1, a2, a4
+; RV32IZHINX-NEXT: and a1, a2, a3
 ; RV32IZHINX-NEXT: ret
 ;
 ; RV64IZHINX-LABEL: test_roundeven_si64:
@@ -3117,17 +3115,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI17_0)
 ; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
 ; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a3
+; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a4
 ; RV32IZFHMIN-NEXT: or a0, a2, a0
 ; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
 ; RV32IZFHMIN-NEXT: neg a2, a2
 ; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: li a6, 1
-; RV32IZFHMIN-NEXT: lui a4, 524288
-; RV32IZFHMIN-NEXT: bne s0, a6, .LBB17_4
+; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: beqz s0, .LBB17_4
 ; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a4, a1
+; RV32IZFHMIN-NEXT: mv a3, a1
 ; RV32IZFHMIN-NEXT: .LBB17_4:
 ; RV32IZFHMIN-NEXT: and a0, a2, a0
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3135,11 +3132,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a3, .LBB17_6
+; RV32IZFHMIN-NEXT: beqz a4, .LBB17_6
 ; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a4, a5, -1
+; RV32IZFHMIN-NEXT: addi a3, a5, -1
 ; RV32IZFHMIN-NEXT: .LBB17_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a4
+; RV32IZFHMIN-NEXT: and a1, a2, a3
 ; RV32IZFHMIN-NEXT: ret
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si64:
@@ -3191,17 +3188,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI17_0)
 ; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI17_0)(a2)
 ; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a3
+; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a4
 ; RV32IZHINXMIN-NEXT: or a0, a2, a0
 ; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
 ; RV32IZHINXMIN-NEXT: neg a2, a2
 ; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: li a6, 1
-; RV32IZHINXMIN-NEXT: lui a4, 524288
-; RV32IZHINXMIN-NEXT: bne s1, a6, .LBB17_4
+; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: beqz s1, .LBB17_4
 ; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a4, a1
+; RV32IZHINXMIN-NEXT: mv a3, a1
 ; RV32IZHINXMIN-NEXT: .LBB17_4:
 ; RV32IZHINXMIN-NEXT: and a0, a2, a0
 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3209,11 +3205,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a3, .LBB17_6
+; RV32IZHINXMIN-NEXT: beqz a4, .LBB17_6
 ; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a4, a5, -1
+; RV32IZHINXMIN-NEXT: addi a3, a5, -1
 ; RV32IZHINXMIN-NEXT: .LBB17_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a4
+; RV32IZHINXMIN-NEXT: and a1, a2, a3
 ; RV32IZHINXMIN-NEXT: ret
 ;
 ; RV64IZHINXMIN-LABEL: test_roundeven_si64:
@@ -3394,24 +3390,25 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT: .LBB19_2:
 ; RV32IZFH-NEXT: addi sp, sp, -16
 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0
+; RV32IZFH-NEXT: fmv.w.x fa5, zero
+; RV32IZFH-NEXT: fle.s a0, fa5, fs0
+; RV32IZFH-NEXT: neg s0, a0
 ; RV32IZFH-NEXT: fmv.s fa0, fs0
 ; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: fmv.w.x fa5, zero
-; RV32IZFH-NEXT: fle.s a2, fa5, fs0
-; RV32IZFH-NEXT: lui a3, %hi(.LCPI19_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI19_1)(a3)
-; RV32IZFH-NEXT: xori a2, a2, 1
-; RV32IZFH-NEXT: addi a2, a2, -1
-; RV32IZFH-NEXT: and a0, a2, a0
-; RV32IZFH-NEXT: flt.s a3, fa5, fs0
-; RV32IZFH-NEXT: neg a3, a3
-; RV32IZFH-NEXT: or a0, a3, a0
-; RV32IZFH-NEXT: and a1, a2, a1
-; RV32IZFH-NEXT: or a1, a3, a1
+; RV32IZFH-NEXT: lui a2, %hi(.LCPI19_1)
+; RV32IZFH-NEXT: flw fa5, %lo(.LCPI19_1)(a2)
+; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: flt.s a2, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a2
+; RV32IZFH-NEXT: or a0, a2, a0
+; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: or a1, a2, a1
 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT: addi sp, sp, 16
 ; RV32IZFH-NEXT: ret
 ;
@@ -3439,22 +3436,23 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT: addi sp, sp, -16
 ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT: fcvt.s.h s0, a0
+; RV32IZHINX-NEXT: fle.s a0, zero, s0
+; RV32IZHINX-NEXT: neg s1, a0
 ; RV32IZHINX-NEXT: mv a0, s0
 ; RV32IZHINX-NEXT: call __fixunssfdi
-; RV32IZHINX-NEXT: fle.s a2, zero, s0
-; RV32IZHINX-NEXT: lui a3, %hi(.LCPI19_1)
-; RV32IZHINX-NEXT: lw a3, %lo(.LCPI19_1)(a3)
-; RV32IZHINX-NEXT: xori a2, a2, 1
-; RV32IZHINX-NEXT: addi a2, a2, -1
-; RV32IZHINX-NEXT: and a0, a2, a0
-; RV32IZHINX-NEXT: flt.s a3, a3, s0
-; RV32IZHINX-NEXT: neg a3, a3
-; RV32IZHINX-NEXT: or a0, a3, a0
-; RV32IZHINX-NEXT: and a1, a2, a1
-; RV32IZHINX-NEXT: or a1, a3, a1
+; RV32IZHINX-NEXT: lui a2, %hi(.LCPI19_1)
+; RV32IZHINX-NEXT: lw a2, %lo(.LCPI19_1)(a2)
+; RV32IZHINX-NEXT: and a0, s1, a0
+; RV32IZHINX-NEXT: flt.s a2, a2, s0
+; RV32IZHINX-NEXT: neg a2, a2
+; RV32IZHINX-NEXT: or a0, a2, a0
+; RV32IZHINX-NEXT: and a1, s1, a1
+; RV32IZHINX-NEXT: or a1, a2, a1
 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: addi sp, sp, 16
 ; RV32IZHINX-NEXT: ret
 ;
@@ -3492,25 +3490,26 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: .LBB19_2:
 ; RV32IZFHMIN-NEXT: addi sp, sp, -16
 ; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5
 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
+; RV32IZFHMIN-NEXT: neg s0, a0
 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero
-; RV32IZFHMIN-NEXT: fle.s a2, fa5, fs0
-; RV32IZFHMIN-NEXT: lui a3, %hi(.LCPI19_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI19_0)(a3)
-; RV32IZFHMIN-NEXT: xori a2, a2, 1
-; RV32IZFHMIN-NEXT: addi a2, a2, -1
-; RV32IZFHMIN-NEXT: and a0, a2, a0
-; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a3, a3
-; RV32IZFHMIN-NEXT: or a0, a3, a0
-; RV32IZFHMIN-NEXT: and a1, a2, a1
-; RV32IZFHMIN-NEXT: or a1, a3, a1
+; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI19_0)
+; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI19_0)(a2)
+; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a2
+; RV32IZFHMIN-NEXT: or a0, a2, a0
+; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: or a1, a2, a1
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFHMIN-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: addi sp, sp, 16
 ; RV32IZFHMIN-NEXT: ret
 ;
@@ -3551,23 +3550,24 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
+; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
+; RV32IZHINXMIN-NEXT: neg s1, a0
 ; RV32IZHINXMIN-NEXT: mv a0, s0
 ; RV32IZHINXMIN-NEXT: call __fixunssfdi
-; RV32IZHINXMIN-NEXT: fle.s a2, zero, s0
-; RV32IZHINXMIN-NEXT: lui a3, %hi(.LCPI19_0)
-; RV32IZHINXMIN-NEXT: lw a3, %lo(.LCPI19_0)(a3)
-; RV32IZHINXMIN-NEXT: xori a2, a2, 1
-; RV32IZHINXMIN-NEXT: addi a2, a2, -1
-; RV32IZHINXMIN-NEXT: and a0, a2, a0
-; RV32IZHINXMIN-NEXT: flt.s a3, a3, s0
-; RV32IZHINXMIN-NEXT: neg a3, a3
-; RV32IZHINXMIN-NEXT: or a0, a3, a0
-; RV32IZHINXMIN-NEXT: and a1, a2, a1
-; RV32IZHINXMIN-NEXT: or a1, a3, a1
+; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI19_0)
+; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI19_0)(a2)
+; RV32IZHINXMIN-NEXT: and a0, s1, a0
+; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a2
+; RV32IZHINXMIN-NEXT: or a0, a2, a0
+; RV32IZHINXMIN-NEXT: and a1, s1, a1
+; RV32IZHINXMIN-NEXT: or a1, a2, a1
 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT: ret
 ;
@@ -3700,17 +3700,16 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZFH-NEXT: lui a2, %hi(.LCPI21_1)
 ; RV32IZFH-NEXT: flw fa5, %lo(.LCPI21_1)(a2)
 ; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a3, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a3
+; RV32IZFH-NEXT: flt.s a4, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a4
 ; RV32IZFH-NEXT: or a0, a2, a0
 ; RV32IZFH-NEXT: feq.s a2, fs0, fs0
 ; RV32IZFH-NEXT: neg a2, a2
 ; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: li a6, 1
-; RV32IZFH-NEXT: lui a4, 524288
-; RV32IZFH-NEXT: bne s0, a6, .LBB21_4
+; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: beqz s0, .LBB21_4
 ; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a4, a1
+; RV32IZFH-NEXT: mv a3, a1
 ; RV32IZFH-NEXT: .LBB21_4:
 ; RV32IZFH-NEXT: and a0, a2, a0
 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3718,11 +3717,11 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a3, .LBB21_6
+; RV32IZFH-NEXT: beqz a4, .LBB21_6
 ; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a4, a5, -1
+; RV32IZFH-NEXT: addi a3, a5, -1
 ; RV32IZFH-NEXT: .LBB21_6:
-; RV32IZFH-NEXT: and a1, a2, a4
+; RV32IZFH-NEXT: and a1, a2, a3
 ; RV32IZFH-NEXT: ret
 ;
 ; RV64IZFH-LABEL: test_rint_si64:
@@ -3760,17 +3759,16 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT: lui a2, %hi(.LCPI21_1)
 ; RV32IZHINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
 ; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a3, a2, s0
-; RV32IZHINX-NEXT: neg a2, a3
+; RV32IZHINX-NEXT: flt.s a4, a2, s0
+; RV32IZHINX-NEXT: neg a2, a4
 ; RV32IZHINX-NEXT: or a0, a2, a0
 ; RV32IZHINX-NEXT: feq.s a2, s0, s0
 ; RV32IZHINX-NEXT: neg a2, a2
 ; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: li a6, 1
-; RV32IZHINX-NEXT: lui a4, 524288
-; RV32IZHINX-NEXT: bne s1, a6, .LBB21_4
+; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: beqz s1, .LBB21_4
 ; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a4, a1
+; RV32IZHINX-NEXT: mv a3, a1
 ; RV32IZHINX-NEXT: .LBB21_4:
 ; RV32IZHINX-NEXT: and a0, a2, a0
 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3778,11 +3776,11 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a3, .LBB21_6
+; RV32IZHINX-NEXT: beqz a4, .LBB21_6
 ; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a4, a5, -1
+; RV32IZHINX-NEXT: addi a3, a5, -1
 ; RV32IZHINX-NEXT: .LBB21_6:
-; RV32IZHINX-NEXT: and a1, a2, a4
+; RV32IZHINX-NEXT: and a1, a2, a3
 ; RV32IZHINX-NEXT: ret
 ;
 ; RV64IZHINX-LABEL: test_rint_si64:
@@ -3833,17 +3831,16 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI21_0)
 ; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
 ; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a3
+; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a4
 ; RV32IZFHMIN-NEXT: or a0, a2, a0
 ; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
 ; RV32IZFHMIN-NEXT: neg a2, a2
 ; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: li a6, 1
-; RV32IZFHMIN-NEXT: lui a4, 524288
-; RV32IZFHMIN-NEXT: bne s0, a6, .LBB21_4
+; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: beqz s0, .LBB21_4
 ; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a4, a1
+; RV32IZFHMIN-NEXT: mv a3, a1
 ; RV32IZFHMIN-NEXT: .LBB21_4:
 ; RV32IZFHMIN-NEXT: and a0, a2, a0
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3851,11 +3848,11 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a3, .LBB21_6
+; RV32IZFHMIN-NEXT: beqz a4, .LBB21_6
 ; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a4, a5, -1
+; RV32IZFHMIN-NEXT: addi a3, a5, -1
 ; RV32IZFHMIN-NEXT: .LBB21_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a4
+; RV32IZFHMIN-NEXT: and a1, a2, a3
 ; RV32IZFHMIN-NEXT: ret
 ;
 ; RV64IZFHMIN-LABEL: test_rint_si64:
@@ -3907,17 +3904,16 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI21_0)
 ; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI21_0)(a2)
 ; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a3
+; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a4
 ; RV32IZHINXMIN-NEXT: or a0, a2, a0
 ; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
 ; RV32IZHINXMIN-NEXT: neg a2, a2
 ; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: li a6, 1
-; RV32IZHINXMIN-NEXT: lui a4, 524288
-; RV32IZHINXMIN-NEXT: bne s1, a6, .LBB21_4
+; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: beqz s1, .LBB21_4
 ; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a4, a1
+; RV32IZHINXMIN-NEXT: mv a3, a1
 ; RV32IZHINXMIN-NEXT: .LBB21_4:
 ; RV32IZHINXMIN-NEXT: and a0, a2, a0
 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3925,11 +3921,11 @@ define i64 @test_rint_si64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a3, .LBB21_6
+; RV32IZHINXMIN-NEXT: beqz a4, .LBB21_6
 ; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a4, a5, -1
+; RV32IZHINXMIN-NEXT: addi a3, a5, -1
 ; RV32IZHINXMIN-NEXT: .LBB21_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a4
+; RV32IZHINXMIN-NEXT: and a1, a2, a3
 ; RV32IZHINXMIN-NEXT: ret
 ;
 ; RV64IZHINXMIN-LABEL: test_rint_si64:
@@ -4110,24 +4106,25 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZFH-NEXT: .LBB23_2:
 ; RV32IZFH-NEXT: addi sp, sp, -16
 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0
+; RV32IZFH-NEXT: fmv.w.x fa5, zero
+; RV32IZFH-NEXT: fle.s a0, fa5, fs0
+; RV32IZFH-NEXT: neg s0, a0
 ; RV32IZFH-NEXT: fmv.s fa0, fs0
 ; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: fmv.w.x fa5, zero
-; RV32IZFH-NEXT: fle.s a2, fa5, fs0
-; RV32IZFH-NEXT: lui a3, %hi(.LCPI23_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI23_1)(a3)
-; RV32IZFH-NEXT: xori a2, a2, 1
-; RV32IZFH-NEXT: addi a2, a2, -1
-; RV32IZFH-NEXT: and a0, a2, a0
-; RV32IZFH-NEXT: flt.s a3, fa5, fs0
-; RV32IZFH-NEXT: neg a3, a3
-; RV32IZFH-NEXT: or a0, a3, a0
-; RV32IZFH-NEXT: and a1, a2, a1
-; RV32IZFH-NEXT: or a1, a3, a1
+; RV32IZFH-NEXT: lui a2, %hi(.LCPI23_1)
+; RV32IZFH-NEXT: flw fa5, %lo(.LCPI23_1)(a2)
+; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: flt.s a2, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a2
+; RV32IZFH-NEXT: or a0, a2, a0
+; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: or a1, a2, a1
 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT: addi sp, sp, 16
 ; RV32IZFH-NEXT: ret
 ;
@@ -4155,22 +4152,23 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZHINX-NEXT: addi sp, sp, -16
 ; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINX-NEXT: fcvt.s.h s0, a0
+; RV32IZHINX-NEXT: fle.s a0, zero, s0
+; RV32IZHINX-NEXT: neg s1, a0
 ; RV32IZHINX-NEXT: mv a0, s0
 ; RV32IZHINX-NEXT: call __fixunssfdi
-; RV32IZHINX-NEXT: fle.s a2, zero, s0
-; RV32IZHINX-NEXT: lui a3, %hi(.LCPI23_1)
-; RV32IZHINX-NEXT: lw a3, %lo(.LCPI23_1)(a3)
-; RV32IZHINX-NEXT: xori a2, a2, 1
-; RV32IZHINX-NEXT: addi a2, a2, -1
-; RV32IZHINX-NEXT: and a0, a2, a0
-; RV32IZHINX-NEXT: flt.s a3, a3, s0
-; RV32IZHINX-NEXT: neg a3, a3
-; RV32IZHINX-NEXT: or a0, a3, a0
-; RV32IZHINX-NEXT: and a1, a2, a1
-; RV32IZHINX-NEXT: or a1, a3, a1
+; RV32IZHINX-NEXT: lui a2, %hi(.LCPI23_1)
+; RV32IZHINX-NEXT: lw a2, %lo(.LCPI23_1)(a2)
+; RV32IZHINX-NEXT: and a0, s1, a0
+; RV32IZHINX-NEXT: flt.s a2, a2, s0
+; RV32IZHINX-NEXT: neg a2, a2
+; RV32IZHINX-NEXT: or a0, a2, a0
+; RV32IZHINX-NEXT: and a1, s1, a1
+; RV32IZHINX-NEXT: or a1, a2, a1
 ; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINX-NEXT: addi sp, sp, 16
 ; RV32IZHINX-NEXT: ret
 ;
@@ -4208,25 +4206,26 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZFHMIN-NEXT: .LBB23_2:
 ; RV32IZFHMIN-NEXT: addi sp, sp, -16
 ; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5
 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, fa5
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero
+; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
+; RV32IZFHMIN-NEXT: neg s0, a0
 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
 ; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: fmv.w.x fa5, zero
-; RV32IZFHMIN-NEXT: fle.s a2, fa5, fs0
-; RV32IZFHMIN-NEXT: lui a3, %hi(.LCPI23_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI23_0)(a3)
-; RV32IZFHMIN-NEXT: xori a2, a2, 1
-; RV32IZFHMIN-NEXT: addi a2, a2, -1
-; RV32IZFHMIN-NEXT: and a0, a2, a0
-; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a3, a3
-; RV32IZFHMIN-NEXT: or a0, a3, a0
-; RV32IZFHMIN-NEXT: and a1, a2, a1
-; RV32IZFHMIN-NEXT: or a1, a3, a1
+; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI23_0)
+; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI23_0)(a2)
+; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a2
+; RV32IZFHMIN-NEXT: or a0, a2, a0
+; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: or a1, a2, a1
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFHMIN-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IZFHMIN-NEXT: addi sp, sp, 16
 ; RV32IZFHMIN-NEXT: ret
 ;
@@ -4267,23 +4266,24 @@ define i64 @test_rint_ui64(half %x) nounwind {
 ; RV32IZHINXMIN-NEXT: addi sp, sp, -16
 ; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
 ; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
+; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
+; RV32IZHINXMIN-NEXT: neg s1, a0
 ; RV32IZHINXMIN-NEXT: mv a0, s0
 ; RV32IZHINXMIN-NEXT: call __fixunssfdi
-; RV32IZHINXMIN-NEXT: fle.s a2, zero, s0
-; RV32IZHINXMIN-NEXT: lui a3, %hi(.LCPI23_0)
-; RV32IZHINXMIN-NEXT: lw a3, %lo(.LCPI23_0)(a3)
-; RV32IZHINXMIN-NEXT: xori a2, a2, 1
-; RV32IZHINXMIN-NEXT: addi a2, a2, -1
-; RV32IZHINXMIN-NEXT: and a0, a2, a0
-; RV32IZHINXMIN-NEXT: flt.s a3, a3, s0
-; RV32IZHINXMIN-NEXT: neg a3, a3
-; RV32IZHINXMIN-NEXT: or a0, a3, a0
-; RV32IZHINXMIN-NEXT: and a1, a2, a1
-; RV32IZHINXMIN-NEXT: or a1, a3, a1
+; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI23_0)
+; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI23_0)(a2)
+; RV32IZHINXMIN-NEXT: and a0, s1, a0
+; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a2
+; RV32IZHINXMIN-NEXT: or a0, a2, a0
+; RV32IZHINXMIN-NEXT: and a1, s1, a1
+; RV32IZHINXMIN-NEXT: or a1, a2, a1
 ; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32IZHINXMIN-NEXT: addi sp, sp, 16
 ; RV32IZHINXMIN-NEXT: ret
 ;