[RISCV] Use X0_Pair for 0.0 and -0.0 with Zdinx. #141641


Merged
merged 1 commit into llvm:main on May 28, 2025

Conversation

topperc (Collaborator) commented May 27, 2025

No description provided.

llvmbot (Member) commented May 27, 2025

@llvm/pr-subscribers-backend-risc-v

Author: Craig Topper (topperc)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/141641.diff

8 Files Affected:

  • (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (+13-8)
  • (modified) llvm/test/CodeGen/RISCV/double-arith-strict.ll (+7-12)
  • (modified) llvm/test/CodeGen/RISCV/double-arith.ll (+13-21)
  • (modified) llvm/test/CodeGen/RISCV/double-convert.ll (+8-11)
  • (modified) llvm/test/CodeGen/RISCV/double-imm.ll (+1-2)
  • (modified) llvm/test/CodeGen/RISCV/double-mem.ll (+31)
  • (modified) llvm/test/CodeGen/RISCV/double-round-conv-sat.ll (+6-12)
  • (modified) llvm/test/CodeGen/RISCV/fold-addi-loadstore-zilsd.ll (+13-12)
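
For illustration, here is a minimal LLVM IR reproducer of the pattern this change targets, adapted from the store_zero test added in llvm/test/CodeGen/RISCV/double-mem.ll below; the RV32 Zdinx assembly in the comments is taken from that test's check lines and is only a sketch of the codegen difference:

; Store double +0.0 through a pointer.
define void @store_zero(ptr %a) {
  store double 0.0, ptr %a
  ret void
}
; RV32 Zdinx after this patch: the X0 register pair supplies +0.0 directly,
; so no fcvt.d.w materialization is needed before the store.
;   sw zero, 0(a0)
;   sw zero, 4(a0)
;   ret

The fcvt.d.w zero sequences removed throughout the test diffs below show the same effect for +0.0 and -0.0 operands of arithmetic and compare instructions.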
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 18af1545d5a34..7f65216282a82 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -902,18 +902,23 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
   case ISD::ConstantFP: {
     const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
 
+    bool Is64Bit = Subtarget->is64Bit();
+    bool HasZdinx = Subtarget->hasStdExtZdinx();
+
     bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
     SDValue Imm;
     // For +0.0 or f64 -0.0 we need to start from X0. For all others, we will
     // create an integer immediate.
-    if (APF.isPosZero() || NegZeroF64)
-      Imm = CurDAG->getRegister(RISCV::X0, XLenVT);
-    else
+    if (APF.isPosZero() || NegZeroF64) {
+      if (VT == MVT::f64 && HasZdinx && !Is64Bit)
+        Imm = CurDAG->getRegister(RISCV::X0_Pair, MVT::f64);
+      else
+        Imm = CurDAG->getRegister(RISCV::X0, XLenVT);
+    } else {
       Imm = selectImm(CurDAG, DL, XLenVT, APF.bitcastToAPInt().getSExtValue(),
                       *Subtarget);
+    }
 
-    bool HasZdinx = Subtarget->hasStdExtZdinx();
-    bool Is64Bit = Subtarget->is64Bit();
     unsigned Opc;
     switch (VT.SimpleTy) {
     default:
@@ -932,10 +937,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       // For RV32, we can't move from a GPR, we need to convert instead. This
       // should only happen for +0.0 and -0.0.
       assert((Subtarget->is64Bit() || APF.isZero()) && "Unexpected constant");
-      if (Is64Bit)
-        Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
+      if (HasZdinx)
+        Opc = RISCV::COPY;
       else
-        Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
+        Opc = Is64Bit ? RISCV::FMV_D_X : RISCV::FCVT_D_W;
       break;
     }
 
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index 23336933abff9..4e48e54b3ca81 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -379,8 +379,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ;
 ; RV32IZFINXZDINX-LABEL: fmsub_d:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fmsub.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -470,9 +469,8 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmadd_d:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -581,9 +579,8 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a2, a0, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -691,8 +688,7 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmsub_d:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
 ; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -776,8 +772,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
 ; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a2, a0, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 44d7bc590a797..d6c4f8d5f350f 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -539,8 +539,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fmsub_d:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fmsub.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -630,9 +629,8 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmadd_d:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -741,9 +739,8 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a2, a0, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -941,8 +938,7 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmsub_d:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
 ; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -1026,8 +1022,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
 ; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a2, a0, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -1167,8 +1162,7 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fmsub_d_contract:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fmsub.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -1268,10 +1262,9 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmadd_d_contract:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, zero
 ; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
@@ -1398,9 +1391,8 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ;
 ; RV32IZFINXZDINX-LABEL: fnmsub_d_contract:
 ; RV32IZFINXZDINX:       # %bb.0:
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
-; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
-; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
+; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
 ; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a2, a4
 ; RV32IZFINXZDINX-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 0716650374d0d..c41239f93cf26 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -987,16 +987,15 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    mv s1, a1
 ; RV32IZFINXZDINX-NEXT:    mv s0, a0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI14_0)
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
-; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI14_0)(a4)
-; RV32IZFINXZDINX-NEXT:    addi a3, a4, %lo(.LCPI14_0)
-; RV32IZFINXZDINX-NEXT:    lw a7, 4(a3)
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
+; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI14_0)
+; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI14_0)(a3)
+; RV32IZFINXZDINX-NEXT:    addi a3, a3, %lo(.LCPI14_0)
+; RV32IZFINXZDINX-NEXT:    lw a5, 4(a3)
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
-; RV32IZFINXZDINX-NEXT:    flt.d a2, a6, s0
+; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
@@ -1860,8 +1859,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI28_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    addi a2, a2, %lo(.LCPI28_0)
 ; RV32IZFINXZDINX-NEXT:    lw a5, 4(a2)
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fmax.d a0, a0, a2
+; RV32IZFINXZDINX-NEXT:    fmax.d a0, a0, zero
 ; RV32IZFINXZDINX-NEXT:    fmin.d a0, a0, a4
 ; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
 ; RV32IZFINXZDINX-NEXT:    ret
@@ -2237,8 +2235,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI32_0)(a2)
 ; RV32IZFINXZDINX-NEXT:    addi a2, a2, %lo(.LCPI32_0)
 ; RV32IZFINXZDINX-NEXT:    lw a5, 4(a2)
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fmax.d a0, a0, a2
+; RV32IZFINXZDINX-NEXT:    fmax.d a0, a0, zero
 ; RV32IZFINXZDINX-NEXT:    fmin.d a0, a0, a4
 ; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
 ; RV32IZFINXZDINX-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index d134f8ff16149..1119fd6d74a25 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -146,8 +146,7 @@ define dso_local double @negzero_sel(i16 noundef %a, double noundef %d) nounwind
 ; CHECKRV32ZDINX-NEXT:    slli a0, a0, 16
 ; CHECKRV32ZDINX-NEXT:    beqz a0, .LBB4_2
 ; CHECKRV32ZDINX-NEXT:  # %bb.1: # %entry
-; CHECKRV32ZDINX-NEXT:    fcvt.d.w a0, zero
-; CHECKRV32ZDINX-NEXT:    fneg.d a2, a0
+; CHECKRV32ZDINX-NEXT:    fneg.d a2, zero
 ; CHECKRV32ZDINX-NEXT:    j .LBB4_3
 ; CHECKRV32ZDINX-NEXT:  .LBB4_2:
 ; CHECKRV32ZDINX-NEXT:    mv a3, a2
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 0d02ce127c325..0210e0252deca 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -402,3 +402,34 @@ define dso_local void @fsd_trunc(ptr %a, double %b) nounwind noinline optnone {
   store float %1, ptr %a, align 4
   ret void
 }
+
+define dso_local void @store_zero(ptr %a) {
+; RV32IFD-LABEL: store_zero:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.d.w fa5, zero
+; RV32IFD-NEXT:    fsd fa5, 0(a0)
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: store_zero:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    sd zero, 0(a0)
+; RV64IFD-NEXT:    ret
+;
+; RV32IZFINXZDINX-LABEL: store_zero:
+; RV32IZFINXZDINX:       # %bb.0:
+; RV32IZFINXZDINX-NEXT:    sw zero, 0(a0)
+; RV32IZFINXZDINX-NEXT:    sw zero, 4(a0)
+; RV32IZFINXZDINX-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: store_zero:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    sd zero, 0(a0)
+; RV64IZFINXZDINX-NEXT:    ret
+;
+; RV32IZFINXZDINXZILSD-LABEL: store_zero:
+; RV32IZFINXZDINXZILSD:       # %bb.0:
+; RV32IZFINXZDINXZILSD-NEXT:    sd zero, 0(a0)
+; RV32IZFINXZDINXZILSD-NEXT:    ret
+  store double 0.0, ptr %a
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index e685d21cc0928..a574e68671a74 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -231,8 +231,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    neg s2, a0
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
@@ -481,8 +480,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    neg s2, a0
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
@@ -731,8 +729,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    neg s2, a0
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
@@ -981,8 +978,7 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    neg s2, a0
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
@@ -1231,8 +1227,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    neg s2, a0
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
@@ -1481,8 +1476,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
 ; RV32IZFINXZDINX-NEXT:    neg s2, a0
 ; RV32IZFINXZDINX-NEXT:    mv a0, s0
 ; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
-; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
-; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
+; RV32IZFINXZDINX-NEXT:    fle.d a2, zero, s0
 ; RV32IZFINXZDINX-NEXT:    neg a2, a2
 ; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
 ; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore-zilsd.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore-zilsd.ll
index 66fbba6f038ef..a719e1ee5a831 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore-zilsd.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore-zilsd.ll
@@ -21,8 +21,7 @@ define void @store_g_0() nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:  .Lpcrel_hi1:
 ; CHECK-NEXT:    auipc a0, %pcrel_hi(g_0)
-; CHECK-NEXT:    fcvt.d.w a2, zero
-; CHECK-NEXT:    sd a2, %pcrel_lo(.Lpcrel_hi1)(a0)
+; CHECK-NEXT:    sd zero, %pcrel_lo(.Lpcrel_hi1)(a0)
 ; CHECK-NEXT:    ret
 entry:
   store double 0.0, ptr @g_0
@@ -43,25 +42,27 @@ define double @fold_addi_from_different_bb(i32 %k, i32 %n, ptr %a) nounwind {
 ; CHECK-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    blez a1, .LBB2_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.lr.ph
-; CHECK-NEXT:    mv s0, a2
-; CHECK-NEXT:    mv s1, a1
+; CHECK-NEXT:    mv s2, a2
+; CHECK-NEXT:    mv s3, a1
+; CHECK-NEXT:    li s0, 0
+; CHECK-NEXT:    li s1, 0
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add s4, a2, a0
-; CHECK-NEXT:    fcvt.d.w s2, zero
 ; CHECK-NEXT:  .LBB2_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    mv a0, s0
+; CHECK-NEXT:    mv a0, s2
 ; CHECK-NEXT:    call f
 ; CHECK-NEXT:    ld a0, 8(s4)
-; CHECK-NEXT:    addi s1, s1, -1
-; CHECK-NEXT:    fadd.d s2, a0, s2
-; CHECK-NEXT:    bnez s1, .LBB2_2
+; CHECK-NEXT:    addi s3, s3, -1
+; CHECK-NEXT:    fadd.d s0, a0, s0
+; CHECK-NEXT:    bnez s3, .LBB2_2
 ; CHECK-NEXT:    j .LBB2_4
 ; CHECK-NEXT:  .LBB2_3:
-; CHECK-NEXT:    fcvt.d.w s2, zero
+; CHECK-NEXT:    li s0, 0
+; CHECK-NEXT:    li s1, 0
 ; CHECK-NEXT:  .LBB2_4: # %for.cond.cleanup
-; CHECK-NEXT:    mv a0, s2
-; CHECK-NEXT:    mv a1, s3
+; CHECK-NEXT:    mv a0, s0
+; CHECK-NEXT:    mv a1, s1
 ; CHECK-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload

wangpc-pp (Contributor) left a comment


LGTM.

topperc merged commit 0b75a20 into llvm:main on May 28, 2025; 13 checks passed.
topperc deleted the pr/x0-pair-zero branch on May 28, 2025 at 04:52.
sivan-shani pushed a commit to sivan-shani/llvm-project that referenced this pull request on Jun 3, 2025.