
Conversation

@jplehr
Contributor

@jplehr jplehr commented Oct 18, 2025

Reverts #162352

Broke our buildbot: https://lab.llvm.org/buildbot/#/builders/10/builds/15674
To reproduce:

cd llvm-project
cmake -S llvm -B thebuild -C offload/cmake/caches/AMDGPULibcBot.cmake -GNinja
cd thebuild
ninja
ninja check-libc-amdgcn-amd-amdhsa
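
The same steps, annotated (a sketch assuming a Linux host with CMake and Ninja on PATH; nothing beyond the commands above is implied):

cd llvm-project
# -C seeds the configuration from the AMDGPU libc buildbot's CMake cache file
cmake -S llvm -B thebuild -C offload/cmake/caches/AMDGPULibcBot.cmake -GNinja
cd thebuild
# full build, then the GPU libc test suite that failed on the bot
ninja
ninja check-libc-amdgcn-amd-amdhsa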

@llvmbot
Member

llvmbot commented Oct 18, 2025

@llvm/pr-subscribers-llvm-globalisel

Author: Jan Patrick Lehr (jplehr)

Changes

Reverts llvm/llvm-project#162352

Broke our buildbot: https://lab.llvm.org/buildbot/#/builders/10/builds/15674
To reproduce:

cd llvm-project
cmake -S llvm -B thebuild -C offload/cmake/caches/AMDGPULibcBot.cmake -GNinja
cd thebuild
ninja
ninja check-libc-amdgcn-amd-amdhsa


Patch is 564.23 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/164116.diff

36 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+6-63)
  • (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.h (-24)
  • (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll (+2)
  • (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll (+2)
  • (modified) llvm/test/CodeGen/AMDGPU/addsub64_carry.ll (+28-8)
  • (modified) llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll (+666-594)
  • (modified) llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll (+78-57)
  • (modified) llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll (+116-94)
  • (modified) llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll (+360-225)
  • (modified) llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll (+52-38)
  • (modified) llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll (+52-38)
  • (modified) llvm/test/CodeGen/AMDGPU/carryout-selection.ll (+345-269)
  • (modified) llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll (+2-1)
  • (modified) llvm/test/CodeGen/AMDGPU/ctpop16.ll (+2)
  • (modified) llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll (+20-5)
  • (modified) llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll (+25-10)
  • (modified) llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll (+88-40)
  • (modified) llvm/test/CodeGen/AMDGPU/fptrunc.ll (+23-13)
  • (modified) llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll (+85-30)
  • (modified) llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll (+57-24)
  • (modified) llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll (+57-24)
  • (modified) llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll (+85-30)
  • (modified) llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll (+11-9)
  • (modified) llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll (+72-56)
  • (modified) llvm/test/CodeGen/AMDGPU/optimize-compare.mir (+5-77)
  • (modified) llvm/test/CodeGen/AMDGPU/s_cmp_0.ll (+30)
  • (modified) llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll (+4)
  • (modified) llvm/test/CodeGen/AMDGPU/sdiv64.ll (+79-67)
  • (modified) llvm/test/CodeGen/AMDGPU/srem.ll (+361-293)
  • (modified) llvm/test/CodeGen/AMDGPU/srem64.ll (+114-93)
  • (modified) llvm/test/CodeGen/AMDGPU/uaddo.ll (+36-18)
  • (modified) llvm/test/CodeGen/AMDGPU/udiv64.ll (+45-35)
  • (modified) llvm/test/CodeGen/AMDGPU/urem64.ll (+81-65)
  • (modified) llvm/test/CodeGen/AMDGPU/usubo.ll (+36-18)
  • (modified) llvm/test/CodeGen/AMDGPU/wave32.ll (+102-88)
  • (modified) llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll (+8)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 942e784870989..50447f48a628c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -10626,59 +10626,6 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
   if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue))
     return false;
 
-  const auto optimizeCmpSelect = [&CmpInstr, SrcReg, CmpValue, MRI,
-                                  this]() -> bool {
-    if (CmpValue != 0)
-      return false;
-
-    MachineInstr *Def = MRI->getUniqueVRegDef(SrcReg);
-    if (!Def || Def->getParent() != CmpInstr.getParent())
-      return false;
-
-    bool CanOptimize = false;
-
-    // For S_OP that set SCC = DST!=0, do the transformation
-    //
-    //   s_cmp_lg_* (S_OP ...), 0 => (S_OP ...)
-    if (setsSCCifResultIsNonZero(*Def))
-      CanOptimize = true;
-
-    // s_cmp_lg_* is redundant because the SCC input value for S_CSELECT* has
-    // the same value that will be calculated by s_cmp_lg_*
-    //
-    //   s_cmp_lg_* (S_CSELECT* (non-zero imm), 0), 0 => (S_CSELECT* (non-zero
-    //   imm), 0)
-    if (Def->getOpcode() == AMDGPU::S_CSELECT_B32 ||
-        Def->getOpcode() == AMDGPU::S_CSELECT_B64) {
-      bool Op1IsNonZeroImm =
-          Def->getOperand(1).isImm() && Def->getOperand(1).getImm() != 0;
-      bool Op2IsZeroImm =
-          Def->getOperand(2).isImm() && Def->getOperand(2).getImm() == 0;
-      if (Op1IsNonZeroImm && Op2IsZeroImm)
-        CanOptimize = true;
-    }
-
-    if (!CanOptimize)
-      return false;
-
-    MachineInstr *KillsSCC = nullptr;
-    for (MachineInstr &MI :
-         make_range(std::next(Def->getIterator()), CmpInstr.getIterator())) {
-      if (MI.modifiesRegister(AMDGPU::SCC, &RI))
-        return false;
-      if (MI.killsRegister(AMDGPU::SCC, &RI))
-        KillsSCC = &MI;
-    }
-
-    if (MachineOperand *SccDef =
-            Def->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr))
-      SccDef->setIsDead(false);
-    if (KillsSCC)
-      KillsSCC->clearRegisterKills(AMDGPU::SCC, /*TRI=*/nullptr);
-    CmpInstr.eraseFromParent();
-    return true;
-  };
-
   const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
                                this](int64_t ExpectedValue, unsigned SrcSize,
                                      bool IsReversible, bool IsSigned) -> bool {
@@ -10753,20 +10700,16 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
     if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))
       return false;
 
-    MachineInstr *KillsSCC = nullptr;
-    for (MachineInstr &MI :
-         make_range(std::next(Def->getIterator()), CmpInstr.getIterator())) {
-      if (MI.modifiesRegister(AMDGPU::SCC, &RI))
+    for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator();
+         I != E; ++I) {
+      if (I->modifiesRegister(AMDGPU::SCC, &RI) ||
+          I->killsRegister(AMDGPU::SCC, &RI))
         return false;
-      if (MI.killsRegister(AMDGPU::SCC, &RI))
-        KillsSCC = &MI;
     }
 
     MachineOperand *SccDef =
         Def->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr);
     SccDef->setIsDead(false);
-    if (KillsSCC)
-      KillsSCC->clearRegisterKills(AMDGPU::SCC, /*TRI=*/nullptr);
     CmpInstr.eraseFromParent();
 
     if (!MRI->use_nodbg_empty(DefReg)) {
@@ -10810,7 +10753,7 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
   case AMDGPU::S_CMP_LG_I32:
   case AMDGPU::S_CMPK_LG_U32:
   case AMDGPU::S_CMPK_LG_I32:
-    return optimizeCmpAnd(0, 32, true, false) || optimizeCmpSelect();
+    return optimizeCmpAnd(0, 32, true, false);
   case AMDGPU::S_CMP_GT_U32:
   case AMDGPU::S_CMPK_GT_U32:
     return optimizeCmpAnd(0, 32, false, false);
@@ -10818,7 +10761,7 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
   case AMDGPU::S_CMPK_GT_I32:
     return optimizeCmpAnd(0, 32, false, true);
   case AMDGPU::S_CMP_LG_U64:
-    return optimizeCmpAnd(0, 64, true, false) || optimizeCmpSelect();
+    return optimizeCmpAnd(0, 64, true, false);
   }
 
   return false;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index ee99a740705e9..df27ec1f8de8c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -709,30 +709,6 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
     }
   }
 
-  static bool setsSCCifResultIsNonZero(const MachineInstr &MI) {
-    if (!MI.findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr))
-      return false;
-    // Compares have no result
-    if (MI.isCompare())
-      return false;
-    switch (MI.getOpcode()) {
-    default:
-      return true;
-    case AMDGPU::S_ADD_I32:
-    case AMDGPU::S_ADD_U32:
-    case AMDGPU::S_ADDC_U32:
-    case AMDGPU::S_SUB_I32:
-    case AMDGPU::S_SUB_U32:
-    case AMDGPU::S_SUBB_U32:
-    case AMDGPU::S_MIN_I32:
-    case AMDGPU::S_MIN_U32:
-    case AMDGPU::S_MAX_I32:
-    case AMDGPU::S_MAX_U32:
-    case AMDGPU::S_ADDK_I32:
-      return false;
-    }
-  }
-
   static bool isEXP(const MachineInstr &MI) {
     return MI.getDesc().TSFlags & SIInstrFlags::EXP;
   }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
index 7714c032d1737..51714035352a3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
@@ -140,6 +140,7 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_non_compare(i32 %v) {
 ; CHECK-NEXT:    v_and_b32_e32 v0, 1, v0
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
 ; CHECK-NEXT:    s_and_b32 s0, vcc_lo, exec_lo
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
 ; CHECK-NEXT:    s_cbranch_scc0 .LBB9_2
 ; CHECK-NEXT:  ; %bb.1: ; %false
 ; CHECK-NEXT:    s_mov_b32 s0, 33
@@ -344,6 +345,7 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 12, v0
 ; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
 ; CHECK-NEXT:    s_cbranch_scc0 .LBB17_2
 ; CHECK-NEXT:  ; %bb.1: ; %false
 ; CHECK-NEXT:    s_mov_b32 s0, 33
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
index 7b8166948610b..7b01f13b9ef1c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
@@ -143,6 +143,7 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_non_compare(i32 %v) {
 ; CHECK-NEXT:    v_and_b32_e32 v0, 1, v0
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; CHECK-NEXT:    s_and_b64 s[0:1], vcc, exec
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
 ; CHECK-NEXT:    s_cbranch_scc0 .LBB9_2
 ; CHECK-NEXT:  ; %bb.1: ; %false
 ; CHECK-NEXT:    s_mov_b32 s0, 33
@@ -347,6 +348,7 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 12, v0
 ; CHECK-NEXT:    v_cmp_lt_u32_e64 s[0:1], 34, v1
 ; CHECK-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
 ; CHECK-NEXT:    s_cbranch_scc0 .LBB17_2
 ; CHECK-NEXT:  ; %bb.1: ; %false
 ; CHECK-NEXT:    s_mov_b32 s0, 33
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index 8088c1b4c8fc7..b72eba8cdb519 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -180,7 +180,11 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
 ; CHECK-LABEL: s_add64_32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_add_u32 s0, s0, s2
+; CHECK-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; CHECK-NEXT:    s_addc_u32 s1, s1, s3
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT:    s_addc_u32 s2, s4, 0
 ; CHECK-NEXT:    ; return to shader part epilog
   %sum64 = add i64 %val64A, %val64B
@@ -195,10 +199,14 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
 define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_v2i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s6, s2, s6
-; CHECK-NEXT:    s_addc_u32 s7, s3, s7
+; CHECK-NEXT:    s_add_u32 s10, s2, s6
+; CHECK-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT:    s_addc_u32 s8, s3, s7
 ; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
 ; CHECK-NEXT:    s_add_u32 s0, s0, s4
+; CHECK-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; CHECK-NEXT:    s_addc_u32 s1, s1, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s1
@@ -207,8 +215,8 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 ; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v7
 ; CHECK-NEXT:    v_readfirstlane_b32 s2, v6
-; CHECK-NEXT:    v_mov_b32_e32 v4, s6
-; CHECK-NEXT:    v_mov_b32_e32 v5, s7
+; CHECK-NEXT:    v_mov_b32_e32 v4, s10
+; CHECK-NEXT:    v_mov_b32_e32 v5, s8
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_mov_b32 s3, s2
 ; CHECK-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
@@ -225,10 +233,14 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_v2i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_sub_u32 s6, s2, s6
-; CHECK-NEXT:    s_subb_u32 s7, s3, s7
+; CHECK-NEXT:    s_sub_u32 s10, s2, s6
+; CHECK-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT:    s_subb_u32 s8, s3, s7
 ; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
 ; CHECK-NEXT:    s_sub_u32 s0, s0, s4
+; CHECK-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; CHECK-NEXT:    s_subb_u32 s1, s1, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s1
@@ -237,8 +249,8 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 ; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v7
 ; CHECK-NEXT:    v_readfirstlane_b32 s2, v6
-; CHECK-NEXT:    v_mov_b32_e32 v4, s6
-; CHECK-NEXT:    v_mov_b32_e32 v5, s7
+; CHECK-NEXT:    v_mov_b32_e32 v4, s10
+; CHECK-NEXT:    v_mov_b32_e32 v5, s8
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_mov_b32 s3, s2
 ; CHECK-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
@@ -256,6 +268,8 @@ define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval)
 ; CHECK-LABEL: s_uadd_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_add_u32 s0, s0, s2
+; CHECK-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; CHECK-NEXT:    s_addc_u32 s1, s1, s3
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s1
@@ -278,6 +292,8 @@ define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_add_u32 s0, s0, 1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT:    s_addc_u32 s1, s1, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s1
@@ -323,6 +339,8 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_sub_u32 s0, s0, 1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT:    s_subb_u32 s1, s1, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s1
@@ -345,6 +363,8 @@ define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_n1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_sub_u32 s0, s0, -1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT:    s_subb_u32 s1, s1, -1
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, s1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 51df8c34cc55e..948811ea45f77 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7821,9 +7821,10 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_addc_u32 s15, 0, s16
 ; GFX6-NEXT:    s_add_u32 s16, s0, s1
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s16
-; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
 ; GFX6-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
 ; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
 ; GFX6-NEXT:    s_addc_u32 s14, s14, s15
 ; GFX6-NEXT:    s_mul_i32 s0, s12, s14
 ; GFX6-NEXT:    v_readfirstlane_b32 s1, v0
@@ -7854,6 +7855,7 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_add_u32 s15, s16, s0
 ; GFX6-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
 ; GFX6-NEXT:    s_addc_u32 s14, s14, s12
 ; GFX6-NEXT:    s_ashr_i32 s12, s7, 31
 ; GFX6-NEXT:    s_add_u32 s0, s6, s12
@@ -7879,50 +7881,52 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX6-NEXT:    s_addc_u32 s4, s4, 0
 ; GFX6-NEXT:    s_mul_i32 s14, s7, s14
-; GFX6-NEXT:    s_add_u32 s16, s1, s14
-; GFX6-NEXT:    v_mov_b32_e32 v0, s16
+; GFX6-NEXT:    s_add_u32 s14, s1, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s10, v0
-; GFX6-NEXT:    s_addc_u32 s17, 0, s4
+; GFX6-NEXT:    s_addc_u32 s15, 0, s4
 ; GFX6-NEXT:    s_mov_b32 s1, s5
-; GFX6-NEXT:    s_mul_i32 s4, s10, s17
+; GFX6-NEXT:    s_mul_i32 s4, s10, s15
 ; GFX6-NEXT:    v_readfirstlane_b32 s5, v0
 ; GFX6-NEXT:    s_add_i32 s4, s5, s4
-; GFX6-NEXT:    s_mul_i32 s5, s11, s16
-; GFX6-NEXT:    s_add_i32 s18, s4, s5
-; GFX6-NEXT:    s_sub_i32 s14, s7, s18
-; GFX6-NEXT:    s_mul_i32 s4, s10, s16
+; GFX6-NEXT:    s_mul_i32 s5, s11, s14
+; GFX6-NEXT:    s_add_i32 s16, s4, s5
+; GFX6-NEXT:    s_sub_i32 s17, s7, s16
+; GFX6-NEXT:    s_mul_i32 s4, s10, s14
 ; GFX6-NEXT:    s_sub_u32 s6, s6, s4
 ; GFX6-NEXT:    s_cselect_b64 s[4:5], -1, 0
-; GFX6-NEXT:    s_or_b32 s15, s4, s5
-; GFX6-NEXT:    s_subb_u32 s19, s14, s11
-; GFX6-NEXT:    s_sub_u32 s20, s6, s10
-; GFX6-NEXT:    s_cselect_b64 s[14:15], -1, 0
-; GFX6-NEXT:    s_or_b32 s14, s14, s15
-; GFX6-NEXT:    s_subb_u32 s14, s19, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s14, s11
-; GFX6-NEXT:    s_cselect_b32 s15, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s20, s10
-; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s14, s11
-; GFX6-NEXT:    s_cselect_b32 s14, s19, s15
-; GFX6-NEXT:    s_add_u32 s15, s16, 1
-; GFX6-NEXT:    s_addc_u32 s19, s17, 0
-; GFX6-NEXT:    s_add_u32 s20, s16, 2
-; GFX6-NEXT:    s_addc_u32 s21, s17, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s14, 0
-; GFX6-NEXT:    s_cselect_b32 s14, s20, s15
-; GFX6-NEXT:    s_cselect_b32 s15, s21, s19
+; GFX6-NEXT:    s_or_b32 s18, s4, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
+; GFX6-NEXT:    s_subb_u32 s17, s17, s11
+; GFX6-NEXT:    s_sub_u32 s19, s6, s10
+; GFX6-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX6-NEXT:    s_or_b32 s4, s4, s5
-; GFX6-NEXT:    s_subb_u32 s4, s7, s18
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_subb_u32 s4, s17, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s4, s11
 ; GFX6-NEXT:    s_cselect_b32 s5, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s6, s10
-; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s19, s10
+; GFX6-NEXT:    s_cselect_b32 s17, -1, 0
 ; GFX6-NEXT:    s_cmp_eq_u32 s4, s11
-; GFX6-NEXT:    s_cselect_b32 s4, s6, s5
+; GFX6-NEXT:    s_cselect_b32 s4, s17, s5
+; GFX6-NEXT:    s_add_u32 s5, s14, 1
+; GFX6-NEXT:    s_addc_u32 s17, s15, 0
+; GFX6-NEXT:    s_add_u32 s19, s14, 2
+; GFX6-NEXT:    s_addc_u32 s20, s15, 0
 ; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX6-NEXT:    s_cselect_b32 s5, s15, s17
-; GFX6-NEXT:    s_cselect_b32 s4, s14, s16
+; GFX6-NEXT:    s_cselect_b32 s4, s19, s5
+; GFX6-NEXT:    s_cselect_b32 s5, s20, s17
+; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
+; GFX6-NEXT:    s_subb_u32 s7, s7, s16
+; GFX6-NEXT:    s_cmp_ge_u32 s7, s11
+; GFX6-NEXT:    s_cselect_b32 s16, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s6, s10
+; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s7, s11
+; GFX6-NEXT:    s_cselect_b32 s6, s6, s16
+; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX6-NEXT:    s_cselect_b32 s5, s5, s15
+; GFX6-NEXT:    s_cselect_b32 s4, s4, s14
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[12:13], s[8:9]
 ; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
 ; GFX6-NEXT:    s_sub_u32 s4, s4, s6
@@ -7945,8 +7949,8 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT:    s_sub_u32 s4, 0, s8
-; GFX9-NEXT:    s_subb_u32 s5, 0, s9
+; GFX9-NEXT:    s_sub_u32 s10, 0, s8
+; GFX9-NEXT:    s_subb_u32 s11, 0, s9
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -7956,52 +7960,56 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_madmk_f32 v1, v2, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s11, v1
-; GFX9-NEXT:    s_mul_i32 s12, s4, s10
-; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s11
-; GFX9-NEXT:    s_mul_i32 s13, s5, s11
-; GFX9-NEXT:    s_add_i32 s12, s14, s12
-; GFX9-NEXT:    s_mul_i32 s15, s4, s11
-; GFX9-NEXT:    s_add_i32 s12, s12, s13
-; GFX9-NEXT:    s_mul_hi_u32 s14, s11, s15
-; GFX9-NEXT:    s_mul_i32 s16, s11, s12
-; GFX9-NEXT:    s_mul_hi_u32 s13, s11, s12
+; GFX9-NEXT:    v_readfirstlane_b32 s12, v2
+; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX9-NEXT:    s_mul_i32 s5, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s4
+; GFX9-NEXT:    s_mul_i32 s13, s11, s4
+; GFX9-NEXT:    s_add_i32 s5, s14, s5
+; GFX9-NEXT:    s_mul_i32 s15, s10, s4
+; GFX9-NEXT:    s_add_i32 s5, s5, s13
+; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s15
+; GFX9-NEXT:    s_mul_i32 s16, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s5
 ; GFX9-NEXT:    s_add_u32 s14, s14, s16
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_mul_hi_u32 s17, s10, s15
-; GFX9-NEXT:    s_mul_i32 s15, s10, s15
+; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s15
+; GFX9-NEXT:    s_mul_i32 s15, s12, s15
 ; GFX9-NEXT:    s_add_u32 s14, s14, s15
-; GFX9-NEXT:    s_mul_hi_u32 s16, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s5
 ; GFX9-NEXT:    s_addc_u32 s13, s13, s17
 ; GFX9-NEXT:    s_addc_u32 s14, s16, 0
-; GFX9-NEXT:    s_mul_i32 s12, s10, s12
-; GFX9-NEXT:    s_add_u32 s12, s13, s12
+; GFX9-NEXT:    s_mul_i32 s5, s12, s5
+; GFX9-NEXT:    s_add_u32 s5, s13, s5
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s14
-; GFX9-NEXT:    s_add_u32 s11, s11, s12
-; GFX9-NEXT:    s_addc_u32 s10, s10, s13
-; GFX9-NEXT:    s_mul_i32 s12, s4, s10
-; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s11
-; GFX9-NEXT:    s_add_i32 s12, s13, s12
-; GFX9-NEXT:    s_mul_i32 s5, s5, s11
-; GFX9-NEXT:    s_add_i32 s12, s12, s5
-; GFX9-NEXT:    s_mul_i32 s4, s4, s11
-; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s4
-; GFX9-NEXT:    s_mul_i32 s14, s10, s4
-; GFX9-NEXT:    s_mul_i32 s16, s11, s12
-; GFX9-NEXT:    s_mul_hi_u32 s4, s11, s4
-; GFX9-NEXT:    s_mul_hi_u32 s15, s11, s12
-; GFX9-NEXT:    s_add_u32 s4, s4, s16
+; GFX9-NEXT:    s_add_u32 s14, s4, s5
+; GFX9-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s12, s12, s13
+; GFX9-NEXT:    s_mul_i32 s4, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s14
+; GFX9-NEXT:    s_add_i32 s4, s5, s4
+; GFX9-NEXT:    s_mul_i32 s11, s11, s14
+; GFX9-NEXT:    s_add_i32 s4, s...
[truncated]
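
For context, the reverted change taught SIInstrInfo::optimizeCompareInstr to delete an s_cmp_lg_* against zero when the compared register's defining scalar op already sets SCC to (result != 0) (see setsSCCifResultIsNonZero in the diff), or when it is an S_CSELECT_B32/B64 choosing between a non-zero immediate and zero. The ballot test updates above show the shape of the transformation; a before/after sketch assembled from the diff (comments are editorial, not from the test files):

; with the reverted optimization in place, the compare was folded away:
s_and_b32      s0, vcc_lo, exec_lo   ; scalar AND already sets SCC = (s0 != 0)
s_cbranch_scc0 .LBB9_2

; after this revert, the explicit compare is emitted again:
s_and_b32      s0, vcc_lo, exec_lo
s_cmp_lg_u32   s0, 0                 ; recomputes SCC = (s0 != 0)
s_cbranch_scc0 .LBB9_2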

@llvmbot
Member

llvmbot commented Oct 18, 2025

@llvm/pr-subscribers-backend-amdgpu

@jplehr jplehr merged commit 023b1f6 into main Oct 18, 2025
13 checks passed
@jplehr jplehr deleted the revert-162352-remove_s_cmp branch October 18, 2025 20:38
searlmc1 pushed a commit to ROCm/llvm-project that referenced this pull request Oct 19, 2025
Reverts llvm#162352

Broke our buildbot: https://lab.llvm.org/buildbot/#/builders/10/builds/15674
To reproduce:

cd llvm-project
cmake -S llvm -B thebuild -C offload/cmake/caches/AMDGPULibcBot.cmake -GNinja
cd thebuild
ninja
ninja check-libc-amdgcn-amd-amdhsa
LU-JOHN added a commit to LU-JOHN/llvm-project that referenced this pull request Oct 20, 2025