PeepholeOpt: Fix losing subregister indexes on full copies #161310
Conversation
@llvm/pr-subscribers-backend-mips @llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

Changes

Previously if we had a subregister extract reading from a full copy, the no-subregister incoming copy would overwrite the DefSubReg index of the folding context.

There's one ugly rvv regression, but it's a downstream issue of this; an unnecessary same-class reg-to-reg full copy was avoided.

Patch is 528.59 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/161310.diff

34 Files Affected:
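To make the failure mode concrete, here is a minimal standalone sketch of the index bookkeeping — not LLVM's actual API: the subregister index values and the composition rule are invented, and the only property relied on is that the full-register index (0, LLVM's NoSubRegister) is the identity for composition:

```cpp
#include <cassert>

// 0 plays the role of LLVM's NoSubRegister: "the whole register".
constexpr unsigned NoSubRegister = 0;

// Toy stand-in for TRI->composeSubRegIndices(). Real targets consult
// generated tables; here we only model that the full-register index
// is the identity for composition.
unsigned composeSubRegIndices(unsigned A, unsigned B) {
  if (A == NoSubRegister)
    return B;
  if (B == NoSubRegister)
    return A;
  return A; // placeholder for a real target's table lookup
}

int main() {
  // A subregister extract reads an invented index 3 from a value that
  // was produced by a full copy (whose source carries no subreg index).
  unsigned DefSubReg = 3;
  unsigned CopySrcSubReg = NoSubRegister;

  // Old behavior: the copy's empty index overwrote DefSubReg, silently
  // turning the extract into a full-register read of the copy source.
  unsigned Lost = CopySrcSubReg;

  // Fixed behavior: compose the two indexes so the extract survives.
  unsigned Kept = composeSubRegIndices(CopySrcSubReg, DefSubReg);

  assert(Lost == NoSubRegister); // the bug: subregister index dropped
  assert(Kept == DefSubReg);     // the fix: index preserved
  return 0;
}
```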
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index fb3e6482bb096..b1543599c9b26 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -1905,7 +1905,28 @@ ValueTrackerResult ValueTracker::getNextSourceFromCopy() {
const MachineOperand &Src = Def->getOperand(1);
if (Src.isUndef())
return ValueTrackerResult();
- return ValueTrackerResult(Src.getReg(), Src.getSubReg());
+
+ Register SrcReg = Src.getReg();
+ unsigned SubReg = Src.getSubReg();
+ if (DefSubReg) {
+ const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
+ SubReg = TRI->composeSubRegIndices(SubReg, DefSubReg);
+ if (SubReg) {
+ if (SrcReg.isVirtual()) {
+ // TODO: Try constraining on rewrite if we can
+ const TargetRegisterClass *RegRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SrcWithSubRC =
+ TRI->getSubClassWithSubReg(RegRC, SubReg);
+ if (RegRC != SrcWithSubRC)
+ return ValueTrackerResult();
+ } else {
+ if (!TRI->getSubReg(SrcReg, SubReg))
+ return ValueTrackerResult();
+ }
+ }
+ }
+
+ return ValueTrackerResult(SrcReg, SubReg);
}
ValueTrackerResult ValueTracker::getNextSourceFromBitcast() {
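One note on the second half of the new check above: a composed index is only usable if the source register can actually provide it. Below is a hedged sketch of that bail-out with an invented register-class model; LLVM's real queries are TRI->getSubClassWithSubReg() for virtual registers and TRI->getSubReg() for physical ones:

```cpp
#include <cassert>

// Invented stand-in for TargetRegisterClass; the "largest supported
// index" rule is purely illustrative.
struct ToyRegClass {
  unsigned LargestSupportedSubRegIdx;
};

// Toy version of TRI->getSubClassWithSubReg(): the class itself when it
// supports Idx, otherwise null (standing in for "a strict subclass, or
// nothing at all").
const ToyRegClass *getSubClassWithSubReg(const ToyRegClass *RC,
                                         unsigned Idx) {
  return Idx <= RC->LargestSupportedSubRegIdx ? RC : nullptr;
}

int main() {
  ToyRegClass RC{/*LargestSupportedSubRegIdx=*/2};
  // Supported composed index: folding can proceed.
  assert(getSubClassWithSubReg(&RC, 2) == &RC);
  // Unsupported composed index: the patch returns an empty
  // ValueTrackerResult instead of constraining the source register
  // (that constraining is the TODO noted in the patch).
  assert(getSubClassWithSubReg(&RC, 5) != &RC);
  return 0;
}
```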
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
index 666523c88860c..ff618c05e2b80 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
@@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
-; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen
; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen
; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB14_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB14_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
index 351502816ae6e..007417c83e324 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
@@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
-; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen
; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen
; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB14_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB14_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
index ba5a8e9c68a1f..9e412b6c7cd0a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
@@ -209,48 +209,48 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v2
; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s10, v3, v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v6, s9
-; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s8, v0
; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s11, v4, v[1:2]
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s8, v0
-; GFX8-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s9
+; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_subb_u32_e64 v7, s[0:1], v2, v1, vcc
; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s9, v1
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v6
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v7
; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v6
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v6
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v7
; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1]
-; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s10, v2
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1]
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v6
; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0...
[truncated]
Makes sense to me
Previously if we had a subregister extract reading from a full copy, the no-subregister incoming copy would overwrite the DefSubReg index of the folding context.

There's one ugly rvv regression, but it's a downstream issue of this; an unnecessary same-class reg-to-reg full copy was avoided.