AMDGPU: Fix constrain register logic for physregs #161794
Merged: arsenm merged 4 commits into main from users/arsenm/amdgpu/fix-attempt-constrain-constant-physreg on Oct 3, 2025
+665 −966
Conversation
This constrainRegClass check would never pass for a physical register: it tries to constrain the register class of a physreg, which makes no sense, since only virtual registers carry a constrainable register class. We do not need to reconstrain physical registers, so skip the check when the folded register is physical. This enables an additional fold for constant physregs.
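For illustration, a minimal sketch of the guarded check, mirroring the updateOperand() change in the diff below (the helper name canUseFoldedReg is hypothetical and not part of the patch): MachineRegisterInfo::constrainRegClass only narrows the class of a virtual register, so a physical register is accepted without any reconstraining.

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Hypothetical helper: only virtual registers are run through
// constrainRegClass. Physical registers do not need (and cannot be)
// reconstrained, so they are accepted as-is, which is what enables the
// constant-physreg fold described above.
static bool canUseFoldedReg(MachineRegisterInfo &MRI, Register NewReg,
                            const TargetRegisterClass *ConstrainRC) {
  if (!NewReg.isVirtual())
    return true;
  return MRI.constrainRegClass(NewReg, ConstrainRC) != nullptr;
}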
This was referenced Oct 3, 2025
@llvm/pr-subscribers-backend-amdgpu
Author: Matt Arsenault (arsenm)
Changes: We do not need to reconstrain physical registers. Enables an additional fold for constant physregs.
Patch is 260.21 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/161794.diff
6 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index fed37788802b9..82789bc4968c5 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -722,7 +722,8 @@ bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const {
return false;
}
- if (!MRI->constrainRegClass(New->getReg(), ConstrainRC)) {
+ if (New->getReg().isVirtual() &&
+ !MRI->constrainRegClass(New->getReg(), ConstrainRC)) {
LLVM_DEBUG(dbgs() << "Cannot constrain " << printReg(New->getReg(), TRI)
<< TRI->getRegClassName(ConstrainRC) << '\n');
return false;
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
index aac499f2fc602..b486fabb19497 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
@@ -9,15 +9,14 @@ target triple = "amdgcn-amd-amdhsa"
define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) {
; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0
-; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_lshlrev_b32 v1, 20, v0
+; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s0, -1
; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1]
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo
@@ -56,13 +55,11 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspa
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1]
; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS
; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
; GFX1250-SDAG-NEXT: s_endpgm
@@ -91,10 +88,9 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) {
; GFX1250-LABEL: use_flat_to_private_addrspacecast:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
; GFX1250-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo
; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1250-NEXT: s_cselect_b32 s0, s2, -1
; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
@@ -110,9 +106,8 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
; GFX1250-SDAG-NEXT: s_endpgm
@@ -122,9 +117,7 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) {
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
; GFX1250-GISEL-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
index ef52694910da3..54871a622189b 100644
--- a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
@@ -538,58 +538,61 @@ define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
; GFX1250-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB34_6
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_cbranch_execnz .LBB34_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB34_8
+; GFX1250-NEXT: .LBB34_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB34_3: ; %atomicrmw.check.private
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB34_3
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
-; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execz .LBB34_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB34_3: ; %Flow
+; GFX1250-NEXT: .LBB34_5: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB34_5
-; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB34_7
+; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB34_5: ; %Flow1
+; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-NEXT: .LBB34_7: ; %Flow1
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB34_6: ; %Flow2
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB34_8
-; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_cbranch_execz .LBB34_2
+; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
-; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_wait_dscnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr %ptr, double %val monotonic
ret double %result
@@ -600,58 +603,61 @@ define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
; GFX1250-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB35_6
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_cbranch_execnz .LBB35_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB35_8
+; GFX1250-NEXT: .LBB35_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB35_3: ; %atomicrmw.check.private
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB35_3
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
-; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execz .LBB35_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB35_3: ; %Flow
+; GFX1250-NEXT: .LBB35_5: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
-; GFX1250-NEXT: s_cbranch_execz .LBB35_5
-; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB35_7
+; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB35_5: ; %Flow1
+; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off
+; GFX1250-NEXT: .LBB35_7: ; %Flow1
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB35_6: ; %Flow2
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB35_8
-; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_cbranch_execz .LBB35_2
+; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
-; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_wait_dscnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic
ret double %result
@@ -686,40 +692,42 @@ define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB38_2
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
-; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execnz .LBB38_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB38_4
+; GFX1250-NEXT: .LBB38_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB38_3: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB38_2: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB38_4
-; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB38_2
+; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
-; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi
+; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fmin ptr %ptr, double %val monotonic
ret double %result
@@ -730,40 +738,42 @@ define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
-; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB39_2
-; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
-; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: s_cbranch_execnz .LBB39_3
+; GFX1250-NEXT: ; %bb.1: ; %Flow
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB39_4
+; GFX1250-NEXT: .LBB39_2: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEX...
[truncated]
cdevadas approved these changes on Oct 3, 2025
Base automatically changed from users/arsenm/amdgpu/fix-constraining-spill-physregs to main on October 3, 2025 at 12:19
MixedMatched pushed a commit to MixedMatched/llvm-project that referenced this pull request on Oct 3, 2025:
We do not need to reconstrain physical registers. Enables an additional fold for constant physregs.