diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 729a57ef23b1e..e1d39d64e9fb8 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -1929,7 +1929,27 @@ ValueTrackerResult ValueTracker::getNextSourceFromCopy() { const MachineOperand &Src = Def->getOperand(1); if (Src.isUndef()) return ValueTrackerResult(); - return ValueTrackerResult(Src.getReg(), Src.getSubReg()); + + Register SrcReg = Src.getReg(); + unsigned SubReg = Src.getSubReg(); + if (DefSubReg) { + const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + SubReg = TRI->composeSubRegIndices(SubReg, DefSubReg); + + if (SrcReg.isVirtual()) { + // TODO: Try constraining on rewrite if we can + const TargetRegisterClass *RegRC = MRI.getRegClass(SrcReg); + const TargetRegisterClass *SrcWithSubRC = + TRI->getSubClassWithSubReg(RegRC, SubReg); + if (RegRC != SrcWithSubRC) + return ValueTrackerResult(); + } else { + if (!TRI->getSubReg(SrcReg, SubReg)) + return ValueTrackerResult(); + } + } + + return ValueTrackerResult(SrcReg, SubReg); } ValueTrackerResult ValueTracker::getNextSourceFromBitcast() { diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll index 666523c88860c..ff618c05e2b80 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll @@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 -; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: v_mov_b32_e32 v8, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] +; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen ; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1854,27 +1854,27 @@ define double 
@buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v6, s16 -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v8, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen ; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v6, s20 -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v8, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; 
GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB14_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v6, s20 -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v8, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB14_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll index 351502816ae6e..007417c83e324 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll @@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 -; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: v_mov_b32_e32 v8, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] +; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen ; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: 
v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v6, s16 -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v8, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen ; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v6, s20 -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 
offen -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v8, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB14_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v6, s20 -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v8, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB14_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll index ba5a8e9c68a1f..9e412b6c7cd0a 100644 --- 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll @@ -209,48 +209,48 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v2 ; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s10, v3, v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v6, s9 -; GFX8-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s8, v0 ; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s11, v4, v[1:2] -; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s8, v0 -; GFX8-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v1, vcc +; GFX8-NEXT: v_mov_b32_e32 v2, s9 +; GFX8-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NEXT: v_subb_u32_e64 v7, s[0:1], v2, v1, vcc ; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s9, v1 -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v6 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v7 ; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v6 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v6 +; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v7 ; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1] -; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s10, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v6 ; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v0, vcc ; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v4 ; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v7 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2 ; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v8 -; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v7 +; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v2 ; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1] ; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9 ; GFX8-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v0, vcc ; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1] ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v9, v12, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v0, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v14, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v4, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1] ; GFX8-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v14, vcc ; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1] ; GFX8-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[0:1] ; GFX8-NEXT: v_mov_b32_e32 v1, s7 ; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX8-NEXT: s_endpgm @@ -299,7 +299,6 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v0 ; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v1, vcc ; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v3, 0 -; 
GFX9-NEXT: v_mov_b32_e32 v7, s19 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v4, v[1:2] ; GFX9-NEXT: v_mul_hi_u32 v6, v3, v0 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s3, v3, v[1:2] @@ -346,30 +345,30 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_add_u32_e32 v3, v4, v3 ; GFX9-NEXT: v_add3_u32 v3, v3, v2, v6 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s18, v3, v[1:2] -; GFX9-NEXT: v_mov_b32_e32 v6, s17 -; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, s16, v0 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s19, v5, v[1:2] -; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e64 v6, s[0:1], v6, v1, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v6 +; GFX9-NEXT: v_mov_b32_e32 v2, s17 +; GFX9-NEXT: v_mov_b32_e32 v4, s19 +; GFX9-NEXT: v_subb_co_u32_e64 v8, s[0:1], v2, v1, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v8 ; GFX9-NEXT: v_sub_u32_e32 v0, s17, v1 ; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v6 -; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[0:1] -; GFX9-NEXT: v_subrev_co_u32_e32 v8, vcc, s18, v2 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v8 +; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s18, v7 ; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[0:1], 0, v0, vcc ; GFX9-NEXT: v_add_co_u32_e64 v10, s[0:1], 1, v5 ; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v9 ; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v8 -; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2 +; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v9 -; GFX9-NEXT: v_subrev_co_u32_e32 v7, vcc, s18, v8 +; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s18, v2 ; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[0:1] ; GFX9-NEXT: v_add_co_u32_e64 v13, s[0:1], 1, v10 ; GFX9-NEXT: v_subbrev_co_u32_e32 v15, vcc, 0, v0, vcc @@ -378,14 +377,15 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v13, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v14, vcc ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 +; GFX9-NEXT: v_mov_b32_e32 v6, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v10, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1] -; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[12:13] -; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[14:15] +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[0:1] +; GFX9-NEXT: global_store_dwordx2 v6, v[0:1], s[12:13] +; GFX9-NEXT: global_store_dwordx2 v6, v[2:3], s[14:15] ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: udivrem_i64: @@ -1070,6 +1070,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr 
addrspace(1) %out0, ptr addrspace(1 ; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1 ; GFX8-NEXT: v_mul_hi_u32 v4, s8, v0 ; GFX8-NEXT: v_mul_hi_u32 v0, s9, v0 +; GFX8-NEXT: v_mov_b32_e32 v5, s13 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4 @@ -1082,184 +1083,183 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v0, v2 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v0, v2 ; GFX8-NEXT: v_mul_hi_u32 v4, s9, v1 -; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v6, 0 +; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v7, 0 ; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v4, v2 -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v7, v[1:2] +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v2 +; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v8, v[1:2] +; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s13, v7, v[1:2] ; GFX8-NEXT: v_mov_b32_e32 v3, s9 -; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s8, v0 -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s13, v6, v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v4, s13 -; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v1, vcc -; GFX8-NEXT: v_sub_u32_e64 v1, s[0:1], s9, v1 +; GFX8-NEXT: v_sub_u32_e32 v1, vcc, s8, v0 +; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v2, vcc +; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s9, v2 ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v1 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v9, v2, v3, s[0:1] -; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s15 -; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s14 -; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v4, vcc -; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f800000, v2 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v8 -; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v5, vcc -; GFX8-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1 -; GFX8-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1 -; GFX8-NEXT: v_trunc_f32_e32 v3, v2 -; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v3 -; GFX8-NEXT: v_add_f32_e32 v1, v2, v1 -; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v1 -; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v6 -; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v7, s[0:1] -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v12, 0 -; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v3 +; GFX8-NEXT: v_cndmask_b32_e64 v9, v3, v4, s[0:1] +; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s15 +; GFX8-NEXT: v_cvt_f32_u32_e32 v4, s14 +; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v5, vcc +; GFX8-NEXT: v_mul_f32_e32 v2, 0x4f800000, v3 +; GFX8-NEXT: v_add_f32_e32 v2, v2, v4 +; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v1 +; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v6, vcc +; GFX8-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2 +; GFX8-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 +; GFX8-NEXT: v_trunc_f32_e32 v4, v3 +; GFX8-NEXT: v_mul_f32_e32 v3, 0xcf800000, v4 +; GFX8-NEXT: v_add_f32_e32 v2, v3, v2 +; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v2 +; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v7 +; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v8, s[0:1] +; GFX8-NEXT: 
v_mad_u64_u32 v[2:3], s[0:1], s2, v12, 0 +; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v4 ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v11 ; GFX8-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1] -; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc -; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3] +; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v6, v5, vcc +; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1] -; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s3, v12, v[2:3] +; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s3, v12, v[3:4] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v11 ; GFX8-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1] -; GFX8-NEXT: v_mul_lo_u32 v3, v15, v1 -; GFX8-NEXT: v_mul_lo_u32 v17, v12, v2 -; GFX8-NEXT: v_mul_hi_u32 v5, v12, v1 -; GFX8-NEXT: v_mul_hi_u32 v1, v15, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v17 +; GFX8-NEXT: v_mul_lo_u32 v4, v15, v2 +; GFX8-NEXT: v_mul_lo_u32 v17, v12, v3 +; GFX8-NEXT: v_mul_hi_u32 v6, v12, v2 +; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v17 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5 -; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v5, v15, v2 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v17, v3 -; GFX8-NEXT: v_mul_hi_u32 v17, v12, v2 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v5, v1 -; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v17 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v6, v15, v3 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v17, v4 +; GFX8-NEXT: v_mul_hi_u32 v17, v12, v3 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v6, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v17 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v17 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v17 ; GFX8-NEXT: v_add_u32_e32 v17, vcc, 1, v13 ; GFX8-NEXT: v_addc_u32_e32 v18, vcc, 0, v14, vcc ; GFX8-NEXT: v_subrev_u32_e32 v19, vcc, s12, v10 -; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2 -; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v4, vcc -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3 -; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3 -; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v1 -; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v12, 0 -; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v2, vcc +; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3 +; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4 +; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v2 +; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0 +; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v3, vcc ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v13, v17, vcc -; GFX8-NEXT: v_mov_b32_e32 v1, v4 -; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v15, v[1:2] +; GFX8-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc +; GFX8-NEXT: v_mov_b32_e32 v2, v5 +; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3] ; GFX8-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s3, v12, v[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1] -; GFX8-NEXT: 
v_cndmask_b32_e64 v2, v7, v13, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v19, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, v15, v3 -; GFX8-NEXT: v_mul_lo_u32 v9, v12, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1] -; GFX8-NEXT: v_mul_hi_u32 v8, v12, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v11, v20, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9 -; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v8, v15, v4 -; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7 +; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1] +; GFX8-NEXT: v_mul_lo_u32 v7, v15, v4 +; GFX8-NEXT: v_mul_lo_u32 v8, v12, v5 ; GFX8-NEXT: v_mul_hi_u32 v9, v12, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v8, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v9 -; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v9 -; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9 ; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v9, v15, v5 +; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4 ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7 +; GFX8-NEXT: v_mul_hi_u32 v8, v12, v5 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v9, v4 +; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 +; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v9, v8 +; GFX8-NEXT: v_mul_hi_u32 v5, v15, v5 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v7 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v12, v3 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v15, v4, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, s11, v3 -; GFX8-NEXT: v_mul_lo_u32 v8, s10, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, v6, s[0:1] -; GFX8-NEXT: v_mul_hi_u32 v0, s10, v3 -; GFX8-NEXT: v_mul_hi_u32 v3, s11, v3 +; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v7 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v12, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc +; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4 +; GFX8-NEXT: v_mul_lo_u32 v8, s10, v5 +; GFX8-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1] +; GFX8-NEXT: v_mul_hi_u32 v1, s10, v4 +; GFX8-NEXT: v_mul_hi_u32 v4, s11, v4 ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v8, v0 -; GFX8-NEXT: v_mul_hi_u32 v8, s10, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1 +; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v7, s11, v5 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v8, v1 +; GFX8-NEXT: v_mul_hi_u32 v8, s10, v5 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4 ; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v8 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 -; GFX8-NEXT: v_add_u32_e32 v9, vcc, v3, v0 -; GFX8-NEXT: v_mul_hi_u32 v8, s11, v4 -; GFX8-NEXT: 
v_mad_u64_u32 v[3:4], s[0:1], s14, v9, 0 -; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v8, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, v4 -; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s14, v10, v[0:1] -; GFX8-NEXT: v_mov_b32_e32 v4, s11 -; GFX8-NEXT: v_mov_b32_e32 v0, s15 -; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s15, v9, v[7:8] -; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v3 -; GFX8-NEXT: v_subb_u32_e64 v11, s[0:1], v4, v7, vcc -; GFX8-NEXT: v_sub_u32_e64 v3, s[0:1], s11, v7 -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v11 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v4, v1 +; GFX8-NEXT: v_mul_hi_u32 v8, s11, v5 +; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s14, v11, 0 +; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1 +; GFX8-NEXT: v_add_u32_e32 v12, vcc, v8, v1 +; GFX8-NEXT: v_mov_b32_e32 v1, v5 +; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s14, v12, v[1:2] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v5, s15 +; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s15, v11, v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v1, s11 +; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v4 +; GFX8-NEXT: v_subb_u32_e64 v1, s[0:1], v1, v0, vcc +; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s11, v0 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v1 ; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v8 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v11 -; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v7, s[0:1] -; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s14, v8 -; GFX8-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v3, vcc -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v12 +; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v1 +; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1] +; GFX8-NEXT: v_subrev_u32_e32 v9, vcc, s14, v8 +; GFX8-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v0, vcc +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v7 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v9 ; GFX8-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v12 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1] -; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v9 -; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v3, v0, vcc -; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v10, s[0:1] -; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v14 +; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v11 +; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc +; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v12, s[0:1] +; GFX8-NEXT: v_add_u32_e32 v5, vcc, 1, v14 ; GFX8-NEXT: v_addc_u32_e32 v16, vcc, 0, v15, vcc ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v7 +; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v9 ; GFX8-NEXT: v_subbrev_u32_e64 v0, s[0:1], 0, v0, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v4, v10, v14, s[0:1] -; GFX8-NEXT: v_mov_b32_e32 v10, s5 -; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v13, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc -; GFX8-NEXT: v_mov_b32_e32 v9, s4 
-; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v8, v11, v0, s[0:1] -; GFX8-NEXT: flat_store_dwordx4 v[9:10], v[1:4] +; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[2:5] ; GFX8-NEXT: v_mov_b32_e32 v0, s6 ; GFX8-NEXT: v_mov_b32_e32 v1, s7 -; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[5:8] +; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[6:9] ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: udivrem_v2i64: @@ -1355,11 +1355,11 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_add_u32_e32 v3, v4, v3 ; GFX9-NEXT: v_add3_u32 v8, v3, v2, v5 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s4, v8, v[1:2] -; GFX9-NEXT: v_mov_b32_e32 v4, s17 ; GFX9-NEXT: v_mov_b32_e32 v5, s5 ; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s5, v7, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v3, s17 ; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v4, v2, vcc +; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v3, v2, vcc ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v0 ; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v1 @@ -1387,7 +1387,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v15, v4 ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v11 ; GFX9-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1] -; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc +; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v5, vcc ; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1] @@ -1396,128 +1396,128 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1] ; GFX9-NEXT: v_mul_lo_u32 v4, v15, v2 ; GFX9-NEXT: v_mul_lo_u32 v17, v12, v3 -; GFX9-NEXT: v_mul_hi_u32 v6, v12, v2 +; GFX9-NEXT: v_mul_hi_u32 v5, v12, v2 ; GFX9-NEXT: v_mul_hi_u32 v2, v15, v2 ; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v17 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v6, v15, v3 +; GFX9-NEXT: v_mul_lo_u32 v5, v15, v3 ; GFX9-NEXT: v_add_u32_e32 v4, v17, v4 ; GFX9-NEXT: v_mul_hi_u32 v17, v12, v3 ; GFX9-NEXT: v_mul_hi_u32 v3, v15, v3 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v5, v2 +; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v17 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX9-NEXT: v_add_u32_e32 v6, v6, v17 +; GFX9-NEXT: v_add_u32_e32 v5, v5, v17 ; GFX9-NEXT: v_add_co_u32_e32 v17, vcc, 1, v13 ; GFX9-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v14, vcc -; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10 -; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v5, vcc ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc ; GFX9-NEXT: 
v_add_co_u32_e32 v12, vcc, v12, v2 -; GFX9-NEXT: v_add3_u32 v3, v6, v4, v3 +; GFX9-NEXT: v_add3_u32 v3, v5, v4, v3 ; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0 ; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v3, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc ; GFX9-NEXT: v_mov_b32_e32 v2, v5 -; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3] -; GFX9-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc +; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3] +; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10 +; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v6, vcc +; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s3, v12, v[2:3] +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1] -; GFX9-NEXT: v_mul_lo_u32 v7, v15, v4 -; GFX9-NEXT: v_mul_lo_u32 v8, v12, v5 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc -; GFX9-NEXT: v_mul_hi_u32 v10, v12, v4 -; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v20, vcc -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v10 -; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v10, v15, v5 +; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1] +; GFX9-NEXT: v_mul_lo_u32 v6, v15, v4 +; GFX9-NEXT: v_mul_lo_u32 v7, v12, v5 +; GFX9-NEXT: v_mul_hi_u32 v9, v12, v4 ; GFX9-NEXT: v_mul_hi_u32 v4, v15, v4 -; GFX9-NEXT: v_add_u32_e32 v7, v8, v7 -; GFX9-NEXT: v_mul_hi_u32 v8, v12, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v18, vcc +; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v9 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3] +; GFX9-NEXT: v_mul_lo_u32 v9, v15, v5 +; GFX9-NEXT: v_add_u32_e32 v6, v7, v6 +; GFX9-NEXT: v_mul_hi_u32 v7, v12, v5 ; GFX9-NEXT: v_mul_hi_u32 v5, v15, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7 -; GFX9-NEXT: v_add_u32_e32 v8, v10, v8 +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v9, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v6 +; GFX9-NEXT: v_add_u32_e32 v7, v9, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3] +; GFX9-NEXT: v_add3_u32 v5, v7, v6, v5 +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v12, v4 +; GFX9-NEXT: v_addc_co_u32_e64 v5, s[2:3], v15, v5, s[2:3] +; GFX9-NEXT: v_mul_lo_u32 v6, s19, v4 +; GFX9-NEXT: v_mul_lo_u32 v7, s18, v5 +; GFX9-NEXT: v_mul_hi_u32 v9, s18, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v19, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc +; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX9-NEXT: v_add3_u32 v5, v8, v7, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc -; GFX9-NEXT: v_mul_lo_u32 v7, s19, v4 -; GFX9-NEXT: v_mul_lo_u32 v8, s18, v5 -; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1] -; GFX9-NEXT: v_mul_hi_u32 v1, s18, 
v4 +; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v9 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX9-NEXT: v_mul_lo_u32 v9, s19, v5 ; GFX9-NEXT: v_mul_hi_u32 v4, s19, v4 -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v7, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v7, s19, v5 -; GFX9-NEXT: v_add_u32_e32 v1, v8, v1 -; GFX9-NEXT: v_mul_hi_u32 v8, s18, v5 -; GFX9-NEXT: v_mul_hi_u32 v12, s19, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v1 +; GFX9-NEXT: v_add_u32_e32 v6, v7, v6 +; GFX9-NEXT: v_mul_hi_u32 v7, s18, v5 +; GFX9-NEXT: v_mul_hi_u32 v13, s19, v5 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v6 ; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s6, v11, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v9, s[0:1] -; GFX9-NEXT: v_add_u32_e32 v0, v10, v8 -; GFX9-NEXT: v_add3_u32 v8, v0, v1, v12 -; GFX9-NEXT: v_mov_b32_e32 v0, v5 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s6, v8, v[0:1] -; GFX9-NEXT: v_mov_b32_e32 v9, s19 +; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v8, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v1, v9, v7 +; GFX9-NEXT: v_add3_u32 v12, v1, v12, v13 +; GFX9-NEXT: v_mov_b32_e32 v1, v5 +; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s6, v12, v[1:2] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1] ; GFX9-NEXT: v_mov_b32_e32 v5, s7 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[0:1] -; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s18, v4 -; GFX9-NEXT: v_subb_co_u32_e64 v9, s[0:1], v9, v0, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v9 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[8:9] +; GFX9-NEXT: v_mov_b32_e32 v1, s19 +; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, s18, v4 +; GFX9-NEXT: v_subb_co_u32_e64 v1, s[0:1], v1, v0, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v1 ; GFX9-NEXT: v_sub_u32_e32 v0, s19, v0 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v9 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v8 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v1 ; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1] -; GFX9-NEXT: v_subrev_co_u32_e32 v10, vcc, s6, v1 -; GFX9-NEXT: v_subbrev_co_u32_e64 v12, s[0:1], 0, v0, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v12 +; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1] +; GFX9-NEXT: v_subrev_co_u32_e32 v9, vcc, s6, v8 +; GFX9-NEXT: v_subbrev_co_u32_e64 v10, s[0:1], 0, v0, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v10 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v9 ; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v12 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1] ; GFX9-NEXT: v_add_co_u32_e64 v14, s[0:1], 1, v11 ; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, 
v0, v5, vcc -; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v8, s[0:1] +; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v12, s[0:1] ; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, 1, v14 ; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v15, vcc ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 ; GFX9-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc -; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v10 +; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v9 ; GFX9-NEXT: v_subbrev_co_u32_e64 v0, s[0:1], 0, v0, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 ; GFX9-NEXT: v_mov_b32_e32 v13, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v14, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v8, v1, v8, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1] ; GFX9-NEXT: global_store_dwordx4 v13, v[2:5], s[12:13] ; GFX9-NEXT: global_store_dwordx4 v13, v[6:9], s[14:15] ; GFX9-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll index d053425afbb6d..7cc505171da82 100644 --- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll +++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll @@ -1483,7 +1483,6 @@ define void @flat_atomic_xchg_i64_noret_av(ptr %ptr) #0 { ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1_vol ; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_cbranch_execz .LBB20_2 ; GFX90A-NEXT: .LBB20_4: ; %atomicrmw.private diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll index 815b9f294be8f..df9c97fa23722 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll @@ -161654,177 +161654,175 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:244 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:240 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:180 -; 
GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:176
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:168
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:160
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:152
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:148
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:144
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:136
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:128
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:120
-; GFX11-TRUE16-NEXT: s_clause 0x1a
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:112
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:104
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:96
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:88
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:80
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:72
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:64
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:56
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:48
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:40
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:32
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:24
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:20
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:16
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:112
+; GFX11-TRUE16-NEXT: s_clause 0x18
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:12
 ; GFX11-TRUE16-NEXT: s_clause 0x2
 ; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8
-; GFX11-TRUE16-NEXT: scratch_load_b32 v85, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v84, off, s32
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_lo16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v99, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v98, off, s32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr140_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr139_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr123_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr107_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr75_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr61_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr154_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr92_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr41_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
@@ -161838,135 +161836,135 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_2
 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[15:16]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[11:12]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[7:8]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[5:6]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[23:24]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[19:20]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[27:28]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[13:14]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[7:8]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v13
 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v10
 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v10
 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v7
 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v6
 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v1
 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v85
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v85
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v99
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v99
 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v84
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[13:14]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[9:10]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[3:4]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[1:2]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[84:85]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v98
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[5:6]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[98:99]
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[29:30]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[27:28]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[23:24]
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[21:22]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[19:20]
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v1.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v2.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v43.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v3.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v4.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v62.h, v5.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v56.h, v5.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v47.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v42.h, v6.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v91.h, v7.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v79.h, v7.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v74.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v60.h, v8.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v111.h, v9.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.h, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v89.h, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v138.h, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v108.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v153.h, v13.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v137.h, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.h, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v154.h, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.h, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v152.h, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v16.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v9.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v9.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v76.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v10.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v127.h, v11.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v11.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v12.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v142.h, v13.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v13.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v14.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v143.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v15.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v141.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v16.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v17.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v17.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v18.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v19.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v19.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v20.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v20.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v21.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v22.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v22.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v23.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v24.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v24.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v25.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v26.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v26.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v27.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v28.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v28.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v29.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v30.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v30.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v84.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v84.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v85.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v85.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v98.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v98.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v99.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v99.h
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5
@@ -161982,148 +161980,153 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99
 ; GFX11-TRUE16-NEXT: .LBB90_2: ; %Flow
 ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_4
 ; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v17
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_lshlrev_b32 v31, 16, v18
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_add_f32 v31, 0x40c00000, v31
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v31, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v31
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v31, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
 ; GFX11-TRUE16-NEXT: v_add3_u32 v31, v38, v34, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v32, v37, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_lshlrev_b32 v17, 16, v17
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v18, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_cndmask_b32 v32, v35, v36
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v17, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v17
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v17, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v32
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v32
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v32
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v37, v49, vcc_lo
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v135, v37, v49, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v19
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v31, v33, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v135.h
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v148.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v149, v33, v35, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v33, v35, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v19
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v31
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v17, v34, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v19, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v149.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v147.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v19, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v33, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v34
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v17, v33, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v20, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v18, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v21
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v34
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v34
+-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v34
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v22, 0x40c00000, v22
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_lshlrev_b32 v22, 16, v22
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_lshlrev_b32 v22, 16, v22
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v22
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v151, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
-; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v151, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
+-; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
+-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
+-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v149, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v21
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v17, v36, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v151.h
+-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v151.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v149.h
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v24 :: v_dual_lshlrev_b32 v21, 16, v23
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v24 :: v_dual_lshlrev_b32 v21, 16, v23
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v24, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v23
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v36
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v19, v35, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v20, 0x40c00000, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v20, 0x40c00000, v20
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v148.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v36
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v36
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v19, v23, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v36
+-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v36
+-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v19, v23, vcc_lo
+-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v151, v19, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
+-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v17, v24, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
+-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v151.h
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v150.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v161.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
+-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25
+-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v150.h
+-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v161.h
+-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v23 :: v_dual_lshlrev_b32 v21, 16, v25
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162136,10 +162139,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v33
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v161, v19, v23 :: v_dual_lshlrev_b32 v22, 16, v28
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
@@ -162152,10 +162153,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v27
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v160.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v163.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v161.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v150.h
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162168,10 +162169,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v35
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v49
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v49
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v35
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
@@ -162184,10 +162185,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v165.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v38
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v163.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v38
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v38
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162200,14 +162201,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v85
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v51
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v51
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v99
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v51
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v51
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v85
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v99
 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v53, v17, v24
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
@@ -162216,14 +162217,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v84
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v167.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v98
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v160.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v165.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v98
 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_cndmask_b32 v52, v19, v24
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
@@ -162232,10 +162233,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v162.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v53
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v53
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v53
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v53
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v37
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v2
@@ -162248,11 +162249,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v177.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v37
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v167.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v22, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v1
@@ -162265,11 +162265,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v20, 0x40c00000, v20
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v55
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v55
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v55
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v55
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v20, 16, 1
 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v21, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v4
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
@@ -162282,10 +162282,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v179.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v162.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v177.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v180, v17, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v19, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v3
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@@ -162300,9 +162301,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v164.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v65
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v183, v2, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v65
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v65
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v2, v19, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v17, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v3
@@ -162312,13 +162313,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v17, 0x7fff
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v183.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v179.h
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v166.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[48:49]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v43, v1, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v164.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v67
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v1, v18, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
@@ -162329,13 +162330,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v43.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[50:51]
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[50:51]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v67
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v67
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v47, v2, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[48:49]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v67
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v42, v2, v17, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v8
@@ -162349,23 +162350,23 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v3
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v47.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v62, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v50
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v56, v2, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v5, v6, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v48
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v1, v17, vcc_lo
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v2, v4, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v7
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v56.h
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v74, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v60, v3, v5, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v10
@@ -162379,8 +162380,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v74.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v176.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v60.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v166.h
 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v1, v7, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
@@ -162388,21 +162389,20 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v91, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v79, v4, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v91.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v89, v1, v4, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v79.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v76, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v99, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v2, v7, vcc_lo
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
@@ -162410,10 +162410,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v178.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.l, v89.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[82:83]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v98, v2, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.l, v76.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v176.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[82:83]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v2, v6, vcc_lo
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -162421,29 +162421,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55]
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53]
 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v111, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v106, v2, v3, vcc_lo
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v14
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.l, v111.h
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v108, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.l, v106.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v104, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[98:99]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[96:97]
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v115, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v3, v7, vcc_lo
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.l, v108.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v99
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.l, v104.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v112, v2, v3, vcc_lo
 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v4 :: v_dual_add_f32 v3, 0x40c00000, v5
 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
@@ -162452,8 +162452,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v180.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v138, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v178.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v127, v4, v5, vcc_lo
 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
@@ -162461,19 +162461,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v131, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v129, v4, v5, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16
 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v137, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v125, v6, v7, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v114.l, v138.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v130, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v40.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v128, v3, v4, vcc_lo
 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16
 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
@@ -162481,11 +162481,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v62.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v137.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v125.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.l, v127.h
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v153, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v142, v4, v6, vcc_lo
 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
@@ -162494,83 +162494,82 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v153.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v152, v2, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v142.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v141, v2, v9, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[114:115]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[68:69]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v154, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v42.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[112:113]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v143, v7, v11, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[33:34]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[130:131]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[66:67]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v145, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[66:67]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[68:69]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[64:65]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v134, v4, v8, vcc_lo
 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v152.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[64:65]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v131
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v144, v2, v3, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v154.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v145
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v145
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v131
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v130
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[144:145]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v141.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[33:34]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v129
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v129
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v133, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v143.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v134
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v134
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v128
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v113
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[133:134]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[128:129]
 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[35:36]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v144
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v115
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v115
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v114
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v99
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v98
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v83
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v83
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v82
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v68
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v66
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v65
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v64
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v54
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v52
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v50
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v48
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v133
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v113
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v112
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v97
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v97
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v96
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v83
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v83
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v66
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v64
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v54
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v52
 ; GFX11-TRUE16-NEXT: .LBB90_4: ; %end
 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v180.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v143.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v178.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v152.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v146.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v145.l
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
 ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v65.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v141.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v139.l
 ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v2.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v179.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v142.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v177.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v140.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v144.l
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v1
 ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v67.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v2.l, v2.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v43.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v140.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v127.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v40.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v138.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v136.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v131.l
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.l, v3.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v183.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v139.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v179.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v137.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v69.h
 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v121.l
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v5, v3
@@ -162578,89 +162577,89 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v4.l, v4.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.l, v6.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v62.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v125.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v132.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v56.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v126.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v130.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v83.h
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v105.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v107.l
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v6.l, v6.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v47.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v42.h
 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v123.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v98.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v117.l
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v5, v6
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v97.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v7.l, v7.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v91.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v110.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v90.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v114.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v79.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v111.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v91.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v7
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v114.l
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v8.l, v8.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v74.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v107.l
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v60.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v109.l
 ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h
 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v75.l
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v5, v8
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v130.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v128.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v9.l, v9.h
 ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v111.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v106.h
 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v95.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v131.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v101.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v129.h
 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v5, v9
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v59.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v61.l
 ; GFX11-TRUE16-NEXT:
v_or_b16 v5.l, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v89.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v76.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v93.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v144.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v133.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v5, v10 -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v145.h +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v134.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v138.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v79.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v44.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v127.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v89.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v45.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v31.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v5, v11 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v108.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v77.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v104.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v78.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v120.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v5, v12 ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v153.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v72.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v142.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v34.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v5, v13 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v109.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v105.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v137.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v61.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v125.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v63.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v35.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v102.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v5, v14 @@ -162668,71 +162667,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v154.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v57.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v94.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v143.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v90.l ; GFX11-TRUE16-NEXT: v_and_b16 
v25.l, 0xff, v37.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v5, v15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v16.l, v16.h ; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v152.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v46.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v141.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v47.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v38.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v78.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v74.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v5, v16 ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v48.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v17.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v18.l, v18.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v148.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v136.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v135.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v49.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v5, v17 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v63.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v59.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.l, v18.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.l, v19.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v126.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v122.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v50.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v84.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v5, v18 ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v51.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v19.l, v19.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v150.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v122.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v56.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v148.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v110.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v44.l ; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v52.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v5, v19 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v80.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v149.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v120.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v147.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v108.l ; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v53.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v41.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v183.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v5, v20 ; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v54.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.l, v21.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v22.l, v22.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v160.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v106.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v150.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, 
v94.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l ; GFX11-TRUE16-NEXT: v_and_b16 v34.l, 0xff, v55.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v5, v21 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v181.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v180.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v22.l, v22.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v23.l, v23.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v151.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v104.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v149.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v92.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:16 @@ -162740,71 +162739,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v23.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.l, v24.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v162.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v92.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v160.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v88.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v5, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v24.l, v24.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v25.l, v25.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v161.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v88.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v151.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v77.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v5, v24 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v25.l, v25.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v26.l, v26.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v164.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v76.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v162.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v72.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v5, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v26.l, v26.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v27.l, v27.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v163.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v161.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v62.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v5, v26 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v27.l, v27.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v28.l, v28.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v166.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v60.l +; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v164.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v57.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v5, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v28.l, v28.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v29.l, v29.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v165.h -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v163.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v46.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v5, v28 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.l, v29.h ; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v30.l, v30.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v176.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v45.l +; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v166.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v43.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v5, v29 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v30.l, v30.h ; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v31.l, v31.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v167.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v42.l +; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v165.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v41.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v5, v30 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v31.l, v31.h ; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v32.l, v32.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v178.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v40.l +; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v176.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v182.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v5, v31 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v32.l, v32.h ; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v33.l, v33.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v177.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v182.l +; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v167.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v181.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v5, v32 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v33.l, v33.h @@ -162820,66 +162819,64 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[26:29], off offset:96 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[30:33], off offset:112 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:12 -; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:68 -; 
GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:112 -; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:120 -; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:136 -; GFX11-TRUE16-NEXT: s_clause 0x1a -; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:144 -; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:240 -; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:244 +; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 
offset:52 +; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:112 +; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:120 +; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:136 +; GFX11-TRUE16-NEXT: s_clause 0x18 +; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:184 +; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:236 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -186724,55 +186721,55 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: 
; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 @@ -186798,24 +186795,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 
24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -186827,24 +186824,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] @@ -186906,24 +186903,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -186935,24 +186932,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 ; GFX11-TRUE16-NEXT: .LBB94_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 @@ -186990,7 +186987,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h @@ -186998,15 +186995,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h @@ -187014,15 +187011,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 
v34.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h @@ -187030,15 +187027,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h @@ -187046,15 +187043,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h @@ -187062,15 +187059,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l ; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h @@ -187078,8 +187075,8 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 @@ -187094,15 +187091,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h @@ -187110,15 +187107,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h @@ -187126,15 +187123,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: 
v_lshlrev_b16 v33.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h @@ -187142,15 +187139,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h @@ -187158,15 +187155,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h @@ -187174,15 +187171,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h @@ -209426,55 +209423,55 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 @@ -209500,24 +209497,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -209529,24 +209526,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] @@ -209608,24 +209605,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -209637,24 +209634,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 ; GFX11-TRUE16-NEXT: .LBB98_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 @@ -209692,7 +209689,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h @@ -209700,15 +209697,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h @@ -209716,15 +209713,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, 
v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h @@ -209732,15 +209729,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h @@ -209748,15 +209745,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h @@ -209764,15 +209761,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 
v14.h, v14.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h @@ -209780,8 +209777,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 @@ -209796,15 +209793,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h @@ -209812,15 +209809,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h @@ -209828,15 +209825,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 ; 
GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h @@ -209844,15 +209841,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h @@ -209860,15 +209857,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h @@ -209876,15 +209873,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll index 
e33493c6a760e..d3fbba3cf4dd7 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll @@ -85072,13 +85072,13 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 @@ -85086,20 +85086,20 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 @@ -85119,18 +85119,18 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v14 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v11 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v9 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v8 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v7 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v6 ; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v5 @@ -85159,19 +85159,19 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v8.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v9.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v10.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v11.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v13.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v87.h, v14.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v15.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v16.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v16.h @@ -85345,29 +85345,29 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v68.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v14 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v83.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v82, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v82.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v80, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 @@ -85384,82 +85384,81 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; 
GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v23 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v80.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 ; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v27, v2, v3 :: v_dual_add_f32 v2, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v82.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v4, v5, vcc_lo -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v6, v8, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 -; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v6, v7, vcc_lo -; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v6, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v96.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v87.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v33 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v33 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v33 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 -; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v23 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: 
v_dual_cndmask_b32 v112, v4, v6 :: v_dual_add_f32 v1, 0x40c00000, v5 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v15 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v112.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v27 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v7, v11, vcc_lo +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v7, v11, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v103.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v2, v3, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v114.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v113.h ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v38 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v38 @@ -85524,7 +85523,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v6 @@ -85541,12 +85540,12 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v8.l, v8.h ; GFX11-TRUE16-NEXT: 
v_or_b16 v8.h, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v83.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v8 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v81.l ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h @@ -85560,14 +85559,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v97.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v87.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v96.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v69.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v10 ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v82.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v80.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v11 @@ -85581,14 +85580,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v96.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v81.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v87.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v83.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v114.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v113.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v71.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v14 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll index 67c9bfe9d9f3b..ecc715cfb52f3 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll @@ -11261,8 +11261,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; VI-NEXT: s_lshr_b32 s5, s17, 24 ; VI-NEXT: s_lshr_b32 s8, s17, 16 -; VI-NEXT: s_lshr_b32 s9, s17, 8 -; VI-NEXT: s_lshr_b32 s10, s16, 16 +; VI-NEXT: s_lshr_b32 s10, s17, 8 +; VI-NEXT: s_lshr_b32 s9, s16, 16 ; VI-NEXT: s_lshr_b32 s11, s16, 8 ; VI-NEXT: s_cbranch_execnz .LBB85_4 ; VI-NEXT: .LBB85_2: ; %cmp.true @@ -11277,9 +11277,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: s_branch .LBB85_5 ; VI-NEXT: .LBB85_3: ; VI-NEXT: ; implicit-def: $sgpr11 -; 
VI-NEXT: ; implicit-def: $sgpr10 -; VI-NEXT: ; implicit-def: $sgpr4 ; VI-NEXT: ; implicit-def: $sgpr9 +; VI-NEXT: ; implicit-def: $sgpr4 +; VI-NEXT: ; implicit-def: $sgpr10 ; VI-NEXT: ; implicit-def: $sgpr8 ; VI-NEXT: ; implicit-def: $sgpr5 ; VI-NEXT: s_branch .LBB85_2 @@ -11287,8 +11287,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: v_mov_b32_e32 v8, s16 ; VI-NEXT: v_mov_b32_e32 v9, s17 ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: v_mov_b32_e32 v2, s10 -; VI-NEXT: v_mov_b32_e32 v5, s9 +; VI-NEXT: v_mov_b32_e32 v2, s9 +; VI-NEXT: v_mov_b32_e32 v5, s10 ; VI-NEXT: v_mov_b32_e32 v6, s8 ; VI-NEXT: v_mov_b32_e32 v7, s5 ; VI-NEXT: v_mov_b32_e32 v3, s4 @@ -11306,8 +11306,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB85_4 ; GFX9-NEXT: .LBB85_2: ; %cmp.true @@ -11322,9 +11322,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: s_branch .LBB85_5 ; GFX9-NEXT: .LBB85_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB85_2 @@ -11332,8 +11332,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -11352,8 +11352,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB85_4 @@ -11370,16 +11370,16 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX11-NEXT: s_branch .LBB85_5 ; GFX11-NEXT: .LBB85_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB85_2 ; GFX11-NEXT: .LBB85_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB85_5: ; %end @@ -13517,8 
+13517,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB97_4 ; GFX9-NEXT: .LBB97_2: ; %cmp.true @@ -13533,9 +13533,9 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: s_branch .LBB97_5 ; GFX9-NEXT: .LBB97_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB97_2 @@ -13543,8 +13543,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -13563,8 +13563,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB97_4 @@ -13581,16 +13581,16 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX11-NEXT: s_branch .LBB97_5 ; GFX11-NEXT: .LBB97_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB97_2 ; GFX11-NEXT: .LBB97_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB97_5: ; %end @@ -15345,8 +15345,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB105_4 ; GFX9-NEXT: .LBB105_2: ; %cmp.true @@ -15362,9 +15362,9 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: s_branch .LBB105_5 ; GFX9-NEXT: .LBB105_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; 
GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB105_2 @@ -15372,8 +15372,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -15392,8 +15392,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB105_4 @@ -15410,16 +15410,16 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX11-NEXT: s_branch .LBB105_5 ; GFX11-NEXT: .LBB105_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB105_2 ; GFX11-NEXT: .LBB105_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB105_5: ; %end @@ -16493,8 +16493,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; VI-NEXT: s_lshr_b32 s8, s17, 24 ; VI-NEXT: s_lshr_b32 s5, s17, 16 -; VI-NEXT: s_lshr_b32 s9, s17, 8 -; VI-NEXT: s_lshr_b32 s10, s16, 16 +; VI-NEXT: s_lshr_b32 s10, s17, 8 +; VI-NEXT: s_lshr_b32 s9, s16, 16 ; VI-NEXT: s_lshr_b32 s11, s16, 8 ; VI-NEXT: s_cbranch_execnz .LBB109_4 ; VI-NEXT: .LBB109_2: ; %cmp.true @@ -16546,16 +16546,16 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB109_3: ; VI-NEXT: ; implicit-def: $sgpr11 -; VI-NEXT: ; implicit-def: $sgpr10 -; VI-NEXT: ; implicit-def: $sgpr4 ; VI-NEXT: ; implicit-def: $sgpr9 +; VI-NEXT: ; implicit-def: $sgpr4 +; VI-NEXT: ; implicit-def: $sgpr10 ; VI-NEXT: ; implicit-def: $sgpr5 ; VI-NEXT: ; implicit-def: $sgpr8 ; VI-NEXT: s_branch .LBB109_2 ; VI-NEXT: .LBB109_4: ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: v_mov_b32_e32 v2, s10 -; VI-NEXT: v_mov_b32_e32 v5, s9 +; VI-NEXT: v_mov_b32_e32 v2, s9 +; VI-NEXT: v_mov_b32_e32 v5, s10 ; VI-NEXT: v_mov_b32_e32 v7, s8 ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_mov_b32_e32 v0, s16 diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll index 97df2a0dbd44b..258bc2959f391 100644 --- 
a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll +++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll @@ -5548,7 +5548,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX7LESS: ; %bb.0: ; %entry ; GFX7LESS-NEXT: s_mov_b64 s[6:7], exec ; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX7LESS-NEXT: s_mov_b32 s4, 0 ; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0 ; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v4, s7, v0 ; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 @@ -5557,33 +5556,32 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX7LESS-NEXT: s_cbranch_execz .LBB9_4 ; GFX7LESS-NEXT: ; %bb.1: ; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0) -; GFX7LESS-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX7LESS-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0 -; GFX7LESS-NEXT: v_mov_b32_e32 v5, s4 ; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000 -; GFX7LESS-NEXT: s_mul_i32 s12, s5, 5 +; GFX7LESS-NEXT: s_mul_i32 s12, s6, 5 ; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0) -; GFX7LESS-NEXT: v_mov_b32_e32 v0, s14 -; GFX7LESS-NEXT: v_mov_b32_e32 v1, s15 +; GFX7LESS-NEXT: v_mov_b32_e32 v0, s4 +; GFX7LESS-NEXT: v_mov_b32_e32 v1, s5 ; GFX7LESS-NEXT: s_mov_b32 s6, -1 ; GFX7LESS-NEXT: s_mov_b32 s4, s2 ; GFX7LESS-NEXT: s_mov_b32 s5, s3 ; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX7LESS-NEXT: v_mov_b32_e32 v9, v1 -; GFX7LESS-NEXT: v_mov_b32_e32 v8, v0 -; GFX7LESS-NEXT: v_subrev_i32_e32 v6, vcc, s12, v8 -; GFX7LESS-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc +; GFX7LESS-NEXT: v_mov_b32_e32 v8, v1 +; GFX7LESS-NEXT: v_mov_b32_e32 v7, v0 +; GFX7LESS-NEXT: v_subrev_i32_e32 v5, vcc, s12, v7 +; GFX7LESS-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc ; GFX7LESS-NEXT: s_waitcnt expcnt(0) -; GFX7LESS-NEXT: v_mov_b32_e32 v0, v6 -; GFX7LESS-NEXT: v_mov_b32_e32 v1, v7 -; GFX7LESS-NEXT: v_mov_b32_e32 v2, v8 -; GFX7LESS-NEXT: v_mov_b32_e32 v3, v9 +; GFX7LESS-NEXT: v_mov_b32_e32 v0, v5 +; GFX7LESS-NEXT: v_mov_b32_e32 v1, v6 +; GFX7LESS-NEXT: v_mov_b32_e32 v2, v7 +; GFX7LESS-NEXT: v_mov_b32_e32 v3, v8 ; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX7LESS-NEXT: s_waitcnt vmcnt(0) ; GFX7LESS-NEXT: buffer_wbinvl1 -; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX7LESS-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2 @@ -5611,39 +5609,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX8-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0 -; GFX8-NEXT: s_mov_b32 s4, 0 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 ; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX8-NEXT: s_and_saveexec_b64 s[8:9], vcc ; GFX8-NEXT: s_cbranch_execz .LBB9_4 ; GFX8-NEXT: ; %bb.1: ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX8-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX8-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX8-NEXT: s_mov_b64 s[10:11], 0 -; GFX8-NEXT: v_mov_b32_e32 v5, s4 -; GFX8-NEXT: s_mul_i32 s12, s5, 5 -; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v0, s14 -; GFX8-NEXT: v_mov_b32_e32 v1, s15 ; GFX8-NEXT: s_mov_b32 s7, 0xf000 +; 
GFX8-NEXT: s_mul_i32 s12, s6, 5 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 ; GFX8-NEXT: s_mov_b32 s6, -1 ; GFX8-NEXT: s_mov_b32 s4, s2 ; GFX8-NEXT: s_mov_b32 s5, s3 ; GFX8-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX8-NEXT: v_mov_b32_e32 v9, v1 -; GFX8-NEXT: v_mov_b32_e32 v8, v0 -; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s12, v8 -; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc -; GFX8-NEXT: v_mov_b32_e32 v0, v6 -; GFX8-NEXT: v_mov_b32_e32 v1, v7 -; GFX8-NEXT: v_mov_b32_e32 v2, v8 -; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: v_mov_b32_e32 v8, v1 +; GFX8-NEXT: v_mov_b32_e32 v7, v0 +; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s12, v7 +; GFX8-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc +; GFX8-NEXT: v_mov_b32_e32 v0, v5 +; GFX8-NEXT: v_mov_b32_e32 v1, v6 +; GFX8-NEXT: v_mov_b32_e32 v2, v7 +; GFX8-NEXT: v_mov_b32_e32 v3, v8 ; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1_vol -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX8-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX8-NEXT: s_cbranch_execnz .LBB9_2 @@ -5670,39 +5666,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX9-NEXT: s_mov_b64 s[6:7], exec ; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX9-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0 -; GFX9-NEXT: s_mov_b32 s4, 0 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX9-NEXT: s_and_saveexec_b64 s[8:9], vcc ; GFX9-NEXT: s_cbranch_execz .LBB9_4 ; GFX9-NEXT: ; %bb.1: ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX9-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX9-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX9-NEXT: s_mov_b64 s[10:11], 0 -; GFX9-NEXT: v_mov_b32_e32 v5, s4 -; GFX9-NEXT: s_mul_i32 s12, s5, 5 -; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s14 -; GFX9-NEXT: v_mov_b32_e32 v1, s15 ; GFX9-NEXT: s_mov_b32 s7, 0xf000 +; GFX9-NEXT: s_mul_i32 s12, s6, 5 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_mov_b32 s4, s2 ; GFX9-NEXT: s_mov_b32 s5, s3 ; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX9-NEXT: v_mov_b32_e32 v9, v1 -; GFX9-NEXT: v_mov_b32_e32 v8, v0 -; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s12, v8 -; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v9, v5, vcc -; GFX9-NEXT: v_mov_b32_e32 v0, v6 -; GFX9-NEXT: v_mov_b32_e32 v1, v7 -; GFX9-NEXT: v_mov_b32_e32 v2, v8 -; GFX9-NEXT: v_mov_b32_e32 v3, v9 +; GFX9-NEXT: v_mov_b32_e32 v8, v1 +; GFX9-NEXT: v_mov_b32_e32 v7, v0 +; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s12, v7 +; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v8, vcc +; GFX9-NEXT: v_mov_b32_e32 v0, v5 +; GFX9-NEXT: v_mov_b32_e32 v1, v6 +; GFX9-NEXT: v_mov_b32_e32 v2, v7 +; GFX9-NEXT: v_mov_b32_e32 v3, v8 ; GFX9-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol -; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX9-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX9-NEXT: s_cbranch_execnz .LBB9_2 diff --git 
a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
index 323bffe9947c8..ca50835018824 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
@@ -1836,22 +1836,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX12-NEXT: s_wait_kmcnt 0x0
 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1876,23 +1876,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
 ; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1907,25 +1907,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX10-NEXT: v_mov_b32_e32 v4, v0
 ; GFX10-NEXT: v_mov_b32_e32 v0, s20
 ; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: v_mov_b32_e32 v6, s20
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
 ; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -1947,25 +1947,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX908-NEXT: v_mov_b32_e32 v4, v0
 ; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
 ; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1977,25 +1977,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT: v_mov_b32_e32 v4, v0
 ; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
 ; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2007,25 +2007,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v4, v0
 ; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s20
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
 ; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2037,27 +2037,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v4, v0
 ; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
 ; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
 ; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2829,22 +2829,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX12-NEXT: s_wait_kmcnt 0x0
 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2869,23 +2869,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
 ; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
 ; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2900,25 +2900,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX10-NEXT: v_mov_b32_e32 v4, v0
 ; GFX10-NEXT: v_mov_b32_e32 v0, s20
 ; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: v_mov_b32_e32 v6, s20
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
 ; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2931,22 +2931,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT: v_mov_b32_e32 v4, v0
 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX90A-NEXT: v_mov_b32_e32 v5, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v6, s20
+; GFX90A-NEXT: v_mov_b32_e32 v10, s20
 ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX90A-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX90A-NEXT: s_waitcnt vmcnt(0)
 ; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1]
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2958,25 +2958,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX908-NEXT: v_mov_b32_e32 v4, v0
 ; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
 ; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
 ; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2988,25 +2988,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT: v_mov_b32_e32 v4, v0
 ; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
 ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
 ; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3018,25 +3018,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v4, v0
 ; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s20
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
 ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
 ; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3048,27 +3048,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
 ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v4, v0
 ; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
 ; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
 ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
 ; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB11_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3090,22 +3090,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX12-NEXT: s_wait_kmcnt 0x0
 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -3130,23 +3130,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
 ; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
 ; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -3161,25 +3161,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX10-NEXT: v_mov_b32_e32 v4, v0
 ; GFX10-NEXT: v_mov_b32_e32 v0, s20
 ; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: v_mov_b32_e32 v6, s20
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
 ; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB12_1
@@ -3201,25 +3201,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX908-NEXT: v_mov_b32_e32 v4, v0
 ; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
 ; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
 ; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB12_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3231,25 +3231,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT: v_mov_b32_e32 v4, v0
 ; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
 ; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
 ; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB12_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3261,25 +3261,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v4, v0
 ; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
 ; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s20
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
 ; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
 ; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB12_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3291,27 +3291,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
 ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v4, v0
 ; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
 ; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
 ; GFX6-NEXT: .LBB12_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
 ; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB12_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
index 316ba8527b595..1a4140cd0912b 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
@@ -1174,28 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX12-NEXT: s_wait_samplecnt 0x0
 ; GFX12-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1218,29 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
 ; GFX11: ; %bb.0:
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
 ; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
 ; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1272,29 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
 ; GFX908: ; %bb.0:
 ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
 ; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB5_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1304,29 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
 ; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB5_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1964,28 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX12-NEXT: s_wait_samplecnt 0x0
 ; GFX12-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2008,29 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX11: ; %bb.0:
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
 ; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2042,30 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX10: ; %bb.0:
 ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: v_mov_b32_e32 v6, s20
+; GFX10-NEXT: v_mov_b32_e32 v2, s20
+; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v8, s20
 ; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX10-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX10-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX10-NEXT: v_mov_b32_e32 v0, v2
+; GFX10-NEXT: v_mov_b32_e32 v1, v3
+; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v3, v5
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -2076,26 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX90A: ; %bb.0:
 ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, v0
-; GFX90A-NEXT: v_mov_b32_e32 v0, s20
-; GFX90A-NEXT: v_mov_b32_e32 v3, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX90A-NEXT: v_mov_b32_e32 v2, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX90A-NEXT: v_mov_b32_e32 v6, s20
+; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX90A-NEXT: v_mov_b32_e32 v8, s20
 ; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX90A-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX90A-NEXT: s_waitcnt vmcnt(0)
 ; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2105,29 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX908: ; %bb.0:
 ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
 ; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2137,29 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
 ; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2169,29 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, v0
-; GFX7-NEXT: v_mov_b32_e32 v0, s20
-; GFX7-NEXT: v_mov_b32_e32 v3, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s20
+; GFX7-NEXT: v_mov_b32_e32 v8, s20
 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v2
+; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_mov_b32_e32 v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
 ; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX7-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2201,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
 ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v3, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: v_mov_b32_e32 v2, s20
+; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800
-; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v8, s6
 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
 ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX6-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX6-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX6-NEXT: v_mov_b32_e32 v0, v2
+; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_mov_b32_e32 v2, v4
+; GFX6-NEXT: v_mov_b32_e32 v3, v5
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
 ; GFX6-NEXT: s_waitcnt vmcnt(0)
 ; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v4, v0
 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX6-NEXT: s_cbranch_execnz .LBB8_1
 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2245,28 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX12-NEXT: s_wait_samplecnt 0x0
 ; GFX12-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX12-NEXT: s_wait_alu 0xfffe
 ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2289,29 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
 ; GFX11: ; %bb.0:
 ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
 ; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
 ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2343,29 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
 ; GFX908: ; %bb.0:
 ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
 ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX908-NEXT: s_waitcnt vmcnt(0)
 ; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX908-NEXT: s_cbranch_execnz .LBB9_1
 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2375,29 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
 ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
 ; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
 ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
 ; GFX8-NEXT: s_waitcnt vmcnt(0)
 ; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
 ; GFX8-NEXT: s_cbranch_execnz .LBB9_1
 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
index ed67e0227278b..671f42c6efd27 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
@@ -1174,28 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
 ; GFX12-NEXT: s_wait_samplecnt 0x0
 ; GFX12-NEXT: s_wait_bvhcnt 0x0
 ; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
 ; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
 ; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
 ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
 ; GFX12-NEXT: s_wait_storecnt 0x0
 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
 ; GFX12-NEXT: s_wait_loadcnt 0x0
 ; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX12-NEXT: s_or_b32 s4,
vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1218,29 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: v_mov_b32_e32 v6, s16 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1272,29 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s20 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: 
v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB5_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1304,29 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s20 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB5_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1964,28 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: 
buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2008,29 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: v_mov_b32_e32 v6, s16 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; 
GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2042,30 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v2, v0 -; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: v_mov_b32_e32 v3, v1 -; GFX10-NEXT: v_mov_b32_e32 v6, s20 +; GFX10-NEXT: v_mov_b32_e32 v2, s20 +; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX10-NEXT: v_mov_b32_e32 v8, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX10-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX10-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX10-NEXT: v_mov_b32_e32 v0, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, v3 +; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v3, v5 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX10-NEXT: v_mov_b32_e32 v5, v1 +; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -2076,26 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_mov_b32_e32 v2, v0 -; GFX90A-NEXT: v_mov_b32_e32 v0, s20 -; GFX90A-NEXT: v_mov_b32_e32 v3, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX90A-NEXT: v_mov_b32_e32 v2, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX90A-NEXT: v_mov_b32_e32 v6, s20 +; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX90A-NEXT: v_mov_b32_e32 v8, s20 ; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX90A-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 
glc +; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB8_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2105,29 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s20 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2137,29 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s20 +; GFX8-NEXT: v_mov_b32_e32 v8, 
s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2169,29 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v2, v0 -; GFX7-NEXT: v_mov_b32_e32 v0, s20 -; GFX7-NEXT: v_mov_b32_e32 v3, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX7-NEXT: v_mov_b32_e32 v2, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s20 +; GFX7-NEXT: v_mov_b32_e32 v8, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX7-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX7-NEXT: v_mov_b32_e32 v0, v2 +; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_mov_b32_e32 v2, v4 +; GFX7-NEXT: v_mov_b32_e32 v3, v5 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v5, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2201,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v2, v0 -; GFX6-NEXT: v_mov_b32_e32 v0, 
s20 -; GFX6-NEXT: v_mov_b32_e32 v3, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 -; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v8, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX6-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX6-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX6-NEXT: v_mov_b32_e32 v0, v2 +; GFX6-NEXT: v_mov_b32_e32 v1, v3 +; GFX6-NEXT: v_mov_b32_e32 v2, v4 +; GFX6-NEXT: v_mov_b32_e32 v3, v5 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2245,28 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null 
offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2289,29 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: v_mov_b32_e32 v6, s16 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen offset:2048 glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2343,29 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s20 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop 
Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB9_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2375,29 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s20 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen offset:2048 glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB9_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll index 5134159e3e406..0fc54aeaef77b 100644 --- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll +++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll @@ -619,43 +619,43 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; 
GISEL-NEXT: s_mov_b64 s[8:9], 0 ; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7 ; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15 -; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f -; GISEL-NEXT: v_mov_b32_e32 v11, 0 +; GISEL-NEXT: v_mov_b32_e32 v16, 0x7f +; GISEL-NEXT: v_mov_b32_e32 v17, 0 ; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4 ; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5 ; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6 ; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7 ; GISEL-NEXT: v_xor_b32_e32 v4, v19, v12 ; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13 -; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14 -; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15 +; GISEL-NEXT: v_xor_b32_e32 v12, v19, v14 +; GISEL-NEXT: v_xor_b32_e32 v13, v19, v15 ; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18 ; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc ; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], v4, v19 ; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], v5, v19, s[4:5] -; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc -; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc -; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5] -; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc -; GISEL-NEXT: v_ffbh_u32_e32 v14, v21 -; GISEL-NEXT: v_ffbh_u32_e32 v15, v20 -; GISEL-NEXT: v_ffbh_u32_e32 v16, v7 -; GISEL-NEXT: v_ffbh_u32_e32 v17, v6 +; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v2, v18, vcc +; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v18, vcc +; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v12, v19, s[4:5] +; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v13, v19, vcc +; GISEL-NEXT: v_ffbh_u32_e32 v12, v21 +; GISEL-NEXT: v_ffbh_u32_e32 v13, v20 +; GISEL-NEXT: v_ffbh_u32_e32 v14, v7 +; GISEL-NEXT: v_ffbh_u32_e32 v15, v6 ; GISEL-NEXT: v_or_b32_e32 v0, v20, v4 ; GISEL-NEXT: v_or_b32_e32 v1, v21, v5 -; GISEL-NEXT: v_or_b32_e32 v2, v6, v12 -; GISEL-NEXT: v_or_b32_e32 v3, v7, v13 -; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15 +; GISEL-NEXT: v_or_b32_e32 v2, v6, v10 +; GISEL-NEXT: v_or_b32_e32 v3, v7, v11 +; GISEL-NEXT: v_add_i32_e32 v13, vcc, 32, v13 ; GISEL-NEXT: v_ffbh_u32_e32 v26, v5 ; GISEL-NEXT: v_ffbh_u32_e32 v27, v4 -; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17 -; GISEL-NEXT: v_ffbh_u32_e32 v28, v13 -; GISEL-NEXT: v_ffbh_u32_e32 v29, v12 +; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15 +; GISEL-NEXT: v_ffbh_u32_e32 v28, v11 +; GISEL-NEXT: v_ffbh_u32_e32 v29, v10 ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3] -; GISEL-NEXT: v_min_u32_e32 v0, v14, v15 +; GISEL-NEXT: v_min_u32_e32 v0, v12, v13 ; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27 -; GISEL-NEXT: v_min_u32_e32 v2, v16, v17 +; GISEL-NEXT: v_min_u32_e32 v2, v14, v15 ; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29 ; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0 ; GISEL-NEXT: v_min_u32_e32 v1, v26, v1 @@ -665,32 +665,32 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5] ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13] +; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] ; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc ; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v0, v1 ; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, vcc ; GISEL-NEXT: v_subb_u32_e64 v0, s[4:5], 0, 0, s[4:5] ; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5] -; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11] +; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[16:17] ; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc -; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v2 +; GISEL-NEXT: v_xor_b32_e32 v12, 0x7f, 
v2 ; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc -; GISEL-NEXT: v_or_b32_e32 v10, v10, v0 -; GISEL-NEXT: v_or_b32_e32 v11, v3, v1 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v0 +; GISEL-NEXT: v_or_b32_e32 v13, v3, v1 ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc -; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] -; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GISEL-NEXT: v_or_b32_e32 v11, v14, v15 -; GISEL-NEXT: v_and_b32_e32 v14, 1, v11 -; GISEL-NEXT: v_or_b32_e32 v10, v11, v10 +; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13] +; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc +; GISEL-NEXT: v_or_b32_e32 v13, v14, v15 +; GISEL-NEXT: v_and_b32_e32 v14, 1, v13 +; GISEL-NEXT: v_or_b32_e32 v12, v13, v12 ; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc -; GISEL-NEXT: v_and_b32_e32 v16, 1, v10 +; GISEL-NEXT: v_and_b32_e32 v16, 1, v12 ; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc +; GISEL-NEXT: v_cndmask_b32_e64 v12, v10, 0, vcc +; GISEL-NEXT: v_cndmask_b32_e64 v13, v11, 0, vcc ; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 ; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1 ; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5] @@ -703,22 +703,22 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v0, s[4:5] ; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v1, vcc ; GISEL-NEXT: v_add_i32_e64 v14, s[4:5], v30, v2 -; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30 +; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], 64, v30 ; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30 -; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30 +; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], v30 ; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1 -; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10 +; GISEL-NEXT: v_lshr_b64 v[12:13], v[6:7], v12 ; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14 ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30 ; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc -; GISEL-NEXT: v_or_b32_e32 v0, v10, v2 -; GISEL-NEXT: v_or_b32_e32 v1, v11, v3 +; GISEL-NEXT: v_or_b32_e32 v0, v12, v2 +; GISEL-NEXT: v_or_b32_e32 v1, v13, v3 ; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30 -; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v12, v0, v10, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v13, v1, v11, vcc ; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9] ; GISEL-NEXT: v_mov_b32_e32 v0, s8 ; GISEL-NEXT: v_mov_b32_e32 v1, s9 @@ -730,26 +730,26 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: ; %bb.8: ; %udiv-preheader ; GISEL-NEXT: v_add_i32_e32 v32, vcc, 0xffffffc0, v26 ; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26 -; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26 +; GISEL-NEXT: v_lshr_b64 v[0:1], v[10:11], v26 ; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26 ; GISEL-NEXT: s_mov_b64 s[4:5], 0 ; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v20 ; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v21, vcc -; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16 -; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32 +; GISEL-NEXT: v_lshl_b64 v[16:17], v[10:11], v16 +; GISEL-NEXT: v_lshr_b64 v[10:11], v[10:11], v32 ; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc ; GISEL-NEXT: 
v_addc_u32_e32 v33, vcc, -1, v5, vcc ; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5] ; GISEL-NEXT: v_or_b32_e32 v2, v2, v16 ; GISEL-NEXT: v_or_b32_e32 v3, v3, v17 ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26 -; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26 -; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v10, v2, v6, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, v3, v7, vcc ; GISEL-NEXT: v_mov_b32_e32 v7, 0 ; GISEL-NEXT: v_mov_b32_e32 v0, s4 ; GISEL-NEXT: v_mov_b32_e32 v1, s5 @@ -757,20 +757,20 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_mov_b32_e32 v3, s7 ; GISEL-NEXT: .LBB0_9: ; %udiv-do-while ; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1 +; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], 1 ; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1 -; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13 -; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11 -; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1 -; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v11 +; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v13 +; GISEL-NEXT: v_lshl_b64 v[10:11], v[14:15], 1 +; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 ; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15 ; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26 ; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc ; GISEL-NEXT: v_or_b32_e32 v16, v16, v6 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v34 -; GISEL-NEXT: v_or_b32_e32 v10, v10, v14 -; GISEL-NEXT: v_or_b32_e32 v14, v0, v12 -; GISEL-NEXT: v_or_b32_e32 v15, v1, v13 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v14 +; GISEL-NEXT: v_or_b32_e32 v14, v0, v10 +; GISEL-NEXT: v_or_b32_e32 v15, v1, v11 ; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc ; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc ; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2 @@ -783,14 +783,14 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v6 ; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GISEL-NEXT: v_and_b32_e32 v6, 1, v0 -; GISEL-NEXT: v_and_b32_e32 v12, v0, v20 -; GISEL-NEXT: v_and_b32_e32 v13, v0, v21 +; GISEL-NEXT: v_and_b32_e32 v10, v0, v20 +; GISEL-NEXT: v_and_b32_e32 v11, v0, v21 ; GISEL-NEXT: v_and_b32_e32 v34, v0, v4 ; GISEL-NEXT: v_and_b32_e32 v35, v0, v5 ; GISEL-NEXT: v_mov_b32_e32 v0, v6 ; GISEL-NEXT: v_mov_b32_e32 v1, v7 -; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12 -; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc +; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v2, v10 +; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v11, vcc ; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc ; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc ; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5] @@ -800,9 +800,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: .LBB0_11: ; %Flow11 ; GISEL-NEXT: s_or_b64 exec, exec, s[8:9] ; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1 -; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 ; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15 -; GISEL-NEXT: v_or_b32_e32 v10, v10, v4 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v4 ; GISEL-NEXT: v_or_b32_e32 v14, v0, v2 ; 
GISEL-NEXT: v_or_b32_e32 v15, v1, v3 ; GISEL-NEXT: .LBB0_12: ; %Flow12 @@ -815,8 +815,8 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3 ; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7 ; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7 -; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7 -; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7 +; GISEL-NEXT: v_xor_b32_e32 v8, v12, v7 +; GISEL-NEXT: v_xor_b32_e32 v9, v13, v7 ; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3 ; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc ; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7 diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll index e7af7467171c3..e0421575c3174 100644 --- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll @@ -20,7 +20,8 @@ define i128 @fptosi_f64_to_i128(double %x) { ; SDAG-NEXT: s_cbranch_execz .LBB0_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc @@ -386,7 +387,8 @@ define i128 @fptoui_f64_to_i128(double %x) { ; SDAG-NEXT: s_cbranch_execz .LBB1_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc @@ -749,9 +751,10 @@ define i128 @fptosi_f32_to_i128(float %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB2_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1100,9 +1103,10 @@ define i128 @fptoui_f32_to_i128(float %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB3_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1489,9 +1493,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB6_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1836,9 +1841,10 @@ 
define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    s_and_saveexec_b64 s[8:9], vcc
 ; SDAG-NEXT:    s_cbranch_execz .LBB7_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT:    v_mov_b32_e32 v6, 0
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v1, -1
+; SDAG-NEXT:    v_mov_b32_e32 v6, 0
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index 05403f008276c..a50791e10f5a2 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -7575,15 +7575,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; GFX7-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX7-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:  .LBB38_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    v_add_f64 v[8:9], v[10:11], v[4:5]
 ; GFX7-NEXT:    v_mov_b32_e32 v0, v8
 ; GFX7-NEXT:    v_mov_b32_e32 v1, v9
@@ -7593,7 +7591,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
 ; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB38_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -7609,15 +7609,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX6-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX6-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:  .LBB38_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    v_add_f64 v[8:9], v[10:11], v[4:5]
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NEXT:    v_mov_b32_e32 v0, v8
@@ -7628,7 +7626,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
 ; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB38_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -7809,15 +7809,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX7-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
 ; GFX7-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX7-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:  .LBB39_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    v_add_f64 v[8:9], v[10:11], v[4:5]
 ; GFX7-NEXT:    v_mov_b32_e32 v0, v8
 ; GFX7-NEXT:    v_mov_b32_e32 v1, v9
@@ -7827,7 +7825,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
 ; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB39_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -7843,15 +7843,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX6-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
 ; GFX6-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX6-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:  .LBB39_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    v_add_f64 v[8:9], v[10:11], v[4:5]
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NEXT:    v_mov_b32_e32 v0, v8
@@ -7862,7 +7860,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
 ; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB39_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -8039,34 +8039,32 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    s_movk_i32 s4, 0xf800
-; GFX7-NEXT:    v_mov_b32_e32 v7, v1
-; GFX7-NEXT:    v_mov_b32_e32 v6, v0
 ; GFX7-NEXT:    s_mov_b32 s5, -1
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s6, 0
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX7-NEXT:    v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX7-NEXT:    buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    v_add_i32_e32 v10, vcc, 0xfffff800, v0
 ; GFX7-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX7-NEXT:    v_mov_b32_e32 v4, v2
-; GFX7-NEXT:    v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX7-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
 ; GFX7-NEXT:  .LBB40_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
-; GFX7-NEXT:    v_add_f64 v[8:9], v[10:11], v[4:5]
-; GFX7-NEXT:    v_mov_b32_e32 v0, v8
-; GFX7-NEXT:    v_mov_b32_e32 v1, v9
-; GFX7-NEXT:    v_mov_b32_e32 v2, v10
-; GFX7-NEXT:    v_mov_b32_e32 v3, v11
-; GFX7-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX7-NEXT:    v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v6
+; GFX7-NEXT:    v_mov_b32_e32 v1, v7
+; GFX7-NEXT:    v_mov_b32_e32 v2, v8
+; GFX7-NEXT:    v_mov_b32_e32 v3, v9
+; GFX7-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
-; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB40_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -8077,35 +8075,33 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT:    s_movk_i32 s4, 0xf800
-; GFX6-NEXT:    v_mov_b32_e32 v7, v1
-; GFX6-NEXT:    v_mov_b32_e32 v6, v0
 ; GFX6-NEXT:    s_mov_b32 s5, -1
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s6, 0
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX6-NEXT:    buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX6-NEXT:    v_add_i32_e32 v10, vcc, 0xfffff800, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX6-NEXT:    v_mov_b32_e32 v4, v2
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX6-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
 ; GFX6-NEXT:  .LBB40_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
-; GFX6-NEXT:    v_add_f64 v[8:9], v[10:11], v[4:5]
+; GFX6-NEXT:    v_add_f64 v[6:7], v[8:9], v[4:5]
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v0, v8
-; GFX6-NEXT:    v_mov_b32_e32 v1, v9
-; GFX6-NEXT:    v_mov_b32_e32 v2, v10
-; GFX6-NEXT:    v_mov_b32_e32 v3, v11
-; GFX6-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX6-NEXT:    v_mov_b32_e32 v0, v6
+; GFX6-NEXT:    v_mov_b32_e32 v1, v7
+; GFX6-NEXT:    v_mov_b32_e32 v2, v8
+; GFX6-NEXT:    v_mov_b32_e32 v3, v9
+; GFX6-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB40_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
index ac223fd6030bd..311faac1b7c29 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
@@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX7-NEXT:    v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT:    buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT:    v_max_f64 v[10:11], v[2:3], v[2:3]
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:  .LBB24_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
-; GFX7-NEXT:    v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX7-NEXT:    v_max_f64 v[8:9], v[0:1], v[6:7]
-; GFX7-NEXT:    v_mov_b32_e32 v0, v8
-; GFX7-NEXT:    v_mov_b32_e32 v1, v9
-; GFX7-NEXT:    v_mov_b32_e32 v2, v10
-; GFX7-NEXT:    v_mov_b32_e32 v3, v11
+; GFX7-NEXT:    v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX7-NEXT:    v_max_f64 v[6:7], v[0:1], v[10:11]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v6
+; GFX7-NEXT:    v_mov_b32_e32 v1, v7
+; GFX7-NEXT:    v_mov_b32_e32 v2, v8
+; GFX7-NEXT:    v_mov_b32_e32 v3, v9
 ; GFX7-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
-; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB24_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX6-NEXT:    v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX6-NEXT:    buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX6-NEXT:    v_max_f64 v[10:11], v[2:3], v[2:3]
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:  .LBB24_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX6-NEXT:    v_max_f64 v[8:9], v[0:1], v[6:7]
-; GFX6-NEXT:    v_mov_b32_e32 v0, v8
-; GFX6-NEXT:    v_mov_b32_e32 v1, v9
-; GFX6-NEXT:    v_mov_b32_e32 v2, v10
-; GFX6-NEXT:    v_mov_b32_e32 v3, v11
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT:    v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX6-NEXT:    v_max_f64 v[6:7], v[0:1], v[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v0, v6
+; GFX6-NEXT:    v_mov_b32_e32 v1, v7
+; GFX6-NEXT:    v_mov_b32_e32 v2, v8
+; GFX6-NEXT:    v_mov_b32_e32 v3, v9
 ; GFX6-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB24_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
index 5653f85c67339..e2808ee9bf706 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
@@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX7-NEXT:    v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT:    buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT:    v_max_f64 v[10:11], v[2:3], v[2:3]
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:  .LBB24_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
-; GFX7-NEXT:    v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX7-NEXT:    v_min_f64 v[8:9], v[0:1], v[6:7]
-; GFX7-NEXT:    v_mov_b32_e32 v0, v8
-; GFX7-NEXT:    v_mov_b32_e32 v1, v9
-; GFX7-NEXT:    v_mov_b32_e32 v2, v10
-; GFX7-NEXT:    v_mov_b32_e32 v3, v11
+; GFX7-NEXT:    v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX7-NEXT:    v_min_f64 v[6:7], v[0:1], v[10:11]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v6
+; GFX7-NEXT:    v_mov_b32_e32 v1, v7
+; GFX7-NEXT:    v_mov_b32_e32 v2, v8
+; GFX7-NEXT:    v_mov_b32_e32 v3, v9
 ; GFX7-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
-; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB24_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX6-NEXT:    v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX6-NEXT:    buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX6-NEXT:    v_max_f64 v[10:11], v[2:3], v[2:3]
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:  .LBB24_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX6-NEXT:    v_min_f64 v[8:9], v[0:1], v[6:7]
-; GFX6-NEXT:    v_mov_b32_e32 v0, v8
-; GFX6-NEXT:    v_mov_b32_e32 v1, v9
-; GFX6-NEXT:    v_mov_b32_e32 v2, v10
-; GFX6-NEXT:    v_mov_b32_e32 v3, v11
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT:    v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX6-NEXT:    v_min_f64 v[6:7], v[0:1], v[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v0, v6
+; GFX6-NEXT:    v_mov_b32_e32 v1, v7
+; GFX6-NEXT:    v_mov_b32_e32 v2, v8
+; GFX6-NEXT:    v_mov_b32_e32 v3, v9
 ; GFX6-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB24_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
index f0e16150c9e79..11f0f38d2b6fa 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
@@ -3913,15 +3913,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; GFX7-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX7-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:  .LBB16_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    v_add_f64 v[8:9], v[10:11], -v[4:5]
 ; GFX7-NEXT:    v_mov_b32_e32 v0, v8
 ; GFX7-NEXT:    v_mov_b32_e32 v1, v9
@@ -3931,7 +3929,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
 ; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB16_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -3947,15 +3947,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX6-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; GFX6-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX6-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:  .LBB16_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    v_add_f64 v[8:9], v[10:11], -v[4:5]
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NEXT:    v_mov_b32_e32 v0, v8
@@ -3966,7 +3964,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
 ; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB16_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4165,15 +4165,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX7-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
 ; GFX7-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX7-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:  .LBB17_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    v_add_f64 v[8:9], v[10:11], -v[4:5]
 ; GFX7-NEXT:    v_mov_b32_e32 v0, v8
 ; GFX7-NEXT:    v_mov_b32_e32 v1, v9
@@ -4183,7 +4181,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
 ; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB17_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4199,15 +4199,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX6-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
 ; GFX6-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX6-NEXT:    v_mov_b32_e32 v4, v2
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:  .LBB17_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    v_add_f64 v[8:9], v[10:11], -v[4:5]
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NEXT:    v_mov_b32_e32 v0, v8
@@ -4218,7 +4216,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
 ; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB17_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4413,34 +4413,32 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1)
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    s_movk_i32 s4, 0xf800
-; GFX7-NEXT:    v_mov_b32_e32 v7, v1
-; GFX7-NEXT:    v_mov_b32_e32 v6, v0
 ; GFX7-NEXT:    s_mov_b32 s5, -1
 ; GFX7-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX7-NEXT:    s_mov_b32 s6, 0
-; GFX7-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX7-NEXT:    v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX7-NEXT:    buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    v_add_i32_e32 v10, vcc, 0xfffff800, v0
 ; GFX7-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX7-NEXT:    v_mov_b32_e32 v4, v2
-; GFX7-NEXT:    v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX7-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
 ; GFX7-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX7-NEXT:    s_mov_b32 s4, s6
 ; GFX7-NEXT:    s_mov_b32 s5, s6
 ; GFX7-NEXT:  .LBB18_1: ; %atomicrmw.start
 ; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-NEXT:    v_mov_b32_e32 v11, v1
-; GFX7-NEXT:    v_mov_b32_e32 v10, v0
-; GFX7-NEXT:    v_add_f64 v[8:9], v[10:11], -v[4:5]
-; GFX7-NEXT:    v_mov_b32_e32 v0, v8
-; GFX7-NEXT:    v_mov_b32_e32 v1, v9
-; GFX7-NEXT:    v_mov_b32_e32 v2, v10
-; GFX7-NEXT:    v_mov_b32_e32 v3, v11
-; GFX7-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX7-NEXT:    v_add_f64 v[6:7], v[8:9], -v[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v6
+; GFX7-NEXT:    v_mov_b32_e32 v1, v7
+; GFX7-NEXT:    v_mov_b32_e32 v2, v8
+; GFX7-NEXT:    v_mov_b32_e32 v3, v9
+; GFX7-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
 ; GFX7-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-NEXT:    buffer_wbinvl1
-; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX7-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX7-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX7-NEXT:    s_cbranch_execnz .LBB18_1
 ; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4451,35 +4449,33 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1)
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT:    s_movk_i32 s4, 0xf800
-; GFX6-NEXT:    v_mov_b32_e32 v7, v1
-; GFX6-NEXT:    v_mov_b32_e32 v6, v0
 ; GFX6-NEXT:    s_mov_b32 s5, -1
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s6, 0
-; GFX6-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX6-NEXT:    buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX6-NEXT:    v_add_i32_e32 v10, vcc, 0xfffff800, v0
 ; GFX6-NEXT:    v_mov_b32_e32 v5, v3
 ; GFX6-NEXT:    v_mov_b32_e32 v4, v2
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX6-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
 ; GFX6-NEXT:    s_mov_b64 s[8:9], 0
 ; GFX6-NEXT:    s_mov_b32 s4, s6
 ; GFX6-NEXT:    s_mov_b32 s5, s6
 ; GFX6-NEXT:  .LBB18_1: ; %atomicrmw.start
 ; GFX6-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v11, v1
-; GFX6-NEXT:    v_mov_b32_e32 v10, v0
-; GFX6-NEXT:    v_add_f64 v[8:9], v[10:11], -v[4:5]
+; GFX6-NEXT:    v_add_f64 v[6:7], v[8:9], -v[4:5]
 ; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_mov_b32_e32 v0, v8
-; GFX6-NEXT:    v_mov_b32_e32 v1, v9
-; GFX6-NEXT:    v_mov_b32_e32 v2, v10
-; GFX6-NEXT:    v_mov_b32_e32 v3, v11
-; GFX6-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX6-NEXT:    v_mov_b32_e32 v0, v6
+; GFX6-NEXT:    v_mov_b32_e32 v1, v7
+; GFX6-NEXT:    v_mov_b32_e32 v2, v8
+; GFX6-NEXT:    v_mov_b32_e32 v3, v9
+; GFX6-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    buffer_wbinvl1
-; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v9, v1
 ; GFX6-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v8, v0
 ; GFX6-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GFX6-NEXT:    s_cbranch_execnz .LBB18_1
 ; GFX6-NEXT:  ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index 74f0f64c935b4..6a4c2849ba4a3 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -1502,13 +1502,11 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB32_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_sub_i32_e32 v8, vcc, v10, v7
 ; SI-NEXT:    v_subb_u32_e32 v9, vcc, v11, v6, vcc
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -1521,6 +1519,8 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB32_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -1593,13 +1593,11 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB33_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_sub_i32_e32 v8, vcc, v10, v7
 ; SI-NEXT:    v_subb_u32_e32 v9, vcc, v11, v6, vcc
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -1612,6 +1610,8 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB33_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -1883,43 +1883,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg %
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v9, s6, 0
-; SI-NEXT:    v_writelane_b32 v9, s7, 1
+; SI-NEXT:    v_writelane_b32 v7, s6, 0
+; SI-NEXT:    v_writelane_b32 v7, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
+; SI-NEXT:    v_mov_b32_e32 v6, s35
 ; SI-NEXT:  .LBB36_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v8, v1
-; SI-NEXT:    v_mov_b32_e32 v7, v0
-; SI-NEXT:    v_subrev_i32_e32 v5, vcc, s34, v7
-; SI-NEXT:    v_subb_u32_e32 v6, vcc, v8, v4, vcc
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v5
-; SI-NEXT:    v_mov_b32_e32 v1, v6
-; SI-NEXT:    v_mov_b32_e32 v2, v7
-; SI-NEXT:    v_mov_b32_e32 v3, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    v_subrev_i32_e32 v2, vcc, s34, v4
+; SI-NEXT:    v_subb_u32_e32 v3, vcc, v5, v6, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
 ; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB36_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
-; SI-NEXT:    v_readlane_b32 s7, v9, 1
-; SI-NEXT:    v_readlane_b32 s6, v9, 0
+; SI-NEXT:    v_readlane_b32 s7, v7, 1
+; SI-NEXT:    v_readlane_b32 s6, v7, 0
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
@@ -1985,43 +1984,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v9, s6, 0
-; SI-NEXT:    v_writelane_b32 v9, s7, 1
+; SI-NEXT:    v_writelane_b32 v7, s6, 0
+; SI-NEXT:    v_writelane_b32 v7, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
+; SI-NEXT:    v_mov_b32_e32 v6, s35
 ; SI-NEXT:  .LBB37_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v8, v1
-; SI-NEXT:    v_mov_b32_e32 v7, v0
-; SI-NEXT:    v_subrev_i32_e32 v5, vcc, s34, v7
-; SI-NEXT:    v_subb_u32_e32 v6, vcc, v8, v4, vcc
-; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v5
-; SI-NEXT:    v_mov_b32_e32 v1, v6
-; SI-NEXT:    v_mov_b32_e32 v2, v7
-; SI-NEXT:    v_mov_b32_e32 v3, v8
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT:    v_subrev_i32_e32 v2, vcc, s34, v4
+; SI-NEXT:    v_subb_u32_e32 v3, vcc, v5, v6, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
 ; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB37_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
-; SI-NEXT:    v_readlane_b32 s7, v9, 1
-; SI-NEXT:    v_readlane_b32 s6, v9, 0
+; SI-NEXT:    v_readlane_b32 s7, v7, 1
+; SI-NEXT:    v_readlane_b32 s6, v7, 0
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
@@ -2342,13 +2340,11 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB42_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_and_b32_e32 v9, v11, v6
 ; SI-NEXT:    v_and_b32_e32 v8, v10, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -2361,6 +2357,8 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB42_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -2433,13 +2431,11 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB43_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_and_b32_e32 v9, v11, v6
 ; SI-NEXT:    v_and_b32_e32 v8, v10, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -2452,6 +2448,8 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB43_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -2726,14 +2724,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB46_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v3, s34, v5
 ; SI-NEXT:    v_and_b32_e32 v2, s35, v4
 ; SI-NEXT:    v_mov_b32_e32 v0, v2
@@ -2745,6 +2740,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB46_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -2825,14 +2822,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB47_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v3, s34, v5
 ; SI-NEXT:    v_and_b32_e32 v2, s35, v4
 ; SI-NEXT:    v_mov_b32_e32 v0, v2
@@ -2844,6 +2838,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB47_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -3182,14 +3178,11 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB52_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v0, v11, v6
 ; SI-NEXT:    v_and_b32_e32 v1, v10, v7
 ; SI-NEXT:    v_not_b32_e32 v9, v0
@@ -3203,6 +3196,8 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB52_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -3279,14 +3274,11 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB53_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v0, v11, v6
 ; SI-NEXT:    v_and_b32_e32 v1, v10, v7
 ; SI-NEXT:    v_not_b32_e32 v9, v0
@@ -3300,6 +3292,8 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB53_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -3590,14 +3584,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB56_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v0, s34, v5
 ; SI-NEXT:    v_and_b32_e32 v1, s35, v4
 ; SI-NEXT:    v_not_b32_e32 v3, v0
@@ -3611,6 +3602,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB56_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -3695,14 +3688,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB57_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v0, s34, v5
 ; SI-NEXT:    v_and_b32_e32 v1, s35, v4
 ; SI-NEXT:    v_not_b32_e32 v3, v0
@@ -3716,6 +3706,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB57_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -3891,14 +3883,11 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB59_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_and_b32_e32 v0, v11, v6
 ; SI-NEXT:    v_and_b32_e32 v1, v10, v7
 ; SI-NEXT:    v_not_b32_e32 v9, v0
@@ -3912,6 +3901,8 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB59_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4162,13 +4153,11 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB62_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_or_b32_e32 v9, v11, v6
 ; SI-NEXT:    v_or_b32_e32 v8, v10, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -4181,6 +4170,8 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB62_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4253,13 +4244,11 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB63_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_or_b32_e32 v9, v11, v6
 ; SI-NEXT:    v_or_b32_e32 v8, v10, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -4272,6 +4261,8 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB63_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4546,14 +4537,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB66_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_or_b32_e32 v3, s34, v5
 ; SI-NEXT:    v_or_b32_e32 v2, s35, v4
 ; SI-NEXT:    v_mov_b32_e32 v0, v2
@@ -4565,6 +4553,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB66_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4645,14 +4635,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB67_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_or_b32_e32 v3, s34, v5
 ; SI-NEXT:    v_or_b32_e32 v2, s35, v4
 ; SI-NEXT:    v_mov_b32_e32 v0, v2
@@ -4664,6 +4651,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB67_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -4990,13 +4979,11 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB72_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_xor_b32_e32 v9, v11, v6
 ; SI-NEXT:    v_xor_b32_e32 v8, v10, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -5009,6 +4996,8 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB72_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -5081,13 +5070,11 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB73_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_xor_b32_e32 v9, v11, v6
 ; SI-NEXT:    v_xor_b32_e32 v8, v10, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
@@ -5100,6 +5087,8 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB73_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -5374,14 +5363,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB76_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_xor_b32_e32 v3, s34, v5
 ; SI-NEXT:    v_xor_b32_e32 v2, s35, v4
 ; SI-NEXT:    v_mov_b32_e32 v0, v2
@@ -5393,6 +5379,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB76_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -5473,14 +5461,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
 ; SI-NEXT:  .LBB77_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v5, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    v_xor_b32_e32 v3, s34, v5
 ; SI-NEXT:    v_xor_b32_e32 v2, s35, v4
 ; SI-NEXT:    v_mov_b32_e32 v0, v2
@@ -5492,6 +5477,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB77_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -5824,13 +5811,11 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB82_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
 ; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -5844,6 +5829,8 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB82_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -5918,13 +5905,11 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB83_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
 ; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -5938,6 +5923,8 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB83_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -6223,45 +6210,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg %
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v10, s6, 0
-; SI-NEXT:    v_writelane_b32 v10, s7, 1
+; SI-NEXT:    v_writelane_b32 v8, s6, 0
+; SI-NEXT:    v_writelane_b32 v8, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
-; SI-NEXT:    v_mov_b32_e32 v5, s34
+; SI-NEXT:    v_mov_b32_e32 v6, s35
+; SI-NEXT:    v_mov_b32_e32 v7, s34
 ; SI-NEXT:  .LBB86_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v9, v1
-; SI-NEXT:    v_mov_b32_e32 v8, v0
-; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v6
-; SI-NEXT:    v_mov_b32_e32 v1, v7
-; SI-NEXT:    v_mov_b32_e32 v2, v8
-; SI-NEXT:    v_mov_b32_e32 v3, v9
+; SI-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
 ; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB86_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
-; SI-NEXT:    v_readlane_b32 s7, v10, 1
-; SI-NEXT:    v_readlane_b32 s6, v10, 0
+; SI-NEXT:    v_readlane_b32 s7, v8, 1
+; SI-NEXT:    v_readlane_b32 s6, v8, 0
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
@@ -6331,45 +6318,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v10, s6, 0
-; SI-NEXT:    v_writelane_b32 v10, s7, 1
+; SI-NEXT:    v_writelane_b32 v8, s6, 0
+; SI-NEXT:    v_writelane_b32 v8, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
-; SI-NEXT:    v_mov_b32_e32 v5, s34
+; SI-NEXT:    v_mov_b32_e32 v6, s35
+; SI-NEXT:    v_mov_b32_e32 v7, s34
 ; SI-NEXT:  .LBB87_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v9, v1
-; SI-NEXT:    v_mov_b32_e32 v8, v0
-; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT:    v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v6
-; SI-NEXT:    v_mov_b32_e32 v1, v7
-; SI-NEXT:    v_mov_b32_e32 v2, v8
-; SI-NEXT:    v_mov_b32_e32 v3, v9
+; SI-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
 ; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB87_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
-; SI-NEXT:    v_readlane_b32 s7, v10, 1
-; SI-NEXT:    v_readlane_b32 s6, v10, 0
+; SI-NEXT:    v_readlane_b32 s7, v8, 1
+; SI-NEXT:    v_readlane_b32 s6, v8, 0
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
@@ -7176,13 +7163,11 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB96_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
 ; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -7196,6 +7181,8 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB96_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -7270,13 +7257,11 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB97_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
 ; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -7290,6 +7275,8 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB97_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -7575,45 +7562,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v10, s6, 0
-; SI-NEXT:    v_writelane_b32 v10, s7, 1
+; SI-NEXT:    v_writelane_b32 v8, s6, 0
+; SI-NEXT:    v_writelane_b32 v8, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
-; SI-NEXT:    v_mov_b32_e32 v5, s34
+; SI-NEXT:    v_mov_b32_e32 v6, s35
+; SI-NEXT:    v_mov_b32_e32 v7, s34
 ; SI-NEXT:  .LBB100_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v9, v1
-; SI-NEXT:    v_mov_b32_e32 v8, v0
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v6
-; SI-NEXT:    v_mov_b32_e32 v1, v7
-; SI-NEXT:    v_mov_b32_e32 v2, v8
-; SI-NEXT:    v_mov_b32_e32 v3, v9
+; SI-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
 ; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB100_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
-; SI-NEXT:    v_readlane_b32 s7, v10, 1
-; SI-NEXT:    v_readlane_b32 s6, v10, 0
+; SI-NEXT:    v_readlane_b32 s7, v8, 1
+; SI-NEXT:    v_readlane_b32 s6, v8, 0
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
@@ -7683,45 +7670,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v10, s6, 0
-; SI-NEXT:    v_writelane_b32 v10, s7, 1
+; SI-NEXT:    v_writelane_b32 v8, s6, 0
+; SI-NEXT:    v_writelane_b32 v8, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
-; SI-NEXT:    v_mov_b32_e32 v5, s34
+; SI-NEXT:    v_mov_b32_e32 v6, s35
+; SI-NEXT:    v_mov_b32_e32 v7, s34
 ; SI-NEXT:  .LBB101_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v9, v1
-; SI-NEXT:    v_mov_b32_e32 v8, v0
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v6
-; SI-NEXT:    v_mov_b32_e32 v1, v7
-; SI-NEXT:    v_mov_b32_e32 v2, v8
-; SI-NEXT:    v_mov_b32_e32 v3, v9
+; SI-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT:    v_mov_b32_e32 v0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, v3
+; SI-NEXT:    v_mov_b32_e32 v2, v4
+; SI-NEXT:    v_mov_b32_e32 v3, v5
 ; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
-; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
 ; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
 ; SI-NEXT:    s_cbranch_execnz .LBB101_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
-; SI-NEXT:    v_readlane_b32 s7, v10, 1
-; SI-NEXT:    v_readlane_b32 s6, v10, 0
+; SI-NEXT:    v_readlane_b32 s7, v8, 1
+; SI-NEXT:    v_readlane_b32 s6, v8, 0
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT:    buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; SI-NEXT:    s_setpc_b64 s[30:31]
@@ -8416,13 +8403,11 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB109_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
 ; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -8436,6 +8421,8 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB109_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -8510,13 +8497,11 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB110_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v11, v1
-; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
 ; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -8530,6 +8515,8 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
 ; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT:    v_mov_b32_e32 v11, v1
+; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; SI-NEXT:    s_cbranch_execnz .LBB110_1
 ; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
@@ -8815,45 +8802,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT:    buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SI-NEXT:    s_mov_b64 exec, s[34:35]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_writelane_b32 v10, s6, 0
-; SI-NEXT:    v_writelane_b32 v10, s7, 1
+; SI-NEXT:    v_writelane_b32 v8, s6, 0
+; SI-NEXT:    v_writelane_b32 v8, s7, 1
 ; SI-NEXT:    s_mov_b32 s35, s7
 ; SI-NEXT:    s_mov_b32 s34, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    buffer_load_dwordx2 v[4:5], off, s[4:7], 0
 ; SI-NEXT:    s_mov_b64 s[36:37], 0
-; SI-NEXT:    v_mov_b32_e32 v4, s35
-; SI-NEXT:    v_mov_b32_e32 v5, s34
+; SI-NEXT:    v_mov_b32_e32 v6, s35
+; SI-NEXT:    v_mov_b32_e32 v7, s34
 ; SI-NEXT:  .LBB113_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v9, v1
-; SI-NEXT:    v_mov_b32_e32 v8, v0
-; SI-NEXT:    v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT:    v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5]
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, v6
-; SI-NEXT:    v_mov_b32_e32 v1, v7
-; SI-NEXT:    v_mov_b32_e32 v2, v8
-; SI-NEXT:    v_mov_b32_e32 v3, v9
+; SI-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT:    v_cndmask_b32_e32 v2, v7, v4,
vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB113_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -8923,45 +8910,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB114_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB114_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: 
buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -9292,13 +9279,11 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB119_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -9312,6 +9297,8 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB119_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9386,13 +9373,11 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB120_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -9406,6 +9391,8 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB120_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9691,45 +9678,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB123_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; 
SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB123_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -9799,45 +9786,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB124_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: 
s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB124_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -10645,14 +10632,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB133_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc ; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5] @@ -10667,6 +10651,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB133_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -10745,14 +10731,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB134_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc ; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5] @@ -10767,6 +10750,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB134_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11065,14 +11050,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB137_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: 
v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc ; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5] @@ -11087,6 +11069,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB137_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11173,14 +11157,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB138_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc ; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5] @@ -11195,6 +11176,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB138_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11557,14 +11540,11 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s11, 0xf000 ; SI-NEXT: s_mov_b32 s8, s10 ; SI-NEXT: s_mov_b32 s9, s10 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 ; SI-NEXT: s_mov_b64 s[6:7], 0 ; SI-NEXT: .LBB143_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] @@ -11581,6 +11561,8 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[6:7] ; SI-NEXT: s_cbranch_execnz .LBB143_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11663,14 +11645,11 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: s_mov_b32 s11, 0xf000 ; SI-NEXT: s_mov_b32 s8, s10 ; SI-NEXT: s_mov_b32 s9, s10 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[6:7], 0 ; SI-NEXT: .LBB144_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] @@ -11687,6 +11666,8 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[6:7] ; SI-NEXT: s_cbranch_execnz .LBB144_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -12004,49 +11985,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[38:39], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB147_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8 -; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc -; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9] +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 +; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc +; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5] ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] -; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[38:39] ; SI-NEXT: s_cbranch_execnz .LBB147_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[38:39] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 
exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -12124,49 +12104,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspa ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[38:39], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB148_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8 -; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc -; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9] +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 +; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc +; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5] ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] -; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[38:39] ; SI-NEXT: s_cbranch_execnz .LBB148_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[38:39] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll index ba5ce8bb5fae7..8bb7274c84620 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll @@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) % ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: 
s_mov_b64 s[4:5], s[2:3] ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 -; SI-NEXT: s_movk_i32 s4, 0xfc01 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_mov_b32 s3, 0xfffff ; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_bfe_u32 v4, v3, 20, 11 -; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4 ; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6 ; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3 ; SI-NEXT: v_bfi_b32 v5, v5, 0, v3 diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll index c92c672dda2ad..ca4f5d22ca9a0 100644 --- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll +++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll @@ -51,7 +51,7 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) { ; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s4, v2 ; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc ; CHECK-NEXT: s_add_u32 s4, s4, 1 -; CHECK-NEXT: s_addc_u32 s5, s5, 0 +; CHECK-NEXT: s_addc_u32 s5, 0, s5 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: flat_store_byte v[6:7], v10 ; CHECK-NEXT: ; %bb.7: diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir index 0c723a09809c6..c9645c31aad75 100644 --- a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir +++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir @@ -113,7 +113,7 @@ body: | ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0 + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 ; GCN-NEXT: $vgpr0 = COPY [[COPY3]] %0:vgpr_32 = COPY $vgpr0 %1:vgpr_32 = COPY $vgpr1 @@ -135,7 +135,7 @@ body: | ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 ; GCN-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]] - ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0 + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 ; GCN-NEXT: $vgpr0 = COPY [[COPY3]] %0:vgpr_32 = COPY $vgpr0 %1:vgpr_32 = COPY $vgpr1 diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll index aa131ed6c9db1..760a298ce8971 100644 --- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll @@ -495,8 +495,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX900-NEXT: v_mov_b32_e32 v1, s35 ; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX900-NEXT: s_movk_i32 s0, 0x5000 -; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x5000, v0 ; GFX900-NEXT: v_mov_b32_e32 v4, 0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; GFX900-NEXT: v_mov_b32_e32 v5, 0 @@ -718,8 +717,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX90A-NEXT: v_mov_b32_e32 v2, s35 ; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v2, vcc -; GFX90A-NEXT: s_movk_i32 s0, 0x5000 -; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1 +; GFX90A-NEXT: v_add_co_u32_e32 
v2, vcc, 0x5000, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0 ; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f diff --git a/llvm/test/CodeGen/ARM/llvm.exp10.ll b/llvm/test/CodeGen/ARM/llvm.exp10.ll index eb72fe8c1e1b7..49397ca386cb4 100644 --- a/llvm/test/CodeGen/ARM/llvm.exp10.ll +++ b/llvm/test/CodeGen/ARM/llvm.exp10.ll @@ -189,12 +189,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) { ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl exp10f +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov s17, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl exp10f ; CHECK-NEXT: vmov s16, r0 +; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: vmov s18, r6 -; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: vmov r2, r3, d9 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: pop {r4, r5, r6, pc} @@ -207,7 +208,6 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) { ; CHECK: @ %bb.0: ; CHECK-NEXT: push {r4, r5, r6, r7, lr} ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: mov r4, r3 @@ -216,17 +216,15 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) { ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s19, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s18, r0 +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r6 -; CHECK-NEXT: vmov s17, r7 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s16, r0 -; CHECK-NEXT: vmov r2, r3, d9 -; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: mov r1, r7 +; CHECK-NEXT: mov r2, r5 +; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x) diff --git a/llvm/test/CodeGen/ARM/llvm.frexp.ll b/llvm/test/CodeGen/ARM/llvm.frexp.ll index 376426d701b3e..80972b75cf283 100644 --- a/llvm/test/CodeGen/ARM/llvm.frexp.ll +++ b/llvm/test/CodeGen/ARM/llvm.frexp.ll @@ -362,33 +362,31 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) { define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) { ; CHECK-LABEL: test_frexp_v4f32_v4i32_only_use_fract: ; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: mov r5, r1 -; CHECK-NEXT: mov r6, r0 -; CHECK-NEXT: mov r1, sp -; CHECK-NEXT: mov r0, r3 -; CHECK-NEXT: mov r4, r2 -; CHECK-NEXT: bl frexpf +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: sub sp, #20 +; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: add r1, sp, #4 -; CHECK-NEXT: vmov s19, r0 -; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: mov r5, r2 ; CHECK-NEXT: bl frexpf ; CHECK-NEXT: add r1, sp, #8 -; CHECK-NEXT: vmov s18, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl frexpf ; CHECK-NEXT: add r1, sp, #12 -; CHECK-NEXT: vmov s17, r0 +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: bl frexpf -; CHECK-NEXT: vmov s16, r0 -; CHECK-NEXT: vmov r2, r3, d9 -; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: add sp, #16 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: add r1, sp, #16 +; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: mov r0, r7 +; CHECK-NEXT: bl frexpf +; CHECK-NEXT: mov r1, r6 +; CHECK-NEXT: mov r2, r5 +; CHECK-NEXT: mov r3, r4 +; CHECK-NEXT: add sp, #20 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %result = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> %a) %result.0 = 
extractvalue { <4 x float>, <4 x i32> } %result, 0 ret <4 x float> %result.0 diff --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll index 7c9f375cbb9ad..40d36fbb6fe76 100644 --- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll +++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll @@ -97,7 +97,6 @@ entry: ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)( ; ALL: ld.w $w12, 0($[[R0]]) ; ALL: move.v $w[[W0:13]], $w12 -; NOODDSPREG: move.v $w[[W0:12]], $w13 ; ALL: teqi $zero, 1 ; ALL-NOT: st.w ; ALL-NOT: ld.w diff --git a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll index a2ad2946cc8ec..98314a02c23fe 100644 --- a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll @@ -897,31 +897,31 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) { ; P8LE-NEXT: mfvsrd r6, v2 ; P8LE-NEXT: mfvsrd r8, v3 ; P8LE-NEXT: ori r3, r3, 51289 +; P8LE-NEXT: mffprd r4, f0 ; P8LE-NEXT: ori r5, r5, 42889 -; P8LE-NEXT: rldic r4, r3, 36, 1 -; P8LE-NEXT: mffprd r3, f0 +; P8LE-NEXT: rldic r3, r3, 36, 1 ; P8LE-NEXT: rldic r5, r5, 35, 1 ; P8LE-NEXT: rldicl r7, r6, 63, 1 -; P8LE-NEXT: oris r4, r4, 45590 +; P8LE-NEXT: oris r3, r3, 45590 ; P8LE-NEXT: oris r5, r5, 1603 -; P8LE-NEXT: ori r4, r4, 17097 +; P8LE-NEXT: ori r3, r3, 17097 ; P8LE-NEXT: ori r5, r5, 21445 -; P8LE-NEXT: mulhdu r4, r3, r4 +; P8LE-NEXT: mulhdu r3, r4, r3 ; P8LE-NEXT: mulhdu r5, r7, r5 -; P8LE-NEXT: sub r7, r3, r4 +; P8LE-NEXT: sub r7, r4, r3 ; P8LE-NEXT: rldicl r5, r5, 57, 7 ; P8LE-NEXT: rldicl r7, r7, 63, 1 ; P8LE-NEXT: mulli r5, r5, 654 -; P8LE-NEXT: add r4, r7, r4 +; P8LE-NEXT: add r3, r7, r3 ; P8LE-NEXT: lis r7, -16037 ; P8LE-NEXT: ori r7, r7, 28749 -; P8LE-NEXT: rldicl r4, r4, 60, 4 +; P8LE-NEXT: rldicl r3, r3, 60, 4 ; P8LE-NEXT: sub r5, r6, r5 ; P8LE-NEXT: rldic r7, r7, 32, 0 -; P8LE-NEXT: mulli r4, r4, 23 +; P8LE-NEXT: mulli r3, r3, 23 ; P8LE-NEXT: oris r7, r7, 52170 ; P8LE-NEXT: ori r7, r7, 12109 -; P8LE-NEXT: sub r3, r3, r4 +; P8LE-NEXT: sub r3, r4, r3 ; P8LE-NEXT: mulhdu r7, r8, r7 ; P8LE-NEXT: mtfprd f1, r3 ; P8LE-NEXT: li r3, 0 diff --git a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll index 435b0ab3fea6c..816b12e2d8e5b 100644 --- a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll +++ b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll @@ -35,12 +35,12 @@ define i64 @test2elt(<2 x i64> %a) local_unnamed_addr #0 { ; ; CHECK-BE-LABEL: test2elt: ; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xscvuxdsp f0, v2 +; CHECK-BE-NEXT: xscvdpspn v3, f0 ; CHECK-BE-NEXT: xxswapd vs0, v2 -; CHECK-BE-NEXT: xscvuxdsp f1, v2 ; CHECK-BE-NEXT: xscvuxdsp f0, f0 -; CHECK-BE-NEXT: xscvdpspn v2, f1 -; CHECK-BE-NEXT: xscvdpspn v3, f0 -; CHECK-BE-NEXT: vmrgow v2, v2, v3 +; CHECK-BE-NEXT: xscvdpspn v2, f0 +; CHECK-BE-NEXT: vmrgow v2, v3, v2 ; CHECK-BE-NEXT: mfvsrd r3, v2 ; CHECK-BE-NEXT: blr entry: @@ -327,12 +327,12 @@ define i64 @test2elt_signed(<2 x i64> %a) local_unnamed_addr #0 { ; ; CHECK-BE-LABEL: test2elt_signed: ; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xscvsxdsp f0, v2 +; CHECK-BE-NEXT: xscvdpspn v3, f0 ; CHECK-BE-NEXT: xxswapd vs0, v2 -; CHECK-BE-NEXT: xscvsxdsp f1, v2 ; CHECK-BE-NEXT: xscvsxdsp f0, f0 -; CHECK-BE-NEXT: xscvdpspn v2, f1 -; CHECK-BE-NEXT: xscvdpspn v3, f0 -; CHECK-BE-NEXT: vmrgow v2, v2, v3 +; CHECK-BE-NEXT: xscvdpspn v2, f0 +; CHECK-BE-NEXT: vmrgow v2, v3, v2 ; CHECK-BE-NEXT: mfvsrd r3, v2 ; CHECK-BE-NEXT: blr entry: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll index 9173fa4622487..cc1282a9119da 100644 --- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll @@ -1666,20 +1666,20 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: .LBB61_32: # %else114 ; CHECK-RV32-NEXT: slli a2, a3, 1 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-RV32-NEXT: vsrl.vx v16, v0, a1 +; CHECK-RV32-NEXT: vsrl.vx v24, v0, a1 ; CHECK-RV32-NEXT: bgez a2, .LBB61_34 ; CHECK-RV32-NEXT: # %bb.33: # %cond.load117 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v24, v8 +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a2 ; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 30 ; CHECK-RV32-NEXT: addi a0, a0, 1 -; CHECK-RV32-NEXT: vmv1r.v v24, v8 -; CHECK-RV32-NEXT: vmv8r.v v8, v24 +; CHECK-RV32-NEXT: vmv1r.v v16, v8 +; CHECK-RV32-NEXT: vmv8r.v v8, v16 ; CHECK-RV32-NEXT: .LBB61_34: # %else118 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-RV32-NEXT: vmv.x.s a2, v16 +; CHECK-RV32-NEXT: vmv.x.s a2, v24 ; CHECK-RV32-NEXT: bgez a3, .LBB61_35 ; CHECK-RV32-NEXT: j .LBB61_572 ; CHECK-RV32-NEXT: .LBB61_35: # %else122 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll index 1d691b130b3da..a2fcd7962b8b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll @@ -661,8 +661,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t3, 36 -; RV32-NEXT: mul a7, a7, t3 +; RV32-NEXT: slli a7, a7, 5 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 ; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill @@ -682,7 +681,11 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vl8r.v v8, (t1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v24, v8, v0 -; RV32-NEXT: addi t1, sp, 16 +; RV32-NEXT: csrr t1, vlenb +; RV32-NEXT: li t2, 44 +; RV32-NEXT: mul t1, t1, t2 +; RV32-NEXT: add t1, sp, t1 +; RV32-NEXT: addi t1, t1, 16 ; RV32-NEXT: vs4r.v v8, (t1) # vscale x 32-byte Folded Spill ; RV32-NEXT: vmv.s.x v0, a7 ; RV32-NEXT: addi a3, a3, 12 @@ -694,8 +697,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t1, 20 -; RV32-NEXT: mul a7, a7, t1 +; RV32-NEXT: slli a7, a7, 4 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 ; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill @@ -733,7 +735,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t0, 28 +; RV32-NEXT: li t0, 24 ; RV32-NEXT: mul a7, a7, t0 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 @@ -755,7 +757,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v24, v8, v0 ; 
RV32-NEXT: csrr a6, vlenb -; RV32-NEXT: li a7, 44 +; RV32-NEXT: li a7, 40 ; RV32-NEXT: mul a6, a6, a7 ; RV32-NEXT: add a6, sp, a6 ; RV32-NEXT: addi a6, a6, 16 @@ -772,24 +774,19 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a4, 12 -; RV32-NEXT: mul a1, a1, a4 +; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: vmv.s.x v0, a3 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 36 -; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vrgatherei16.vv v24, v8, v6 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a3, 92 @@ -812,8 +809,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 20 -; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload @@ -835,12 +831,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 84 -; RV32-NEXT: mul a1, a1, a2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill -; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 72 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 @@ -860,30 +850,36 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs4r.v v28, (a1) # vscale x 32-byte Folded Spill +; RV32-NEXT: addi a1, sp, 16 +; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 60 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vmv.v.v v20, v16 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 2 +; RV32-NEXT: li a2, 60 +; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload -; RV32-NEXT: vmv.v.v v16, v8 +; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 60 +; RV32-NEXT: li a2, 44 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs4r.v v16, (a1) # vscale x 32-byte Folded Spill -; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vl4r.v v8, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV32-NEXT: vrgatherei16.vv v28, v8, v3 +; RV32-NEXT: vrgatherei16.vv v20, v16, v3 ; 
RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma -; RV32-NEXT: vmv.v.v v28, v24 +; RV32-NEXT: vmv.v.v v20, v24 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 6 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: lui a1, %hi(.LCPI27_4) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_4) ; RV32-NEXT: lui a2, %hi(.LCPI27_5) @@ -891,13 +887,25 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; RV32-NEXT: vle16.v v24, (a2) ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v16, (a1) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 84 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs1r.v v16, (a1) # vscale x 8-byte Folded Spill ; RV32-NEXT: lui a1, %hi(.LCPI27_7) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_7) ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vle16.v v10, (a1) +; RV32-NEXT: vle16.v v16, (a1) ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 28 +; RV32-NEXT: li a2, 76 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs2r.v v16, (a1) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 24 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 @@ -909,18 +917,29 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 84 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vl1r.v v7, (a1) # vscale x 8-byte Folded Reload ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV32-NEXT: vrgatherei16.vv v24, v20, v8 +; RV32-NEXT: vrgatherei16.vv v24, v20, v7 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma ; RV32-NEXT: vmv.v.v v24, v16 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 12 -; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 76 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vl2r.v v28, (a1) # vscale x 16-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vrgatherei16.vv v16, v0, v10 +; RV32-NEXT: vrgatherei16.vv v16, v0, v28 ; RV32-NEXT: lui a1, %hi(.LCPI27_6) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_6) ; RV32-NEXT: lui a2, %hi(.LCPI27_8) @@ -934,7 +953,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle16.v v5, (a2) ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 44 +; RV32-NEXT: li a2, 40 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 @@ -942,12 +961,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vrgatherei16.vv v0, v20, v4 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma ; RV32-NEXT: vmv.v.v v0, v16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 84 -; RV32-NEXT: mul a1, a1, a2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli 
zero, 16, e64, m8, ta, ma ; RV32-NEXT: vrgatherei16.vv v16, v8, v6 ; RV32-NEXT: csrr a1, vlenb @@ -968,7 +981,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: addi a1, a0, 192 ; RV32-NEXT: vse32.v v24, (a1) ; RV32-NEXT: addi a1, a0, 128 -; RV32-NEXT: vse32.v v28, (a1) +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 6 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vl4r.v v8, (a2) # vscale x 32-byte Folded Reload +; RV32-NEXT: vse32.v v8, (a1) ; RV32-NEXT: addi a1, a0, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: li a3, 60 diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll index d995a31f243d3..acc68491d5aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -416,14 +416,14 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i1: @@ -437,14 +437,14 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-256-NEXT: vrgather.vv v11, v12, v16 -; RV32-BITS-256-NEXT: vrgather.vv v10, v13, v16 -; RV32-BITS-256-NEXT: vrgather.vv v9, v14, v16 -; RV32-BITS-256-NEXT: vrgather.vv v8, v15, v16 +; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16 +; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16 +; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16 +; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i1: @@ -458,14 +458,14 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-512-NEXT: vrgather.vv v11, v12, v16 -; RV32-BITS-512-NEXT: vrgather.vv v10, v13, v16 -; RV32-BITS-512-NEXT: vrgather.vv v9, v14, v16 -; RV32-BITS-512-NEXT: vrgather.vv v8, v15, v16 +; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16 +; 
RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16 +; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16 +; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1: @@ -479,14 +479,14 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i1: @@ -500,14 +500,14 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-256-NEXT: vrgather.vv v11, v12, v16 -; RV64-BITS-256-NEXT: vrgather.vv v10, v13, v16 -; RV64-BITS-256-NEXT: vrgather.vv v9, v14, v16 -; RV64-BITS-256-NEXT: vrgather.vv v8, v15, v16 +; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16 +; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16 +; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16 +; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i1: @@ -521,14 +521,14 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-512-NEXT: vrgather.vv v11, v12, v16 -; RV64-BITS-512-NEXT: vrgather.vv v10, v13, v16 -; RV64-BITS-512-NEXT: vrgather.vv v9, v14, v16 -; RV64-BITS-512-NEXT: vrgather.vv v8, v15, v16 +; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16 +; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16 +; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16 +; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv32i1( %a) ret %res diff --git a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll index 4bc6313494d41..1ee7e138654b9 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll @@ -37772,18 +37772,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1( %val, %val, %val, < ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_PALL: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB916_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB916_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB916_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -38416,11 +38416,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL( %val, < ; CHECK-RV32VC-NEXT: .LBB916_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) @@ -39022,18 +39022,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1( %val, %val, %val, %val, %val ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_DEFAULT: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB919_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB919_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB919_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -40290,11 +40290,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT( %val ; CHECK-RV32VC-NEXT: .LBB919_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) diff --git 
a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll index 41d2c02a73cd0..5a79659436f34 100644 --- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll +++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll @@ -348,38 +348,35 @@ entry: define <4 x float> @vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) { ; CHECK-MVE-LABEL: vector_add_f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, lr} -; CHECK-MVE-NEXT: push {r4, r5, r6, r7, lr} -; CHECK-MVE-NEXT: .pad #4 -; CHECK-MVE-NEXT: sub sp, #4 +; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-MVE-NEXT: push.w {r4, r5, r6, r7, r8, lr} ; CHECK-MVE-NEXT: .vsave {d8, d9} ; CHECK-MVE-NEXT: vpush {d8, d9} -; CHECK-MVE-NEXT: mov r4, r0 +; CHECK-MVE-NEXT: mov r8, r0 ; CHECK-MVE-NEXT: add r0, sp, #40 ; CHECK-MVE-NEXT: vldrw.u32 q4, [r0] -; CHECK-MVE-NEXT: mov r6, r1 +; CHECK-MVE-NEXT: mov r7, r1 ; CHECK-MVE-NEXT: mov r0, r3 -; CHECK-MVE-NEXT: mov r5, r2 -; CHECK-MVE-NEXT: vmov r7, r1, d9 +; CHECK-MVE-NEXT: mov r6, r2 +; CHECK-MVE-NEXT: vmov r4, r1, d9 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s19, r0 -; CHECK-MVE-NEXT: mov r0, r5 -; CHECK-MVE-NEXT: mov r1, r7 -; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov r5, r1, d8 -; CHECK-MVE-NEXT: vmov s18, r0 +; CHECK-MVE-NEXT: mov r5, r0 ; CHECK-MVE-NEXT: mov r0, r6 +; CHECK-MVE-NEXT: mov r1, r4 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s17, r0 -; CHECK-MVE-NEXT: mov r0, r4 -; CHECK-MVE-NEXT: mov r1, r5 +; CHECK-MVE-NEXT: vmov r6, r1, d8 +; CHECK-MVE-NEXT: mov r4, r0 +; CHECK-MVE-NEXT: mov r0, r7 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s16, r0 -; CHECK-MVE-NEXT: vmov r2, r3, d9 -; CHECK-MVE-NEXT: vmov r0, r1, d8 +; CHECK-MVE-NEXT: mov r7, r0 +; CHECK-MVE-NEXT: mov r0, r8 +; CHECK-MVE-NEXT: mov r1, r6 +; CHECK-MVE-NEXT: bl __aeabi_fadd +; CHECK-MVE-NEXT: mov r1, r7 +; CHECK-MVE-NEXT: mov r2, r4 +; CHECK-MVE-NEXT: mov r3, r5 ; CHECK-MVE-NEXT: vpop {d8, d9} -; CHECK-MVE-NEXT: add sp, #4 -; CHECK-MVE-NEXT: pop {r4, r5, r6, r7, pc} +; CHECK-MVE-NEXT: pop.w {r4, r5, r6, r7, r8, pc} ; ; CHECK-BE-LABEL: vector_add_f32: ; CHECK-BE: @ %bb.0: @ %entry diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll index 4dd9173e2d418..93b5e3f266b0a 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll @@ -33,53 +33,29 @@ entry: } define void @vld3_v4i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v4i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9} -; CHECK-LV-NEXT: vpush {d8, d9} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #32] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vstrw.32 q0, [r1] -; CHECK-LV-NEXT: vpop {d8, d9} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v4i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9} -; CHECK-LIS-NEXT: vpush {d8, d9} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; 
CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1] -; CHECK-LIS-NEXT: vpop {d8, d9} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vldrw.u32 q0, [r0, #16] +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: vldrw.u32 q4, [r0, #32] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr entry: %l1 = load <12 x i32>, ptr %src, align 4 @@ -93,87 +69,46 @@ entry: } define void @vld3_v8i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v8i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-LV-NEXT: vpush {d8, d9, d10, d11} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0] -; CHECK-LV-NEXT: vmov.f32 s17, s4 -; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LV-NEXT: vmov.f32 s18, s7 -; CHECK-LV-NEXT: vmov.f32 s22, s6 -; CHECK-LV-NEXT: vmov.f32 s16, s9 -; CHECK-LV-NEXT: vmov.f32 s19, s14 -; CHECK-LV-NEXT: vmov.f32 s20, s8 -; CHECK-LV-NEXT: vmov.f32 s21, s11 -; CHECK-LV-NEXT: vmov.f32 s23, s13 -; CHECK-LV-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LV-NEXT: vmov.f32 s4, s10 -; CHECK-LV-NEXT: vmov.f32 s6, s12 -; CHECK-LV-NEXT: vmov.f32 s7, s15 -; CHECK-LV-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LV-NEXT: vstrw.32 q1, [r1] -; CHECK-LV-NEXT: vpop {d8, d9, d10, d11} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v8i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: 
vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0] -; CHECK-LIS-NEXT: vmov.f32 s17, s4 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LIS-NEXT: vmov.f32 s18, s7 -; CHECK-LIS-NEXT: vmov.f32 s22, s6 -; CHECK-LIS-NEXT: vmov.f32 s16, s9 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s20, s8 -; CHECK-LIS-NEXT: vmov.f32 s21, s11 -; CHECK-LIS-NEXT: vmov.f32 s23, s13 -; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LIS-NEXT: vmov.f32 s4, s10 -; CHECK-LIS-NEXT: vmov.f32 s6, s12 -; CHECK-LIS-NEXT: vmov.f32 s7, s15 -; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LIS-NEXT: vstrw.32 q1, [r1] -; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v8i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vldrw.u32 q0, [r0, #64] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vmov.f32 s17, s4 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.f32 s18, s7 +; CHECK-NEXT: vmov.f32 s22, s6 +; CHECK-NEXT: vmov.f32 s16, s9 +; CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmov.f32 s20, s8 +; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vmov.f32 s23, s13 +; CHECK-NEXT: vadd.i32 q4, q5, q4 +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vmov.f32 s6, s12 +; CHECK-NEXT: vmov.f32 s7, s15 +; CHECK-NEXT: vadd.i32 q1, q4, q1 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: bx lr entry: %l1 = load <24 x i32>, ptr %src, align 4 @@ -187,155 +122,80 @@ entry: } define void @vld3_v16i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v16i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-LV-NEXT: vldrw.u32 q6, [r0, #176] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0] -; CHECK-LV-NEXT: vmov.f32 s17, s4 -; CHECK-LV-NEXT: vmov.f32 s18, s7 -; CHECK-LV-NEXT: vmov.f32 s22, s6 -; CHECK-LV-NEXT: vmov.f32 s16, s9 -; CHECK-LV-NEXT: vmov.f32 s19, s14 -; CHECK-LV-NEXT: vmov.f32 s20, s8 -; CHECK-LV-NEXT: vmov.f32 s21, s11 -; CHECK-LV-NEXT: vmov.f32 s23, s13 -; CHECK-LV-NEXT: 
vmov.f32 s4, s10 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0, #160] -; CHECK-LV-NEXT: vmov.f32 s6, s12 -; CHECK-LV-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LV-NEXT: vmov.f32 s7, s15 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #144] -; CHECK-LV-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LV-NEXT: vmov.f32 s18, s10 -; CHECK-LV-NEXT: vmov.f32 s21, s8 -; CHECK-LV-NEXT: vmov.f32 s22, s11 -; CHECK-LV-NEXT: vmov.f32 s16, s12 -; CHECK-LV-NEXT: vmov.f32 s17, s15 -; CHECK-LV-NEXT: vmov.f32 s20, s13 -; CHECK-LV-NEXT: vmov.f32 s23, s26 -; CHECK-LV-NEXT: vmov.f32 s19, s25 -; CHECK-LV-NEXT: vadd.i32 q4, q4, q5 -; CHECK-LV-NEXT: vmov.f32 s8, s14 -; CHECK-LV-NEXT: vmov.f32 s10, s24 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #112] -; CHECK-LV-NEXT: vmov.f32 s11, s27 -; CHECK-LV-NEXT: vldrw.u32 q5, [r0, #128] -; CHECK-LV-NEXT: vadd.i32 q2, q4, q2 -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #96] -; CHECK-LV-NEXT: vmov.f32 s25, s12 -; CHECK-LV-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-LV-NEXT: vmov.f32 s26, s15 -; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LV-NEXT: vmov.f32 s30, s14 -; CHECK-LV-NEXT: vstrw.32 q1, [r1] -; CHECK-LV-NEXT: vmov.f32 s24, s17 -; CHECK-LV-NEXT: vmov.f32 s27, s22 -; CHECK-LV-NEXT: vmov.f32 s28, s16 -; CHECK-LV-NEXT: vmov.f32 s29, s19 -; CHECK-LV-NEXT: vmov.f32 s31, s21 -; CHECK-LV-NEXT: vadd.i32 q6, q7, q6 -; CHECK-LV-NEXT: vmov.f32 s12, s18 -; CHECK-LV-NEXT: vmov.f32 s14, s20 -; CHECK-LV-NEXT: vmov.f32 s15, s23 -; CHECK-LV-NEXT: vadd.i32 q3, q6, q3 -; CHECK-LV-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-LV-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v16i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0] -; CHECK-LIS-NEXT: vmov.f32 s17, s4 -; CHECK-LIS-NEXT: vmov.f32 s18, s7 -; CHECK-LIS-NEXT: vmov.f32 s22, s6 -; CHECK-LIS-NEXT: vmov.f32 s16, s9 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s20, s8 -; CHECK-LIS-NEXT: vmov.f32 s21, s11 -; CHECK-LIS-NEXT: vmov.f32 s23, s13 -; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LIS-NEXT: vmov.f32 s4, s10 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0, #160] -; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #176] -; CHECK-LIS-NEXT: vmov.f32 s6, s12 -; CHECK-LIS-NEXT: vmov.f32 s7, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #144] -; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LIS-NEXT: vmov.f32 s18, s10 -; CHECK-LIS-NEXT: vmov.f32 s25, s8 -; CHECK-LIS-NEXT: vmov.f32 s26, s11 -; CHECK-LIS-NEXT: vmov.f32 s16, s12 -; CHECK-LIS-NEXT: vmov.f32 s17, s15 -; CHECK-LIS-NEXT: vmov.f32 s24, s13 -; CHECK-LIS-NEXT: vmov.f32 s27, s22 -; CHECK-LIS-NEXT: vmov.f32 s19, s21 -; CHECK-LIS-NEXT: vmov.f32 s8, s14 -; CHECK-LIS-NEXT: vadd.i32 q4, q4, q6 -; CHECK-LIS-NEXT: vmov.f32 s10, s20 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #112] -; 
CHECK-LIS-NEXT: vmov.f32 s11, s23 -; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #128] -; CHECK-LIS-NEXT: vadd.i32 q2, q4, q2 -; CHECK-LIS-NEXT: vldrw.u32 q4, [r0, #96] -; CHECK-LIS-NEXT: vmov.f32 s25, s12 -; CHECK-LIS-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-LIS-NEXT: vmov.f32 s26, s15 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LIS-NEXT: vmov.f32 s30, s14 -; CHECK-LIS-NEXT: vstrw.32 q1, [r1] -; CHECK-LIS-NEXT: vmov.f32 s24, s17 -; CHECK-LIS-NEXT: vmov.f32 s27, s22 -; CHECK-LIS-NEXT: vmov.f32 s28, s16 -; CHECK-LIS-NEXT: vmov.f32 s29, s19 -; CHECK-LIS-NEXT: vmov.f32 s31, s21 -; CHECK-LIS-NEXT: vadd.i32 q6, q7, q6 -; CHECK-LIS-NEXT: vmov.f32 s12, s18 -; CHECK-LIS-NEXT: vmov.f32 s14, s20 -; CHECK-LIS-NEXT: vmov.f32 s15, s23 -; CHECK-LIS-NEXT: vadd.i32 q3, q6, q3 -; CHECK-LIS-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v16i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vldrw.u32 q0, [r0, #64] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vldrw.u32 q6, [r0, #176] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vmov.f32 s17, s4 +; CHECK-NEXT: vmov.f32 s18, s7 +; CHECK-NEXT: vmov.f32 s22, s6 +; CHECK-NEXT: vmov.f32 s16, s9 +; CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmov.f32 s20, s8 +; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vmov.f32 s23, s13 +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vldrw.u32 q2, [r0, #160] +; CHECK-NEXT: vmov.f32 s6, s12 +; CHECK-NEXT: vadd.i32 q4, q5, q4 +; CHECK-NEXT: vmov.f32 s7, s15 +; CHECK-NEXT: vldrw.u32 q3, [r0, #144] +; CHECK-NEXT: vadd.i32 q1, q4, q1 +; CHECK-NEXT: vmov.f32 s18, s10 +; CHECK-NEXT: vmov.f32 s21, s8 +; CHECK-NEXT: vmov.f32 s22, s11 +; CHECK-NEXT: vmov.f32 s16, s12 +; CHECK-NEXT: vmov.f32 s17, s15 +; CHECK-NEXT: vmov.f32 s20, s13 +; CHECK-NEXT: vmov.f32 s23, s26 +; CHECK-NEXT: vmov.f32 s19, s25 +; CHECK-NEXT: vadd.i32 q4, q4, q5 +; CHECK-NEXT: vmov.f32 s8, s14 +; CHECK-NEXT: vmov.f32 s10, s24 +; CHECK-NEXT: vldrw.u32 q3, [r0, #112] +; CHECK-NEXT: vmov.f32 s11, s27 +; CHECK-NEXT: vldrw.u32 q5, [r0, #128] +; CHECK-NEXT: vadd.i32 q2, q4, q2 +; CHECK-NEXT: vldrw.u32 q4, [r0, #96] +; CHECK-NEXT: vmov.f32 s25, s12 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vmov.f32 s26, s15 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.f32 s30, s14 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vmov.f32 s24, s17 +; CHECK-NEXT: vmov.f32 s27, s22 +; CHECK-NEXT: vmov.f32 s28, s16 +; CHECK-NEXT: vmov.f32 s29, s19 +; CHECK-NEXT: vmov.f32 s31, s21 +; CHECK-NEXT: vadd.i32 q6, q7, q6 +; CHECK-NEXT: vmov.f32 s12, s18 +; CHECK-NEXT: vmov.f32 s14, s20 +; CHECK-NEXT: vmov.f32 s15, s23 +; CHECK-NEXT: vadd.i32 q3, q6, q3 +; CHECK-NEXT: vstrw.32 q3, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: bx lr entry: %l1 = load <48 x i32>, ptr %src, align 4