Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 18 additions & 4 deletions llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -902,14 +902,28 @@ bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI,
// really much we can do to fix this.
// Some special instructions use M0 as an input. Some even only use
// the first lane. Insert a readfirstlane and hope for the best.
if (DstReg == AMDGPU::M0 &&
TRI->hasVectorRegisters(MRI->getRegClass(SrcReg))) {
const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
if (DstReg == AMDGPU::M0 && TRI->hasVectorRegisters(SrcRC)) {
Register TmpReg =
MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)

const MCInstrDesc &ReadFirstLaneDesc =
TII->get(AMDGPU::V_READFIRSTLANE_B32);
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), ReadFirstLaneDesc, TmpReg)
.add(MI.getOperand(1));

unsigned SubReg = MI.getOperand(1).getSubReg();
MI.getOperand(1).setReg(TmpReg);
MI.getOperand(1).setSubReg(AMDGPU::NoSubRegister);

const TargetRegisterClass *OpRC = TII->getRegClass(ReadFirstLaneDesc, 1);
const TargetRegisterClass *ConstrainRC =
SubReg == AMDGPU::NoSubRegister
? OpRC
: TRI->getMatchingSuperRegClass(SrcRC, OpRC, SubReg);

if (!MRI->constrainRegClass(SrcReg, ConstrainRC))
llvm_unreachable("failed to constrain register");
} else if (tryMoveVGPRConstToSGPR(MI.getOperand(1), DstReg, MI.getParent(),
MI, MI.getDebugLoc())) {
I = std::next(I);
Expand Down
44 changes: 26 additions & 18 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8177,26 +8177,34 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
return;
}

if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
// Instead of creating a copy where src and dst are the same register
// class, we just replace all uses of dst with src. These kinds of
// copies interfere with the heuristics MachineSink uses to decide
// whether or not to split a critical edge. Since the pass assumes
// that copies will end up as machine instructions and not be
// eliminated.
addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual()) {
Register NewDstReg = Inst.getOperand(1).getReg();
MRI.replaceRegWith(DstReg, NewDstReg);
MRI.clearKillFlags(NewDstReg);
Inst.getOperand(0).setReg(DstReg);
Inst.eraseFromParent();
// Legalize t16 operand since replaceReg is called after addUsersToVALU
for (MachineOperand &MO :
make_early_inc_range(MRI.use_operands(NewDstReg))) {
legalizeOperandsVALUt16(*MO.getParent(), MRI);
const TargetRegisterClass *SrcRC = RI.getRegClassForReg(MRI, NewDstReg);
if (const TargetRegisterClass *CommonRC =
RI.getCommonSubClass(NewDstRC, SrcRC)) {
// Instead of creating a copy where src and dst are the same register
// class, we just replace all uses of dst with src. These kinds of
// copies interfere with the heuristics MachineSink uses to decide
// whether or not to split a critical edge. Since the pass assumes
// that copies will end up as machine instructions and not be
// eliminated.
addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
MRI.replaceRegWith(DstReg, NewDstReg);
MRI.clearKillFlags(NewDstReg);
Inst.getOperand(0).setReg(DstReg);

if (!MRI.constrainRegClass(NewDstReg, CommonRC))
llvm_unreachable("failed to constrain register");

Inst.eraseFromParent();
// Legalize t16 operand since replaceReg is called after addUsersToVALU
for (MachineOperand &MO :
make_early_inc_range(MRI.use_operands(NewDstReg))) {
legalizeOperandsVALUt16(*MO.getParent(), MRI);
}

return;
}
return;
}

// If this is a v2s copy between 16bit and 32bit reg,
Expand Down
28 changes: 16 additions & 12 deletions llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
Original file line number Diff line number Diff line change
Expand Up @@ -10733,15 +10733,16 @@ define void @flat_atomic_fmaximum_f64_ret_a_a(ptr %ptr) #0 {
; GFX90A-NEXT: buffer_load_dword v0, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: v_mov_b32_e32 v7, 0x7ff80000
; GFX90A-NEXT: s_waitcnt vmcnt(1)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
; GFX90A-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX90A-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc
; GFX90A-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB135_6: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: ;;#ASMSTART
Expand Down Expand Up @@ -11000,15 +11001,16 @@ define void @flat_atomic_fminimum_f64_ret_a_a(ptr %ptr) #0 {
; GFX90A-NEXT: buffer_load_dword v0, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: v_mov_b32_e32 v7, 0x7ff80000
; GFX90A-NEXT: s_waitcnt vmcnt(1)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
; GFX90A-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX90A-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc
; GFX90A-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB137_6: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: ;;#ASMSTART
Expand Down Expand Up @@ -19023,15 +19025,16 @@ define void @flat_atomic_fmaximum_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: buffer_load_dword v0, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: v_mov_b32_e32 v7, 0x7ff80000
; GFX90A-NEXT: s_waitcnt vmcnt(1)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
; GFX90A-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX90A-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc
; GFX90A-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB243_6: ; %atomicrmw.phi
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use a[0:1]
Expand Down Expand Up @@ -19282,15 +19285,16 @@ define void @flat_atomic_fminimum_f64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: buffer_load_dword v0, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: v_mov_b32_e32 v7, 0x7ff80000
; GFX90A-NEXT: s_waitcnt vmcnt(1)
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
; GFX90A-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
; GFX90A-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX90A-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc
; GFX90A-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen
; GFX90A-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB245_6: ; %atomicrmw.phi
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use a[0:1]
Expand Down
17 changes: 8 additions & 9 deletions llvm/test/CodeGen/AMDGPU/copy-to-reg-frameindex.ll
Original file line number Diff line number Diff line change
Expand Up @@ -43,26 +43,25 @@ define void @phi_with_alloca_and_divergent_copy_to_reg(ptr addrspace(5) %diverge
; CHECK-LABEL: phi_with_alloca_and_divergent_copy_to_reg:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_lshr_b32 s6, s32, 6
; CHECK-NEXT: v_mov_b32_e32 v7, v2
; CHECK-NEXT: v_mov_b32_e32 v6, v1
; CHECK-NEXT: s_mov_b64 s[4:5], 0
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: v_lshrrev_b32_e64 v2, 6, s32
; CHECK-NEXT: .LBB1_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: v_add_u32_e32 v8, 1, v3
; CHECK-NEXT: v_lshl_add_u32 v5, v3, 2, v1
; CHECK-NEXT: v_cmp_lt_u32_e32 vcc, 15, v8
; CHECK-NEXT: v_mov_b32_e32 v2, v1
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen
; CHECK-NEXT: v_mov_b32_e32 v1, v2
; CHECK-NEXT: v_lshl_add_u32 v2, v3, 2, v1
; CHECK-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen
; CHECK-NEXT: v_add_u32_e32 v2, 1, v3
; CHECK-NEXT: v_cmp_lt_u32_e32 vcc, 15, v2
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: v_mov_b32_e32 v3, v4
; CHECK-NEXT: v_mov_b32_e32 v2, v0
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execnz .LBB1_1
; CHECK-NEXT: ; %bb.2: ; %done
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: buffer_load_dword v0, v2, s[0:3], 0 offen
; CHECK-NEXT: buffer_load_dword v0, v1, s[0:3], 0 offen
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_dword v[6:7], v0, off
; CHECK-NEXT: s_waitcnt vmcnt(0)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,4 +49,19 @@ bb16: ; preds = %bb16, %bb
br label %bb16
}


; Test that a copy of a divergent VGPR value into the physical register M0
; is lowered via v_readfirstlane_b32 into an SGPR, followed by s_mov_b32 to
; M0 (see the CHECK lines below). The value is made divergent by loading it
; per-lane from a global pointer; presumably the register class of the load
; result exercises the AV-class constraint path suggested by the test name —
; TODO(review): confirm against the lowering code this test accompanies.
define void @av_class_to_m0(ptr addrspace(1) %ptr) {
; CHECK-LABEL: av_class_to_m0:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: global_load_dword v0, v[0:1], off
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s4, v0
; CHECK-NEXT: s_mov_b32 m0, s4
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use m0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_setpc_b64 s[30:31]
%load = load i32, ptr addrspace(1) %ptr
; The "{m0}" constraint forces the loaded value into M0; since M0 is a
; scalar register, codegen must insert the readfirstlane checked above.
call void asm sideeffect "; use $0", "{m0}"(i32 %load)
ret void
}
Loading
Loading