From 766cb615a3b96025192707f4670cdf171da84034 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Sun, 19 Jul 2020 22:57:24 -0400
Subject: [PATCH] AMDGPU: Relax restriction on folding immediates into
 physregs

I never completed the work on the patches referenced by
f8bf7d7f42f28fa18144091022236208e199f331, but this was intended to
avoid folding immediate writes into m0 which the coalescer doesn't
understand very well.

Relax this to allow simple SGPR immediates to fold directly into VGPR
copies. This pattern shows up routinely in current GlobalISel code
since nothing is smart enough to emit VGPR constants yet.
---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     | 53 +++++++++----------
 .../llvm.amdgcn.kernarg.segment.ptr.ll        |  6 +--
 .../CodeGen/AMDGPU/GlobalISel/zextload.ll     | 27 ++++------
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir    | 28 ++++++++++
 4 files changed, 64 insertions(+), 50 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index ea1d20f7387df..88d7304550f60 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -643,38 +643,35 @@ void SIFoldOperands::foldOperand(
 
   if (FoldingImmLike && UseMI->isCopy()) {
     Register DestReg = UseMI->getOperand(0).getReg();
+    Register SrcReg = UseMI->getOperand(1).getReg();
+    assert(SrcReg.isVirtual());
+
+    const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
 
-    // Don't fold into a copy to a physical register. Doing so would interfere
-    // with the register coalescer's logic which would avoid redundant
-    // initalizations.
-    if (DestReg.isPhysical())
+    // Don't fold into a copy to a physical register with the same class. Doing
+    // so would interfere with the register coalescer's logic which would avoid
+    // redundant initalizations.
+    if (DestReg.isPhysical() && SrcRC->contains(DestReg))
       return;
 
-    const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);
+    const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
+    if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
+      MachineRegisterInfo::use_iterator NextUse;
+      SmallVector<FoldCandidate, 4> CopyUses;
+      for (MachineRegisterInfo::use_iterator Use = MRI->use_begin(DestReg),
+                                             E = MRI->use_end();
+           Use != E; Use = NextUse) {
+        NextUse = std::next(Use);
+        // There's no point trying to fold into an implicit operand.
+        if (Use->isImplicit())
+          continue;
 
-    Register SrcReg = UseMI->getOperand(1).getReg();
-    if (SrcReg.isVirtual()) { // XXX - This can be an assert?
-      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
-      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
-        MachineRegisterInfo::use_iterator NextUse;
-        SmallVector<FoldCandidate, 4> CopyUses;
-        for (MachineRegisterInfo::use_iterator
-               Use = MRI->use_begin(DestReg), E = MRI->use_end();
-             Use != E; Use = NextUse) {
-          NextUse = std::next(Use);
-
-          // There's no point trying to fold into an implicit operand.
-          if (Use->isImplicit())
-            continue;
-
-          FoldCandidate FC = FoldCandidate(Use->getParent(),
-            Use.getOperandNo(), &UseMI->getOperand(1));
-          CopyUses.push_back(FC);
-        }
-        for (auto & F : CopyUses) {
-          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
-                      FoldList, CopiesToReplace);
-        }
+        FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
+                                         &UseMI->getOperand(1));
+        CopyUses.push_back(FC);
+      }
+      for (auto &F : CopyUses) {
+        foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
       }
     }
 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.kernarg.segment.ptr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.kernarg.segment.ptr.ll
index e3fd488af5e0d..f44c91ff6710d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.kernarg.segment.ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.kernarg.segment.ptr.ll
@@ -115,10 +115,8 @@ define amdgpu_kernel void @opencl_test_implicit_alignment_no_explicit_kernargs_r
 }
 
 ; ALL-LABEL: {{^}}func_kernarg_segment_ptr:
-; ALL: s_mov_b32 [[S_LO:s[0-9]+]], 0{{$}}
-; ALL: s_mov_b32 [[S_HI:s[0-9]+]], 0{{$}}
-; ALL: v_mov_b32_e32 v0, [[S_LO]]{{$}}
-; ALL: v_mov_b32_e32 v1, [[S_HI]]{{$}}
+; ALL: v_mov_b32_e32 v0, 0{{$}}
+; ALL: v_mov_b32_e32 v1, 0{{$}}
 define i8 addrspace(4)* @func_kernarg_segment_ptr() {
   %ptr = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
   ret i8 addrspace(4)* %ptr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/zextload.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/zextload.ll
index dc899ed8ba98a..1ebca23e3eda7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/zextload.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/zextload.ll
@@ -139,9 +139,8 @@ define i96 @zextload_global_i32_to_i96(i32 addrspace(1)* %ptr) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    global_load_dword v0, v[0:1], off
-; GFX9-NEXT:    s_mov_b32 s4, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -149,9 +148,8 @@ define i96 @zextload_global_i32_to_i96(i32 addrspace(1)* %ptr) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    flat_load_dword v0, v[0:1]
-; GFX8-NEXT:    s_mov_b32 s4, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -162,9 +160,8 @@ define i96 @zextload_global_i32_to_i96(i32 addrspace(1)* %ptr) {
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX6-NEXT:    buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
-; GFX6-NEXT:    s_mov_b32 s4, 0
 ; GFX6-NEXT:    v_mov_b32_e32 v1, 0
-; GFX6-NEXT:    v_mov_b32_e32 v2, s4
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
   %load = load i32, i32 addrspace(1)* %ptr
@@ -177,11 +174,9 @@ define i128 @zextload_global_i32_to_i128(i32 addrspace(1)* %ptr) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    global_load_dword v0, v[0:1], off
-; GFX9-NEXT:    s_mov_b32 s4, 0
-; GFX9-NEXT:    s_mov_b32 s5, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -189,11 +184,9 @@ define i128 @zextload_global_i32_to_i128(i32 addrspace(1)* %ptr) {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    flat_load_dword v0, v[0:1]
-; GFX8-NEXT:    s_mov_b32 s4, 0
-; GFX8-NEXT:    s_mov_b32 s5, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s4
-; GFX8-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -204,11 +197,9 @@ define i128 @zextload_global_i32_to_i128(i32 addrspace(1)* %ptr) {
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX6-NEXT:    buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
-; GFX6-NEXT:    s_mov_b32 s4, 0
-; GFX6-NEXT:    s_mov_b32 s5, 0
 ; GFX6-NEXT:    v_mov_b32_e32 v1, 0
-; GFX6-NEXT:    v_mov_b32_e32 v2, s4
-; GFX6-NEXT:    v_mov_b32_e32 v3, s5
+; GFX6-NEXT:    v_mov_b32_e32 v2, 0
+; GFX6-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
   %load = load i32, i32 addrspace(1)* %ptr
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index c53b817344c50..9164e5e267914 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -59,3 +59,31 @@ body: |
     S_ENDPGM 0, implicit %1, implicit %2
 
 ...
+
+# GCN-LABEL: name: no_fold_imm_into_m0{{$}}
+# GCN: %0:sreg_32 = S_MOV_B32 -8
+# GCN-NEXT: $m0 = COPY %0
+
+---
+name: no_fold_imm_into_m0
+tracksRegLiveness: true
+body: |
+  bb.0:
+    %0:sreg_32 = S_MOV_B32 -8
+    $m0 = COPY %0
+    S_ENDPGM 0, implicit $m0
+
+...
+
+# GCN-LABEL: name: fold_sgpr_imm_to_vgpr_copy{{$}}
+# GCN: $vgpr0 = V_MOV_B32_e32 -8, implicit $exec
+---
+name: fold_sgpr_imm_to_vgpr_copy
+tracksRegLiveness: true
+body: |
+  bb.0:
+    %0:sreg_32 = S_MOV_B32 -8
+    $vgpr0 = COPY %0
+    S_ENDPGM 0, implicit $vgpr0
+
+...
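
For reference, a minimal sketch of how to exercise the relaxed fold outside the
lit tests, assuming a gfx900 target and a hypothetical standalone file
sgpr-imm-fold.mir (neither is part of this patch):

    # llc -march=amdgcn -mcpu=gfx900 -run-pass=si-fold-operands \
    #     -verify-machineinstrs -o - sgpr-imm-fold.mir
    ---
    name: fold_example
    tracksRegLiveness: true
    body: |
      bb.0:
        ; SGPR immediate feeding a copy to a physical VGPR
        %0:sreg_32 = S_MOV_B32 42
        $vgpr0 = COPY %0
        S_ENDPGM 0, implicit $vgpr0
    ...

Before this change the early return on physical destination registers kept the
S_MOV_B32/COPY pair; with it, the copy into $vgpr0 should fold to
$vgpr0 = V_MOV_B32_e32 42, implicit $exec, matching the new
fold_sgpr_imm_to_vgpr_copy test. The m0 copy is still rejected because $m0 is
contained in the source's SGPR register class.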