diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 7559efffffa54..f9f89cb9c74fb 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -2981,8 +2981,41 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                   : RS->scavengeRegisterBackwards(AMDGPU::SReg_32_XM0RegClass,
                                                   MI, false, 0, !UseSGPR);
 
-      // TODO: for flat scratch another attempt can be made with a VGPR index
-      // if no SGPRs can be scavenged.
+      // Fallback: If we need an SGPR but cannot scavenge one and there is no
+      // frame register, try to convert the flat-scratch instruction to use a
+      // VGPR index (SS -> SV) and materialize the offset in a VGPR.
+      unsigned Opc = MI->getOpcode();
+      int NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
+      if (!TmpSReg && !FrameReg && TII->isFLATScratch(*MI) && NewOpc != -1) {
+        // Reuse an existing VGPR temp if available, otherwise scavenge one.
+        Register VTmp = (!UseSGPR && TmpReg)
+                            ? TmpReg
+                            : RS->scavengeRegisterBackwards(
+                                  AMDGPU::VGPR_32RegClass, MI,
+                                  /*RestoreAfter=*/false, /*SPAdj=*/0);
+        if (VTmp) {
+          // Put the large offset into a VGPR and zero the immediate offset.
+          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), VTmp)
+              .addImm(Offset);
+
+          int OldSAddrIdx =
+              AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
+          int NewVAddrIdx =
+              AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
+
+          assert(OldSAddrIdx >= 0 && NewVAddrIdx >= 0 &&
+                 "Invalid address operand indexes");
+          MI->setDesc(TII->get(NewOpc));
+          MI->getOperand(NewVAddrIdx).ChangeToRegister(VTmp, false);
+          MachineOperand *OffOp =
+              TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
+
+          assert(OffOp && "Flat scratch SV form must have offset operand");
+          OffOp->setImm(0);
+          return false;
+        }
+      }
+
       if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
         report_fatal_error("Cannot scavenge register in FI elimination!");
 
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-ss-to-sv-scavenge.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-ss-to-sv-scavenge.ll
new file mode 100644
index 0000000000000..1505323d065c2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-ss-to-sv-scavenge.ll
@@ -0,0 +1,231 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -verify-machineinstrs -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 | FileCheck %s --check-prefix=GFX950
+
+; Ensure we don't crash with: "Cannot scavenge register in FI elimination!"
+define amdgpu_kernel void @issue155902(i64 %arg, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8, i64 %arg9, i64 %arg10, i64 %arg11, i64 %arg12, i64 %arg13, i64 %arg14, i64 %arg15, i64 %arg16, i64 %arg17, i64 %arg18, i64 %arg19, i64 %arg20, i64 %arg21, i64 %arg22, i64 %arg23, i64 %arg24, i64 %arg25, i64 %arg26, i64 %arg27, i64 %arg28, i64 %arg29, i64 %arg30, i64 %arg31, i64 %arg32, i64 %arg33, i64 %arg34, i64 %arg35, i64 %arg36, i64 %arg37, i64 %arg38, i64 %arg39, i64 %arg40, i64 %arg41, i64 %arg42, i64 %arg43, i64 %arg44, i64 %arg45, i64 %arg46, i64 %arg47, i64 %arg48, i64 %arg49) {
+; GFX950-LABEL: issue155902:
+; GFX950:       ; %bb.0: ; %bb
+; GFX950-NEXT:    s_mov_b32 s33, 0x4008
+; GFX950-NEXT:    s_mov_b64 s[2:3], s[4:5]
+; GFX950-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX950-NEXT:    s_load_dwordx2 vcc, s[2:3], 0x8
+; GFX950-NEXT:    s_load_dwordx2 s[98:99], s[2:3], 0x10
+; GFX950-NEXT:    s_load_dwordx2 s[96:97], s[2:3], 0x18
+; GFX950-NEXT:    s_load_dwordx2 s[94:95], s[2:3], 0x20
+; GFX950-NEXT:    s_load_dwordx2 s[92:93], s[2:3], 0x28
+; GFX950-NEXT:    s_load_dwordx2 s[90:91], s[2:3], 0x30
+; GFX950-NEXT:    s_load_dwordx2 s[88:89], s[2:3], 0x38
+; GFX950-NEXT:    s_load_dwordx2 s[86:87], s[2:3], 0x40
+; GFX950-NEXT:    s_load_dwordx2 s[84:85], s[2:3], 0x48
+; GFX950-NEXT:    s_load_dwordx2 s[82:83], s[2:3], 0x50
+; GFX950-NEXT:    s_load_dwordx2 s[80:81], s[2:3], 0x58
+; GFX950-NEXT:    s_load_dwordx2 s[78:79], s[2:3], 0x60
+; GFX950-NEXT:    s_load_dwordx2 s[76:77], s[2:3], 0x68
+; GFX950-NEXT:    s_load_dwordx2 s[74:75], s[2:3], 0x70
+; GFX950-NEXT:    s_load_dwordx2 s[72:73], s[2:3], 0x78
+; GFX950-NEXT:    s_load_dwordx2 s[70:71], s[2:3], 0x80
+; GFX950-NEXT:    s_load_dwordx2 s[68:69], s[2:3], 0x88
+; GFX950-NEXT:    s_load_dwordx2 s[66:67], s[2:3], 0x90
+; GFX950-NEXT:    s_load_dwordx2 s[64:65], s[2:3], 0x98
+; GFX950-NEXT:    s_load_dwordx2 s[62:63], s[2:3], 0xa0
+; GFX950-NEXT:    s_load_dwordx2 s[60:61], s[2:3], 0xa8
+; GFX950-NEXT:    s_load_dwordx2 s[58:59], s[2:3], 0xb0
+; GFX950-NEXT:    s_load_dwordx2 s[56:57], s[2:3], 0xb8
+; GFX950-NEXT:    s_load_dwordx2 s[54:55], s[2:3], 0xc0
+; GFX950-NEXT:    s_load_dwordx2 s[52:53], s[2:3], 0xc8
+; GFX950-NEXT:    s_load_dwordx2 s[50:51], s[2:3], 0xd0
+; GFX950-NEXT:    s_load_dwordx2 s[48:49], s[2:3], 0xd8
+; GFX950-NEXT:    s_load_dwordx2 s[46:47], s[2:3], 0xe0
+; GFX950-NEXT:    s_load_dwordx2 s[44:45], s[2:3], 0xe8
+; GFX950-NEXT:    s_load_dwordx2 s[42:43], s[2:3], 0xf0
+; GFX950-NEXT:    s_load_dwordx2 s[40:41], s[2:3], 0xf8
+; GFX950-NEXT:    s_load_dwordx2 s[38:39], s[2:3], 0x100
+; GFX950-NEXT:    s_load_dwordx2 s[36:37], s[2:3], 0x108
+; GFX950-NEXT:    s_load_dwordx2 s[34:35], s[2:3], 0x110
+; GFX950-NEXT:    s_load_dwordx2 s[30:31], s[2:3], 0x118
+; GFX950-NEXT:    s_load_dwordx2 s[28:29], s[2:3], 0x120
+; GFX950-NEXT:    s_load_dwordx2 s[26:27], s[2:3], 0x128
+; GFX950-NEXT:    s_load_dwordx2 s[24:25], s[2:3], 0x130
+; GFX950-NEXT:    s_load_dwordx2 s[22:23], s[2:3], 0x138
+; GFX950-NEXT:    s_load_dwordx2 s[20:21], s[2:3], 0x140
+; GFX950-NEXT:    s_load_dwordx2 s[18:19], s[2:3], 0x148
+; GFX950-NEXT:    s_load_dwordx2 s[16:17], s[2:3], 0x150
+; GFX950-NEXT:    s_load_dwordx2 s[14:15], s[2:3], 0x158
+; GFX950-NEXT:    s_load_dwordx2 s[12:13], s[2:3], 0x160
+; GFX950-NEXT:    s_load_dwordx2 s[10:11], s[2:3], 0x168
+; GFX950-NEXT:    s_load_dwordx2 s[8:9], s[2:3], 0x170
+; GFX950-NEXT:    s_load_dwordx2 s[6:7], s[2:3], 0x178
+; GFX950-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x180
+; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x188
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], 0
+; GFX950-NEXT:    v_mov_b32_e32 v3, 0x4008
+; GFX950-NEXT:    scratch_store_dwordx2 v3, v[0:1], off
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s33
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], 0x384
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s33 offset:16
+; GFX950-NEXT:    s_mov_b32 s33, 0
+; GFX950-NEXT:    ; implicit-def: $vgpr2 : SGPR spill to VGPR lane
+; GFX950-NEXT:    v_writelane_b32 v2, s33, 0
+; GFX950-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[0:1]
+; GFX950-NEXT:    v_readlane_b32 s0, v2, 0
+; GFX950-NEXT:    s_nop 4
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], vcc
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[98:99]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[96:97]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[94:95]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[92:93]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[90:91]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[88:89]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[86:87]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[84:85]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[82:83]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[80:81]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[78:79]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[76:77]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[74:75]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[72:73]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[70:71]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[68:69]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[66:67]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[64:65]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[62:63]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[60:61]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[58:59]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[56:57]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[54:55]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[52:53]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[50:51]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[48:49]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[46:47]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[44:45]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[42:43]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[40:41]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[38:39]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[36:37]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[34:35]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[30:31]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[28:29]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[26:27]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[24:25]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[22:23]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[20:21]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[18:19]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[16:17]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[14:15]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[12:13]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[10:11]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[8:9]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[6:7]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[4:5]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    v_mov_b64_e32 v[0:1], s[2:3]
+; GFX950-NEXT:    scratch_store_dwordx2 off, v[0:1], s0
+; GFX950-NEXT:    s_endpgm
+bb:
+  %alloca.big = alloca [4096 x i32], align 4, addrspace(5)
+  %alloca304 = alloca [2 x i64], align 8, addrspace(5)
+  %alloca307 = alloca i64, align 8, addrspace(5)
+  store [2 x i64] zeroinitializer, ptr addrspace(5) %alloca304, align 8
+  store i64 900, ptr addrspace(5) %alloca307, align 8
+  store i64 %arg, ptr addrspace(5) null, align 8
+  store i64 %arg1, ptr addrspace(5) null, align 8
+  store i64 %arg2, ptr addrspace(5) null, align 8
+  store i64 %arg3, ptr addrspace(5) null, align 8
+  store i64 %arg4, ptr addrspace(5) null, align 8
+  store i64 %arg5, ptr addrspace(5) null, align 8
+  store i64 %arg6, ptr addrspace(5) null, align 8
+  store i64 %arg7, ptr addrspace(5) null, align 8
+  store i64 %arg8, ptr addrspace(5) null, align 8
+  store i64 %arg9, ptr addrspace(5) null, align 8
+  store i64 %arg10, ptr addrspace(5) null, align 8
+  store i64 %arg11, ptr addrspace(5) null, align 8
+  store i64 %arg12, ptr addrspace(5) null, align 8
+  store i64 %arg13, ptr addrspace(5) null, align 8
+  store i64 %arg14, ptr addrspace(5) null, align 8
+  store i64 %arg15, ptr addrspace(5) null, align 8
+  store i64 %arg16, ptr addrspace(5) null, align 8
+  store i64 %arg17, ptr addrspace(5) null, align 8
+  store i64 %arg18, ptr addrspace(5) null, align 8
+  store i64 %arg19, ptr addrspace(5) null, align 8
+  store i64 %arg20, ptr addrspace(5) null, align 8
+  store i64 %arg21, ptr addrspace(5) null, align 8
+  store i64 %arg22, ptr addrspace(5) null, align 8
+  store i64 %arg23, ptr addrspace(5) null, align 8
+  store i64 %arg24, ptr addrspace(5) null, align 8
+  store i64 %arg25, ptr addrspace(5) null, align 8
+  store i64 %arg26, ptr addrspace(5) null, align 8
+  store i64 %arg27, ptr addrspace(5) null, align 8
+  store i64 %arg28, ptr addrspace(5) null, align 8
+  store i64 %arg29, ptr addrspace(5) null, align 8
+  store i64 %arg30, ptr addrspace(5) null, align 8
+  store i64 %arg31, ptr addrspace(5) null, align 8
+  store i64 %arg32, ptr addrspace(5) null, align 8
+  store i64 %arg33, ptr addrspace(5) null, align 8
+  store i64 %arg34, ptr addrspace(5) null, align 8
+  store i64 %arg35, ptr addrspace(5) null, align 8
+  store i64 %arg36, ptr addrspace(5) null, align 8
+  store i64 %arg37, ptr addrspace(5) null, align 8
+  store i64 %arg38, ptr addrspace(5) null, align 8
+  store i64 %arg39, ptr addrspace(5) null, align 8
+  store i64 %arg40, ptr addrspace(5) null, align 8
+  store i64 %arg41, ptr addrspace(5) null, align 8
+  store i64 %arg42, ptr addrspace(5) null, align 8
+  store i64 %arg43, ptr addrspace(5) null, align 8
+  store i64 %arg44, ptr addrspace(5) null, align 8
+  store i64 %arg45, ptr addrspace(5) null, align 8
+  store i64 %arg46, ptr addrspace(5) null, align 8
+  store i64 %arg47, ptr addrspace(5) null, align 8
+  store i64 %arg48, ptr addrspace(5) null, align 8
+  store i64 %arg49, ptr addrspace(5) null, align 8
+  ret void
+}