diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index c1b515dae3ebe..997f677ac54f4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1102,8 +1102,8 @@ static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                   MachineIRBuilder &MIRBuilder,
                                   AMDGPUCallLowering::CallLoweringInfo &Info) {
   if (Info.Callee.isReg()) {
+    CallInst.addReg(Info.Callee.getReg());
     CallInst.addImm(0);
-    CallInst.add(Info.Callee);
   } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
     // The call lowering lightly assumed we can directly encode a call target in
     // the instruction, which is not the case. Materialize the address here.
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
new file mode 100644
index 0000000000000..96eb596dabc51
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -amdgpu-fixed-function-abi -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s
+
+define amdgpu_kernel void @test_indirect_call_sgpr_ptr(void()* %fptr) {
+  ; CHECK-LABEL: name: test_indirect_call_sgpr_ptr
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK:   liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
+  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
+  ; CHECK:   [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
+  ; CHECK:   [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
+  ; CHECK:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
+  ; CHECK:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK:   [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
+  ; CHECK:   [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK:   [[LOAD:%[0-9]+]]:sreg_64(p0) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load 8 from %ir.fptr.kernarg.offset.cast, align 16, addrspace 4)
+  ; CHECK:   ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
+  ; CHECK:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
+  ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; CHECK:   [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
+  ; CHECK:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK:   [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
+  ; CHECK:   [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
+  ; CHECK:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK:   [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
+  ; CHECK:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; CHECK:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK:   $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK:   $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK:   $sgpr30_sgpr31 = SI_CALL [[LOAD]](p0), 0, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
+  ; CHECK:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK:   S_ENDPGM 0
+  call void %fptr()
+  ret void
+}
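For context, the operand reordering in addCallTargetOperands is exactly what the new MIR test checks at the SI_CALL line: the loaded callee pointer ([[LOAD]](p0)) is now the first use operand, followed by the 0 immediate, rather than the other way around. Below is a minimal sketch of the fixed register-callee path; addRegisterCallee and the includes are illustrative only, the real logic sits inline in addCallTargetOperands.

// Sketch only: how the register (indirect) call target is added after this
// patch. addRegisterCallee is a hypothetical helper for illustration; in the
// patch this code is part of addCallTargetOperands in AMDGPUCallLowering.cpp.
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"

using namespace llvm;

static void addRegisterCallee(MachineInstrBuilder &CallInst,
                              const MachineOperand &Callee) {
  // SI_CALL takes the callee as its first use operand, so the register
  // holding the function pointer is added before the dummy 0 immediate;
  // previously the immediate was added first.
  CallInst.addReg(Callee.getReg());
  CallInst.addImm(0);
}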