diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 8e35109061792..a41723e1e9db8 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2718,6 +2718,15 @@ def int_amdgcn_call_whole_wave:
              llvm_vararg_ty],  // The arguments to the callee.
             [IntrConvergent]>;
 
+//
+// llvm.amdgcn.subgroup.shuffle
+// The value and the result may be any scalar floating-point, integer,
+// or Boolean type, but both must have the same type.
+def int_amdgcn_subgroup_shuffle :
+  Intrinsic<[llvm_any_ty],                   // return types
+            [LLVMMatchType<0>, llvm_i32_ty], // arg types
+            [IntrConvergent, IntrNoMem, IntrNoFree, IntrWillReturn, IntrNoCallback]>; // flags
+
 //===----------------------------------------------------------------------===//
 // CI+ Intrinsics
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index da4bd878b8853..7c38f474cc38d 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1868,6 +1868,10 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
   bool requiresWaitsBeforeSystemScopeStores() const {
     return RequiresWaitsBeforeSystemScopeStores;
   }
+
+  bool supportsWaveWideBPermute() const {
+    return getGeneration() == AMDGPUSubtarget::GFX12 || isWave32();
+  }
 };
 
 class GCNUserSGPRUsageInfo {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8bb28084159e8..6d867f0eaf9ae 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7269,6 +7269,83 @@ static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
   return DAG.getBitcast(VT, UnrolledLaneOp);
 }
 
+static SDValue lowerSubgroupShuffle(const SITargetLowering &TLI, SDNode *N,
+                                    SelectionDAG &DAG) {
+  EVT VT = N->getValueType(0);
+  unsigned ValSize = VT.getSizeInBits();
+  SDLoc SL(N);
+
+  SDValue Value = N->getOperand(1);
+  SDValue Index = N->getOperand(2);
+
+  // ds_bpermute requires the index to be multiplied by 4 (it is a byte offset).
+  SDValue ShiftAmount = DAG.getTargetConstant(2, SL, MVT::i32);
+  SDValue ShiftedIndex = DAG.getNode(ISD::SHL, SL, Index.getValueType(), Index,
+                                     ShiftAmount);
+
+  // The intrinsics used below operate on i32.
+  SDValue Value32 = Value;
+  if ((ValSize != 32) || (VT.isFloatingPoint()))
+    Value32 = DAG.getBitcast(MVT::i32, Value);
+
+  auto MakeIntrinsic = [&DAG, &SL](unsigned IID, MVT RetVT,
+                                   SmallVector<SDValue> IntrinArgs) -> SDValue {
+    SmallVector<SDValue> Operands(1);
+    Operands[0] = DAG.getTargetConstant(IID, SL, MVT::i32);
+    Operands.append(IntrinArgs);
+    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, RetVT, Operands);
+  };
+
+  if (TLI.getSubtarget()->supportsWaveWideBPermute()) {
+    // If we can bpermute across the whole wave, just do that.
+    SDValue BPermute = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute, MVT::i32,
+                                     {ShiftedIndex, Value32});
+    return DAG.getBitcast(VT, BPermute);
+  } else {
+    assert(TLI.getSubtarget()->isWave64());
+
+    // Otherwise, we need to make use of whole wave mode.
+    SDValue PoisonVal = DAG.getPOISON(Value32->getValueType(0));
+    SDValue PoisonIndex = DAG.getPOISON(ShiftedIndex->getValueType(0));
+
+    // Set inactive lanes to poison.
+    SDValue WWMValue = MakeIntrinsic(Intrinsic::amdgcn_set_inactive, MVT::i32,
+                                     {Value32, PoisonVal});
+    SDValue WWMIndex = MakeIntrinsic(Intrinsic::amdgcn_set_inactive, MVT::i32,
+                                     {ShiftedIndex, PoisonIndex});
+
+    SDValue Swapped =
+        MakeIntrinsic(Intrinsic::amdgcn_permlane64, MVT::i32, {WWMValue});
+
+    // Get the permutation of each half, then we'll select which one to use.
+    SDValue BPermSameHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+                                          MVT::i32, {WWMIndex, WWMValue});
+    SDValue BPermOtherHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+                                           MVT::i32, {WWMIndex, Swapped});
+    SDValue BPermOtherHalfWWM =
+        MakeIntrinsic(Intrinsic::amdgcn_wwm, MVT::i32, {BPermOtherHalf});
+
+    // Select which half to take the permute from.
+    SDValue ThreadIDMask = DAG.getTargetConstant(UINT32_MAX, SL, MVT::i32);
+    SDValue ThreadIDLo =
+        MakeIntrinsic(Intrinsic::amdgcn_mbcnt_lo, MVT::i32,
+                      {ThreadIDMask, DAG.getTargetConstant(0, SL, MVT::i32)});
+    SDValue ThreadID = MakeIntrinsic(Intrinsic::amdgcn_mbcnt_hi, MVT::i32,
+                                     {ThreadIDMask, ThreadIDLo});
+
+    SDValue SameOrOtherHalf =
+        DAG.getNode(ISD::AND, SL, MVT::i32,
+                    DAG.getNode(ISD::XOR, SL, MVT::i32, ThreadID, Index),
+                    DAG.getTargetConstant(32, SL, MVT::i32));
+    SDValue UseSameHalf =
+        DAG.getSetCC(SL, MVT::i1, SameOrOtherHalf,
+                     DAG.getConstant(0, SL, MVT::i32), ISD::SETEQ);
+    SDValue Result = DAG.getSelect(SL, MVT::i32, UseSameHalf, BPermSameHalf,
+                                   BPermOtherHalfWWM);
+    return DAG.getBitcast(VT, Result);
+  }
+}
+
 void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
@@ -10176,6 +10253,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
       Poisons.push_back(DAG.getPOISON(ValTy));
     return DAG.getMergeValues(Poisons, SDLoc(Op));
   }
+  case Intrinsic::amdgcn_subgroup_shuffle:
+    return lowerSubgroupShuffle(*this, Op.getNode(), DAG);
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
new file mode 100644
index 0000000000000..4572c0ff9a2f1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11-W32 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-W32 %s
+
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11-W64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-W64 %s
+
+declare float @llvm.amdgcn.subgroup.shuffle.float(float, i32)
+
+define float @test_subgroup_shuffle_scalar(float %val, i32 %idx) {
+; GFX11-W32-LABEL: test_subgroup_shuffle_scalar:
+; GFX11-W32:       ; %bb.0: ; %entry
+; GFX11-W32-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-W32-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-W32-NEXT:    ds_bpermute_b32 v0, v1, v0
+; GFX11-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-W32-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-W32-LABEL: test_subgroup_shuffle_scalar:
+; GFX12-W32:       ; %bb.0: ; %entry
+; GFX12-W32-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-W32-NEXT:    s_wait_expcnt 0x0
+; GFX12-W32-NEXT:    s_wait_samplecnt 0x0
+; GFX12-W32-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-W32-NEXT:    s_wait_kmcnt 0x0
+; GFX12-W32-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-W32-NEXT:    ds_bpermute_b32 v0, v1, v0
+; GFX12-W32-NEXT:    s_wait_dscnt 0x0
+; GFX12-W32-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-W64-LABEL: test_subgroup_shuffle_scalar:
+; GFX11-W64:       ; %bb.0: ; %entry
+; GFX11-W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-W64-NEXT:    s_xor_saveexec_b64 s[0:1], -1
+; GFX11-W64-NEXT:    scratch_store_b32 off, v2, s32 ; 4-byte Folded Spill
+; GFX11-W64-NEXT:    s_mov_b64 exec, s[0:1]
+; GFX11-W64-NEXT:    v_lshlrev_b32_e32 v3, 2, v1
+; GFX11-W64-NEXT:    ; kill: def $vgpr0 killed $vgpr0 killed $exec
+; GFX11-W64-NEXT:    ; kill: def $vgpr3 killed $vgpr3 killed $exec
+; GFX11-W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
+; GFX11-W64-NEXT:    v_permlane64_b32 v2, v0
+; GFX11-W64-NEXT:    ds_bpermute_b32 v2, v3, v2
+; GFX11-W64-NEXT:    s_mov_b64 exec, s[0:1]
+; GFX11-W64-NEXT:    v_mbcnt_lo_u32_b32 v4, -1, 0
+; GFX11-W64-NEXT:    ds_bpermute_b32 v0, v3, v0
+; GFX11-W64-NEXT:    v_mbcnt_hi_u32_b32 v3, -1, v4
+; GFX11-W64-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-W64-NEXT:    v_xor_b32_e32 v1, v3, v1
+; GFX11-W64-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX11-W64-NEXT:    v_mov_b32_e32 v3, v2
+; GFX11-W64-NEXT:    v_and_b32_e32 v1, 32, v1
+; GFX11-W64-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-W64-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX11-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-W64-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX11-W64-NEXT:    s_xor_saveexec_b64 s[0:1], -1
+; GFX11-W64-NEXT:    scratch_load_b32 v2, off, s32 ; 4-byte Folded Reload
+; GFX11-W64-NEXT:    s_mov_b64 exec, s[0:1]
+; GFX11-W64-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-W64-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-W64-LABEL: test_subgroup_shuffle_scalar:
+; GFX12-W64:       ; %bb.0: ; %entry
+; GFX12-W64-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-W64-NEXT:    s_wait_expcnt 0x0
+; GFX12-W64-NEXT:    s_wait_samplecnt 0x0
+; GFX12-W64-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-W64-NEXT:    s_wait_kmcnt 0x0
+; GFX12-W64-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-W64-NEXT:    ds_bpermute_b32 v0, v1, v0
+; GFX12-W64-NEXT:    s_wait_dscnt 0x0
+; GFX12-W64-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %0 = tail call float @llvm.amdgcn.subgroup.shuffle(float %val, i32 %idx)
+  ret float %0
+}
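
For reference, the snippet below is an illustrative sketch of how a frontend might emit the new intrinsic; it is not part of the patch. The `.f32` overload suffix and the broadcast-from-lane-0 use case are assumptions made for the example, not taken from the test above.

; Illustrative only: broadcast lane 0's value to every active lane of the
; subgroup using the new intrinsic (assumed .f32 mangling of llvm_any_ty).
declare float @llvm.amdgcn.subgroup.shuffle.f32(float, i32)

define float @broadcast_first_lane(float %v) {
entry:
  ; Every lane reads the value held by lane 0 of the subgroup.
  %r = tail call float @llvm.amdgcn.subgroup.shuffle.f32(float %v, i32 0)
  ret float %r
}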