[CodeGenPrepare] Consider target memory intrinsics as memory use #159638
Conversation
@llvm/pr-subscribers-backend-amdgpu

Author: Jeffrey Byrnes (jrbyrnes)

Changes

When deciding whether to sink address instructions into their uses, we check if it is profitable to do so. The profitability check is based on the kinds of users the address instruction has: if there are users which are not memory instructions, do not fold. However, this profitability check wasn't considering target intrinsics, which may be loads / stores. This adds logic to handle target memory intrinsics. (A reduced sketch of the accepted and rejected address uses follows the diff.)

Full diff: https://github.com/llvm/llvm-project/pull/159638.diff

2 Files Affected:
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index a190f0dac1379..ae499e773e7b2 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -5596,6 +5596,20 @@ static bool FindAllMemoryUses(
continue;
}
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI)) {
+ SmallVector<Value *, 2> PtrOps;
+ Type *AccessTy;
+ if (!TLI.getAddrModeArguments(II, PtrOps, AccessTy))
+ return true;
+
+ auto PtrVal = U.get();
+ if (!is_contained(PtrOps, PtrVal))
+ return true;
+
+ MemoryUses.push_back({&U, AccessTy});
+ continue;
+ }
+
if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
if (CI->hasFnAttr(Attribute::Cold)) {
// If this is a cold call, we can sink the addressing calculation into
diff --git a/llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll
new file mode 100644
index 0000000000000..b9838d8e39033
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll
@@ -0,0 +1,166 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck %s
+
+define protected amdgpu_kernel void @memoryIntrinsic(ptr addrspace(3) %inptr, i1 %cond, ptr addrspace(3) %outptr) {
+; CHECK-LABEL: memoryIntrinsic:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_bitcmp0_b32 s1, 0
+; CHECK-NEXT: s_cbranch_scc0 .LBB0_2
+; CHECK-NEXT: ; %bb.1: ; %else
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: ds_read_b64_tr_b16 v[2:3], v0 offset:8192
+; CHECK-NEXT: s_mov_b32 s1, 0x7060302
+; CHECK-NEXT: s_mov_b32 s3, 0x5040100
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_perm_b32 v0, v3, v2, s1
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, s3
+; CHECK-NEXT: s_cbranch_execz .LBB0_3
+; CHECK-NEXT: s_branch .LBB0_4
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: ; implicit-def: $vgpr1
+; CHECK-NEXT: .LBB0_3: ; %then
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: ds_read_b64_tr_b16 v[2:3], v0 offset:8192
+; CHECK-NEXT: s_mov_b32 s0, 0x5040100
+; CHECK-NEXT: s_mov_b32 s1, 0x7060302
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, s1
+; CHECK-NEXT: .LBB0_4: ; %end
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: ds_write_b64 v2, v[0:1]
+; CHECK-NEXT: s_endpgm
+ %gep0 = getelementptr ptr addrspace(3), ptr addrspace(3) %inptr, i32 2048
+ br i1 %cond, label %then, label %else
+
+then:
+ %load0 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+ %shuf0 = shufflevector <4 x half> %load0, <4 x half> %load0, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ br label %end
+
+else:
+ %load1 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+ %shuf1 = shufflevector <4 x half> %load1, <4 x half> %load1, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ br label %end
+
+end:
+ %res = phi <4 x half> [ %shuf0, %then ], [ %shuf1, %else ]
+ store <4 x half> %res, ptr addrspace(3) %outptr
+ ret void
+}
+
+
+
+define protected amdgpu_kernel void @badIntrinsicUse(ptr addrspace(3) %inptr, i1 %cond, ptr addrspace(3) %outptr, <4 x i32> %rsrc) {
+; CHECK-LABEL: badIntrinsicUse:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_and_b32 s1, s1, 1
+; CHECK-NEXT: s_add_i32 s3, s0, 0x2000
+; CHECK-NEXT: s_cmp_eq_u32 s1, 0
+; CHECK-NEXT: s_cbranch_scc0 .LBB1_2
+; CHECK-NEXT: ; %bb.1: ; %else
+; CHECK-NEXT: v_mov_b32_e32 v0, s3
+; CHECK-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x10
+; CHECK-NEXT: ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT: s_mov_b32 s0, 0x7060302
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; CHECK-NEXT: v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT: s_mov_b32 s0, 0x5040100
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, s0
+; CHECK-NEXT: s_cbranch_execz .LBB1_3
+; CHECK-NEXT: s_branch .LBB1_4
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ; implicit-def: $vgpr1
+; CHECK-NEXT: .LBB1_3: ; %then
+; CHECK-NEXT: v_mov_b32_e32 v0, s3
+; CHECK-NEXT: ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT: s_mov_b32 s0, 0x5040100
+; CHECK-NEXT: s_mov_b32 s1, 0x7060302
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, s1
+; CHECK-NEXT: .LBB1_4: ; %end
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: ds_write_b64 v2, v[0:1]
+; CHECK-NEXT: s_endpgm
+ %gep0 = getelementptr ptr addrspace(3), ptr addrspace(3) %inptr, i32 2048
+ br i1 %cond, label %then, label %else
+
+then:
+ %load0 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+ %shuf0 = shufflevector <4 x half> %load0, <4 x half> %load0, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ br label %end
+
+else:
+ %load1 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+ call void @llvm.amdgcn.raw.buffer.store(ptr addrspace(3) %gep0, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %shuf1 = shufflevector <4 x half> %load1, <4 x half> %load1, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ br label %end
+
+end:
+ %res = phi <4 x half> [ %shuf0, %then ], [ %shuf1, %else ]
+ store <4 x half> %res, ptr addrspace(3) %outptr
+ ret void
+}
+
+define protected amdgpu_kernel void @badIntrinsicUse2(ptr addrspace(3) %inptr, i1 %cond, ptr addrspace(3) %outptr, ptr addrspace(3) %outptr1) {
+; CHECK-LABEL: badIntrinsicUse2:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_and_b32 s1, s1, 1
+; CHECK-NEXT: s_add_i32 s4, s0, 0x2000
+; CHECK-NEXT: s_cmp_eq_u32 s1, 0
+; CHECK-NEXT: s_cbranch_scc0 .LBB2_2
+; CHECK-NEXT: ; %bb.1: ; %else
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT: v_mov_b32_e32 v0, s3
+; CHECK-NEXT: v_mov_b32_e32 v1, s4
+; CHECK-NEXT: s_mov_b32 s0, 0x7060302
+; CHECK-NEXT: ds_write_b32 v0, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(1)
+; CHECK-NEXT: v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT: s_mov_b32 s0, 0x5040100
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, s0
+; CHECK-NEXT: s_cbranch_execz .LBB2_3
+; CHECK-NEXT: s_branch .LBB2_4
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: ; implicit-def: $vgpr1
+; CHECK-NEXT: .LBB2_3: ; %then
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT: s_mov_b32 s0, 0x5040100
+; CHECK-NEXT: s_mov_b32 s1, 0x7060302
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT: v_perm_b32 v1, v3, v2, s1
+; CHECK-NEXT: .LBB2_4: ; %end
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: ds_write_b64 v2, v[0:1]
+; CHECK-NEXT: s_endpgm
+ %gep0 = getelementptr ptr addrspace(3), ptr addrspace(3) %inptr, i32 2048
+ br i1 %cond, label %then, label %else
+
+then:
+ %load0 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+ %shuf0 = shufflevector <4 x half> %load0, <4 x half> %load0, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ br label %end
+
+else:
+ %load1 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+ %gep1 = call ptr addrspace(3) @llvm.amdgcn.readfirstlane(ptr addrspace(3) %gep0)
+ store ptr addrspace(3) %gep1, ptr addrspace(3) %outptr1
+ %shuf1 = shufflevector <4 x half> %load1, <4 x half> %load1, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+ br label %end
+
+end:
+ %res = phi <4 x half> [ %shuf0, %then ], [ %shuf1, %else ]
+ store <4 x half> %res, ptr addrspace(3) %outptr
+ ret void
+}
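
As a reduced illustration of the distinction the new IntrinsicInst handling draws (hypothetical IR, not part of the diff above; function and value names are made up): an address that a target intrinsic reports as a pointer operand through getAddrModeArguments is recorded as a memory use, while an address that an intrinsic only consumes as plain data makes FindAllMemoryUses bail out, as @badIntrinsicUse2 exercises.

declare <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3))
declare ptr addrspace(3) @llvm.amdgcn.readfirstlane.p3(ptr addrspace(3))

define amdgpu_kernel void @addr_use_kinds(ptr addrspace(3) %base, ptr addrspace(3) %out, ptr addrspace(3) %out2) {
  %gep = getelementptr i8, ptr addrspace(3) %base, i32 8192

  ; %gep is the pointer operand of a target memory intrinsic, so with this patch
  ; it counts as a memory use and folding the address computation stays profitable.
  %v = call <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep)

  ; %gep is passed to an intrinsic as ordinary data; getAddrModeArguments does not
  ; report it as a pointer operand, so FindAllMemoryUses returns true and the fold
  ; is rejected.
  %fl = call ptr addrspace(3) @llvm.amdgcn.readfirstlane.p3(ptr addrspace(3) %gep)

  store <4 x half> %v, ptr addrspace(3) %out
  store ptr addrspace(3) %fl, ptr addrspace(3) %out2
  ret void
}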
Can you precommit the test case so we can see what effect your patch has on it?
Done. The main effect is to fold in addressing-mode values (e.g. constant offsets) when an intrinsic doing a mem op is using the same address as some other mem op.
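
For a concrete picture of the profitable case, here is a condensed, hypothetical variant of the first test above (names are illustrative, not from the patch): the GEP's only users are memory operations, one per successor block, so CodeGenPrepare can sink the address computation into each block and fold the constant offset into the access. The effect shows up in the checks above as ds_read_b64_tr_b16 ... offset:8192, whereas @badIntrinsicUse materializes the address up front with s_add_i32.

declare <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3))

define amdgpu_kernel void @sink_profitable(ptr addrspace(3) %in, i1 %cond, ptr addrspace(3) %out) {
  %gep = getelementptr i8, ptr addrspace(3) %in, i32 8192
  br i1 %cond, label %then, label %else

then:                                             ; intrinsic load through %gep
  %a = call <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep)
  store <4 x half> %a, ptr addrspace(3) %out
  ret void

else:                                             ; second memory use of the same address
  %b = call <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep)
  store <4 x half> %b, ptr addrspace(3) %out
  ret void
}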
LLVM Buildbot has detected a new failure on one of its builders. Full details are available at: https://lab.llvm.org/buildbot/#/builders/94/builds/10975