[AMDGPU] PromoteAlloca - bail always if load/store is volatile #73228

Merged: mariusz-sikora-at-amd merged 1 commit into llvm:main from mariusz-sikora-at-amd:masikora/pub-PromoteAllocaFixVolatile on Nov 28, 2023
Conversation
This change addresses the case where the alloca size is the same as the load/store size. Previously, the simple-access check ran only after the "alloca already accessed as vector" early-continue, so a volatile load or store covering the whole alloca skipped the check and the alloca was promoted anyway; the check is now performed before that continue.

@llvm/pr-subscribers-backend-amdgpu

Author: Mariusz Sikora (mariusz-sikora-at-amd)

Changes: This change addresses the case where the alloca size is the same as the load/store size.

Full diff: https://github.com/llvm/llvm-project/pull/73228.diff — 3 files affected:
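To make the ordering bug concrete, here is a minimal, self-contained C++ sketch of the user-scan loop in tryPromoteAllocaToVector. This is not LLVM code: Access and canPromote are invented for the illustration, and only the two checks relevant to this patch are modeled.

```cpp
// Sketch of the fixed control flow: the "not simple" rejection must run
// before the "type already matches the vector" early-continue, otherwise a
// volatile access of the full alloca size is accepted without its
// volatility ever being inspected.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Access {
  bool IsVolatile;        // models !LoadInst/StoreInst::isSimple()
  bool TypeMatchesVector; // models "alloca already accessed as vector"
};

// Returns std::nullopt when promotion is allowed, else the rejection reason.
std::optional<std::string> canPromote(const std::vector<Access> &Users) {
  for (const Access &A : Users) {
    // Fixed ordering (this patch): reject non-simple accesses up front.
    if (A.IsVolatile)
      return "not a simple load or store";
    // Pre-patch, this continue ran first and hid volatile full-size accesses.
    if (A.TypeMatchesVector)
      continue;
    // ... further access-type checks would go here ...
  }
  return std::nullopt;
}

int main() {
  // A volatile store whose size equals the alloca size: before the patch the
  // early-continue accepted it; with the hoisted check it is rejected.
  std::vector<Access> Users = {{/*IsVolatile=*/true, /*TypeMatchesVector=*/true}};
  if (auto Reason = canPromote(Users))
    std::cout << "reject: " << *Reason << "\n";
  else
    std::cout << "promote\n";
}
```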
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 9293f8954cfe2b5..1bed516fb5c7f8c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -681,6 +681,12 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
return RejectUser(Inst, "unsupported load/store as aggregate");
assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());
+ // Check that this is a simple access of a vector element.
+ bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
+ : cast<StoreInst>(Inst)->isSimple();
+ if (!IsSimple)
+ return RejectUser(Inst, "not a simple load or store");
+
Ptr = Ptr->stripPointerCasts();
// Alloca already accessed as vector.
@@ -690,11 +696,6 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
continue;
}
- // Check that this is a simple access of a vector element.
- bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
- : cast<StoreInst>(Inst)->isSimple();
- if (!IsSimple)
- return RejectUser(Inst, "not a simple load or store");
if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
return RejectUser(Inst, "not a supported access type");
diff --git a/llvm/test/CodeGen/AMDGPU/fix-frame-reg-in-custom-csr-spills.ll b/llvm/test/CodeGen/AMDGPU/fix-frame-reg-in-custom-csr-spills.ll
index f440b1f9ee6f7bb..8e0750195b3b4d1 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-frame-reg-in-custom-csr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/fix-frame-reg-in-custom-csr-spills.ll
@@ -13,20 +13,37 @@ define void @test_stack_realign(<8 x i32> %val, i32 %idx) #0 {
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s16, s33
-; GCN-NEXT: s_mov_b32 s33, s32
+; GCN-NEXT: s_add_i32 s33, s32, 0xfc0
+; GCN-NEXT: s_and_b32 s33, s33, 0xfffff000
; GCN-NEXT: s_or_saveexec_b64 s[18:19], -1
-; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GCN-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:96 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[18:19]
-; GCN-NEXT: s_addk_i32 s32, 0x400
+; GCN-NEXT: s_addk_i32 s32, 0x3000
; GCN-NEXT: v_writelane_b32 v42, s16, 2
; GCN-NEXT: s_getpc_b64 s[16:17]
; GCN-NEXT: s_add_u32 s16, s16, extern_func@gotpcrel32@lo+4
; GCN-NEXT: s_addc_u32 s17, s17, extern_func@gotpcrel32@hi+12
; GCN-NEXT: s_load_dwordx2 s[16:17], s[16:17], 0x0
-; GCN-NEXT: v_writelane_b32 v42, s30, 0
-; GCN-NEXT: v_mov_b32_e32 v0, v8
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
; GCN-NEXT: buffer_store_dword v41, off, s[0:3], s33 ; 4-byte Folded Spill
+; GCN-NEXT: v_writelane_b32 v42, s30, 0
+; GCN-NEXT: buffer_store_dword v7, off, s[0:3], s33 offset:92
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v6, off, s[0:3], s33 offset:88
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v5, off, s[0:3], s33 offset:84
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v4, off, s[0:3], s33 offset:80
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v3, off, s[0:3], s33 offset:76
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v2, off, s[0:3], s33 offset:72
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:68
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:64
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, v8
; GCN-NEXT: v_writelane_b32 v42, s31, 1
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: ;;#ASMEND
@@ -40,9 +57,9 @@ define void @test_stack_realign(<8 x i32> %val, i32 %idx) #0 {
; GCN-NEXT: v_readlane_b32 s30, v42, 0
; GCN-NEXT: v_readlane_b32 s4, v42, 2
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
-; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
+; GCN-NEXT: buffer_load_dword v42, off, s[0:3], s33 offset:96 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
-; GCN-NEXT: s_addk_i32 s32, 0xfc00
+; GCN-NEXT: s_addk_i32 s32, 0xd000
; GCN-NEXT: s_mov_b32 s33, s4
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-volatile.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
index 6bac2f92726a99e..1cddc528b54e1ff 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
@@ -25,6 +25,17 @@ entry:
ret void
}
+; CHECK-LABEL: @volatile_store_vec(
+; CHECK: alloca [4 x i32]
+; CHECK: store volatile <4 x i32>
+define amdgpu_kernel void @volatile_store_vec(ptr addrspace(1) nocapture %out, ptr addrspace(1) nocapture %in) {
+entry:
+ %stack = alloca [4 x i32], align 4, addrspace(5)
+ %tmp = load <4 x i32>, ptr addrspace(1) %in, align 16
+ store volatile <4 x i32> %tmp, ptr addrspace(5) %stack
+ ret void
+}
+
; Has one OK non-volatile user but also a volatile user
; CHECK-LABEL: @volatile_and_non_volatile_load(
; CHECK: alloca double
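The new @volatile_store_vec test can also be reconstructed programmatically. Below is a hedged sketch using LLVM's C++ IRBuilder API (illustrative only, not part of the patch; it assumes an LLVM development tree with opaque pointers) that builds the same kernel, with address space 5 being AMDGPU's private stack space as in the .ll test above.

```cpp
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("promote-alloca-volatile", Ctx);
  M.setTargetTriple("amdgcn-amd-amdhsa");

  auto *I32 = Type::getInt32Ty(Ctx);
  auto *V4I32 = FixedVectorType::get(I32, 4);
  auto *ArrTy = ArrayType::get(I32, 4);
  auto *GlobalPtr = PointerType::get(Ctx, /*AddressSpace=*/1);

  // define amdgpu_kernel void @volatile_store_vec(ptr addrspace(1) %out,
  //                                               ptr addrspace(1) %in)
  auto *FTy = FunctionType::get(Type::getVoidTy(Ctx), {GlobalPtr, GlobalPtr},
                                /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage,
                                 "volatile_store_vec", M);
  F->setCallingConv(CallingConv::AMDGPU_KERNEL);

  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  // %stack = alloca [4 x i32], align 4, addrspace(5)
  AllocaInst *Stack = B.CreateAlloca(ArrTy, /*AddrSpace=*/5, nullptr, "stack");
  Stack->setAlignment(Align(4));
  // %tmp = load <4 x i32>, ptr addrspace(1) %in, align 16
  LoadInst *Tmp = B.CreateLoad(V4I32, F->getArg(1), "tmp");
  Tmp->setAlignment(Align(16));
  // store volatile <4 x i32> %tmp, ptr addrspace(5) %stack
  B.CreateStore(Tmp, Stack, /*isVolatile=*/true);
  B.CreateRetVoid();

  verifyModule(M, &errs());
  M.print(outs(), nullptr);
}
```

The volatile store is the whole point: it has the same size as the alloca, so before this patch the promotion pass would have accepted it via the early-continue instead of bailing.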
arsenm approved these changes on Nov 28, 2023.
mariusz-sikora-at-amd deleted the masikora/pub-PromoteAllocaFixVolatile branch on November 28, 2023 at 11:01.