[LoadStoreVectorizer] Only upgrade align for alloca
In commit 2be0abb (D149893) the load/store vectorizer was
reimplemented. One thing that can happen with the new LSV is that
it can increase the alignment of alloca and global objects. However,
the code comments indicate that the intention was only to increase
the alignment of allocas.

Now we use stripPointerCasts to analyse whether the load/store really
is accessing an alloca (the same approach getOrEnforceKnownAlignment
uses), and we only try to change the alignment if we find an alloca
instruction. This way the code matches the code comments better, and
we won't change the alignment of non-stack variables to the
"StackAdjustedAlignment".

Differential Revision: https://reviews.llvm.org/D152386
bjope committed Jun 9, 2023
1 parent 5acac7d commit 263bc7f
Showing 3 changed files with 99 additions and 51 deletions.
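For illustration, here is a minimal, self-contained C++ sketch of the check described
in the commit message. It is not the patch itself; the helper name isUnderlyingAlloca
is made up for this example, while stripPointerCasts() and isa<AllocaInst>() are the
LLVM APIs the patch relies on.

#include "llvm/IR/Instructions.h" // AllocaInst, Value
#include "llvm/Support/Casting.h" // isa<>

using namespace llvm;

// Returns true only if the pointer, after looking through bitcasts,
// addrspacecasts and all-zero GEPs, is directly an alloca. Globals and
// function arguments therefore do not qualify, so their alignment is
// left untouched by the vectorizer.
static bool isUnderlyingAlloca(const Value *Ptr) {
  return isa<AllocaInst>(Ptr->stripPointerCasts());
}

In splitChainByAlignment this guard is checked before attempting to raise the
alignment to StackAdjustedAlignment, as the diff below shows.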
10 changes: 6 additions & 4 deletions llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -801,12 +801,14 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
       //
       // FIXME: We will upgrade the alignment of the alloca even if it turns out
       // we can't vectorize for some other reason.
+      Value *PtrOperand = getLoadStorePointerOperand(C[CBegin].Inst);
+      bool IsAllocaAccess = isa<AllocaInst>(PtrOperand->stripPointerCasts());
       Align Alignment = getLoadStoreAlignment(C[CBegin].Inst);
-      if (AS == DL.getAllocaAddrSpace() && Alignment.value() % SizeBytes != 0 &&
-          IsAllowedAndFast(Align(StackAdjustedAlignment))) {
+      Align PrefAlign = Align(StackAdjustedAlignment);
+      if (IsAllocaAccess && AS == DL.getAllocaAddrSpace() &&
+          Alignment.value() % SizeBytes != 0 && IsAllowedAndFast(PrefAlign)) {
         Align NewAlign = getOrEnforceKnownAlignment(
-            getLoadStorePointerOperand(C[CBegin].Inst),
-            Align(StackAdjustedAlignment), DL, C[CBegin].Inst, nullptr, &DT);
+            PtrOperand, PrefAlign, DL, C[CBegin].Inst, nullptr, &DT);
         if (NewAlign >= Alignment) {
           LLVM_DEBUG(dbgs()
                      << "LSV: splitByChain upgrading alloca alignment from "
@@ -21,9 +21,9 @@ define amdgpu_kernel void @load_unknown_offset_align1_i8(ptr addrspace(1) noalia
 ; UNALIGNED-LABEL: @load_unknown_offset_align1_i8(
 ; UNALIGNED-NEXT: [[ALLOCA:%.*]] = alloca [128 x i8], align 1, addrspace(5)
 ; UNALIGNED-NEXT: [[PTR0:%.*]] = getelementptr inbounds [128 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; UNALIGNED-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr addrspace(5) [[PTR0]], align 1
-; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i8> [[TMP2]], i32 0
-; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i8> [[TMP2]], i32 1
+; UNALIGNED-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr addrspace(5) [[PTR0]], align 1
+; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i8> [[TMP1]], i32 0
+; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
 ; UNALIGNED-NEXT: [[ADD:%.*]] = add i8 [[VAL01]], [[VAL12]]
 ; UNALIGNED-NEXT: store i8 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; UNALIGNED-NEXT: ret void
@@ -52,9 +52,9 @@ define amdgpu_kernel void @load_unknown_offset_align1_i16(ptr addrspace(1) noali
 ; UNALIGNED-LABEL: @load_unknown_offset_align1_i16(
 ; UNALIGNED-NEXT: [[ALLOCA:%.*]] = alloca [128 x i16], align 1, addrspace(5)
 ; UNALIGNED-NEXT: [[PTR0:%.*]] = getelementptr inbounds [128 x i16], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; UNALIGNED-NEXT: [[TMP2:%.*]] = load <2 x i16>, ptr addrspace(5) [[PTR0]], align 1
-; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0
-; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i16> [[TMP2]], i32 1
+; UNALIGNED-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr addrspace(5) [[PTR0]], align 1
+; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
 ; UNALIGNED-NEXT: [[ADD:%.*]] = add i16 [[VAL01]], [[VAL12]]
 ; UNALIGNED-NEXT: store i16 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 2
 ; UNALIGNED-NEXT: ret void
@@ -85,9 +85,9 @@ define amdgpu_kernel void @load_unknown_offset_align1_i32(ptr addrspace(1) noali
 ; UNALIGNED-LABEL: @load_unknown_offset_align1_i32(
 ; UNALIGNED-NEXT: [[ALLOCA:%.*]] = alloca [128 x i32], align 1, addrspace(5)
 ; UNALIGNED-NEXT: [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; UNALIGNED-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 1
-; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
-; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; UNALIGNED-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 1
+; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
 ; UNALIGNED-NEXT: [[ADD:%.*]] = add i32 [[VAL01]], [[VAL12]]
 ; UNALIGNED-NEXT: store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
 ; UNALIGNED-NEXT: ret void
@@ -104,15 +104,25 @@ define amdgpu_kernel void @load_unknown_offset_align1_i32(ptr addrspace(1) noali
 
 ; Make sure alloca alignment isn't decreased
 define amdgpu_kernel void @load_alloca16_unknown_offset_align1_i32(ptr addrspace(1) noalias %out, i32 %offset) #0 {
-; CHECK-LABEL: @load_alloca16_unknown_offset_align1_i32(
-; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [128 x i32], align 16, addrspace(5)
-; CHECK-NEXT: [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 4
-; CHECK-NEXT: [[VAL01:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
-; CHECK-NEXT: [[VAL12:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[VAL01]], [[VAL12]]
-; CHECK-NEXT: store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
-; CHECK-NEXT: ret void
+; ALIGNED-LABEL: @load_alloca16_unknown_offset_align1_i32(
+; ALIGNED-NEXT: [[ALLOCA:%.*]] = alloca [128 x i32], align 16, addrspace(5)
+; ALIGNED-NEXT: [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
+; ALIGNED-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(5) [[PTR0]], align 1
+; ALIGNED-NEXT: [[PTR1:%.*]] = getelementptr inbounds i32, ptr addrspace(5) [[PTR0]], i32 1
+; ALIGNED-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(5) [[PTR1]], align 1
+; ALIGNED-NEXT: [[ADD:%.*]] = add i32 [[VAL0]], [[VAL1]]
+; ALIGNED-NEXT: store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
+; ALIGNED-NEXT: ret void
+;
+; UNALIGNED-LABEL: @load_alloca16_unknown_offset_align1_i32(
+; UNALIGNED-NEXT: [[ALLOCA:%.*]] = alloca [128 x i32], align 16, addrspace(5)
+; UNALIGNED-NEXT: [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
+; UNALIGNED-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 4
+; UNALIGNED-NEXT: [[VAL01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; UNALIGNED-NEXT: [[VAL12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; UNALIGNED-NEXT: [[ADD:%.*]] = add i32 [[VAL01]], [[VAL12]]
+; UNALIGNED-NEXT: store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
+; UNALIGNED-NEXT: ret void
 ;
   %alloca = alloca [128 x i32], align 16, addrspace(5)
   %ptr0 = getelementptr inbounds [128 x i32], ptr addrspace(5) %alloca, i32 0, i32 %offset
@@ -235,11 +245,11 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8() {
 define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i32() {
 ; CHECK-LABEL: @merge_private_load_4_vector_elts_loads_v4i32(
 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [8 x i32], align 4, addrspace(5)
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr addrspace(5) [[ALLOCA]], align 4
-; CHECK-NEXT: [[LOAD01:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
-; CHECK-NEXT: [[LOAD12:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
-; CHECK-NEXT: [[LOAD23:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
-; CHECK-NEXT: [[LOAD34:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr addrspace(5) [[ALLOCA]], align 4
+; CHECK-NEXT: [[LOAD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT: [[LOAD12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT: [[LOAD23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT: [[LOAD34:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT: ret void
 ;
   %alloca = alloca [8 x i32], align 1, addrspace(5)
@@ -257,11 +267,11 @@ define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i32() {
 define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
 ; CHECK-LABEL: @merge_private_load_4_vector_elts_loads_v4i8(
 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [8 x i8], align 4, addrspace(5)
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr addrspace(5) [[ALLOCA]], align 4
-; CHECK-NEXT: [[LOAD01:%.*]] = extractelement <4 x i8> [[TMP2]], i32 0
-; CHECK-NEXT: [[LOAD12:%.*]] = extractelement <4 x i8> [[TMP2]], i32 1
-; CHECK-NEXT: [[LOAD23:%.*]] = extractelement <4 x i8> [[TMP2]], i32 2
-; CHECK-NEXT: [[LOAD34:%.*]] = extractelement <4 x i8> [[TMP2]], i32 3
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr addrspace(5) [[ALLOCA]], align 4
+; CHECK-NEXT: [[LOAD01:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
+; CHECK-NEXT: [[LOAD12:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
+; CHECK-NEXT: [[LOAD23:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
+; CHECK-NEXT: [[LOAD34:%.*]] = extractelement <4 x i8> [[TMP1]], i32 3
 ; CHECK-NEXT: ret void
 ;
   %alloca = alloca [8 x i8], align 1, addrspace(5)
@@ -278,12 +288,15 @@ define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
 
 ; Make sure we don't think the alignment will increase if the base address isn't an alloca
 define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) %p, ptr addrspace(5) %r) #0 {
-; CHECK-LABEL: @private_store_2xi16_align2_not_alloca(
-; ALIGNED-NEXT: [[GEP_R:%.*]] = getelementptr i16, ptr addrspace(5) [[R:%.*]], i32 1
-; ALIGNED-NEXT: store i16 1, ptr addrspace(5) [[R]], align 2
-; ALIGNED-NEXT: store i16 2, ptr addrspace(5) [[GEP_R]], align 2
-; UNALIGNED-NEXT:store <2 x i16>
-; CHECK-NEXT: ret void
+; ALIGNED-LABEL: @private_store_2xi16_align2_not_alloca(
+; ALIGNED-NEXT: [[GEP_R:%.*]] = getelementptr i16, ptr addrspace(5) [[R:%.*]], i32 1
+; ALIGNED-NEXT: store i16 1, ptr addrspace(5) [[R]], align 2
+; ALIGNED-NEXT: store i16 2, ptr addrspace(5) [[GEP_R]], align 2
+; ALIGNED-NEXT: ret void
+;
+; UNALIGNED-LABEL: @private_store_2xi16_align2_not_alloca(
+; UNALIGNED-NEXT: store <2 x i16> <i16 1, i16 2>, ptr addrspace(5) [[R:%.*]], align 2
+; UNALIGNED-NEXT: ret void
 ;
   %gep.r = getelementptr i16, ptr addrspace(5) %r, i32 1
   store i16 1, ptr addrspace(5) %r, align 2
@@ -309,16 +322,25 @@ define void @private_store_2xi16_align1_not_alloca(ptr addrspace(5) %p, ptr addr
 }
 
 define i32 @private_load_2xi16_align2_not_alloca(ptr addrspace(5) %p) #0 {
-; CHECK-LABEL: @private_load_2xi16_align2_not_alloca(
-; ALIGNED-NEXT: [[GEP_P:%.*]] = getelementptr i16, ptr addrspace(5) [[P:%.*]], i64 1
-; ALIGNED-NEXT: [[P_0:%.*]] = load i16, ptr addrspace(5) [[P]], align 2
-; ALIGNED-NEXT: [[P_1:%.*]] = load i16, ptr addrspace(5) [[GEP_P]], align 2
-; UNALIGNED-NEXT:load <2 x i16>
-; CHECK: [[ZEXT_0:%.*]] = zext i16
-; CHECK-NEXT: [[ZEXT_1:%.*]] = zext i16
-; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]]
-; CHECK-NEXT: ret i32 [[OR]]
+; ALIGNED-LABEL: @private_load_2xi16_align2_not_alloca(
+; ALIGNED-NEXT: [[GEP_P:%.*]] = getelementptr i16, ptr addrspace(5) [[P:%.*]], i64 1
+; ALIGNED-NEXT: [[P_0:%.*]] = load i16, ptr addrspace(5) [[P]], align 2
+; ALIGNED-NEXT: [[P_1:%.*]] = load i16, ptr addrspace(5) [[GEP_P]], align 2
+; ALIGNED-NEXT: [[ZEXT_0:%.*]] = zext i16 [[P_0]] to i32
+; ALIGNED-NEXT: [[ZEXT_1:%.*]] = zext i16 [[P_1]] to i32
+; ALIGNED-NEXT: [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
+; ALIGNED-NEXT: [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]]
+; ALIGNED-NEXT: ret i32 [[OR]]
+;
+; UNALIGNED-LABEL: @private_load_2xi16_align2_not_alloca(
+; UNALIGNED-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr addrspace(5) [[P:%.*]], align 2
+; UNALIGNED-NEXT: [[P_01:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; UNALIGNED-NEXT: [[P_12:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
+; UNALIGNED-NEXT: [[ZEXT_0:%.*]] = zext i16 [[P_01]] to i32
+; UNALIGNED-NEXT: [[ZEXT_1:%.*]] = zext i16 [[P_12]] to i32
+; UNALIGNED-NEXT: [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
+; UNALIGNED-NEXT: [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]]
+; UNALIGNED-NEXT: ret i32 [[OR]]
 ;
   %gep.p = getelementptr i16, ptr addrspace(5) %p, i64 1
   %p.0 = load i16, ptr addrspace(5) %p, align 2
@@ -342,9 +364,9 @@ define i32 @private_load_2xi16_align1_not_alloca(ptr addrspace(5) %p) #0 {
 ; ALIGNED-NEXT: ret i32 [[OR]]
 ;
 ; UNALIGNED-LABEL: @private_load_2xi16_align1_not_alloca(
-; UNALIGNED-NEXT: [[TMP2:%.*]] = load <2 x i16>, ptr addrspace(5) [[P:%.*]], align 1
-; UNALIGNED-NEXT: [[P_01:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0
-; UNALIGNED-NEXT: [[P_12:%.*]] = extractelement <2 x i16> [[TMP2]], i32 1
+; UNALIGNED-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr addrspace(5) [[P:%.*]], align 1
+; UNALIGNED-NEXT: [[P_01:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; UNALIGNED-NEXT: [[P_12:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
 ; UNALIGNED-NEXT: [[ZEXT_0:%.*]] = zext i16 [[P_01]] to i32
 ; UNALIGNED-NEXT: [[ZEXT_1:%.*]] = zext i16 [[P_12]] to i32
 ; UNALIGNED-NEXT: [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
@@ -0,0 +1,24 @@
; RUN: opt -S -passes=load-store-vectorizer --mcpu=hawaii -mattr=+unaligned-access-mode,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck --match-full-lines %s

target triple = "amdgcn--"
target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"

@G = internal addrspace(5) global [8 x i16] undef, align 1

; Verify that the alignment of the global remains at 1, even if we vectorize
; the stores.
;
; CHECK: @G = internal addrspace(5) global [8 x i16] undef, align 1

define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) %p, ptr addrspace(5) %r) {
; CHECK: define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) [[P:%.*]], ptr addrspace(5) [[R:%.*]]) #0 {
; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i16, ptr addrspace(5) @G, i32 0
; CHECK-NEXT: store <2 x i16> <i16 1, i16 2>, ptr addrspace(5) [[GEP0]], align 1
; CHECK-NEXT: ret void
;
%gep0 = getelementptr i16, ptr addrspace(5) @G, i32 0
%gep1 = getelementptr i16, ptr addrspace(5) @G, i32 1
store i16 1, ptr addrspace(5) %gep0, align 1
store i16 2, ptr addrspace(5) %gep1, align 1
ret void
}
