[SROA] isVectorPromotionViable(): memory intrinsics operate on vectors of bytes (take 2)

This is a recommit of cf624b2,
which was reverted in 5cfc22c
because the cut-off on the number of vector elements was not low enough:
it triggered SDAG SDNode operand-count assertions
and caused compile-time explosions in some cases.

Let's try with something really *REALLY* conservative first,
just to get somewhere, and try to bump it (to 64/128) later.

FIXME: should this respect TTI reg width * num vec regs?

Original commit message:

Now, there's a big caveat here: these bytes
are abstract bytes, not the i8 we have in LLVM,
so strictly speaking this is not exactly legal;
see e.g. AliveToolkit/alive2#860:
the "bytes" could have been a pointer,
and loading them as an integer inserts an implicit ptrtoint.
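
For illustration (an editorial sketch with invented names, not text from the commit), the concern looks like this in IR:

  ; %slot holds a pointer value.
  store ptr %p, ptr %slot
  ; Loading those bytes back as an integer observes the pointer's bit
  ; pattern, i.e. an implicit ptrtoint:
  %bits = load i64, ptr %slot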

But at the same time,
InstCombine's `InstCombinerImpl::SimplifyAnyMemTransfer()`
would already expand a memtransfer of 1/2/4/8 bytes
into an integer-typed load+store,
so this isn't exactly a new problem.
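
Roughly, that existing InstCombine rewrite looks like this (sketch only):

  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 8, i1 false)
  ; ... becomes ...
  %v = load i64, ptr %src, align 1
  store i64 %v, ptr %dst, align 1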

Note that in memory, poison is byte-wise,
so we really can't widen elements,
but SROA seems to be inconsistent here.
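
As a concrete sketch of the widening hazard (illustrative, not from the commit), suppose only the first of two adjacent bytes is poison:

  ; A byte-wise copy preserves the non-poison second byte:
  %v = load <2 x i8>, ptr %src
  store <2 x i8> %v, ptr %dst
  ; A widened copy folds the poison byte into the whole i16 value, and the
  ; store then smears poison over both destination bytes:
  %w = load i16, ptr %src
  store i16 %w, ptr %dst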

Fixes #59116.
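
Net effect, sketched on a hypothetical 16-byte alloca fully covered by memory intrinsics (names invented; compare the <16 x i8> updates in the SROA tests below):

  %a = alloca [16 x i8]
  call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %src, i64 16, i1 false)
  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a, i64 16, i1 false)
  ; ... SROA can now promote the partition via a byte-vector type ...
  %v = load <16 x i8>, ptr %src, align 1
  store <16 x i8> %v, ptr %dst, align 1
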
LebedevRI committed Nov 26, 2022
1 parent a677afd commit 3c4d2a0
Showing 15 changed files with 180 additions and 211 deletions.
18 changes: 11 additions & 7 deletions clang/test/CodeGenOpenCL/amdgpu-nullptr.cl
@@ -515,13 +515,17 @@ typedef struct {
private char *p;
} StructTy3;

// CHECK-LABEL: test_memset_private
// CHECK: call void @llvm.memset.p5i8.i64(i8 addrspace(5)* noundef align 8 {{.*}}, i8 0, i64 32, i1 false)
// CHECK: [[GEP:%.*]] = getelementptr inbounds %struct.StructTy3, %struct.StructTy3 addrspace(5)* %ptr, i32 0, i32 4
// CHECK: store i8 addrspace(5)* addrspacecast (i8* null to i8 addrspace(5)*), i8 addrspace(5)* addrspace(5)* [[GEP]]
// CHECK: [[GEP1:%.*]] = getelementptr inbounds i8, i8 addrspace(5)* {{.*}}, i32 36
// CHECK: [[GEP1_CAST:%.*]] = bitcast i8 addrspace(5)* [[GEP1]] to i32 addrspace(5)*
// CHECK: store i32 0, i32 addrspace(5)* [[GEP1_CAST]], align 4
// CHECK-LABEL: @test_memset_private(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_STRUCTTY3:%.*]] addrspace(5)* [[PTR:%.*]] to i8 addrspace(5)*
// CHECK-NEXT: [[S3_SROA_0_SROA_0_0_S3_SROA_0_0__SROA_CAST2_SROA_CAST:%.*]] = bitcast [[STRUCT_STRUCTTY3]] addrspace(5)* [[PTR]] to <32 x i8> addrspace(5)*
// CHECK-NEXT: store <32 x i8> zeroinitializer, <32 x i8> addrspace(5)* [[S3_SROA_0_SROA_0_0_S3_SROA_0_0__SROA_CAST2_SROA_CAST]], align 8, !tbaa.struct !9
// CHECK-NEXT: [[S3_SROA_4_0__SROA_IDX6:%.*]] = getelementptr inbounds [[STRUCT_STRUCTTY3]], [[STRUCT_STRUCTTY3]] addrspace(5)* [[PTR]], i32 0, i32 4
// CHECK-NEXT: store i8 addrspace(5)* addrspacecast (i8* null to i8 addrspace(5)*), i8 addrspace(5)* addrspace(5)* [[S3_SROA_4_0__SROA_IDX6]], align 8, !tbaa.struct !12
// CHECK-NEXT: [[S3_SROA_5_0__SROA_IDX:%.*]] = getelementptr inbounds i8, i8 addrspace(5)* [[TMP0]], i32 36
// CHECK-NEXT: [[S3_SROA_5_0__SROA_CAST8:%.*]] = bitcast i8 addrspace(5)* [[S3_SROA_5_0__SROA_IDX]] to i32 addrspace(5)*
// CHECK-NEXT: store i32 0, i32 addrspace(5)* [[S3_SROA_5_0__SROA_CAST8]], align 4, !tbaa.struct !13
// CHECK-NEXT: ret void
void test_memset_private(private StructTy3 *ptr) {
StructTy3 S3 = {0, 0, 0, 0, 0};
*ptr = S3;
49 changes: 35 additions & 14 deletions llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1806,8 +1806,10 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
? Ty->getElementType()
: FixedVectorType::get(Ty->getElementType(), NumElements);

Type *SplitIntTy =
Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
Type *SplitIntTy = nullptr;
if (uint64_t Bitwidth = NumElements * ElementSize * 8;
Bitwidth <= IntegerType::MAX_INT_BITS)
SplitIntTy = Type::getIntNTy(Ty->getContext(), Bitwidth);

Use *U = S.getUse();

@@ -1826,7 +1828,8 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
// Disable vector promotion when there are loads or stores of an FCA.
if (LTy->isStructTy())
return false;
if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
if (SplitIntTy &&
(P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset())) {
assert(LTy->isIntegerTy());
LTy = SplitIntTy;
}
@@ -1839,7 +1842,8 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
// Disable vector promotion when there are loads or stores of an FCA.
if (STy->isStructTy())
return false;
if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
if (SplitIntTy &&
(P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset())) {
assert(STy->isIntegerTy());
STy = SplitIntTy;
}
@@ -1889,7 +1893,8 @@ static bool checkVectorTypeForPromotion(Partition &P, VectorType *VTy,
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
static VectorType *isVectorPromotionViable(Partition &P, LLVMContext &Ctx,
const DataLayout &DL) {
// Collect the candidate types for vector-based promotion. Also track whether
// we have different element types.
SmallVector<VectorType *, 4> CandidateTys;
@@ -1926,6 +1931,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
}
}
};
bool SeenMemTransferInst = false;
// Consider any loads or stores that are the exact size of the slice.
for (const Slice &S : P)
if (S.beginOffset() == P.beginOffset() &&
@@ -1934,8 +1940,29 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
CheckCandidateType(LI->getType());
else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
CheckCandidateType(SI->getValueOperand()->getType());
else if (isa<MemTransferInst>(S.getUse()->getUser()))
SeenMemTransferInst = true;
}

// If we have seen a mem transfer intrinsic,
// and the partition is small enough,
// enqueue an appropriate byte vector.
//
// The "small enough" threshold is somewhat arbitrary,
// and is mostly dictated by compile-time concerns,
// but, at the same time, an SDAG SDNode can't handle
// more than 65535 operands, so we should not
// produce vectors with more than ~32768 elements.
//
// Perhaps we should also take the TTI into account:
// `getNumberOfRegisters() * getRegisterBitWidth() / 8`?
//
// FIXME: the byte type is sticky. If we had any op with byte-typed elements,
// then we should choose that type.
if (SeenMemTransferInst && P.size() <= 32)
CheckCandidateType(
FixedVectorType::get(IntegerType::getInt8Ty(Ctx), P.size()));

// If we didn't find a vector type, nothing to do here.
if (CandidateTys.empty())
return nullptr;
@@ -1992,13 +2019,6 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
CandidateTys.resize(1);
}

// FIXME: hack. Do we have a named constant for this?
// SDAG SDNode can't have more than 65535 operands.
llvm::erase_if(CandidateTys, [](VectorType *VTy) {
return cast<FixedVectorType>(VTy)->getNumElements() >
std::numeric_limits<unsigned short>::max();
});

for (VectorType *VTy : CandidateTys)
if (checkVectorTypeForPromotion(P, VTy, DL))
return VTy;
@@ -4323,8 +4343,9 @@ AllocaInst *SROAPass::rewritePartition(AllocaInst &AI, AllocaSlices &AS,

bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);

VectorType *VecTy =
IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
VectorType *VecTy = IsIntegerPromotable
? nullptr
: isVectorPromotionViable(P, AI.getContext(), DL);
if (VecTy)
SliceTy = VecTy;

2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AMDGPU/v1024.ll
@@ -4,7 +4,7 @@

; GCN-LABEL: {{^}}test_v1024:
; GCN-NOT: v_accvgpr
; GCN-COUNT-32: v_mov_b32_e32
; GCN-COUNT-10: v_mov_b32_e32
; GCN-NOT: v_accvgpr
define amdgpu_kernel void @test_v1024() {
entry:
6 changes: 2 additions & 4 deletions llvm/test/DebugInfo/X86/sroasplit-1.ll
@@ -20,10 +20,8 @@
;

; Verify that SROA creates a variable piece when splitting i1.
; CHECK: %[[I1:.*]] = alloca [12 x i8], align 4
; CHECK: call void @llvm.dbg.declare(metadata [12 x i8]* %[[I1]], metadata ![[VAR:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 32, 96))
; CHECK: call void @llvm.dbg.value(metadata i32 %[[A:.*]], metadata ![[VAR]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
; CHECK: ret i32 %[[A]]
; CHECK: %[[I1:.*]] = load <12 x i8>,
; CHECK: call void @llvm.dbg.value(metadata <12 x i8> %[[I1]], metadata ![[VAR:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 32, 96))
; Read Var and Piece:
; CHECK: ![[VAR]] = !DILocalVariable(name: "i1",{{.*}} line: 11,

20 changes: 10 additions & 10 deletions llvm/test/DebugInfo/X86/sroasplit-4.ll
@@ -1,28 +1,28 @@
; RUN: opt -sroa < %s -S -o - | FileCheck %s
;
; Test that recursively splitting an alloca updates the debug info correctly.
; CHECK: %[[T:.*]] = load i64, i64* @t, align 8
; CHECK: call void @llvm.dbg.value(metadata i64 %[[T]], metadata ![[Y:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64))
; CHECK: %[[T1:.*]] = load i64, i64* @t, align 8
; CHECK: call void @llvm.dbg.value(metadata i64 %[[T1]], metadata ![[Y]], metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64))
; CHECK: call void @llvm.dbg.value(metadata i64 %[[T]], metadata ![[R:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 192, 64))
; CHECK: call void @llvm.dbg.value(metadata i64 %[[T1]], metadata ![[R]], metadata !DIExpression(DW_OP_LLVM_fragment, 256, 64))
;
; CHECK: call void @llvm.dbg.value(metadata <16 x i8> %[[Y_VEC:.*]], metadata ![[Y:.*]], metadata !DIExpression())
; CHECK: call void @llvm.dbg.value(metadata <16 x i8> %[[Y_VEC1:.*]], metadata ![[Y]], metadata !DIExpression())
; CHECK: call void @llvm.dbg.value(metadata i32 0, metadata ![[R:.*]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
; CHECK: call void @llvm.dbg.value(metadata i64 0, metadata ![[R]], metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64))
; CHECK: call void @llvm.dbg.value(metadata i64 0, metadata ![[R]], metadata !DIExpression(DW_OP_LLVM_fragment, 128, 64))
; CHECK: call void @llvm.dbg.value(metadata <16 x i8> %[[Y_VEC1]], metadata ![[R]], metadata !DIExpression(DW_OP_LLVM_fragment, 192, 128))
;
; struct p {
; __SIZE_TYPE__ s;
; __SIZE_TYPE__ t;
; };
;
;
; struct r {
; int i;
; struct p x;
; struct p y;
; };
;
;
; extern int call_me(struct r);
; extern int maybe();
; extern __SIZE_TYPE__ t;
;
;
; int test() {
; if (maybe())
; return 0;
20 changes: 11 additions & 9 deletions llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
@@ -68,12 +68,13 @@ define dso_local i32* @_Z3foo1S(%0* byval(%0) align 8 %arg) {
; CHECK-LABEL: @_Z3foo1S(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I2:%.*]] = alloca [[TMP0:%.*]], align 8
; CHECK-NEXT: [[I1_SROA_0_0_I5_SROA_IDX:%.*]] = getelementptr inbounds [[TMP0]], %0* [[ARG:%.*]], i64 0, i32 0
; CHECK-NEXT: [[I1_SROA_0_0_COPYLOAD:%.*]] = load i32*, i32** [[I1_SROA_0_0_I5_SROA_IDX]], align 8
; CHECK-NEXT: [[TMP0]] = bitcast %0* [[ARG:%.*]] to i64*
; CHECK-NEXT: [[I11_SROA_0_0_VEC_EXTRACT_EXTRACT:%.*]] = load i64, i64* [[TMP0]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i64 [[I11_SROA_0_0_VEC_EXTRACT_EXTRACT]] to i32*
; CHECK-NEXT: [[I_SROA_0_0_I6_SROA_IDX:%.*]] = getelementptr inbounds [[TMP0]], %0* [[I2]], i64 0, i32 0
; CHECK-NEXT: store i32* [[I1_SROA_0_0_COPYLOAD]], i32** [[I_SROA_0_0_I6_SROA_IDX]], align 8
; CHECK-NEXT: store i32* [[TMP1]], i32** [[I_SROA_0_0_I6_SROA_IDX]], align 8
; CHECK-NEXT: tail call void @_Z7escape01S(%0* nonnull byval([[TMP0]]) align 8 [[I2]])
; CHECK-NEXT: ret i32* [[I1_SROA_0_0_COPYLOAD]]
; CHECK-NEXT: ret i32* [[TMP1]]
;
bb:
%i = alloca %0, align 8
@@ -107,21 +108,22 @@ declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
define dso_local i32* @_Z3bar1S(%0* byval(%0) align 8 %arg) {
; CHECK-LABEL: @_Z3bar1S(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I1_SROA_0_0_I4_SROA_IDX:%.*]] = getelementptr inbounds [[TMP0:%.*]], %0* [[ARG:%.*]], i64 0, i32 0
; CHECK-NEXT: [[I1_SROA_0_0_COPYLOAD:%.*]] = load i32*, i32** [[I1_SROA_0_0_I4_SROA_IDX]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = bitcast %0* [[ARG:%.*]] to i64*
; CHECK-NEXT: [[I13_SROA_0_0_VEC_EXTRACT_EXTRACT:%.*]] = load i64, i64* [[TMP0]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i64 [[I13_SROA_0_0_VEC_EXTRACT_EXTRACT]] to i32*
; CHECK-NEXT: [[I5:%.*]] = tail call i32 @_Z4condv()
; CHECK-NEXT: [[I6_NOT:%.*]] = icmp eq i32 [[I5]], 0
; CHECK-NEXT: br i1 [[I6_NOT]], label [[BB10:%.*]], label [[BB7:%.*]]
; CHECK: bb7:
; CHECK-NEXT: tail call void @_Z5sync0v()
; CHECK-NEXT: tail call void @_Z7escape0Pi(i32* [[I1_SROA_0_0_COPYLOAD]])
; CHECK-NEXT: tail call void @_Z7escape0Pi(i32* [[TMP1]])
; CHECK-NEXT: br label [[BB13:%.*]]
; CHECK: bb10:
; CHECK-NEXT: tail call void @_Z5sync1v()
; CHECK-NEXT: tail call void @_Z7escape1Pi(i32* [[I1_SROA_0_0_COPYLOAD]])
; CHECK-NEXT: tail call void @_Z7escape1Pi(i32* [[TMP1]])
; CHECK-NEXT: br label [[BB13]]
; CHECK: bb13:
; CHECK-NEXT: ret i32* [[I1_SROA_0_0_COPYLOAD]]
; CHECK-NEXT: ret i32* [[TMP1]]
;
bb:
%i = alloca %0, align 8
12 changes: 6 additions & 6 deletions llvm/test/Transforms/SROA/address-spaces.ll
@@ -11,8 +11,8 @@ declare void @llvm.memcpy.p1.p1.i32(ptr addrspace(1) nocapture, ptr addrspace(1)
; Make sure an illegal bitcast isn't introduced
define void @test_address_space_1_1(ptr addrspace(1) %a, ptr addrspace(1) %b) {
; CHECK-LABEL: @test_address_space_1_1(
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
%aa = alloca <2 x i64>, align 16
@@ -23,8 +23,8 @@ define void @test_address_space_1_1(ptr addrspace(1) %a, ptr addrspace(1) %b) {

define void @test_address_space_1_0(ptr addrspace(1) %a, ptr %b) {
; CHECK-LABEL: @test_address_space_1_0(
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
%aa = alloca <2 x i64>, align 16
@@ -35,8 +35,8 @@ define void @test_address_space_0_1(ptr %a, ptr addrspace(1) %b) {

define void @test_address_space_0_1(ptr %a, ptr addrspace(1) %b) {
; CHECK-LABEL: @test_address_space_0_1(
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
%aa = alloca <2 x i64>, align 16
24 changes: 10 additions & 14 deletions llvm/test/Transforms/SROA/alignment.ll
@@ -92,15 +92,15 @@ define void @PR13920(ptr %a, ptr %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; CHECK-LABEL: @PR13920(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
; DEBUGLOC-LABEL: @PR13920(
; DEBUGLOC-NEXT: entry:
; DEBUGLOC-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG38:![0-9]+]]
; DEBUGLOC-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 2, !dbg [[DBG39:![0-9]+]]
; DEBUGLOC-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr [[B:%.*]], align 2, !dbg [[DBG40:![0-9]+]]
; DEBUGLOC-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr [[A:%.*]], align 2, !dbg [[DBG39:![0-9]+]]
; DEBUGLOC-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr [[B:%.*]], align 2, !dbg [[DBG40:![0-9]+]]
; DEBUGLOC-NEXT: ret void, !dbg [[DBG41:![0-9]+]]
;

@@ -118,21 +118,17 @@ define void @test3(ptr %x) {
; reduce the alignment.
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [22 x i8], align 8
; CHECK-NEXT: [[B_SROA_0:%.*]] = alloca [18 x i8], align 2
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[A_SROA_0]], ptr align 8 [[X:%.*]], i32 22, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 2 [[B_SROA_0]], ptr align 2 [[X]], i32 18, i1 false)
; CHECK-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load <22 x i8>, ptr [[X:%.*]], align 8
; CHECK-NEXT: [[B_SROA_0_6_COPYLOAD:%.*]] = load <18 x i8>, ptr [[X]], align 2
; CHECK-NEXT: ret void
;
; DEBUGLOC-LABEL: @test3(
; DEBUGLOC-NEXT: entry:
; DEBUGLOC-NEXT: [[A_SROA_0:%.*]] = alloca [22 x i8], align 8, !dbg [[DBG47:![0-9]+]]
; DEBUGLOC-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META44:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]]
; DEBUGLOC-NEXT: [[B_SROA_0:%.*]] = alloca [18 x i8], align 2, !dbg [[DBG48:![0-9]+]]
; DEBUGLOC-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META45:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48]]
; DEBUGLOC-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[A_SROA_0]], ptr align 8 [[X:%.*]], i32 22, i1 false), !dbg [[DBG49:![0-9]+]]
; DEBUGLOC-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META44:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47:![0-9]+]]
; DEBUGLOC-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META45:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48:![0-9]+]]
; DEBUGLOC-NEXT: [[A_SROA_0_0_COPYLOAD:%.*]] = load <22 x i8>, ptr [[X:%.*]], align 8, !dbg [[DBG49:![0-9]+]]
; DEBUGLOC-NEXT: call void @llvm.dbg.value(metadata ptr undef, metadata [[META46:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50:![0-9]+]]
; DEBUGLOC-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 2 [[B_SROA_0]], ptr align 2 [[X]], i32 18, i1 false), !dbg [[DBG51:![0-9]+]]
; DEBUGLOC-NEXT: [[B_SROA_0_6_COPYLOAD:%.*]] = load <18 x i8>, ptr [[X]], align 2, !dbg [[DBG51:![0-9]+]]
; DEBUGLOC-NEXT: ret void, !dbg [[DBG52:![0-9]+]]
;

12 changes: 6 additions & 6 deletions llvm/test/Transforms/SROA/alloca-address-space.ll
@@ -10,8 +10,8 @@ declare void @llvm.memcpy.p1.p1.i32(ptr addrspace(1) nocapture, ptr addrspace(1)

define void @test_address_space_1_1(ptr addrspace(1) %a, ptr addrspace(1) %b) {
; CHECK-LABEL: @test_address_space_1_1(
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
%aa = alloca <2 x i64>, align 16, addrspace(2)
@@ -22,8 +22,8 @@ define void @test_address_space_1_1(ptr addrspace(1) %a, ptr addrspace(1) %b) {

define void @test_address_space_1_0(ptr addrspace(1) %a, ptr addrspace(2) %b) {
; CHECK-LABEL: @test_address_space_1_0(
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr addrspace(2) [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr addrspace(1) [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr addrspace(2) [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
%aa = alloca <2 x i64>, align 16, addrspace(2)
@@ -34,8 +34,8 @@ define void @test_address_space_1_0(ptr addrspace(1) %a, ptr addrspace(2) %b) {

define void @test_address_space_0_1(ptr addrspace(2) %a, ptr addrspace(1) %b) {
; CHECK-LABEL: @test_address_space_0_1(
; CHECK-NEXT: [[AA_0_COPYLOAD:%.*]] = load <2 x i64>, ptr addrspace(2) [[A:%.*]], align 2
; CHECK-NEXT: store <2 x i64> [[AA_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: [[AA_SROA_0_0_COPYLOAD:%.*]] = load <16 x i8>, ptr addrspace(2) [[A:%.*]], align 2
; CHECK-NEXT: store <16 x i8> [[AA_SROA_0_0_COPYLOAD]], ptr addrspace(1) [[B:%.*]], align 2
; CHECK-NEXT: ret void
;
%aa = alloca <2 x i64>, align 16, addrspace(2)
