8 changes: 8 additions & 0 deletions llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -67,6 +67,11 @@ EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
" to make use of cmpxchg flow-based information"),
cl::init(true));

static cl::opt<bool> AArch64InterleavedAccessOpt(
"aarch64-interleaved-access-opt",
cl::desc("Optimize interleaved memory accesses in the AArch64 backend"),
cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
cl::desc("Run early if-conversion"),
@@ -226,6 +231,9 @@ void AArch64PassConfig::addIRPasses() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
addPass(createCFGSimplificationPass());

if (TM->getOptLevel() != CodeGenOpt::None && AArch64InterleavedAccessOpt)
addPass(createAArch64InterleavedAccessPass());

TargetPassConfig::addIRPasses();

if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
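The new pass is gated both by the hidden flag (default false, so it is opt-in for now) and by the optimization level: it runs whenever the opt level is above CodeGenOpt::None. It can be exercised directly with llc -march=aarch64 -aarch64-interleaved-access-opt=true, which is exactly what the new test at the bottom of this patch does.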
12 changes: 12 additions & 0 deletions llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -407,6 +407,18 @@ unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return LT.first;
}

unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
unsigned Alignment, unsigned AddressSpace) {
assert(isa<VectorType>(VecTy) && "Expect vector types");

if (Factor > 1 && Factor < 5 && isTypeLegal(VecTy))
return Factor;

return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
Alignment, AddressSpace);
}

unsigned AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
unsigned Cost = 0;
for (auto *I : Tys) {
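A note on the numbers above: for interleave factors 2 through 4 on a legal vector type, the hook charges Factor -- e.g. a factor-2 group with wide type <16 x i8> (two <8 x i8> members, a single ld2) costs 2 -- which presumably approximates one instruction per member vector. Illegal types and other factors fall back to BasicTTIImplBase's scalarized estimate, so the ldN/stN path only looks cheap to the vectorizer where the backend can actually select it.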
5 changes: 5 additions & 0 deletions llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -139,6 +139,11 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {

bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);

unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace);
/// @}
};

1 change: 1 addition & 0 deletions llvm/lib/Target/AArch64/CMakeLists.txt
@@ -38,6 +38,7 @@ add_llvm_target(AArch64CodeGen
AArch64PBQPRegAlloc.cpp
AArch64RegisterInfo.cpp
AArch64SelectionDAGInfo.cpp
AArch64InterleavedAccess.cpp
AArch64StorePairSuppress.cpp
AArch64Subtarget.cpp
AArch64TargetMachine.cpp
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -139,7 +139,7 @@ static cl::opt<bool> EnableMemAccessVersioning(
cl::desc("Enable symblic stride memory access versioning"));

static cl::opt<bool> EnableInterleavedMemAccesses(
"enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
"enable-interleaved-mem-accesses", cl::init(true), cl::Hidden,
cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
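Flipping this default means the loop vectorizer now forms interleaved groups out of strided accesses without any extra flag. As a minimal sketch (the function and names below are illustrative, not part of the patch), a loop like this is the source-level shape behind the factor-2 tests that follow: the two stride-2 loads become one wide load plus two strided shufflevectors, which the new AArch64 pass can then turn into a single ld2.

// Hypothetical example: two interleaved stride-2 accesses, factor 2.
// After vectorization, A[2*i] and A[2*i+1] share one wide load per
// vector iteration, matching the load_factor2 pattern tested below.
void sum_pairs(const signed char *A, signed char *S, int n) {
  for (int i = 0; i < n; ++i)
    S[i] = A[2 * i] + A[2 * i + 1];
}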
197 changes: 197 additions & 0 deletions llvm/test/CodeGen/AArch64/aarch64-interleaved-accesses.ll
@@ -0,0 +1,197 @@
; RUN: llc -march=aarch64 -aarch64-interleaved-access-opt=true < %s | FileCheck %s

; CHECK-LABEL: load_factor2:
; CHECK: ld2 { v0.8b, v1.8b }, [x0]
define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
%wide.vec = load <16 x i8>, <16 x i8>* %ptr, align 4
%strided.v0 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
%strided.v1 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
%add = add nsw <8 x i8> %strided.v0, %strided.v1
ret <8 x i8> %add
}

; CHECK-LABEL: load_factor3:
; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
define <4 x i32> @load_factor3(i32* %ptr) {
%base = bitcast i32* %ptr to <12 x i32>*
%wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
%strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
%strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
%add = add nsw <4 x i32> %strided.v2, %strided.v1
ret <4 x i32> %add
}

; CHECK-LABEL: load_factor4:
; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
define <4 x i32> @load_factor4(i32* %ptr) {
%base = bitcast i32* %ptr to <16 x i32>*
%wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
%strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
%strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
%add = add nsw <4 x i32> %strided.v0, %strided.v2
ret <4 x i32> %add
}

; CHECK-LABEL: store_factor2:
; CHECK: st2 { v0.8b, v1.8b }, [x0]
define void @store_factor2(<16 x i8>* %ptr, <8 x i8> %v0, <8 x i8> %v1) {
%interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
store <16 x i8> %interleaved.vec, <16 x i8>* %ptr, align 4
ret void
}

; CHECK-LABEL: store_factor3:
; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
define void @store_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
%base = bitcast i32* %ptr to <12 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
ret void
}

; CHECK-LABEL: store_factor4:
; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
define void @store_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
%base = bitcast i32* %ptr to <16 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
ret void
}

; The following cases test that interleaved accesses of pointer vectors can be
; matched to ldN/stN instructions.

; CHECK-LABEL: load_ptrvec_factor2:
; CHECK: ld2 { v0.2d, v1.2d }, [x0]
define <2 x i32*> @load_ptrvec_factor2(i32** %ptr) {
%base = bitcast i32** %ptr to <4 x i32*>*
%wide.vec = load <4 x i32*>, <4 x i32*>* %base, align 4
%strided.v0 = shufflevector <4 x i32*> %wide.vec, <4 x i32*> undef, <2 x i32> <i32 0, i32 2>
ret <2 x i32*> %strided.v0
}

; CHECK-LABEL: load_ptrvec_factor3:
; CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0]
define void @load_ptrvec_factor3(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
%base = bitcast i32** %ptr to <6 x i32*>*
%wide.vec = load <6 x i32*>, <6 x i32*>* %base, align 4
%strided.v2 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 2, i32 5>
store <2 x i32*> %strided.v2, <2 x i32*>* %ptr1
%strided.v1 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 1, i32 4>
store <2 x i32*> %strided.v1, <2 x i32*>* %ptr2
ret void
}

; CHECK-LABEL: load_ptrvec_factor4:
; CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
define void @load_ptrvec_factor4(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
%base = bitcast i32** %ptr to <8 x i32*>*
%wide.vec = load <8 x i32*>, <8 x i32*>* %base, align 4
%strided.v1 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 1, i32 5>
%strided.v3 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 3, i32 7>
store <2 x i32*> %strided.v1, <2 x i32*>* %ptr1
store <2 x i32*> %strided.v3, <2 x i32*>* %ptr2
ret void
}

; CHECK-LABEL: store_ptrvec_factor2:
; CHECK: st2 { v0.2d, v1.2d }, [x0]
define void @store_ptrvec_factor2(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1) {
%base = bitcast i32** %ptr to <4 x i32*>*
%interleaved.vec = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
store <4 x i32*> %interleaved.vec, <4 x i32*>* %base, align 4
ret void
}

; CHECK-LABEL: store_ptrvec_factor3:
; CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0]
define void @store_ptrvec_factor3(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2) {
%base = bitcast i32** %ptr to <6 x i32*>*
%v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%v2_u = shufflevector <2 x i32*> %v2, <2 x i32*> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_u, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
store <6 x i32*> %interleaved.vec, <6 x i32*>* %base, align 4
ret void
}

; CHECK-LABEL: store_ptrvec_factor4:
; CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
define void @store_ptrvec_factor4(i32* %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2, <2 x i32*> %v3) {
%base = bitcast i32* %ptr to <8 x i32*>*
%v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%v2_v3 = shufflevector <2 x i32*> %v2, <2 x i32*> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_v3, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
store <8 x i32*> %interleaved.vec, <8 x i32*>* %base, align 4
ret void
}

; The following cases check that shuffle masks with undef indices can be
; matched to ldN/stN instructions.

; CHECK-LABEL: load_undef_mask_factor2:
; CHECK: ld2 { v0.4s, v1.4s }, [x0]
define <4 x i32> @load_undef_mask_factor2(i32* %ptr) {
%base = bitcast i32* %ptr to <8 x i32>*
%wide.vec = load <8 x i32>, <8 x i32>* %base, align 4
%strided.v0 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 6>
%strided.v1 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 7>
%add = add nsw <4 x i32> %strided.v0, %strided.v1
ret <4 x i32> %add
}

; CHECK-LABEL: load_undef_mask_factor3:
; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
define <4 x i32> @load_undef_mask_factor3(i32* %ptr) {
%base = bitcast i32* %ptr to <12 x i32>*
%wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
%strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
%strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
%add = add nsw <4 x i32> %strided.v2, %strided.v1
ret <4 x i32> %add
}

; CHECK-LABEL: load_undef_mask_factor4:
; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
define <4 x i32> @load_undef_mask_factor4(i32* %ptr) {
%base = bitcast i32* %ptr to <16 x i32>*
%wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
%strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 undef, i32 undef>
%strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 undef, i32 undef>
%add = add nsw <4 x i32> %strided.v0, %strided.v2
ret <4 x i32> %add
}

; CHECK-LABEL: store_undef_mask_factor2:
; CHECK: st2 { v0.4s, v1.4s }, [x0]
define void @store_undef_mask_factor2(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1) {
%base = bitcast i32* %ptr to <8 x i32>*
%interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, <8 x i32>* %base, align 4
ret void
}

; CHECK-LABEL: store_undef_mask_factor3:
; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
define void @store_undef_mask_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
%base = bitcast i32* %ptr to <12 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 undef, i32 1, i32 undef, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
ret void
}

; CHECK-LABEL: store_undef_mask_factor4:
; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
define void @store_undef_mask_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
%base = bitcast i32* %ptr to <16 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 undef, i32 undef, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
ret void
}