diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index a3368a1960318e..36e6cf436b18c2 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -5076,6 +5076,7 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
       {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16
 
       {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32
+      {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32
 
       {2, MVT::v4i64, 6}, // (load 8i64 and) deinterleave into 2 x 4i64
 
@@ -5121,6 +5122,7 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
       {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)
 
       {2, MVT::v2i32, 1}, // interleave 2 x 2i32 into 4i32 (and store)
+      {2, MVT::v4i32, 2}, // interleave 2 x 4i32 into 8i32 (and store)
 
       {2, MVT::v4i64, 6}, // interleave 2 x 4i64 into 8i64 (and store)
 
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
index afbf47a1c5be60..369dbe70045e5b 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f32-stride-2.ll
@@ -27,7 +27,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ;;
 ; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load float, float* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 3 for VF 2 For instruction: %v0 = load float, float* %in0, align 4
-; AVX2: LV: Found an estimated cost of 17 for VF 4 For instruction: %v0 = load float, float* %in0, align 4
+; AVX2: LV: Found an estimated cost of 3 for VF 4 For instruction: %v0 = load float, float* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 38 for VF 8 For instruction: %v0 = load float, float* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 76 for VF 16 For instruction: %v0 = load float, float* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 152 for VF 32 For instruction: %v0 = load float, float* %in0, align 4
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
index 71b893cd6bc0f7..6e07222d90f208 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i32-stride-2.ll
@@ -27,7 +27,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ;
 ; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load i32, i32* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 3 for VF 2 For instruction: %v0 = load i32, i32* %in0, align 4
-; AVX2: LV: Found an estimated cost of 21 for VF 4 For instruction: %v0 = load i32, i32* %in0, align 4
+; AVX2: LV: Found an estimated cost of 3 for VF 4 For instruction: %v0 = load i32, i32* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 46 for VF 8 For instruction: %v0 = load i32, i32* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 92 for VF 16 For instruction: %v0 = load i32, i32* %in0, align 4
 ; AVX2: LV: Found an estimated cost of 184 for VF 32 For instruction: %v0 = load i32, i32* %in0, align 4
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-f32-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-f32-stride-2.ll
index b38ceb5ef89507..0998cab6f1a6cf 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-store-f32-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-f32-stride-2.ll
@@ -27,7 +27,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ;
 ; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: store float %v1, float* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 2 for VF 2 For instruction: store float %v1, float* %out1, align 4
-; AVX2: LV: Found an estimated cost of 15 for VF 4 For instruction: store float %v1, float* %out1, align 4
+; AVX2: LV: Found an estimated cost of 3 for VF 4 For instruction: store float %v1, float* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 38 for VF 8 For instruction: store float %v1, float* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 76 for VF 16 For instruction: store float %v1, float* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 152 for VF 32 For instruction: store float %v1, float* %out1, align 4
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-i32-stride-2.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-i32-stride-2.ll
index bca202ff4dc1e3..5dc4cce71794dd 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-store-i32-stride-2.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-i32-stride-2.ll
@@ -27,7 +27,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ;
 ; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: store i32 %v1, i32* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 2 for VF 2 For instruction: store i32 %v1, i32* %out1, align 4
-; AVX2: LV: Found an estimated cost of 19 for VF 4 For instruction: store i32 %v1, i32* %out1, align 4
+; AVX2: LV: Found an estimated cost of 3 for VF 4 For instruction: store i32 %v1, i32* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 46 for VF 8 For instruction: store i32 %v1, i32* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 92 for VF 16 For instruction: store i32 %v1, i32* %out1, align 4
 ; AVX2: LV: Found an estimated cost of 184 for VF 32 For instruction: store i32 %v1, i32* %out1, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
index 4afb7a8cb3001c..d37ab074ae9ddf 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
@@ -64,48 +64,48 @@ define void @foo(i32* noalias nocapture %a, i32* noalias nocapture readonly %b)
 ; AVX2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; AVX2-NEXT:    [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
 ; AVX2-NEXT:    [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; AVX2-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
+; AVX2-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 8
 ; AVX2-NEXT:    [[TMP3:%.*]] = shl i64 [[INDEX]], 1
-; AVX2-NEXT:    [[TMP4:%.*]] = or i64 [[TMP3]], 8
+; AVX2-NEXT:    [[TMP4:%.*]] = or i64 [[TMP3]], 16
 ; AVX2-NEXT:    [[TMP5:%.*]] = shl i64 [[INDEX]], 1
-; AVX2-NEXT:    [[TMP6:%.*]] = or i64 [[TMP5]], 12
+; AVX2-NEXT:    [[TMP6:%.*]] = or i64 [[TMP5]], 24
 ; AVX2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[TMP0]]
 ; AVX2-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP2]]
 ; AVX2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP4]]
 ; AVX2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP6]]
-; AVX2-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
-; AVX2-NEXT:    [[TMP12:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
-; AVX2-NEXT:    [[TMP13:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
-; AVX2-NEXT:    [[TMP14:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
-; AVX2-NEXT:    [[WIDE_VEC:%.*]] = load <4 x i32>, <4 x i32>* [[TMP11]], align 4
-; AVX2-NEXT:    [[WIDE_VEC1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP12]], align 4
-; AVX2-NEXT:    [[WIDE_VEC2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP13]], align 4
-; AVX2-NEXT:    [[WIDE_VEC3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP14]], align 4
-; AVX2-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <4 x i32> [[WIDE_VEC]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
-; AVX2-NEXT:    [[STRIDED_VEC4:%.*]] = shufflevector <4 x i32> [[WIDE_VEC1]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
-; AVX2-NEXT:    [[STRIDED_VEC5:%.*]] = shufflevector <4 x i32> [[WIDE_VEC2]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
-; AVX2-NEXT:    [[STRIDED_VEC6:%.*]] = shufflevector <4 x i32> [[WIDE_VEC3]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
-; AVX2-NEXT:    [[STRIDED_VEC7:%.*]] = shufflevector <4 x i32> [[WIDE_VEC]], <4 x i32> poison, <2 x i32> <i32 1, i32 3>
-; AVX2-NEXT:    [[STRIDED_VEC8:%.*]] = shufflevector <4 x i32> [[WIDE_VEC1]], <4 x i32> poison, <2 x i32> <i32 1, i32 3>
-; AVX2-NEXT:    [[STRIDED_VEC9:%.*]] = shufflevector <4 x i32> [[WIDE_VEC2]], <4 x i32> poison, <2 x i32> <i32 1, i32 3>
-; AVX2-NEXT:    [[STRIDED_VEC10:%.*]] = shufflevector <4 x i32> [[WIDE_VEC3]], <4 x i32> poison, <2 x i32> <i32 1, i32 3>
-; AVX2-NEXT:    [[TMP15:%.*]] = add nsw <2 x i32> [[STRIDED_VEC7]], [[STRIDED_VEC]]
-; AVX2-NEXT:    [[TMP16:%.*]] = add nsw <2 x i32> [[STRIDED_VEC8]], [[STRIDED_VEC4]]
-; AVX2-NEXT:    [[TMP17:%.*]] = add nsw <2 x i32> [[STRIDED_VEC9]], [[STRIDED_VEC5]]
-; AVX2-NEXT:    [[TMP18:%.*]] = add nsw <2 x i32> [[STRIDED_VEC10]], [[STRIDED_VEC6]]
+; AVX2-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP7]] to <8 x i32>*
+; AVX2-NEXT:    [[TMP12:%.*]] = bitcast i32* [[TMP8]] to <8 x i32>*
+; AVX2-NEXT:    [[TMP13:%.*]] = bitcast i32* [[TMP9]] to <8 x i32>*
+; AVX2-NEXT:    [[TMP14:%.*]] = bitcast i32* [[TMP10]] to <8 x i32>*
+; AVX2-NEXT:    [[WIDE_VEC:%.*]] = load <8 x i32>, <8 x i32>* [[TMP11]], align 4
+; AVX2-NEXT:    [[WIDE_VEC1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP12]], align 4
+; AVX2-NEXT:    [[WIDE_VEC2:%.*]] = load <8 x i32>, <8 x i32>* [[TMP13]], align 4
+; AVX2-NEXT:    [[WIDE_VEC3:%.*]] = load <8 x i32>, <8 x i32>* [[TMP14]], align 4
+; AVX2-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; AVX2-NEXT:    [[STRIDED_VEC4:%.*]] = shufflevector <8 x i32> [[WIDE_VEC1]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; AVX2-NEXT:    [[STRIDED_VEC5:%.*]] = shufflevector <8 x i32> [[WIDE_VEC2]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; AVX2-NEXT:    [[STRIDED_VEC6:%.*]] = shufflevector <8 x i32> [[WIDE_VEC3]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; AVX2-NEXT:    [[STRIDED_VEC7:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; AVX2-NEXT:    [[STRIDED_VEC8:%.*]] = shufflevector <8 x i32> [[WIDE_VEC1]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; AVX2-NEXT:    [[STRIDED_VEC9:%.*]] = shufflevector <8 x i32> [[WIDE_VEC2]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; AVX2-NEXT:    [[STRIDED_VEC10:%.*]] = shufflevector <8 x i32> [[WIDE_VEC3]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; AVX2-NEXT:    [[TMP15:%.*]] = add nsw <4 x i32> [[STRIDED_VEC7]], [[STRIDED_VEC]]
+; AVX2-NEXT:    [[TMP16:%.*]] = add nsw <4 x i32> [[STRIDED_VEC8]], [[STRIDED_VEC4]]
+; AVX2-NEXT:    [[TMP17:%.*]] = add nsw <4 x i32> [[STRIDED_VEC9]], [[STRIDED_VEC5]]
+; AVX2-NEXT:    [[TMP18:%.*]] = add nsw <4 x i32> [[STRIDED_VEC10]], [[STRIDED_VEC6]]
 ; AVX2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
-; AVX2-NEXT:    [[TMP20:%.*]] = bitcast i32* [[TMP19]] to <2 x i32>*
-; AVX2-NEXT:    store <2 x i32> [[TMP15]], <2 x i32>* [[TMP20]], align 4
-; AVX2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 2
-; AVX2-NEXT:    [[TMP22:%.*]] = bitcast i32* [[TMP21]] to <2 x i32>*
-; AVX2-NEXT:    store <2 x i32> [[TMP16]], <2 x i32>* [[TMP22]], align 4
-; AVX2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 4
-; AVX2-NEXT:    [[TMP24:%.*]] = bitcast i32* [[TMP23]] to <2 x i32>*
-; AVX2-NEXT:    store <2 x i32> [[TMP17]], <2 x i32>* [[TMP24]], align 4
-; AVX2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 6
-; AVX2-NEXT:    [[TMP26:%.*]] = bitcast i32* [[TMP25]] to <2 x i32>*
-; AVX2-NEXT:    store <2 x i32> [[TMP18]], <2 x i32>* [[TMP26]], align 4
-; AVX2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; AVX2-NEXT:    [[TMP20:%.*]] = bitcast i32* [[TMP19]] to <4 x i32>*
+; AVX2-NEXT:    store <4 x i32> [[TMP15]], <4 x i32>* [[TMP20]], align 4
+; AVX2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 4
+; AVX2-NEXT:    [[TMP22:%.*]] = bitcast i32* [[TMP21]] to <4 x i32>*
+; AVX2-NEXT:    store <4 x i32> [[TMP16]], <4 x i32>* [[TMP22]], align 4
+; AVX2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 8
+; AVX2-NEXT:    [[TMP24:%.*]] = bitcast i32* [[TMP23]] to <4 x i32>*
+; AVX2-NEXT:    store <4 x i32> [[TMP17]], <4 x i32>* [[TMP24]], align 4
+; AVX2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, i32* [[TMP19]], i64 12
+; AVX2-NEXT:    [[TMP26:%.*]] = bitcast i32* [[TMP25]] to <4 x i32>*
+; AVX2-NEXT:    store <4 x i32> [[TMP18]], <4 x i32>* [[TMP26]], align 4
+; AVX2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
 ; AVX2-NEXT:    [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
 ; AVX2-NEXT:    br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; AVX2:       middle.block:
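Note on the numbers above: with AVX2, a stride-2 group of <4 x i32> is one 32-byte memory access plus a pair of cross-lane shuffles (roughly two vpermps-class instructions), hence the new table cost of 2. The old VF 4 per-instruction costs of 15-21 came from the generic fallback, which presumably made VF 4 look so expensive that the vectorizer settled for VF 2, as the interleaving.ll diff shows. Below is a minimal sketch of the IR shape the new entries price; it is not code from the patch, and the function names are illustrative only:

; Load side: one wide load, then even/odd shufflevectors extract the two
; strided <4 x i32> halves (the odd half uses mask <1, 3, 5, 7>).
; Illustrative function name, not from the patch.
define <4 x i32> @deinterleave_even_v4i32(<8 x i32>* %p) {
  %wide = load <8 x i32>, <8 x i32>* %p, align 4
  %even = shufflevector <8 x i32> %wide, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  ret <4 x i32> %even
}

; Store side: one interleaving shuffle (a0,b0,a1,b1,...) feeding one wide
; store. Illustrative function name, not from the patch.
define void @interleave_v4i32(<4 x i32> %a, <4 x i32> %b, <8 x i32>* %p) {
  %v = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  store <8 x i32> %v, <8 x i32>* %p, align 4
  ret void
}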