diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll new file mode 100644 index 00000000000000..869f068c9371b3 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll @@ -0,0 +1,274 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2 %s +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2 %s +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2 %s + +; These patterns are produced by LoopVectorizer for interleaved loads. + +define void @load_i8_stride4_vf2(<8 x i8>* %in.vec, <2 x i8>* %out.vec0, <2 x i8>* %out.vec1, <2 x i8>* %out.vec2, <2 x i8>* %out.vec3) nounwind { +; AVX2-LABEL: load_i8_stride4_vf2: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpextrw $0, %xmm1, (%rsi) +; AVX2-NEXT: vpextrw $0, %xmm2, (%rdx) +; AVX2-NEXT: vpextrw $0, %xmm3, (%rcx) +; AVX2-NEXT: vpextrw $0, %xmm0, (%r8) +; AVX2-NEXT: retq + %wide.vec = load <8 x i8>, <8 x i8>* %in.vec, align 32 + + %strided.vec0 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 0, i32 4> + %strided.vec1 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 1, i32 5> + %strided.vec2 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 2, i32 6> + %strided.vec3 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 3, i32 7> + + store <2 x i8> %strided.vec0, <2 x i8>* %out.vec0, align 32 + store <2 x i8> %strided.vec1, <2 x i8>* %out.vec1, align 32 + store <2 x i8> %strided.vec2, <2 x i8>* %out.vec2, align 32 + store <2 x i8> %strided.vec3, <2 x i8>* %out.vec3, align 32 + + ret void +} + +define void @load_i8_stride4_vf4(<16 x i8>* %in.vec, <4 x i8>* %out.vec0, <4 x i8>* %out.vec1, <4 x i8>* %out.vec2, <4 x i8>* %out.vec3) nounwind { +; AVX2-LABEL: load_i8_stride4_vf4: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vmovd %xmm1, (%rsi) +; AVX2-NEXT: vmovd %xmm2, (%rdx) +; AVX2-NEXT: vmovd %xmm3, (%rcx) +; AVX2-NEXT: vmovd %xmm0, (%r8) +; AVX2-NEXT: retq + %wide.vec = load <16 x i8>, <16 x i8>* %in.vec, align 32 + + %strided.vec0 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12> + %strided.vec1 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13> + %strided.vec2 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14> + %strided.vec3 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15> + + store <4 x i8> %strided.vec0, <4 x i8>* %out.vec0, align 32 + store <4 x i8> %strided.vec1, <4 x i8>* %out.vec1, align 32 + store <4 x i8> %strided.vec2, <4 x i8>* %out.vec2, align 32 + store <4 x i8> %strided.vec3, <4 x i8>* %out.vec3, align 32 + + ret void +} + +define void 
@load_i8_stride4_vf8(<32 x i8>* %in.vec, <8 x i8>* %out.vec0, <8 x i8>* %out.vec1, <8 x i8>* %out.vec2, <8 x i8>* %out.vec3) nounwind { +; AVX2-LABEL: load_i8_stride4_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vmovdqa (%rdi), %xmm1 +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2 +; AVX2-NEXT: vpshufb %xmm0, %xmm2, %xmm3 +; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm4 +; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm5 +; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2 +; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX2-NEXT: vmovq %xmm0, (%rsi) +; AVX2-NEXT: vmovq %xmm3, (%rdx) +; AVX2-NEXT: vmovq %xmm4, (%rcx) +; AVX2-NEXT: vmovq %xmm1, (%r8) +; AVX2-NEXT: retq + %wide.vec = load <32 x i8>, <32 x i8>* %in.vec, align 32 + + %strided.vec0 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28> + %strided.vec1 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29> + %strided.vec2 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30> + %strided.vec3 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31> + + store <8 x i8> %strided.vec0, <8 x i8>* %out.vec0, align 32 + store <8 x i8> %strided.vec1, <8 x i8>* %out.vec1, align 32 + store <8 x i8> %strided.vec2, <8 x i8>* %out.vec2, align 32 + store <8 x i8> %strided.vec3, <8 x i8>* %out.vec3, align 32 + + ret void +} + +define void @load_i8_stride4_vf16(<64 x i8>* %in.vec, <16 x i8>* %out.vec0, <16 x i8>* %out.vec1, <16 x i8>* %out.vec2, <16 x i8>* %out.vec3) nounwind { +; AVX2-LABEL: load_i8_stride4_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX2-NEXT: vmovdqa 32(%rdi), %xmm2 +; AVX2-NEXT: vmovdqa 48(%rdi), %xmm3 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm5 +; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm4 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm6 +; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm8 = xmm5[0,1],xmm4[2,3] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm6 +; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm5 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm7 +; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm7 +; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm6 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = 
<2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm4 +; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm7 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = +; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm3 +; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm2 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] +; AVX2-NEXT: vmovdqa %xmm8, (%rsi) +; AVX2-NEXT: vmovdqa %xmm5, (%rdx) +; AVX2-NEXT: vmovdqa %xmm4, (%rcx) +; AVX2-NEXT: vmovdqa %xmm0, (%r8) +; AVX2-NEXT: retq + %wide.vec = load <64 x i8>, <64 x i8>* %in.vec, align 32 + + %strided.vec0 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> + %strided.vec1 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> + %strided.vec2 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> + %strided.vec3 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> + + store <16 x i8> %strided.vec0, <16 x i8>* %out.vec0, align 32 + store <16 x i8> %strided.vec1, <16 x i8>* %out.vec1, align 32 + store <16 x i8> %strided.vec2, <16 x i8>* %out.vec2, align 32 + store <16 x i8> %strided.vec3, <16 x i8>* %out.vec3, align 32 + + ret void +} + +define void @load_i8_stride4_vf32(<128 x i8>* %in.vec, <32 x i8>* %out.vec0, <32 x i8>* %out.vec1, <32 x i8>* %out.vec2, <32 x i8>* %out.vec3) nounwind { +; AVX2-LABEL: load_i8_stride4_vf32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = +; AVX2-NEXT: vmovdqa 112(%rdi), %xmm10 +; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1 +; AVX2-NEXT: vmovdqa 96(%rdi), %xmm11 +; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm3 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vmovdqa 80(%rdi), %xmm13 +; AVX2-NEXT: vpshufb %xmm2, %xmm13, %xmm4 +; AVX2-NEXT: vmovdqa 64(%rdi), %xmm5 +; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm6 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] +; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 +; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vmovdqa (%rdi), %xmm12 +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm14 +; AVX2-NEXT: vmovdqa 32(%rdi), %xmm6 +; AVX2-NEXT: vmovdqa 48(%rdi), %xmm7 +; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm1 +; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-NEXT: vpshufb %xmm2, %xmm14, %xmm1 +; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = +; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1 +; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm2 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm2, %xmm13, %xmm3 +; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm4 +; AVX2-NEXT: vpunpckldq 
{{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3 +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm3 +; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; AVX2-NEXT: vpshufb %xmm2, %xmm14, %xmm3 +; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = +; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1 +; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm2 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm2, %xmm13, %xmm3 +; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm4 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3 +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm3 +; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; AVX2-NEXT: vpshufb %xmm2, %xmm14, %xmm3 +; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = +; AVX2-NEXT: vpshufb %xmm1, %xmm10, %xmm2 +; AVX2-NEXT: vpshufb %xmm1, %xmm11, %xmm3 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm3, %xmm13, %xmm4 +; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] +; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4 +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vpshufb %xmm1, %xmm7, %xmm4 +; AVX2-NEXT: vpshufb %xmm1, %xmm6, %xmm1 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; AVX2-NEXT: vpshufb %xmm3, %xmm14, %xmm4 +; AVX2-NEXT: vpshufb %xmm3, %xmm12, %xmm3 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vmovdqa %ymm8, (%rsi) +; AVX2-NEXT: vmovdqa %ymm9, (%rdx) +; AVX2-NEXT: vmovdqa %ymm0, (%rcx) +; AVX2-NEXT: vmovdqa %ymm1, (%r8) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq + %wide.vec = load <128 x i8>, <128 x i8>* %in.vec, align 32 + + %strided.vec0 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> + %strided.vec1 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> + %strided.vec2 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> + %strided.vec3 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> + + store <32 x i8> %strided.vec0, <32 x i8>* %out.vec0, align 32 + store <32 x i8> %strided.vec1, <32 x i8>* %out.vec1, align 32 + store <32 x i8> %strided.vec2, <32 x i8>* %out.vec2, align 32 + store <32 x i8> %strided.vec3, <32 x i8>* %out.vec3, align 32 + + ret void +} diff --git 
a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll new file mode 100644 index 00000000000000..04647d48f25e65 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-4.ll @@ -0,0 +1,162 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2 %s +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2 %s +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2 %s + +; These patterns are produced by LoopVectorizer for interleaved stores. + +define void @store_i8_stride4_vf2(<2 x i8>* %in.vecptr0, <2 x i8>* %in.vecptr1, <2 x i8>* %in.vecptr2, <2 x i8>* %in.vecptr3, <8 x i8>* %out.vec) nounwind { +; AVX2-LABEL: store_i8_stride4_vf2: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa (%rdx), %xmm1 +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3] +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,1,5,2,6,3,7,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vmovq %xmm0, (%r8) +; AVX2-NEXT: retq + %in.vec0 = load <2 x i8>, <2 x i8>* %in.vecptr0, align 32 + %in.vec1 = load <2 x i8>, <2 x i8>* %in.vecptr1, align 32 + %in.vec2 = load <2 x i8>, <2 x i8>* %in.vecptr2, align 32 + %in.vec3 = load <2 x i8>, <2 x i8>* %in.vecptr3, align 32 + + %concat01 = shufflevector <2 x i8> %in.vec0, <2 x i8> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %concat23 = shufflevector <2 x i8> %in.vec2, <2 x i8> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %concat0123 = shufflevector <4 x i8> %concat01, <4 x i8> %concat23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %interleaved.vec = shufflevector <8 x i8> %concat0123, <8 x i8> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7> + + store <8 x i8> %interleaved.vec, <8 x i8>* %out.vec, align 32 + + ret void +} + +define void @store_i8_stride4_vf4(<4 x i8>* %in.vecptr0, <4 x i8>* %in.vecptr1, <4 x i8>* %in.vecptr2, <4 x i8>* %in.vecptr3, <16 x i8>* %out.vec) nounwind { +; AVX2-LABEL: store_i8_stride4_vf4: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa (%rdx), %xmm1 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15] +; AVX2-NEXT: vmovdqa %xmm0, (%r8) +; AVX2-NEXT: retq + %in.vec0 = load <4 x i8>, <4 x i8>* %in.vecptr0, align 32 + %in.vec1 = load <4 x i8>, <4 x i8>* %in.vecptr1, align 32 + %in.vec2 = load <4 x i8>, <4 x i8>* %in.vecptr2, align 32 + %in.vec3 = load <4 x i8>, <4 x i8>* %in.vecptr3, align 32 + + %concat01 = shufflevector <4 x i8> %in.vec0, <4 x i8> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %concat23 = shufflevector <4 x i8> %in.vec2, <4 x i8> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %concat0123 = shufflevector <8 x i8> %concat01, <8 x i8> %concat23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %interleaved.vec = shufflevector <16 x i8> %concat0123, <16 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15> + + store <16 x i8> %interleaved.vec, <16 
x i8>* %out.vec, align 32 + + ret void +} + +define void @store_i8_stride4_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1, <8 x i8>* %in.vecptr2, <8 x i8>* %in.vecptr3, <32 x i8>* %out.vec) nounwind { +; AVX2-LABEL: store_i8_stride4_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX2-NEXT: vmovdqa %xmm0, 16(%r8) +; AVX2-NEXT: vmovdqa %xmm2, (%r8) +; AVX2-NEXT: retq + %in.vec0 = load <8 x i8>, <8 x i8>* %in.vecptr0, align 32 + %in.vec1 = load <8 x i8>, <8 x i8>* %in.vecptr1, align 32 + %in.vec2 = load <8 x i8>, <8 x i8>* %in.vecptr2, align 32 + %in.vec3 = load <8 x i8>, <8 x i8>* %in.vecptr3, align 32 + + %concat01 = shufflevector <8 x i8> %in.vec0, <8 x i8> %in.vec1, <16 x i32> + %concat23 = shufflevector <8 x i8> %in.vec2, <8 x i8> %in.vec3, <16 x i32> + %concat0123 = shufflevector <16 x i8> %concat01, <16 x i8> %concat23, <32 x i32> + %interleaved.vec = shufflevector <32 x i8> %concat0123, <32 x i8> poison, <32 x i32> + + store <32 x i8> %interleaved.vec, <32 x i8>* %out.vec, align 32 + + ret void +} + +define void @store_i8_stride4_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr1, <16 x i8>* %in.vecptr2, <16 x i8>* %in.vecptr3, <64 x i8>* %out.vec) nounwind { +; AVX2-LABEL: store_i8_stride4_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa (%rsi), %xmm1 +; AVX2-NEXT: vmovdqa (%rdx), %xmm2 +; AVX2-NEXT: vmovdqa (%rcx), %xmm3 +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7] +; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; AVX2-NEXT: vmovdqa %xmm0, 48(%r8) +; AVX2-NEXT: vmovdqa %xmm4, 32(%r8) +; AVX2-NEXT: vmovdqa %xmm1, 16(%r8) +; AVX2-NEXT: vmovdqa %xmm3, (%r8) +; AVX2-NEXT: retq + %in.vec0 = load <16 x i8>, <16 x i8>* %in.vecptr0, align 32 + %in.vec1 = load <16 x i8>, <16 x i8>* %in.vecptr1, align 32 + %in.vec2 = load <16 x i8>, <16 x i8>* 
%in.vecptr2, align 32 + %in.vec3 = load <16 x i8>, <16 x i8>* %in.vecptr3, align 32 + + %concat01 = shufflevector <16 x i8> %in.vec0, <16 x i8> %in.vec1, <32 x i32> + %concat23 = shufflevector <16 x i8> %in.vec2, <16 x i8> %in.vec3, <32 x i32> + %concat0123 = shufflevector <32 x i8> %concat01, <32 x i8> %concat23, <64 x i32> + %interleaved.vec = shufflevector <64 x i8> %concat0123, <64 x i8> poison, <64 x i32> + + store <64 x i8> %interleaved.vec, <64 x i8>* %out.vec, align 32 + + ret void +} + +define void @store_i8_stride4_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr1, <32 x i8>* %in.vecptr2, <32 x i8>* %in.vecptr3, <128 x i8>* %out.vec) nounwind { +; AVX2-LABEL: store_i8_stride4_vf32: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vmovdqa (%rsi), %ymm1 +; AVX2-NEXT: vmovdqa (%rdx), %ymm2 +; AVX2-NEXT: vmovdqa (%rcx), %ymm3 +; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23] +; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31] +; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11] +; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15] +; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11] +; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15] +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm2 +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm5 +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3] +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm0[2,3] +; AVX2-NEXT: vmovdqa %ymm0, 96(%r8) +; AVX2-NEXT: vmovdqa %ymm1, 64(%r8) +; AVX2-NEXT: vmovdqa %ymm5, 32(%r8) +; AVX2-NEXT: vmovdqa %ymm2, (%r8) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq + %in.vec0 = load <32 x i8>, <32 x i8>* %in.vecptr0, align 32 + %in.vec1 = load <32 x i8>, <32 x i8>* %in.vecptr1, align 32 + %in.vec2 = load <32 x i8>, <32 x i8>* %in.vecptr2, align 32 + %in.vec3 = load <32 x i8>, <32 x i8>* %in.vecptr3, align 32 + + %concat01 = shufflevector <32 x i8> %in.vec0, <32 
x i8> %in.vec1, <64 x i32> + %concat23 = shufflevector <32 x i8> %in.vec2, <32 x i8> %in.vec3, <64 x i32> + %concat0123 = shufflevector <64 x i8> %concat01, <64 x i8> %concat23, <128 x i32> + %interleaved.vec = shufflevector <128 x i8> %concat0123, <128 x i8> poison, <128 x i32> + + store <128 x i8> %interleaved.vec, <128 x i8>* %out.vec, align 32 + + ret void +}
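+
+; For context only (not checked by FileCheck): a rough, hypothetical C sketch of
+; the kind of scalar loops the LoopVectorizer turns into the stride-4
+; interleaved load/store shuffle patterns exercised by these two test files.
+; The function and parameter names below are illustrative, assuming an i8
+; (unsigned char) element type and a trip count that the vectorizer can widen.
+;
+;   // De-interleaving load of stride 4 (vector-interleaved-load-i8-stride-4.ll).
+;   void load_i8_stride4(const unsigned char *in, unsigned char *out0,
+;                        unsigned char *out1, unsigned char *out2,
+;                        unsigned char *out3, int n) {
+;     for (int i = 0; i < n; ++i) {
+;       out0[i] = in[4 * i + 0];
+;       out1[i] = in[4 * i + 1];
+;       out2[i] = in[4 * i + 2];
+;       out3[i] = in[4 * i + 3];
+;     }
+;   }
+;
+;   // Interleaving store of stride 4 (vector-interleaved-store-i8-stride-4.ll).
+;   void store_i8_stride4(const unsigned char *in0, const unsigned char *in1,
+;                         const unsigned char *in2, const unsigned char *in3,
+;                         unsigned char *out, int n) {
+;     for (int i = 0; i < n; ++i) {
+;       out[4 * i + 0] = in0[i];
+;       out[4 * i + 1] = in1[i];
+;       out[4 * i + 2] = in2[i];
+;       out[4 * i + 3] = in3[i];
+;     }
+;   }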