diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 53ef3a81177e9..afe5984b09763 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -18818,9 +18818,33 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
   SDValue N0 = Op.getOperand(0);
   SDValue N1 = Op.getOperand(1);
   SDValue N2 = Op.getOperand(2);
-  auto *N2C = dyn_cast<ConstantSDNode>(N2);
-  if (!N2C || N2C->getAPIntValue().uge(NumElts))
+  auto *N2C = dyn_cast<ConstantSDNode>(N2);
+
+  if (!N2C) {
+    // Variable insertion indices, usually we're better off spilling to stack,
+    // but AVX512 can use a variable compare+select by comparing against all
+    // possible vector indices.
+    if (!(Subtarget.hasBWI() ||
+          (Subtarget.hasAVX512() && EltVT.getScalarSizeInBits() >= 32)))
+      return SDValue();
+
+    MVT IdxSVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
+    MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
+    SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
+    SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
+    SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
+
+    SmallVector<SDValue, 16> RawIndices;
+    for (unsigned I = 0; I != NumElts; ++I)
+      RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
+    SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
+
+    // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
+    return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
+                           ISD::CondCode::SETEQ);
+  }
+
+  if (N2C->getAPIntValue().uge(NumElts))
     return SDValue();
 
   uint64_t IdxVal = N2C->getZExtValue();
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 50026ec543e2c..ea7f928d09be3 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -1653,25 +1653,16 @@ define i32 @test_insertelement_variable_v32i1(<32 x i8> %a, i8 %b, i32 %index) {
 ;
 ; SKX-LABEL: test_insertelement_variable_v32i1:
 ; SKX: ## %bb.0:
-; SKX-NEXT: pushq %rbp
-; SKX-NEXT: .cfi_def_cfa_offset 16
-; SKX-NEXT: .cfi_offset %rbp, -16
-; SKX-NEXT: movq %rsp, %rbp
-; SKX-NEXT: .cfi_def_cfa_register %rbp
-; SKX-NEXT: andq $-32, %rsp
-; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
 ; SKX-NEXT: vptestmb %ymm0, %ymm0, %k0
-; SKX-NEXT: andl $31, %esi
 ; SKX-NEXT: testb %dil, %dil
+; SKX-NEXT: setne %al
+; SKX-NEXT: vpbroadcastb %esi, %ymm0
+; SKX-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %k1
 ; SKX-NEXT: vpmovm2b %k0, %ymm0
-; SKX-NEXT: vmovdqa %ymm0, (%rsp)
-; SKX-NEXT: setne (%rsp,%rsi)
-; SKX-NEXT: vpsllw $7, (%rsp), %ymm0
+; SKX-NEXT: vpbroadcastb %eax, %ymm0 {%k1}
+; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
 ; SKX-NEXT: vpmovb2m %ymm0, %k0
 ; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: movq %rbp, %rsp
-; SKX-NEXT: popq %rbp
 ; SKX-NEXT: vzeroupper
 ; SKX-NEXT: retq
   %t1 = icmp ugt <32 x i8> %a, zeroinitializer
@@ -1731,25 +1722,16 @@ define i64 @test_insertelement_variable_v64i1(<64 x i8> %a, i8 %b, i32 %index) {
 ;
 ; SKX-LABEL: test_insertelement_variable_v64i1:
 ; SKX: ## %bb.0:
-; SKX-NEXT: pushq %rbp
-; SKX-NEXT: .cfi_def_cfa_offset 16
-; SKX-NEXT: .cfi_offset %rbp, -16
-; SKX-NEXT: movq %rsp, %rbp
-; SKX-NEXT: .cfi_def_cfa_register %rbp
-; SKX-NEXT: andq $-64, %rsp
-; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
 ; SKX-NEXT: vptestmb %zmm0, %zmm0, %k0
-; SKX-NEXT: andl $63, %esi
 ; SKX-NEXT: testb %dil, %dil
+; SKX-NEXT: setne %al
+; SKX-NEXT: vpbroadcastb %esi, %zmm0
+; SKX-NEXT: vpcmpeqb {{.*}}(%rip), %zmm0, %k1
 ; SKX-NEXT: vpmovm2b %k0, %zmm0
-; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
-; SKX-NEXT: setne (%rsp,%rsi)
-; SKX-NEXT: vpsllw $7, (%rsp), %zmm0
+; SKX-NEXT: vpbroadcastb %eax, %zmm0 {%k1}
+; SKX-NEXT: vpsllw $7, %zmm0, %zmm0
 ; SKX-NEXT: vpmovb2m %zmm0, %k0
 ; SKX-NEXT: kmovq %k0, %rax
-; SKX-NEXT: movq %rbp, %rsp
-; SKX-NEXT: popq %rbp
 ; SKX-NEXT: vzeroupper
 ; SKX-NEXT: retq
   %t1 = icmp ugt <64 x i8> %a, zeroinitializer
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index 9acc259dc3254..58a5d59834534 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX1OR2,AVX2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512F
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
 
@@ -699,14 +699,30 @@ define <16 x i8> @arg_i8_v16i8(<16 x i8> %v, i8 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i8_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movb %dil, -24(%rsp,%rsi)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i8_v16i8:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $15, %esi
+; AVX1OR2-NEXT: movb %dil, -24(%rsp,%rsi)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i8_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX512F-NEXT: andl $15, %esi
+; AVX512F-NEXT: movb %dil, -24(%rsp,%rsi)
+; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i8_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %esi, %xmm1
+; AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %xmm1, %k1
+; AVX512BW-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
+; AVX512BW-NEXT: retq
   %ins = insertelement <16 x i8> %v, i8 %x, i32 %y
   ret <16 x i8> %ins
 }
@@ -721,14 +737,30 @@ define <8 x i16> @arg_i16_v8i16(<8 x i16> %v, i16 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i16_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movw %di, -24(%rsp,%rsi,2)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i16_v8i16:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $7, %esi
+; AVX1OR2-NEXT: movw %di, -24(%rsp,%rsi,2)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i16_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX512F-NEXT: andl $7, %esi
+; AVX512F-NEXT: movw %di, -24(%rsp,%rsi,2)
+; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i16_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastw %esi, %xmm1
+; AVX512BW-NEXT: vpcmpeqw {{.*}}(%rip), %xmm1, %k1
+; AVX512BW-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
+; AVX512BW-NEXT: retq
   %ins = insertelement <8 x i16> %v, i16 %x, i32 %y
   ret <8 x i16> %ins
 }
@@ -743,14 +775,21 @@ define <4 x i32> @arg_i32_v4i32(<4 x i32> %v, i32 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i32_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movl %edi, -24(%rsp,%rsi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i32_v4i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $3, %esi
+; AVX1OR2-NEXT: movl %edi, -24(%rsp,%rsi,4)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_i32_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %esi, %xmm1
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %k1
+; AVX512-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <4 x i32> %v, i32 %x, i32 %y
   ret <4 x i32> %ins
 }
@@ -765,14 +804,22 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i64_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $1, %esi
-; AVX-NEXT: movq %rdi, -24(%rsp,%rsi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i64_v2i64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $1, %esi
+; AVX1OR2-NEXT: movq %rdi, -24(%rsp,%rsi,8)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_i64_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %esi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %xmm1
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %k1
+; AVX512-NEXT: vpbroadcastq %rdi, %xmm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <2 x i64> %v, i64 %x, i32 %y
   ret <2 x i64> %ins
 }
@@ -787,14 +834,21 @@ define <4 x float> @arg_f32_v4f32(<4 x float> %v, float %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f32_v4f32:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $3, %edi
-; AVX-NEXT: vmovss %xmm1, -24(%rsp,%rdi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $3, %edi
+; AVX1OR2-NEXT: vmovss %xmm1, -24(%rsp,%rdi,4)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %edi, %xmm2
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %xmm2, %k1
+; AVX512-NEXT: vbroadcastss %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <4 x float> %v, float %x, i32 %y
   ret <4 x float> %ins
 }
@@ -809,14 +863,22 @@ define <2 x double> @arg_f64_v2f64(<2 x double> %v, double %x, i32 %y) nounwind
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f64_v2f64:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $1, %edi
-; AVX-NEXT: vmovsd %xmm1, -24(%rsp,%rdi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $1, %edi
+; AVX1OR2-NEXT: vmovsd %xmm1, -24(%rsp,%rdi,8)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %edi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %xmm2
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %xmm2, %k1
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
+; AVX512-NEXT: retq
   %ins = insertelement <2 x double> %v, double %x, i32 %y
   ret <2 x double> %ins
 }
@@ -832,15 +894,32 @@ define <16 x i8> @load_i8_v16i8(<16 x i8> %v, i8* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i8_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movb (%rdi), %al
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movb %al, -24(%rsp,%rsi)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i8_v16i8:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movb (%rdi), %al
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $15, %esi
+; AVX1OR2-NEXT: movb %al, -24(%rsp,%rsi)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: load_i8_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: movb (%rdi), %al
+; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX512F-NEXT: andl $15, %esi
+; AVX512F-NEXT: movb %al, -24(%rsp,%rsi)
+; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_i8_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %esi, %xmm1
+; AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %xmm1, %k1
+; AVX512BW-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
+; AVX512BW-NEXT: retq
   %x = load i8, i8* %p
   %ins = insertelement <16 x i8> %v, i8 %x, i32 %y
   ret <16 x i8> %ins
@@ -857,15 +936,32 @@ define <8 x i16> @load_i16_v8i16(<8 x i16> %v, i16* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i16_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movzwl (%rdi), %eax
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movw %ax, -24(%rsp,%rsi,2)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i16_v8i16:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movzwl (%rdi), %eax
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $7, %esi
+; AVX1OR2-NEXT: movw %ax, -24(%rsp,%rsi,2)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: load_i16_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: movzwl (%rdi), %eax
+; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX512F-NEXT: andl $7, %esi
+; AVX512F-NEXT: movw %ax, -24(%rsp,%rsi,2)
+; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_i16_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastw %esi, %xmm1
+; AVX512BW-NEXT: vpcmpeqw {{.*}}(%rip), %xmm1, %k1
+; AVX512BW-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
+; AVX512BW-NEXT: retq
   %x = load i16, i16* %p
   %ins = insertelement <8 x i16> %v, i16 %x, i32 %y
   ret <8 x i16> %ins
@@ -882,15 +978,22 @@ define <4 x i32> @load_i32_v4i32(<4 x i32> %v, i32* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i32_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movl (%rdi), %eax
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movl %eax, -24(%rsp,%rsi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i32_v4i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movl (%rdi), %eax
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $3, %esi
+; AVX1OR2-NEXT: movl %eax, -24(%rsp,%rsi,4)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_i32_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %esi, %xmm1
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %k1
+; AVX512-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
+; AVX512-NEXT: retq
   %x = load i32, i32* %p
   %ins = insertelement <4 x i32> %v, i32 %x, i32 %y
   ret <4 x i32> %ins
@@ -907,15 +1010,23 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, i64* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i64_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $1, %esi
-; AVX-NEXT: movq %rax, -24(%rsp,%rsi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i64_v2i64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movq (%rdi), %rax
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $1, %esi
+; AVX1OR2-NEXT: movq %rax, -24(%rsp,%rsi,8)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_i64_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %esi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %xmm1
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %k1
+; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1}
+; AVX512-NEXT: retq
   %x = load i64, i64* %p
   %ins = insertelement <2 x i64> %v, i64 %x, i32 %y
   ret <2 x i64> %ins
@@ -932,15 +1043,22 @@ define <4 x float> @load_f32_v4f32(<4 x float> %v, float* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_f32_v4f32:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: vmovss %xmm1, -24(%rsp,%rsi,4)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_f32_v4f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $3, %esi
+; AVX1OR2-NEXT: vmovss %xmm1, -24(%rsp,%rsi,4)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_f32_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %esi, %xmm1
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %k1
+; AVX512-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
+; AVX512-NEXT: retq
   %x = load float, float* %p
   %ins = insertelement <4 x float> %v, float %x, i32 %y
   ret <4 x float> %ins
@@ -957,15 +1075,23 @@ define <2 x double> @load_f64_v2f64(<2 x double> %v, double* %p, i32 %y) nounwin
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_f64_v2f64:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: andl $1, %esi
-; AVX-NEXT: vmovsd %xmm1, -24(%rsp,%rsi,8)
-; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_f64_v2f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1OR2-NEXT: andl $1, %esi
+; AVX1OR2-NEXT: vmovsd %xmm1, -24(%rsp,%rsi,8)
+; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_f64_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %esi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %xmm1
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %k1
+; AVX512-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
+; AVX512-NEXT: retq
   %x = load double, double* %p
   %ins = insertelement <2 x double> %v, double %x, i32 %y
   ret <2 x double> %ins
@@ -983,20 +1109,42 @@ define <32 x i8> @arg_i8_v32i8(<32 x i8> %v, i8 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i8_v32i8:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $31, %esi
-; AVX-NEXT: movb %dil, (%rsp,%rsi)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i8_v32i8:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $31, %esi
+; AVX1OR2-NEXT: movb %dil, (%rsp,%rsi)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i8_v32i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512F-NEXT: andl $31, %esi
+; AVX512F-NEXT: movb %dil, (%rsp,%rsi)
+; AVX512F-NEXT: vmovaps (%rsp), %ymm0
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i8_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %esi, %ymm1
+; AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %ymm1, %k1
+; AVX512BW-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
+; AVX512BW-NEXT: retq
   %ins = insertelement <32 x i8> %v, i8 %x, i32 %y
   ret <32 x i8> %ins
 }
@@ -1013,20 +1161,42 @@ define <16 x i16> @arg_i16_v16i16(<16 x i16> %v, i16 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i16_v16i16:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movw %di, (%rsp,%rsi,2)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i16_v16i16:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $15, %esi
+; AVX1OR2-NEXT: movw %di, (%rsp,%rsi,2)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i16_v16i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512F-NEXT: andl $15, %esi
+; AVX512F-NEXT: movw %di, (%rsp,%rsi,2)
+; AVX512F-NEXT: vmovaps (%rsp), %ymm0
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i16_v16i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastw %esi, %ymm1
+; AVX512BW-NEXT: vpcmpeqw {{.*}}(%rip), %ymm1, %k1
+; AVX512BW-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
+; AVX512BW-NEXT: retq
   %ins = insertelement <16 x i16> %v, i16 %x, i32 %y
   ret <16 x i16> %ins
 }
@@ -1043,20 +1213,27 @@ define <8 x i32> @arg_i32_v8i32(<8 x i32> %v, i32 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i32_v8i32:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movl %edi, (%rsp,%rsi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i32_v8i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $7, %esi
+; AVX1OR2-NEXT: movl %edi, (%rsp,%rsi,4)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_i32_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %esi, %ymm1
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %ymm1, %k1
+; AVX512-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <8 x i32> %v, i32 %x, i32 %y
   ret <8 x i32> %ins
 }
@@ -1073,20 +1250,28 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_i64_v4i64:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movq %rdi, (%rsp,%rsi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_i64_v4i64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $3, %esi
+; AVX1OR2-NEXT: movq %rdi, (%rsp,%rsi,8)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_i64_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %esi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %ymm1
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %ymm1, %k1
+; AVX512-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <4 x i64> %v, i64 %x, i32 %y
   ret <4 x i64> %ins
 }
@@ -1103,20 +1288,27 @@ define <8 x float> @arg_f32_v8f32(<8 x float> %v, float %x, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f32_v8f32:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $7, %edi
-; AVX-NEXT: vmovss %xmm1, (%rsp,%rdi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $7, %edi
+; AVX1OR2-NEXT: vmovss %xmm1, (%rsp,%rdi,4)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %edi, %ymm2
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %ymm2, %k1
+; AVX512-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <8 x float> %v, float %x, i32 %y
   ret <8 x float> %ins
 }
@@ -1133,20 +1325,28 @@ define <4 x double> @arg_f64_v4f64(<4 x double> %v, double %x, i32 %y) nounwind
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: arg_f64_v4f64:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $edi killed $edi def $rdi
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $3, %edi
-; AVX-NEXT: vmovsd %xmm1, (%rsp,%rdi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: arg_f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $3, %edi
+; AVX1OR2-NEXT: vmovsd %xmm1, (%rsp,%rdi,8)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: arg_f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %edi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %ymm2
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %ymm2, %k1
+; AVX512-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1}
+; AVX512-NEXT: retq
   %ins = insertelement <4 x double> %v, double %x, i32 %y
   ret <4 x double> %ins
 }
@@ -1164,21 +1364,44 @@ define <32 x i8> @load_i8_v32i8(<32 x i8> %v, i8* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i8_v32i8:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movb (%rdi), %al
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $31, %esi
-; AVX-NEXT: movb %al, (%rsp,%rsi)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i8_v32i8:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movb (%rdi), %al
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $31, %esi
+; AVX1OR2-NEXT: movb %al, (%rsp,%rsi)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: load_i8_v32i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: movb (%rdi), %al
+; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512F-NEXT: andl $31, %esi
+; AVX512F-NEXT: movb %al, (%rsp,%rsi)
+; AVX512F-NEXT: vmovaps (%rsp), %ymm0
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_i8_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %esi, %ymm1
+; AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %ymm1, %k1
+; AVX512BW-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
+; AVX512BW-NEXT: retq
   %x = load i8, i8* %p
   %ins = insertelement <32 x i8> %v, i8 %x, i32 %y
   ret <32 x i8> %ins
@@ -1197,21 +1420,44 @@ define <16 x i16> @load_i16_v16i16(<16 x i16> %v, i16* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i16_v16i16:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movzwl (%rdi), %eax
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $15, %esi
-; AVX-NEXT: movw %ax, (%rsp,%rsi,2)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i16_v16i16:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movzwl (%rdi), %eax
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $15, %esi
+; AVX1OR2-NEXT: movw %ax, (%rsp,%rsi,2)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: load_i16_v16i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
+; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX512F-NEXT: movzwl (%rdi), %eax
+; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512F-NEXT: andl $15, %esi
+; AVX512F-NEXT: movw %ax, (%rsp,%rsi,2)
+; AVX512F-NEXT: vmovaps (%rsp), %ymm0
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_i16_v16i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastw %esi, %ymm1
+; AVX512BW-NEXT: vpcmpeqw {{.*}}(%rip), %ymm1, %k1
+; AVX512BW-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
+; AVX512BW-NEXT: retq
   %x = load i16, i16* %p
   %ins = insertelement <16 x i16> %v, i16 %x, i32 %y
   ret <16 x i16> %ins
@@ -1230,21 +1476,28 @@ define <8 x i32> @load_i32_v8i32(<8 x i32> %v, i32* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i32_v8i32:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movl (%rdi), %eax
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: movl %eax, (%rsp,%rsi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i32_v8i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movl (%rdi), %eax
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $7, %esi
+; AVX1OR2-NEXT: movl %eax, (%rsp,%rsi,4)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_i32_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %esi, %ymm1
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %ymm1, %k1
+; AVX512-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
+; AVX512-NEXT: retq
   %x = load i32, i32* %p
   %ins = insertelement <8 x i32> %v, i32 %x, i32 %y
   ret <8 x i32> %ins
@@ -1263,21 +1516,29 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, i64* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_i64_v4i64:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: movq %rax, (%rsp,%rsi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_i64_v4i64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: movq (%rdi), %rax
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $3, %esi
+; AVX1OR2-NEXT: movq %rax, (%rsp,%rsi,8)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_i64_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %esi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %ymm1
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %ymm1, %k1
+; AVX512-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
+; AVX512-NEXT: retq
   %x = load i64, i64* %p
   %ins = insertelement <4 x i64> %v, i64 %x, i32 %y
   ret <4 x i64> %ins
@@ -1296,21 +1557,28 @@ define <8 x float> @load_f32_v8f32(<8 x float> %v, float* %p, i32 %y) nounwind {
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_f32_v8f32:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $7, %esi
-; AVX-NEXT: vmovss %xmm1, (%rsp,%rsi,4)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $7, %esi
+; AVX1OR2-NEXT: vmovss %xmm1, (%rsp,%rsi,4)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_f32_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %esi, %ymm1
+; AVX512-NEXT: vpcmpeqd {{.*}}(%rip), %ymm1, %k1
+; AVX512-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
+; AVX512-NEXT: retq
   %x = load float, float* %p
   %ins = insertelement <8 x float> %v, float %x, i32 %y
   ret <8 x float> %ins
@@ -1329,21 +1597,29 @@ define <4 x double> @load_f64_v4f64(<4 x double> %v, double* %p, i32 %y) nounwin
 ; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: load_f64_v4f64:
-; AVX: # %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $64, %rsp
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vmovaps %ymm0, (%rsp)
-; AVX-NEXT: andl $3, %esi
-; AVX-NEXT: vmovsd %xmm1, (%rsp,%rsi,8)
-; AVX-NEXT: vmovaps (%rsp), %ymm0
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: load_f64_v4f64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: pushq %rbp
+; AVX1OR2-NEXT: movq %rsp, %rbp
+; AVX1OR2-NEXT: andq $-32, %rsp
+; AVX1OR2-NEXT: subq $64, %rsp
+; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1OR2-NEXT: andl $3, %esi
+; AVX1OR2-NEXT: vmovsd %xmm1, (%rsp,%rsi,8)
+; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
+; AVX1OR2-NEXT: movq %rbp, %rsp
+; AVX1OR2-NEXT: popq %rbp
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: load_f64_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movslq %esi, %rax
+; AVX512-NEXT: vpbroadcastq %rax, %ymm1
+; AVX512-NEXT: vpcmpeqq {{.*}}(%rip), %ymm1, %k1
+; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
+; AVX512-NEXT: retq
   %x = load double, double* %p
   %ins = insertelement <4 x double> %v, double %x, i32 %y
   ret <4 x double> %ins
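
Illustrative note (not part of the patch): the new LowerINSERT_VECTOR_ELT path implements the rewrite spelled out in the code comment above, inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0. The sketch below is a rough LLVM IR equivalent of that idea for a <4 x i32> insert; the function name is invented for illustration, and the real transform operates on SelectionDAG nodes rather than on IR.

; Hypothetical sketch, not taken from the patch.
define <4 x i32> @insert_var_idx_sketch(<4 x i32> %v, i32 %x, i32 %y) {
  ; Splat the insertion index and the new scalar element into vectors.
  %idx.head = insertelement <4 x i32> undef, i32 %y, i32 0
  %idx.splat = shufflevector <4 x i32> %idx.head, <4 x i32> undef, <4 x i32> zeroinitializer
  %elt.head = insertelement <4 x i32> undef, i32 %x, i32 0
  %elt.splat = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
  ; Compare the splatted index against the constant lane numbers {0,1,2,3}.
  %mask = icmp eq <4 x i32> %idx.splat, <i32 0, i32 1, i32 2, i32 3>
  ; The matching lane takes the new element; every other lane keeps %v.
  %ins = select <4 x i1> %mask, <4 x i32> %elt.splat, <4 x i32> %v
  ret <4 x i32> %ins
}

On AVX512VL this shape maps onto the vpbroadcastd + vpcmpeqd + masked vpbroadcastd sequence checked in the AVX512 lines of arg_i32_v4i32 above, which is why the lowering is only taken when a legal masked broadcast and compare exist (BWI for 8/16-bit elements, plain AVX512 for 32/64-bit elements).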