diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 24f69ea1b742a..8ef9e857888ba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1405,6 +1405,9 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
     Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, StoreMMO);
   }
 
+  // Freeze the index so we don't poison the clamping code we're about to emit.
+  Idx = DAG.getFreeze(Idx);
+
   SDValue NewLoad;
   Align ElementAlignment =
       std::min(cast<StoreSDNode>(Ch)->getAlign(),
diff --git a/llvm/test/CodeGen/AArch64/pr88959.ll b/llvm/test/CodeGen/AArch64/pr88959.ll
new file mode 100644
index 0000000000000..b25eb94730805
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr88959.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+define i8 @f(<16 x i8> %vec, i32 %idx) {
+; CHECK-LABEL: f:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    str q0, [sp]
+; CHECK-NEXT:    sub w8, w8, w0
+; CHECK-NEXT:    bfxil x9, x8, #0, #4
+; CHECK-NEXT:    ldrb w0, [x9]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %sub = sub nuw i32 1, %idx
+  %extract = extractelement <16 x i8> %vec, i32 %sub
+  ret i8 %extract
+}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll
index fc2929d8e6db3..619439e9a06fa 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll
@@ -91,9 +91,10 @@ define void @extract_32xi8_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
 ; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 0
-; CHECK-NEXT:    addi.d $a0, $sp, 0
-; CHECK-NEXT:    bstrins.d $a0, $a2, 4, 0
-; CHECK-NEXT:    ld.b $a0, $a0, 0
+; CHECK-NEXT:    bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT:    addi.d $a2, $sp, 0
+; CHECK-NEXT:    bstrins.d $a2, $a0, 4, 0
+; CHECK-NEXT:    ld.b $a0, $a2, 0
 ; CHECK-NEXT:    st.b $a0, $a1, 0
 ; CHECK-NEXT:    addi.d $sp, $fp, -64
 ; CHECK-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
@@ -116,9 +117,10 @@ define void @extract_16xi16_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
 ; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 0
-; CHECK-NEXT:    addi.d $a0, $sp, 0
-; CHECK-NEXT:    bstrins.d $a0, $a2, 4, 1
-; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT:    addi.d $a2, $sp, 0
+; CHECK-NEXT:    bstrins.d $a2, $a0, 4, 1
+; CHECK-NEXT:    ld.h $a0, $a2, 0
 ; CHECK-NEXT:    st.h $a0, $a1, 0
 ; CHECK-NEXT:    addi.d $sp, $fp, -64
 ; CHECK-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
@@ -141,9 +143,10 @@ define void @extract_8xi32_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
 ; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 0
-; CHECK-NEXT:    addi.d $a0, $sp, 0
-; CHECK-NEXT:    bstrins.d $a0, $a2, 4, 2
-; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT:    addi.d $a2, $sp, 0
+; CHECK-NEXT:    bstrins.d $a2, $a0, 4, 2
+; CHECK-NEXT:    ld.w $a0, $a2, 0
 ; CHECK-NEXT:    st.w $a0, $a1, 0
 ; CHECK-NEXT:    addi.d $sp, $fp, -64
 ; CHECK-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
@@ -166,9 +169,10 @@ define void @extract_4xi64_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
 ; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 0
-; CHECK-NEXT:    addi.d $a0, $sp, 0
-; CHECK-NEXT:    bstrins.d $a0, $a2, 4, 3
-; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT:    addi.d $a2, $sp, 0
+; CHECK-NEXT:    bstrins.d $a2, $a0, 4, 3
+; CHECK-NEXT:    ld.d $a0, $a2, 0
 ; CHECK-NEXT:    st.d $a0, $a1, 0
 ; CHECK-NEXT:    addi.d $sp, $fp, -64
 ; CHECK-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
@@ -191,9 +195,10 @@ define void @extract_8xfloat_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
 ; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 0
-; CHECK-NEXT:    addi.d $a0, $sp, 0
-; CHECK-NEXT:    bstrins.d $a0, $a2, 4, 2
-; CHECK-NEXT:    fld.s $fa0, $a0, 0
+; CHECK-NEXT:    bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT:    addi.d $a2, $sp, 0
+; CHECK-NEXT:    bstrins.d $a2, $a0, 4, 2
+; CHECK-NEXT:    fld.s $fa0, $a2, 0
 ; CHECK-NEXT:    fst.s $fa0, $a1, 0
 ; CHECK-NEXT:    addi.d $sp, $fp, -64
 ; CHECK-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
@@ -216,9 +221,10 @@ define void @extract_4xdouble_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
 ; CHECK-NEXT:    bstrins.d $sp, $zero, 4, 0
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 0
-; CHECK-NEXT:    addi.d $a0, $sp, 0
-; CHECK-NEXT:    bstrins.d $a0, $a2, 4, 3
-; CHECK-NEXT:    fld.d $fa0, $a0, 0
+; CHECK-NEXT:    bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT:    addi.d $a2, $sp, 0
+; CHECK-NEXT:    bstrins.d $a2, $a0, 4, 3
+; CHECK-NEXT:    fld.d $fa0, $a2, 0
 ; CHECK-NEXT:    fst.d $fa0, $a1, 0
 ; CHECK-NEXT:    addi.d $sp, $fp, -64
 ; CHECK-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll b/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
index 0e0cfc64af9ee..c33eb6bd433f6 100644
--- a/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
+++ b/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll
@@ -10,26 +10,37 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
 define void @test_extractelement_legalization_storereuse(<4 x i32> %a, ptr nocapture %x, ptr nocapture readonly %y, i32 %i) #0 {
 ; CHECK-LABEL: test_extractelement_legalization_storereuse:
 ; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    pushl %ebp
 ; CHECK-NEXT:    pushl %ebx
 ; CHECK-NEXT:    pushl %edi
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT:    paddd (%edx), %xmm0
 ; CHECK-NEXT:    movdqa %xmm0, (%edx)
-; CHECK-NEXT:    movl (%edx), %esi
-; CHECK-NEXT:    movl 4(%edx), %edi
-; CHECK-NEXT:    shll $4, %ecx
-; CHECK-NEXT:    movl 8(%edx), %ebx
-; CHECK-NEXT:    movl 12(%edx), %edx
-; CHECK-NEXT:    movl %esi, 12(%eax,%ecx)
-; CHECK-NEXT:    movl %edi, (%eax,%ecx)
-; CHECK-NEXT:    movl %ebx, 8(%eax,%ecx)
-; CHECK-NEXT:    movl %edx, 4(%eax,%ecx)
+; CHECK-NEXT:    leal (,%eax,4), %ecx
+; CHECK-NEXT:    movl %ecx, %esi
+; CHECK-NEXT:    andl $3, %esi
+; CHECK-NEXT:    leal 1(,%eax,4), %edi
+; CHECK-NEXT:    andl $3, %edi
+; CHECK-NEXT:    leal 2(,%eax,4), %ebx
+; CHECK-NEXT:    andl $3, %ebx
+; CHECK-NEXT:    leal 3(,%eax,4), %ebp
+; CHECK-NEXT:    andl $3, %ebp
+; CHECK-NEXT:    movl (%edx,%esi,4), %esi
+; CHECK-NEXT:    movl (%edx,%edi,4), %edi
+; CHECK-NEXT:    movl (%edx,%ebx,4), %ebx
+; CHECK-NEXT:    movl (%edx,%ebp,4), %edx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; CHECK-NEXT:    movl %esi, 12(%ebp,%ecx,4)
+; CHECK-NEXT:    shll $4, %eax
+; CHECK-NEXT:    movl %edi, (%ebp,%eax)
+; CHECK-NEXT:    movl %ebx, 8(%ebp,%ecx,4)
+; CHECK-NEXT:    movl %edx, 4(%ebp,%ecx,4)
 ; CHECK-NEXT:    popl %esi
 ; CHECK-NEXT:    popl %edi
 ; CHECK-NEXT:    popl %ebx
+; CHECK-NEXT:    popl %ebp
 ; CHECK-NEXT:    retl
 entry:
   %0 = load <4 x i32>, ptr %y, align 16
diff --git a/llvm/test/CodeGen/X86/sttni.ll b/llvm/test/CodeGen/X86/sttni.ll
index 870912bb6bb1b..d8dfde8b5a76c 100644
--- a/llvm/test/CodeGen/X86/sttni.ll
+++ b/llvm/test/CodeGen/X86/sttni.ll
@@ -315,11 +315,10 @@ define i32 @pcmpestri_reg_diff_i16(<8 x i16> %lhs, i32 %lhs_len, <8 x i16> %rhs,
 ; X86-NEXT:    jmp .LBB8_3
 ; X86-NEXT:  .LBB8_2: # %compare
 ; X86-NEXT:    movdqa %xmm0, (%esp)
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    andl $14, %ecx
-; X86-NEXT:    movzwl (%esp,%ecx), %eax
+; X86-NEXT:    andl $7, %ecx
+; X86-NEXT:    movzwl (%esp,%ecx,2), %eax
 ; X86-NEXT:    movdqa %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT:    subw 16(%esp,%ecx), %ax
+; X86-NEXT:    subw 16(%esp,%ecx,2), %ax
 ; X86-NEXT:  .LBB8_3: # %exit
 ; X86-NEXT:    movzwl %ax, %eax
 ; X86-NEXT:    movl %ebp, %esp
@@ -452,11 +451,10 @@ define i32 @pcmpestri_mem_diff_i16(ptr %lhs_ptr, i32 %lhs_len, ptr %rhs_ptr, i32
 ; X86-NEXT:    jmp .LBB11_3
 ; X86-NEXT:  .LBB11_2: # %compare
 ; X86-NEXT:    movdqa %xmm1, (%esp)
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    andl $14, %ecx
-; X86-NEXT:    movzwl (%esp,%ecx), %eax
+; X86-NEXT:    andl $7, %ecx
+; X86-NEXT:    movzwl (%esp,%ecx,2), %eax
 ; X86-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    subw 16(%esp,%ecx), %ax
+; X86-NEXT:    subw 16(%esp,%ecx,2), %ax
 ; X86-NEXT:  .LBB11_3: # %exit
 ; X86-NEXT:    movzwl %ax, %eax
 ; X86-NEXT:    leal -4(%ebp), %esp
@@ -772,11 +770,10 @@ define i32 @pcmpistri_reg_diff_i16(<8 x i16> %lhs, <8 x i16> %rhs) nounwind {
 ; X86-NEXT:    andl $-16, %esp
 ; X86-NEXT:    subl $48, %esp
 ; X86-NEXT:    movdqa %xmm0, (%esp)
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    andl $14, %ecx
-; X86-NEXT:    movzwl (%esp,%ecx), %eax
+; X86-NEXT:    andl $7, %ecx
+; X86-NEXT:    movzwl (%esp,%ecx,2), %eax
 ; X86-NEXT:    movdqa %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT:    subw 16(%esp,%ecx), %ax
+; X86-NEXT:    subw 16(%esp,%ecx,2), %ax
 ; X86-NEXT:    movl %ebp, %esp
 ; X86-NEXT:    popl %ebp
 ; X86-NEXT:    movzwl %ax, %eax
@@ -889,11 +886,10 @@ define i32 @pcmpistri_mem_diff_i16(ptr %lhs_ptr, ptr %rhs_ptr) nounwind {
 ; X86-NEXT:    jmp .LBB23_3
 ; X86-NEXT:  .LBB23_2: # %compare
 ; X86-NEXT:    movdqa %xmm1, (%esp)
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    andl $14, %ecx
-; X86-NEXT:    movzwl (%esp,%ecx), %eax
+; X86-NEXT:    andl $7, %ecx
+; X86-NEXT:    movzwl (%esp,%ecx,2), %eax
 ; X86-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    subw 16(%esp,%ecx), %ax
+; X86-NEXT:    subw 16(%esp,%ecx,2), %ax
 ; X86-NEXT:  .LBB23_3: # %exit
 ; X86-NEXT:    movzwl %ax, %eax
 ; X86-NEXT:    movl %ebp, %esp
diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll
index f2240a9468442..2a4d3053ce228 100644
--- a/llvm/test/CodeGen/X86/var-permute-128.ll
+++ b/llvm/test/CodeGen/X86/var-permute-128.ll
@@ -226,69 +226,92 @@ define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
 define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
 ; SSE3-LABEL: var_shuffle_v16i8:
 ; SSE3:       # %bb.0:
+; SSE3-NEXT:    pushq %rbp
+; SSE3-NEXT:    pushq %r15
+; SSE3-NEXT:    pushq %r14
+; SSE3-NEXT:    pushq %r13
+; SSE3-NEXT:    pushq %r12
+; SSE3-NEXT:    pushq %rbx
 ; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r15d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r12d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r13d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT:    movzbl %al, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r13b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r12b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm4
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r15b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm3
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r14b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm6
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %bpl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm7
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %bl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm8
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r11b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm5
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r10b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm9
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r9b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm10
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r8b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm12
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %dil, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm11
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %sil, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm13
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %dl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm14
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %cl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm15
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm0
@@ -307,6 +330,12 @@ define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
 ; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
 ; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
 ; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSE3-NEXT:    popq %rbx
+; SSE3-NEXT:    popq %r12
+; SSE3-NEXT:    popq %r13
+; SSE3-NEXT:    popq %r14
+; SSE3-NEXT:    popq %r15
+; SSE3-NEXT:    popq %rbp
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v16i8:
@@ -490,69 +519,92 @@ define <4 x float> @var_shuffle_v4f32(<4 x float> %v, <4 x i32> %indices) nounwi
 define <16 x i8> @var_shuffle_v16i8_from_v16i8_v32i8(<16 x i8> %v, <32 x i8> %indices) nounwind {
 ; SSE3-LABEL: var_shuffle_v16i8_from_v16i8_v32i8:
 ; SSE3:       # %bb.0:
+; SSE3-NEXT:    pushq %rbp
+; SSE3-NEXT:    pushq %r15
+; SSE3-NEXT:    pushq %r14
+; SSE3-NEXT:    pushq %r13
+; SSE3-NEXT:    pushq %r12
+; SSE3-NEXT:    pushq %rbx
 ; SSE3-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ebp
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r14d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r15d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r12d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r13d
+; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT:    movzbl %al, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm1
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r13b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm2
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r12b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm4
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r15b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm3
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r14b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm6
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %bpl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm7
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %bl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm8
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r11b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm5
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r10b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm9
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r9b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm10
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %r8b, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm12
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %dil, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm11
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %sil, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm13
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %dl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm14
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl %cl, %eax
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm15
-; SSE3-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE3-NEXT:    movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
 ; SSE3-NEXT:    andl $15, %eax
 ; SSE3-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; SSE3-NEXT:    movd %eax, %xmm0
@@ -571,6 +623,12 @@ define <16 x i8> @var_shuffle_v16i8_from_v16i8_v32i8(<16 x i8> %v, <32 x i8> %in
 ; SSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
 ; SSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
 ; SSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSE3-NEXT:    popq %rbx
+; SSE3-NEXT:    popq %r12
+; SSE3-NEXT:    popq %r13
+; SSE3-NEXT:    popq %r14
+; SSE3-NEXT:    popq %r15
+; SSE3-NEXT:    popq %rbp
 ; SSE3-NEXT:    retq
 ;
 ; SSSE3-LABEL: var_shuffle_v16i8_from_v16i8_v32i8:
diff --git a/llvm/test/CodeGen/X86/var-permute-256.ll b/llvm/test/CodeGen/X86/var-permute-256.ll
index 56dc2f0571b17..9d0b9fec75008 100644
--- a/llvm/test/CodeGen/X86/var-permute-256.ll
+++ b/llvm/test/CodeGen/X86/var-permute-256.ll
@@ -1184,6 +1184,8 @@ define <4 x i64> @PR50356(<4 x i64> %0, <4 x i32> %1, <4 x i64> %2) unnamed_addr
 ; AVX2-NEXT:    movq %rsp, %rbp
 ; AVX2-NEXT:    andq $-32, %rsp
 ; AVX2-NEXT:    subq $64, %rsp
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [7,7,7,7]
+; AVX2-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; AVX2-NEXT:    vmovd %xmm1, %eax
 ; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
 ; AVX2-NEXT:    andl $3, %eax
@@ -1206,6 +1208,8 @@ define <4 x i64> @PR50356(<4 x i64> %0, <4 x i32> %1, <4 x i64> %2) unnamed_addr
 ; AVX512-NEXT:    andq $-32, %rsp
 ; AVX512-NEXT:    subq $64, %rsp
 ; AVX512-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [7,7,7,7]
+; AVX512-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; AVX512-NEXT:    vmovd %xmm1, %eax
 ; AVX512-NEXT:    vmovaps %ymm0, (%rsp)
 ; AVX512-NEXT:    andl $3, %eax
@@ -1229,6 +1233,7 @@ define <4 x i64> @PR50356(<4 x i64> %0, <4 x i32> %1, <4 x i64> %2) unnamed_addr
 ; AVX512VL-NEXT:    movq %rsp, %rbp
 ; AVX512VL-NEXT:    andq $-32, %rsp
 ; AVX512VL-NEXT:    subq $64, %rsp
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vmovd %xmm1, %eax
 ; AVX512VL-NEXT:    vmovaps %ymm0, (%rsp)
 ; AVX512VL-NEXT:    andl $3, %eax
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
index ce8d2acd035f6..3ca0e2121e0d1 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -255,28 +255,28 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
 ; SSE2-NEXT:    andl $7, %r8d
 ; SSE2-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    andl $7, %r9d
-; SSE2-NEXT:    movzwl -24(%rsp,%r10,2), %r10d
-; SSE2-NEXT:    movd %r10d, %xmm0
-; SSE2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm1
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT:    movzwl -24(%rsp,%r9,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    movzwl -24(%rsp,%r8,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT:    movzwl -24(%rsp,%rcx,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    movzwl -24(%rsp,%rdx,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movzwl -24(%rsp,%rdx,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT:    movzwl -24(%rsp,%rsi,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm3
-; SSE2-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
-; SSE2-NEXT:    movd %eax, %xmm0
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT:    movzwl -24(%rsp,%rsi,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    movzwl -24(%rsp,%rdi,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    movzwl -24(%rsp,%r9,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    movzwl -24(%rsp,%r8,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm2
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT:    movzwl -24(%rsp,%r10,2), %ecx
+; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE2-NEXT:    retq
 ;
@@ -299,28 +299,28 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
 ; SSSE3-NEXT:    andl $7, %r8d
 ; SSSE3-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; SSSE3-NEXT:    andl $7, %r9d
-; SSSE3-NEXT:    movzwl -24(%rsp,%r10,2), %r10d
-; SSSE3-NEXT:    movd %r10d, %xmm0
-; SSSE3-NEXT:    movzwl -24(%rsp,%rax,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm1
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT:    movzwl -24(%rsp,%r9,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    movzwl -24(%rsp,%r8,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm2
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT:    movzwl -24(%rsp,%rcx,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    movzwl -24(%rsp,%rdx,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm1
+; SSSE3-NEXT:    movzwl -24(%rsp,%rcx,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movzwl -24(%rsp,%rdx,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm1
 ; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT:    movzwl -24(%rsp,%rsi,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm3
-; SSSE3-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
-; SSSE3-NEXT:    movd %eax, %xmm0
-; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT:    movzwl -24(%rsp,%rsi,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    movzwl -24(%rsp,%rdi,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT:    movzwl -24(%rsp,%r9,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    movzwl -24(%rsp,%r8,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm2
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT:    movzwl -24(%rsp,%r10,2), %ecx
+; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; SSSE3-NEXT:    movd %eax, %xmm3
+; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSSE3-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
index 8f78438dedf92..f3bafec3399a7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -293,52 +293,52 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
 ; AVX1-NEXT:    # kill: def $edx killed $edx def $rdx
 ; AVX1-NEXT:    # kill: def $esi killed $esi def $rsi
 ; AVX1-NEXT:    # kill: def $edi killed $edi def $rdi
+; AVX1-NEXT:    andl $15, %edi
+; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX1-NEXT:    movzwl (%rsp,%rdi,2), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    andl $15, %esi
+; AVX1-NEXT:    vpinsrw $1, (%rsp,%rsi,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $15, %edx
+; AVX1-NEXT:    vpinsrw $2, (%rsp,%rdx,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $15, %ecx
+; AVX1-NEXT:    vpinsrw $3, (%rsp,%rcx,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $15, %r8d
+; AVX1-NEXT:    vpinsrw $4, (%rsp,%r8,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $15, %r9d
+; AVX1-NEXT:    vpinsrw $5, (%rsp,%r9,2), %xmm0, %xmm0
+; AVX1-NEXT:    movl 16(%rbp), %eax
+; AVX1-NEXT:    andl $15, %eax
+; AVX1-NEXT:    vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    movl 24(%rbp), %eax
+; AVX1-NEXT:    andl $15, %eax
+; AVX1-NEXT:    vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
 ; AVX1-NEXT:    movl 32(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vmovaps %ymm0, (%rsp)
 ; AVX1-NEXT:    movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    movl 40(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl 48(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl 56(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl 64(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl 72(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl 80(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
-; AVX1-NEXT:    movl 88(%rbp), %eax
-; AVX1-NEXT:    andl $15, %eax
-; AVX1-NEXT:    vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
-; AVX1-NEXT:    andl $15, %edi
-; AVX1-NEXT:    movzwl (%rsp,%rdi,2), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
-; AVX1-NEXT:    andl $15, %esi
-; AVX1-NEXT:    vpinsrw $1, (%rsp,%rsi,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $15, %edx
-; AVX1-NEXT:    vpinsrw $2, (%rsp,%rdx,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $15, %ecx
-; AVX1-NEXT:    vpinsrw $3, (%rsp,%rcx,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $15, %r8d
-; AVX1-NEXT:    vpinsrw $4, (%rsp,%r8,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $15, %r9d
-; AVX1-NEXT:    vpinsrw $5, (%rsp,%r9,2), %xmm1, %xmm1
-; AVX1-NEXT:    movl 16(%rbp), %eax
-; AVX1-NEXT:    andl $15, %eax
 ; AVX1-NEXT:    vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT:    movl 24(%rbp), %eax
+; AVX1-NEXT:    movl 88(%rbp), %eax
 ; AVX1-NEXT:    andl $15, %eax
 ; AVX1-NEXT:    vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    movq %rbp, %rsp
 ; AVX1-NEXT:    popq %rbp
 ; AVX1-NEXT:    retq
@@ -355,52 +355,52 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
 ; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
 ; AVX2-NEXT:    # kill: def $esi killed $esi def $rsi
 ; AVX2-NEXT:    # kill: def $edi killed $edi def $rdi
+; AVX2-NEXT:    andl $15, %edi
+; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
+; AVX2-NEXT:    movzwl (%rsp,%rdi,2), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    andl $15, %esi
+; AVX2-NEXT:    vpinsrw $1, (%rsp,%rsi,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $15, %edx
+; AVX2-NEXT:    vpinsrw $2, (%rsp,%rdx,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $15, %ecx
+; AVX2-NEXT:    vpinsrw $3, (%rsp,%rcx,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $15, %r8d
+; AVX2-NEXT:    vpinsrw $4, (%rsp,%r8,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $15, %r9d
+; AVX2-NEXT:    vpinsrw $5, (%rsp,%r9,2), %xmm0, %xmm0
+; AVX2-NEXT:    movl 16(%rbp), %eax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    movl 24(%rbp), %eax
+; AVX2-NEXT:    andl $15, %eax
+; AVX2-NEXT:    vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT:    movl 32(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vmovaps %ymm0, (%rsp)
 ; AVX2-NEXT:    movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    movl 40(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl 48(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl 56(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl 64(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl 72(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl 80(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
-; AVX2-NEXT:    movl 88(%rbp), %eax
-; AVX2-NEXT:    andl $15, %eax
-; AVX2-NEXT:    vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
-; AVX2-NEXT:    andl $15, %edi
-; AVX2-NEXT:    movzwl (%rsp,%rdi,2), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    andl $15, %esi
-; AVX2-NEXT:    vpinsrw $1, (%rsp,%rsi,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $15, %edx
-; AVX2-NEXT:    vpinsrw $2, (%rsp,%rdx,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $15, %ecx
-; AVX2-NEXT:    vpinsrw $3, (%rsp,%rcx,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $15, %r8d
-; AVX2-NEXT:    vpinsrw $4, (%rsp,%r8,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $15, %r9d
-; AVX2-NEXT:    vpinsrw $5, (%rsp,%r9,2), %xmm1, %xmm1
-; AVX2-NEXT:    movl 16(%rbp), %eax
-; AVX2-NEXT:    andl $15, %eax
 ; AVX2-NEXT:    vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT:    movl 24(%rbp), %eax
+; AVX2-NEXT:    movl 88(%rbp), %eax
 ; AVX2-NEXT:    andl $15, %eax
 ; AVX2-NEXT:    vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    movq %rbp, %rsp
 ; AVX2-NEXT:    popq %rbp
 ; AVX2-NEXT:    retq
@@ -448,52 +448,52 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
 ; AVX1-NEXT:    # kill: def $edx killed $edx def $rdx
 ; AVX1-NEXT:    # kill: def $esi killed $esi def $rsi
 ; AVX1-NEXT:    # kill: def $edi killed $edi def $rdi
-; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
-; AVX1-NEXT:    andl $7, %eax
+; AVX1-NEXT:    andl $7, %edi
 ; AVX1-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    andl $7, %esi
+; AVX1-NEXT:    vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $7, %edx
+; AVX1-NEXT:    vpinsrw $2, -24(%rsp,%rdx,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $7, %ecx
+; AVX1-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $7, %r8d
+; AVX1-NEXT:    vpinsrw $4, -24(%rsp,%r8,2), %xmm0, %xmm0
+; AVX1-NEXT:    andl $7, %r9d
+; AVX1-NEXT:    vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
-; AVX1-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
-; AVX1-NEXT:    andl $7, %edi
-; AVX1-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
-; AVX1-NEXT:    andl $7, %esi
-; AVX1-NEXT:    vpinsrw $1, -24(%rsp,%rsi,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $7, %edx
-; AVX1-NEXT:    vpinsrw $2, -24(%rsp,%rdx,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $7, %ecx
-; AVX1-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $7, %r8d
-; AVX1-NEXT:    vpinsrw $4, -24(%rsp,%r8,2), %xmm1, %xmm1
-; AVX1-NEXT:    andl $7, %r9d
-; AVX1-NEXT:    vpinsrw $5, -24(%rsp,%r9,2), %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
+; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT:    andl $7, %eax
+; AVX1-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
 ; AVX1-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX1-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX1-NEXT:    andl $7, %eax
 ; AVX1-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
@@ -504,52 +504,52 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
 ; AVX2-NEXT:    # kill: def $edx killed $edx def $rdx
 ; AVX2-NEXT:    # kill: def $esi killed $esi def $rsi
 ; AVX2-NEXT:    # kill: def $edi killed $edi def $rdi
-; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
-; AVX2-NEXT:    andl $7, %eax
+; AVX2-NEXT:    andl $7, %edi
 ; AVX2-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    andl $7, %esi
+; AVX2-NEXT:    vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $7, %edx
+; AVX2-NEXT:    vpinsrw $2, -24(%rsp,%rdx,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $7, %ecx
+; AVX2-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $7, %r8d
+; AVX2-NEXT:    vpinsrw $4, -24(%rsp,%r8,2), %xmm0, %xmm0
+; AVX2-NEXT:    andl $7, %r9d
+; AVX2-NEXT:    vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
+; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT:    andl $7, %eax
+; AVX2-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
-; AVX2-NEXT:    andl $7, %edi
-; AVX2-NEXT:    movzwl -24(%rsp,%rdi,2), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    andl $7, %esi
-; AVX2-NEXT:    vpinsrw $1, -24(%rsp,%rsi,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $7, %edx
-; AVX2-NEXT:    vpinsrw $2, -24(%rsp,%rdx,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $7, %ecx
-; AVX2-NEXT:    vpinsrw $3, -24(%rsp,%rcx,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $7, %r8d
-; AVX2-NEXT:    vpinsrw $4, -24(%rsp,%r8,2), %xmm1, %xmm1
-; AVX2-NEXT:    andl $7, %r9d
-; AVX2-NEXT:    vpinsrw $5, -24(%rsp,%r9,2), %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
 ; AVX2-NEXT:    vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
 ; AVX2-NEXT:    movl {{[0-9]+}}(%rsp), %eax
 ; AVX2-NEXT:    andl $7, %eax
 ; AVX2-NEXT:    vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %x0 = extractelement <8 x i16> %x, i32 %i0
   %x1 = extractelement <8 x i16> %x, i32 %i1
@@ -597,13 +597,13 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, ptr %i) nounwin
 ; ALL-NEXT:    movq %rsp, %rbp
 ; ALL-NEXT:    andq $-32, %rsp
 ; ALL-NEXT:    subq $64, %rsp
-; ALL-NEXT:    movl (%rdi), %eax
-; ALL-NEXT:    movl 8(%rdi), %ecx
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    movq 8(%rdi), %rcx
 ; ALL-NEXT:    andl $3, %eax
 ; ALL-NEXT:    andl $3, %ecx
-; ALL-NEXT:    movl 16(%rdi), %edx
+; ALL-NEXT:    movq 16(%rdi), %rdx
 ; ALL-NEXT:    andl $3, %edx
-; ALL-NEXT:    movl 24(%rdi), %esi
+; ALL-NEXT:    movq 24(%rdi), %rsi
 ; ALL-NEXT:    andl $3, %esi
 ; ALL-NEXT:    vmovaps %ymm0, (%rsp)
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -637,13 +637,13 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, ptr %i) nounwin
 define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, ptr %i) nounwind {
 ; ALL-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    movl (%rdi), %eax
-; ALL-NEXT:    movl 8(%rdi), %ecx
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    movq 8(%rdi), %rcx
 ; ALL-NEXT:    andl $1, %eax
 ; ALL-NEXT:    andl $1, %ecx
-; ALL-NEXT:    movl 16(%rdi), %edx
+; ALL-NEXT:    movq 16(%rdi), %rdx
 ; ALL-NEXT:    andl $1, %edx
-; ALL-NEXT:    movl 24(%rdi), %esi
+; ALL-NEXT:    movq 24(%rdi), %rsi
 ; ALL-NEXT:    andl $1, %esi
 ; ALL-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
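Why the freeze is needed (illustration, not part of the patch): ExpandExtractFromVectorThroughStack spills the vector to a stack slot and clamps the index so the reload stays inside that slot. Clamping alone is not sound for a poison index. In pr88959.ll, "sub nuw i32 1, %idx" is poison whenever %idx > 1, and the value range implied by nuw can let later folds narrow or bypass the mask, so the clamped address may still land outside the slot at run time. The new DAG.getFreeze call pins the index to one arbitrary but concrete value before the clamp is emitted. A rough IR-level sketch of that ordering follows; the function and value names are hypothetical, chosen for illustration only:

define i8 @freeze_then_clamp(<16 x i8> %vec, i32 %idx) {
  ; Poison whenever %idx > 1, because "sub nuw" forbids unsigned wrap.
  %sub = sub nuw i32 1, %idx
  ; freeze fixes %sub to a single arbitrary value, so the mask below
  ; genuinely confines the index to [0, 15].
  %frozen = freeze i32 %sub
  %clamped = and i32 %frozen, 15
  %elt = extractelement <16 x i8> %vec, i32 %clamped
  ret i8 %elt
}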