llvm/test/CodeGen/X86/vec_insert-8.ll (29 additions, 29 deletions)
@@ -1,25 +1,25 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64

; tests variable insert and extract of a 4 x i32

define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
-; X32-LABEL: var_insert:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-16, %esp
-; X32-NEXT: subl $32, %esp
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: andl $3, %eax
-; X32-NEXT: movl 8(%ebp), %ecx
-; X32-NEXT: movaps %xmm0, (%esp)
-; X32-NEXT: movl %ecx, (%esp,%eax,4)
-; X32-NEXT: movaps (%esp), %xmm0
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: retl
+; X86-LABEL: var_insert:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $32, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: andl $3, %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movaps %xmm0, (%esp)
+; X86-NEXT: movl %ecx, (%esp,%eax,4)
+; X86-NEXT: movaps (%esp), %xmm0
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
;
; X64-LABEL: var_insert:
; X64: # %bb.0: # %entry
@@ -35,19 +35,19 @@ entry:
}

define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind {
-; X32-LABEL: var_extract:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-16, %esp
-; X32-NEXT: subl $32, %esp
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: andl $3, %eax
-; X32-NEXT: movaps %xmm0, (%esp)
-; X32-NEXT: movl (%esp,%eax,4), %eax
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: retl
+; X86-LABEL: var_extract:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $32, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: andl $3, %eax
+; X86-NEXT: movaps %xmm0, (%esp)
+; X86-NEXT: movl (%esp,%eax,4), %eax
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
;
; X64-LABEL: var_extract:
; X64: # %bb.0: # %entry
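The prefix rename above, and in the next two files, is mechanical: every X32 FileCheck prefix becomes X86, and the assertion bodies are regenerated rather than hand-edited, per the autogenerated NOTE line in each test. A minimal sketch of how such a rename could be scripted, assuming it runs from an LLVM checkout (hypothetical helper, not necessarily how this patch was produced):

    import pathlib
    import subprocess

    # Hypothetical rename helper for the three vec_insert tests in this
    # patch; the real change may well have been done with sed instead.
    tests = [
        "llvm/test/CodeGen/X86/vec_insert-8.ll",
        "llvm/test/CodeGen/X86/vec_insert-9.ll",
        "llvm/test/CodeGen/X86/vec_insert-mmx.ll",
    ]
    for name in tests:
        path = pathlib.Path(name)
        # Rename every X32 token to X86 (RUN lines and check lines alike),
        # which is exactly what the diffs above and below show.
        path.write_text(path.read_text().replace("X32", "X86"))
        # Re-run the autogenerator named in each test's NOTE line to
        # confirm the renamed assertions still match llc's output.
        subprocess.run(["llvm/utils/update_llc_test_checks.py", name], check=True)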
llvm/test/CodeGen/X86/vec_insert-9.ll (6 additions, 6 deletions)
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64

define <4 x i32> @var_insert2(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
-; X32-LABEL: var_insert2:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT: retl
+; X86-LABEL: var_insert2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: var_insert2:
; X64: # %bb.0: # %entry
llvm/test/CodeGen/X86/vec_insert-mmx.ll (23 additions, 23 deletions)
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse4.1 | FileCheck %s --check-prefix=X64

; This is not an MMX operation; promoted to xmm.
define x86_mmx @t0(i32 %A) nounwind {
-; X32-LABEL: t0:
-; X32: ## %bb.0:
-; X32-NEXT: movd {{[0-9]+}}(%esp), %mm1
-; X32-NEXT: pxor %mm0, %mm0
-; X32-NEXT: punpckldq %mm1, %mm0 ## mm0 = mm0[0],mm1[0]
-; X32-NEXT: retl
+; X86-LABEL: t0:
+; X86: ## %bb.0:
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1
+; X86-NEXT: pxor %mm0, %mm0
+; X86-NEXT: punpckldq %mm1, %mm0 ## mm0 = mm0[0],mm1[0]
+; X86-NEXT: retl
;
; X64-LABEL: t0:
; X64: ## %bb.0:
@@ -22,10 +22,10 @@ define x86_mmx @t0(i32 %A) nounwind {
}

define <8 x i8> @t1(i8 zeroext %x) nounwind {
-; X32-LABEL: t1:
-; X32: ## %bb.0:
-; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: retl
+; X86-LABEL: t1:
+; X86: ## %bb.0:
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: retl
;
; X64-LABEL: t1:
; X64: ## %bb.0:
@@ -37,10 +37,10 @@ define <8 x i8> @t1(i8 zeroext %x) nounwind {

; PR2574
define <2 x float> @t2(<2 x float> %a0) {
-; X32-LABEL: t2:
-; X32: ## %bb.0:
-; X32-NEXT: xorps %xmm0, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: t2:
+; X86: ## %bb.0:
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: t2:
; X64: ## %bb.0:
@@ -56,14 +56,14 @@ define <2 x float> @t2(<2 x float> %a0) {

; PR2562
define void @t3() {
-; X32-LABEL: t3:
-; X32: ## %bb.0:
-; X32-NEXT: movl L_g0$non_lazy_ptr, %eax
-; X32-NEXT: movl L_g1$non_lazy_ptr, %ecx
-; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: pinsrw $0, (%eax), %xmm0
-; X32-NEXT: movq %xmm0, (%ecx)
-; X32-NEXT: retl
+; X86-LABEL: t3:
+; X86: ## %bb.0:
+; X86-NEXT: movl L_g0$non_lazy_ptr, %eax
+; X86-NEXT: movl L_g1$non_lazy_ptr, %ecx
+; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: pinsrw $0, (%eax), %xmm0
+; X86-NEXT: movq %xmm0, (%ecx)
+; X86-NEXT: retl
;
; X64-LABEL: t3:
; X64: ## %bb.0:
llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll (60 additions, 4 deletions)
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86,AVX2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX512
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86,AVX2,X86-AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,AVX512,X86-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64,AVX2,X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,AVX512,X64-AVX512

declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>)
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>)
@@ -869,6 +869,62 @@ define <32 x i8> @PR52122(<32 x i8> %0, <32 x i8> %1) {
ret <32 x i8> %5
}

+define void @PR63030(ptr %p0) {
+; X86-AVX2-LABEL: PR63030:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: vmovaps (%eax), %xmm0
+; X86-AVX2-NEXT: vmovddup {{.*#+}} xmm1 = [3,0,2,0,3,0,2,0]
+; X86-AVX2-NEXT: # xmm1 = mem[0,0]
+; X86-AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm0[1,1,0,0]
+; X86-AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; X86-AVX2-NEXT: vmovaps {{.*#+}} xmm2 = [3,0,2,0]
+; X86-AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,1]
+; X86-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7]
+; X86-AVX2-NEXT: vmovaps %ymm0, (%eax)
+; X86-AVX2-NEXT: vmovaps %ymm1, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
+;
+; X86-AVX512-LABEL: PR63030:
+; X86-AVX512: # %bb.0:
+; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT: vmovdqa (%eax), %xmm0
+; X86-AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [1,0,8,0,0,0,0,0,0,0,9,0,1,0,1,0]
+; X86-AVX512-NEXT: vpermi2q {{\.?LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm1
+; X86-AVX512-NEXT: vmovdqa64 %zmm1, (%eax)
+; X86-AVX512-NEXT: vzeroupper
+; X86-AVX512-NEXT: retl
+;
+; X64-AVX2-LABEL: PR63030:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovaps (%rdi), %xmm0
+; X64-AVX2-NEXT: vmovddup {{.*#+}} xmm1 = [3,2,3,2]
+; X64-AVX2-NEXT: # xmm1 = mem[0,0]
+; X64-AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm0[1,1,0,0]
+; X64-AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; X64-AVX2-NEXT: vmovaps {{.*#+}} xmm2 = [3,2]
+; X64-AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,1]
+; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7]
+; X64-AVX2-NEXT: vmovaps %ymm0, (%rax)
+; X64-AVX2-NEXT: vmovaps %ymm1, (%rax)
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: PR63030:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqa (%rdi), %xmm0
+; X64-AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [1,8,0,0,0,9,1,1]
+; X64-AVX512-NEXT: vpermi2q {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; X64-AVX512-NEXT: vmovdqa64 %zmm1, (%rax)
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+ %load = load <2 x i64>, ptr %p0, align 16
+ %shuffle = shufflevector <2 x i64> <i64 3, i64 2>, <2 x i64> %load, <8 x i32> <i32 3, i32 0, i32 2, i32 2, i32 2, i32 1, i32 3, i32 3>
+ store volatile <8 x i64> %shuffle, ptr poison, align 64
+ ret void
+}
+
define void @packss_zext_v8i1() {
; X86-LABEL: packss_zext_v8i1:
; X86: # %bb.0:
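For reference, the shufflevector in the new PR63030 test indexes into the concatenation of its two sources: lanes 0 and 1 come from the constant <i64 3, i64 2>, lanes 2 and 3 from the <2 x i64> loaded from %p0. A small reference model of the expected lane mapping (hypothetical Python sketch, not part of the test):

    # Reference model for the PR63030 shuffle: shufflevector concatenates
    # its two operands, then the 8-entry mask selects lanes from the result.
    def pr63030_reference(load0, load1):
        src = [3, 2, load0, load1]          # <i64 3, i64 2> ++ %load
        mask = [3, 0, 2, 2, 2, 1, 3, 3]     # the <8 x i32> shuffle mask
        return [src[i] for i in mask]

    # Example: loading [10, 11] stores <11, 3, 10, 10, 10, 2, 11, 11>.
    assert pr63030_reference(10, 11) == [11, 3, 10, 10, 10, 2, 11, 11]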