[X86] Add 128-bit vector test coverage for #167498 #167531

Conversation
@llvm/pr-subscribers-backend-x86

Author: Simon Pilgrim (RKSimon)

Changes

Patch is 21.51 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/167531.diff

1 Files Affected:
diff --git a/llvm/test/CodeGen/X86/build-vector-128.ll b/llvm/test/CodeGen/X86/build-vector-128.ll
index e2db8d4241420..b8bb417e1860c 100644
--- a/llvm/test/CodeGen/X86/build-vector-128.ll
+++ b/llvm/test/CodeGen/X86/build-vector-128.ll
@@ -410,6 +410,472 @@ define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
ret <16 x i8> %ins15
}
+; build vectors where integer operands are split (typically via legalization)
+
+define <4 x i32> @test_buildvector_v2i64_split_v4i32(i64 %a0, i64 %a1) nounwind {
+; SSE-32-LABEL: test_buildvector_v2i64_split_v4i32:
+; SSE-32: # %bb.0:
+; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
+; SSE-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v2i64_split_v4i32:
+; SSE2-64: # %bb.0:
+; SSE2-64-NEXT: movl %edi, %eax
+; SSE2-64-NEXT: movl %esi, %ecx
+; SSE2-64-NEXT: shrq $32, %rdi
+; SSE2-64-NEXT: shrq $32, %rsi
+; SSE2-64-NEXT: movd %ecx, %xmm1
+; SSE2-64-NEXT: movd %esi, %xmm0
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-64-NEXT: movd %eax, %xmm0
+; SSE2-64-NEXT: movd %edi, %xmm2
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-64-NEXT: retq
+;
+; SSE41-64-LABEL: test_buildvector_v2i64_split_v4i32:
+; SSE41-64: # %bb.0:
+; SSE41-64-NEXT: movl %edi, %eax
+; SSE41-64-NEXT: movl %esi, %ecx
+; SSE41-64-NEXT: shrq $32, %rdi
+; SSE41-64-NEXT: shrq $32, %rsi
+; SSE41-64-NEXT: movd %eax, %xmm0
+; SSE41-64-NEXT: pinsrd $1, %edi, %xmm0
+; SSE41-64-NEXT: pinsrd $2, %ecx, %xmm0
+; SSE41-64-NEXT: pinsrd $3, %esi, %xmm0
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v2i64_split_v4i32:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v2i64_split_v4i32:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: movl %edi, %eax
+; AVX-64-NEXT: movl %esi, %ecx
+; AVX-64-NEXT: shrq $32, %rdi
+; AVX-64-NEXT: shrq $32, %rsi
+; AVX-64-NEXT: vmovd %eax, %xmm0
+; AVX-64-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrd $3, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+ %a0.lo = trunc i64 %a0 to i32
+ %a1.lo = trunc i64 %a1 to i32
+ %a0.shr = lshr i64 %a0, 32
+ %a1.shr = lshr i64 %a1, 32
+ %a0.hi = trunc i64 %a0.shr to i32
+ %a1.hi = trunc i64 %a1.shr to i32
+ %v0 = insertelement <4 x i32> poison, i32 %a0.lo, i64 0
+ %v1 = insertelement <4 x i32> %v0, i32 %a0.hi, i64 1
+ %v2 = insertelement <4 x i32> %v1, i32 %a1.lo, i64 2
+ %v3 = insertelement <4 x i32> %v2, i32 %a1.hi, i64 3
+ ret <4 x i32> %v3
+}
+
+define <8 x i16> @test_buildvector_v4i32_split_v8i16(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
+; SSE2-32-LABEL: test_buildvector_v4i32_split_v8i16:
+; SSE2-32: # %bb.0:
+; SSE2-32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE2-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v4i32_split_v8i16:
+; SSE2-64: # %bb.0:
+; SSE2-64-NEXT: movd %ecx, %xmm0
+; SSE2-64-NEXT: movd %edx, %xmm1
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-64-NEXT: movd %esi, %xmm2
+; SSE2-64-NEXT: movd %edi, %xmm0
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-64-NEXT: retq
+;
+; SSE41-32-LABEL: test_buildvector_v4i32_split_v8i16:
+; SSE41-32: # %bb.0:
+; SSE41-32-NEXT: pushl %esi
+; SSE41-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; SSE41-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; SSE41-32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; SSE41-32-NEXT: movd %esi, %xmm0
+; SSE41-32-NEXT: shrl $16, %esi
+; SSE41-32-NEXT: pinsrw $1, %esi, %xmm0
+; SSE41-32-NEXT: pinsrw $2, %edx, %xmm0
+; SSE41-32-NEXT: shrl $16, %edx
+; SSE41-32-NEXT: pinsrw $3, %edx, %xmm0
+; SSE41-32-NEXT: pinsrw $4, %ecx, %xmm0
+; SSE41-32-NEXT: shrl $16, %ecx
+; SSE41-32-NEXT: pinsrw $5, %ecx, %xmm0
+; SSE41-32-NEXT: pinsrw $6, %eax, %xmm0
+; SSE41-32-NEXT: shrl $16, %eax
+; SSE41-32-NEXT: pinsrw $7, %eax, %xmm0
+; SSE41-32-NEXT: popl %esi
+; SSE41-32-NEXT: retl
+;
+; SSE41-64-LABEL: test_buildvector_v4i32_split_v8i16:
+; SSE41-64: # %bb.0:
+; SSE41-64-NEXT: movd %edi, %xmm0
+; SSE41-64-NEXT: shrl $16, %edi
+; SSE41-64-NEXT: pinsrw $1, %edi, %xmm0
+; SSE41-64-NEXT: pinsrw $2, %esi, %xmm0
+; SSE41-64-NEXT: shrl $16, %esi
+; SSE41-64-NEXT: pinsrw $3, %esi, %xmm0
+; SSE41-64-NEXT: pinsrw $4, %edx, %xmm0
+; SSE41-64-NEXT: shrl $16, %edx
+; SSE41-64-NEXT: pinsrw $5, %edx, %xmm0
+; SSE41-64-NEXT: pinsrw $6, %ecx, %xmm0
+; SSE41-64-NEXT: shrl $16, %ecx
+; SSE41-64-NEXT: pinsrw $7, %ecx, %xmm0
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v4i32_split_v8i16:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: pushl %esi
+; AVX-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; AVX-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; AVX-32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; AVX-32-NEXT: vmovd %esi, %xmm0
+; AVX-32-NEXT: shrl $16, %esi
+; AVX-32-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $16, %edx
+; AVX-32-NEXT: vpinsrw $3, %edx, %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $16, %ecx
+; AVX-32-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $16, %eax
+; AVX-32-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: popl %esi
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v4i32_split_v8i16:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vmovd %edi, %xmm0
+; AVX-64-NEXT: shrl $16, %edi
+; AVX-64-NEXT: vpinsrw $1, %edi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $2, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $16, %esi
+; AVX-64-NEXT: vpinsrw $3, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $4, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $16, %edx
+; AVX-64-NEXT: vpinsrw $5, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $16, %ecx
+; AVX-64-NEXT: vpinsrw $7, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+ %a0.lo = trunc i32 %a0 to i16
+ %a1.lo = trunc i32 %a1 to i16
+ %a2.lo = trunc i32 %a2 to i16
+ %a3.lo = trunc i32 %a3 to i16
+ %a0.shr = lshr i32 %a0, 16
+ %a1.shr = lshr i32 %a1, 16
+ %a2.shr = lshr i32 %a2, 16
+ %a3.shr = lshr i32 %a3, 16
+ %a0.hi = trunc i32 %a0.shr to i16
+ %a1.hi = trunc i32 %a1.shr to i16
+ %a2.hi = trunc i32 %a2.shr to i16
+ %a3.hi = trunc i32 %a3.shr to i16
+ %v0 = insertelement <8 x i16> poison, i16 %a0.lo, i64 0
+ %v1 = insertelement <8 x i16> %v0, i16 %a0.hi, i64 1
+ %v2 = insertelement <8 x i16> %v1, i16 %a1.lo, i64 2
+ %v3 = insertelement <8 x i16> %v2, i16 %a1.hi, i64 3
+ %v4 = insertelement <8 x i16> %v3, i16 %a2.lo, i64 4
+ %v5 = insertelement <8 x i16> %v4, i16 %a2.hi, i64 5
+ %v6 = insertelement <8 x i16> %v5, i16 %a3.lo, i64 6
+ %v7 = insertelement <8 x i16> %v6, i16 %a3.hi, i64 7
+ ret <8 x i16> %v7
+}
+
+define <16 x i8> @test_buildvector_v8i16_split_v16i8(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
+; SSE2-32-LABEL: test_buildvector_v8i16_split_v16i8:
+; SSE2-32: # %bb.0:
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm1
+; SSE2-32-NEXT: movdqa %xmm1, %xmm0
+; SSE2-32-NEXT: psrld $8, %xmm0
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm0
+; SSE2-32-NEXT: movdqa %xmm0, %xmm2
+; SSE2-32-NEXT: psrld $8, %xmm2
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm2
+; SSE2-32-NEXT: movdqa %xmm2, %xmm1
+; SSE2-32-NEXT: psrld $8, %xmm1
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm1
+; SSE2-32-NEXT: movdqa %xmm1, %xmm3
+; SSE2-32-NEXT: psrld $8, %xmm3
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm0
+; SSE2-32-NEXT: movdqa %xmm0, %xmm2
+; SSE2-32-NEXT: psrld $8, %xmm2
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm2
+; SSE2-32-NEXT: movdqa %xmm2, %xmm3
+; SSE2-32-NEXT: psrld $8, %xmm3
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm3
+; SSE2-32-NEXT: movdqa %xmm3, %xmm0
+; SSE2-32-NEXT: psrld $8, %xmm0
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE2-32-NEXT: movd %eax, %xmm0
+; SSE2-32-NEXT: movdqa %xmm0, %xmm4
+; SSE2-32-NEXT: psrld $8, %xmm4
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-32-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v8i16_split_v16i8:
+; SSE2-64: # %bb.0:
+; SSE2-64-NEXT: pushq %rbp
+; SSE2-64-NEXT: pushq %r15
+; SSE2-64-NEXT: pushq %r14
+; SSE2-64-NEXT: pushq %rbx
+; SSE2-64-NEXT: movzwl %di, %eax
+; SSE2-64-NEXT: movzwl %si, %r10d
+; SSE2-64-NEXT: movzwl %dx, %r11d
+; SSE2-64-NEXT: movzwl %cx, %ebx
+; SSE2-64-NEXT: movzwl %r8w, %ebp
+; SSE2-64-NEXT: movzwl %r9w, %r14d
+; SSE2-64-NEXT: movzwl {{[0-9]+}}(%rsp), %r15d
+; SSE2-64-NEXT: movd %r15d, %xmm0
+; SSE2-64-NEXT: movdqa %xmm0, %xmm1
+; SSE2-64-NEXT: psrld $8, %xmm1
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-64-NEXT: movzwl {{[0-9]+}}(%rsp), %r15d
+; SSE2-64-NEXT: movd %r15d, %xmm2
+; SSE2-64-NEXT: movdqa %xmm2, %xmm1
+; SSE2-64-NEXT: psrld $8, %xmm1
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-64-NEXT: movd %r9d, %xmm0
+; SSE2-64-NEXT: movd %r14d, %xmm1
+; SSE2-64-NEXT: psrld $8, %xmm1
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-64-NEXT: movd %r8d, %xmm1
+; SSE2-64-NEXT: movd %ebp, %xmm3
+; SSE2-64-NEXT: psrld $8, %xmm3
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-64-NEXT: movd %ecx, %xmm0
+; SSE2-64-NEXT: movd %ebx, %xmm2
+; SSE2-64-NEXT: psrld $8, %xmm2
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-64-NEXT: movd %edx, %xmm2
+; SSE2-64-NEXT: movd %r11d, %xmm3
+; SSE2-64-NEXT: psrld $8, %xmm3
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-64-NEXT: movd %esi, %xmm3
+; SSE2-64-NEXT: movd %r10d, %xmm0
+; SSE2-64-NEXT: psrld $8, %xmm0
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-64-NEXT: movd %edi, %xmm0
+; SSE2-64-NEXT: movd %eax, %xmm4
+; SSE2-64-NEXT: psrld $8, %xmm4
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-64-NEXT: popq %rbx
+; SSE2-64-NEXT: popq %r14
+; SSE2-64-NEXT: popq %r15
+; SSE2-64-NEXT: popq %rbp
+; SSE2-64-NEXT: retq
+;
+; SSE41-32-LABEL: test_buildvector_v8i16_split_v16i8:
+; SSE41-32: # %bb.0:
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: movd %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $1, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $2, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $3, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $4, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $5, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $6, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $7, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $8, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $9, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $10, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $11, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $12, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $13, %eax, %xmm0
+; SSE41-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; SSE41-32-NEXT: pinsrb $14, %eax, %xmm0
+; SSE41-32-NEXT: shrl $8, %eax
+; SSE41-32-NEXT: pinsrb $15, %eax, %xmm0
+; SSE41-32-NEXT: retl
+;
+; SSE41-64-LABEL: test_buildvector_v8i16_split_v16i8:
+; SSE41-64: # %bb.0:
+; SSE41-64-NEXT: movd %edi, %xmm0
+; SSE41-64-NEXT: shrl $8, %edi
+; SSE41-64-NEXT: pinsrb $1, %edi, %xmm0
+; SSE41-64-NEXT: pinsrb $2, %esi, %xmm0
+; SSE41-64-NEXT: shrl $8, %esi
+; SSE41-64-NEXT: pinsrb $3, %esi, %xmm0
+; SSE41-64-NEXT: pinsrb $4, %edx, %xmm0
+; SSE41-64-NEXT: shrl $8, %edx
+; SSE41-64-NEXT: pinsrb $5, %edx, %xmm0
+; SSE41-64-NEXT: pinsrb $6, %ecx, %xmm0
+; SSE41-64-NEXT: shrl $8, %ecx
+; SSE41-64-NEXT: pinsrb $7, %ecx, %xmm0
+; SSE41-64-NEXT: pinsrb $8, %r8d, %xmm0
+; SSE41-64-NEXT: shrl $8, %r8d
+; SSE41-64-NEXT: pinsrb $9, %r8d, %xmm0
+; SSE41-64-NEXT: pinsrb $10, %r9d, %xmm0
+; SSE41-64-NEXT: shrl $8, %r9d
+; SSE41-64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
+; SSE41-64-NEXT: pinsrb $11, %r9d, %xmm0
+; SSE41-64-NEXT: pinsrb $12, %eax, %xmm0
+; SSE41-64-NEXT: shrl $8, %eax
+; SSE41-64-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
+; SSE41-64-NEXT: pinsrb $13, %eax, %xmm0
+; SSE41-64-NEXT: pinsrb $14, %ecx, %xmm0
+; SSE41-64-NEXT: shrl $8, %ecx
+; SSE41-64-NEXT: pinsrb $15, %ecx, %xmm0
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v8i16_split_v16i8:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vmovd %eax, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; AVX-32-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: shrl $8, %eax
+; AVX-32-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v8i16_split_v16i8:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vmovd %edi, %xmm0
+; AVX-64-NEXT: shrl $8, %edi
+; AVX-64-NEXT: vpinsrb $1, %edi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $2, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %esi
+; AVX-64-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $4, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %edx
+; AVX-64-NEXT: vpinsrb $5, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %ecx
+; AVX-64-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $8, %r8d, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %r8d
+; AVX-64-NEXT: vpinsrb $9, %r8d, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %r9d
+; AVX-64-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
+; AVX-64-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %eax
+; AVX-64-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
+; AVX-64-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: shrl $8, %ecx
+; AVX-64-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+ %a0.lo = trunc i16 %a0 to i8
+ %a1.lo = trunc i16 %a1 to i8
+ %a2.lo = trunc i16 %a2 to i8
+ %a3.lo = trunc i16 %a3 to i8
+ %a4.lo = trunc i16 %a4 to i8
+ %a5.lo = trunc i16 %a5 to i8
+ %a6.lo = trunc i16 %a6 to i8
+ %a7.lo = trunc i16 %a7 to i8
+ %a0.shr = l...
[truncated]
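For context, the pattern these tests exercise: each wide integer argument is split into its low and high halves with trunc and lshr, and the halves are inserted as adjacent narrower vector lanes, matching what type legalization typically produces for illegal integer element types. Below is a minimal reduced sketch of the v2i64-to-v4i32 case; the function name and single-argument signature are illustrative only, not part of the patch:

; hypothetical reduced example: split one i64 into two i32 lanes,
; leaving the upper two lanes poison
define <4 x i32> @split_i64_sketch(i64 %a) {
  %lo = trunc i64 %a to i32          ; bits [0,32)
  %shr = lshr i64 %a, 32
  %hi = trunc i64 %shr to i32        ; bits [32,64)
  %v0 = insertelement <4 x i32> poison, i32 %lo, i64 0
  %v1 = insertelement <4 x i32> %v0, i32 %hi, i64 1
  ret <4 x i32> %v1
}

Per the SSE41-64 check lines above, the expectation is that such splits lower to movd plus pinsrd register inserts rather than a round trip through memory.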
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/14/builds/4651. Here is the relevant piece of the build log for reference:
No description provided.