[Codegen][TLI][X86] SimplifyMultipleUseDemandedBits(): 0'th vec subreg widening is free, try to perform it earlier

I believe the profitability reasoning here is correct:
the "sub"reg is already located within the 0'th subreg of the wider reg,
so if we have a subvector insertion at index 0 into undef,
then it's always free to do.
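
As a minimal illustration of that claim (hypothetical IR, not part of this
patch): widening a vector by inserting it at index 0 of an undef vector
costs nothing on x86, because the narrow value already occupies the low
subregister of the wider register.

; %v arrives in an xmm register, which is already the low half of the
; corresponding ymm register, so this widening shuffle lowers to no code.
define <8 x i32> @free_widen(<4 x i32> %v) {
  %wide = shufflevector <4 x i32> %v, <4 x i32> undef,
                        <8 x i32> <i32 0, i32 1, i32 2, i32 3,
                                   i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %wide
}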

After this, D109065 finally avoids the regression in D108382.

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D109074
LebedevRI committed Sep 1, 2021
1 parent 68745a5 commit f575312
Showing 4 changed files with 154 additions and 145 deletions.
12 changes: 10 additions & 2 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -819,13 +819,21 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
     break;
   }
   case ISD::INSERT_SUBVECTOR: {
-    // If we don't demand the inserted subvector, return the base vector.
     SDValue Vec = Op.getOperand(0);
     SDValue Sub = Op.getOperand(1);
     uint64_t Idx = Op.getConstantOperandVal(2);
     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
-    if (DemandedElts.extractBits(NumSubElts, Idx) == 0)
+    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
+    // If we don't demand the inserted subvector, return the base vector.
+    if (DemandedSubElts == 0)
       return Vec;
+    // If this simply widens the lowest subvector, see if we can do it earlier.
+    if (Idx == 0 && Vec.isUndef()) {
+      if (SDValue NewSub = SimplifyMultipleUseDemandedBits(
+              Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1))
+        return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
+                           Op.getOperand(0), NewSub, Op.getOperand(2));
+    }
     break;
   }
   case ISD::VECTOR_SHUFFLE: {
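
In words: the demanded-elements mask is first narrowed to the inserted
subvector; if nothing in the subvector is demanded, the base vector is
returned as before, and if the insert merely widens the lowest subvector
into undef, the function now recurses into the subvector and rebuilds the
insert around the simplified value. A sketch of the kind of pattern this
lets the recursion look through (hypothetical IR, not a test from this
commit; the function name demand_low_lane is invented):

; Only lane 0 of the widened %w is demanded, so the demanded-bits
; recursion can step through the free index-0 widening and simplify
; the narrow add %s directly.
define i32 @demand_low_lane(<2 x i32> %a, <2 x i32> %b) {
  %s = add <2 x i32> %a, %b
  %w = shufflevector <2 x i32> %s, <2 x i32> undef,
                     <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %e = extractelement <4 x i32> %w, i64 0
  ret i32 %e
}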
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -113,7 +113,7 @@ define i8 @test_v9i8(<9 x i8> %a) nounwind {
 ; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    umov w9, v0.b[4]
 ; CHECK-NEXT:    and w8, w8, w9
-; CHECK-NEXT:    umov w9, v1.b[5]
+; CHECK-NEXT:    umov w9, v0.b[5]
 ; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    umov w9, v0.b[6]
 ; CHECK-NEXT:    and w8, w8, w9
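
The effect in this test: the <9 x i8> 'and' reduction previously read lane 5
from a widened copy in v1; with the widening simplified earlier, it reads
straight from v0 and the copy disappears. For reference, the test body is
roughly of this shape (reconstructed from the test name, not quoted from the
file):

declare i8 @llvm.vector.reduce.and.v9i8(<9 x i8>)

define i8 @test_v9i8(<9 x i8> %a) nounwind {
  %b = call i8 @llvm.vector.reduce.and.v9i8(<9 x i8> %a)
  ret i8 %b
}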
225 changes: 113 additions & 112 deletions llvm/test/CodeGen/X86/avg.ll
@@ -2265,25 +2265,25 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX512-NEXT: vmovq %xmm3, %rbx
; AVX512-NEXT: vpextrq $1, %xmm3, %rbp
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512-NEXT: vmovq %xmm4, %rbx
; AVX512-NEXT: vpextrq $1, %xmm4, %rbp
; AVX512-NEXT: vmovq %xmm3, %rdi
; AVX512-NEXT: vpextrq $1, %xmm3, %rsi
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX512-NEXT: vmovq %xmm2, %rdx
; AVX512-NEXT: vpextrq $1, %xmm2, %r15
; AVX512-NEXT: vpextrq $1, %xmm2, %r10
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vmovq %xmm2, %r8
; AVX512-NEXT: vpextrq $1, %xmm2, %r9
; AVX512-NEXT: vmovq %xmm2, %r13
; AVX512-NEXT: vpextrq $1, %xmm2, %r14
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX512-NEXT: vmovq %xmm2, %r11
; AVX512-NEXT: vpextrq $1, %xmm2, %r10
; AVX512-NEXT: vmovq %xmm2, %r15
; AVX512-NEXT: vpextrq $1, %xmm2, %r8
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX512-NEXT: vmovq %xmm2, %r11
; AVX512-NEXT: vpextrq $1, %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
@@ -2292,83 +2292,85 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX512-NEXT: vmovq %xmm3, %rcx
; AVX512-NEXT: addq %rbx, %rcx
; AVX512-NEXT: vpextrq $1, %xmm3, %rax
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX512-NEXT: vmovq %xmm4, %rax
; AVX512-NEXT: addq %rbx, %rax
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: vpextrq $1, %xmm4, %rax
; AVX512-NEXT: addq %rbp, %rax
; AVX512-NEXT: movq %rax, %rbp
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-NEXT: vmovq %xmm3, %r14
; AVX512-NEXT: addq %rdi, %r14
; AVX512-NEXT: movq %rax, %r9
; AVX512-NEXT: vmovq %xmm3, %rcx
; AVX512-NEXT: addq %rdi, %rcx
; AVX512-NEXT: vpextrq $1, %xmm3, %rax
; AVX512-NEXT: addq %rsi, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: addq %rdx, %rax
; AVX512-NEXT: movq %rax, %rdx
; AVX512-NEXT: vpextrq $1, %xmm2, %r12
; AVX512-NEXT: addq %r15, %r12
; AVX512-NEXT: vmovq %xmm2, %rbp
; AVX512-NEXT: addq %rdx, %rbp
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: addq %r10, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: addq %r8, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: addq %r13, %rax
; AVX512-NEXT: movq %rax, %r13
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: addq %r9, %rax
; AVX512-NEXT: addq %r14, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: addq %r11, %rax
; AVX512-NEXT: addq %r15, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: addq %r10, %rax
; AVX512-NEXT: addq %r8, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vmovq %xmm2, %r13
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
; AVX512-NEXT: vpextrq $1, %xmm2, %rbx
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: addq %r11, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vpextrq $1, %xmm2, %r12
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512-NEXT: vmovq %xmm0, %r10
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
; AVX512-NEXT: vpextrq $1, %xmm0, %r9
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
; AVX512-NEXT: vmovq %xmm0, %r11
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
; AVX512-NEXT: vpextrq $1, %xmm0, %r8
; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
; AVX512-NEXT: vmovq %xmm1, %rax
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %r8
; AVX512-NEXT: addq %rax, %r8
; AVX512-NEXT: vpextrq $1, %xmm1, %rdi
; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
; AVX512-NEXT: addq %rdi, %rsi
; AVX512-NEXT: addq $-1, %rcx
; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %r11d
; AVX512-NEXT: adcq $-1, %r11
; AVX512-NEXT: addq $-1, %rbp
; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %edi
; AVX512-NEXT: adcq $-1, %rdi
; AVX512-NEXT: addq $-1, %r14
; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: vmovq %xmm0, %rdi
; AVX512-NEXT: addq %rax, %rdi
; AVX512-NEXT: vpextrq $1, %xmm1, %rsi
; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
; AVX512-NEXT: addq %rsi, %rdx
; AVX512-NEXT: addq $-1, %rbx
; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %r15d
; AVX512-NEXT: adcq $-1, %r15
; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX512-NEXT: addq $-1, %r9
; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %r14d
; AVX512-NEXT: adcq $-1, %r14
; AVX512-NEXT: addq $-1, %rdx
; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: addq $-1, %rcx
; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %esi
; AVX512-NEXT: adcq $-1, %rsi
; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX512-NEXT: movl $0, %r10d
; AVX512-NEXT: adcq $-1, %r10
; AVX512-NEXT: addq $-1, %rbp
; AVX512-NEXT: movq %rbp, (%rsp) # 8-byte Spill
; AVX512-NEXT: movl $0, %r9d
; AVX512-NEXT: adcq $-1, %r9
; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX512-NEXT: movl $0, %eax
; AVX512-NEXT: adcq $-1, %rax
; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill
; AVX512-NEXT: addq $-1, %r12
; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %r12d
; AVX512-NEXT: adcq $-1, %r12
; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: addq $-1, %r13
; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: movl $0, %eax
; AVX512-NEXT: adcq $-1, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
@@ -2384,89 +2386,89 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX512-NEXT: movl $0, %eax
; AVX512-NEXT: adcq $-1, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: addq $-1, %r13
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX512-NEXT: addq $-1, %rcx
; AVX512-NEXT: movl $0, %eax
; AVX512-NEXT: adcq $-1, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: addq $-1, %rbx
; AVX512-NEXT: addq $-1, %r12
; AVX512-NEXT: movl $0, %eax
; AVX512-NEXT: adcq $-1, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: addq $-1, %r10
; AVX512-NEXT: movl $0, %edx
; AVX512-NEXT: adcq $-1, %rdx
; AVX512-NEXT: addq $-1, %r9
; AVX512-NEXT: movl $0, %ecx
; AVX512-NEXT: adcq $-1, %rcx
; AVX512-NEXT: addq $-1, %r11
; AVX512-NEXT: movl $0, %r13d
; AVX512-NEXT: adcq $-1, %r13
; AVX512-NEXT: addq $-1, %r8
; AVX512-NEXT: movl $0, %eax
; AVX512-NEXT: adcq $-1, %rax
; AVX512-NEXT: addq $-1, %rsi
; AVX512-NEXT: addq $-1, %rdi
; AVX512-NEXT: movl $0, %ebx
; AVX512-NEXT: adcq $-1, %rbx
; AVX512-NEXT: addq $-1, %rdx
; AVX512-NEXT: movl $0, %ebp
; AVX512-NEXT: adcq $-1, %rbp
; AVX512-NEXT: shldq $63, %rsi, %rbp
; AVX512-NEXT: shldq $63, %rdx, %rbp
; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: shldq $63, %rdi, %rbx
; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: shldq $63, %r8, %rax
; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX512-NEXT: shldq $63, %r9, %rcx
; AVX512-NEXT: movq %rcx, %rbp
; AVX512-NEXT: shldq $63, %r10, %rdx
; AVX512-NEXT: movq %rdx, %r9
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; AVX512-NEXT: shldq $63, %rbx, %r10
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; AVX512-NEXT: shldq $63, %r13, %r8
; AVX512-NEXT: movq %rax, %r8
; AVX512-NEXT: shldq $63, %r11, %r13
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; AVX512-NEXT: shldq $63, %r12, %r11
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
; AVX512-NEXT: shldq $63, %rcx, %r12
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %r13
; AVX512-NEXT: shldq $63, %rax, %rbp
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rbx
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rsi
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rdi
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rdx
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %r12
; AVX512-NEXT: movq (%rsp), %rcx # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rdx
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rcx
; AVX512-NEXT: movq (%rsp), %rax # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %r9
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %r14
; AVX512-NEXT: shldq $63, %rax, %r10
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %r15
; AVX512-NEXT: shldq $63, %rax, %rsi
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %rdi
; AVX512-NEXT: shldq $63, %rax, %r14
; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX512-NEXT: shldq $63, %rax, %r11
; AVX512-NEXT: vmovq %r11, %xmm0
; AVX512-NEXT: vmovq %rdi, %xmm1
; AVX512-NEXT: shldq $63, %rax, %r15
; AVX512-NEXT: vmovq %r15, %xmm0
; AVX512-NEXT: vmovq %r14, %xmm1
; AVX512-NEXT: vmovq %r10, %xmm2
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm1
; AVX512-NEXT: vmovq %r15, %xmm2
; AVX512-NEXT: vmovq %r14, %xmm3
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-NEXT: vmovd %esi, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $1, %eax, %xmm3, %xmm1
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm0
; AVX512-NEXT: vmovq %rcx, %xmm1
; AVX512-NEXT: vmovq %r12, %xmm2
; AVX512-NEXT: vpinsrb $4, %r9d, %xmm0, %xmm0
; AVX512-NEXT: vmovq %r9, %xmm1
; AVX512-NEXT: vmovq %rcx, %xmm2
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
; AVX512-NEXT: vmovq %rdx, %xmm2
; AVX512-NEXT: vmovq %rsi, %xmm3
; AVX512-NEXT: vmovq %rdi, %xmm3
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2
@@ -2475,16 +2477,16 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
; AVX512-NEXT: movq %rbx, %rax
; AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; AVX512-NEXT: vmovq %rbx, %xmm1
; AVX512-NEXT: vmovq %r13, %xmm2
; AVX512-NEXT: vmovq %rbp, %xmm2
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
; AVX512-NEXT: vmovq %r8, %xmm2
; AVX512-NEXT: vmovq %r10, %xmm3
; AVX512-NEXT: vmovq %r12, %xmm2
; AVX512-NEXT: vmovq %r11, %xmm3
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2
@@ -2493,11 +2495,10 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; AVX512-NEXT: vmovq %r9, %xmm1
; AVX512-NEXT: vmovq %rbp, %xmm2
; AVX512-NEXT: vpinsrb $12, %r13d, %xmm0, %xmm0
; AVX512-NEXT: vmovq %r13, %xmm1
; AVX512-NEXT: vmovq %r8, %xmm2
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
