[X86][AVX] getTargetShuffleMask - don't decode VBROADCAST(EXTRACT_SUBVECTOR(X,0)) patterns.

getTargetShuffleMask is used by the various "SimplifyDemanded" folds, so we can't assume that the bypassed extract_subvector can be safely simplified. getFauxShuffleMask performs a more general decode that lets us catch many of these cases more safely, so the impact is minimal.
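
For illustration, the decoded pattern looks like this in SelectionDAG dump notation (a hypothetical v8i32 case; the node numbers and the wide source t0 are invented for the example):

  t0: v8i32 = <some wide source>
  t1: v4i32 = extract_subvector t0, Constant:i64<0>
  t2: v8i32 = X86ISD::VBROADCAST t1

Before this change, getTargetShuffleMask pushed t0 into Ops and decoded t2 as a splat of element 0 at the full v8i32 width, even though t0 is not an operand of t2; after it, the broadcast is only decoded when its source type already matches VT, and anything else returns false.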
RKSimon committed Jul 21, 2020
1 parent 9e78371 commit 5b5dc24
Showing 8 changed files with 244 additions and 259 deletions.
19 changes: 5 additions & 14 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6916,25 +6916,16 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
    DecodeZeroMoveLowMask(NumElems, Mask);
    IsUnary = true;
    break;
-  case X86ISD::VBROADCAST: {
-    SDValue N0 = N->getOperand(0);
-    // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
-    // add the pre-extracted value to the Ops vector.
-    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
-        N0.getOperand(0).getValueType() == VT &&
-        N0.getConstantOperandVal(1) == 0)
-      Ops.push_back(N0.getOperand(0));
-
-    // We only decode broadcasts of same-sized vectors, unless the broadcast
-    // came from an extract from the original width. If we found one, we
-    // pushed it the Ops vector above.
-    if (N0.getValueType() == VT || !Ops.empty()) {
+  case X86ISD::VBROADCAST:
+    // We only decode broadcasts of same-sized vectors, peeking through to
+    // extracted subvectors is likely to cause hasOneUse issues with
+    // SimplifyDemandedBits etc.
+    if (N->getOperand(0).getValueType() == VT) {
      DecodeVectorBroadcast(NumElems, Mask);
      IsUnary = true;
      break;
    }
    return false;
-  }
  case X86ISD::VPERMILPV: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
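For reference, here is a minimal sketch of a faux-shuffle-style decode of the same pattern, in the spirit of getFauxShuffleMask (illustrative only: the helper decodeBroadcastOfExtract is invented for this example, it assumes the surrounding X86ISelLowering.cpp context for SDValue, MVT, SmallVectorImpl and the ISD/X86ISD opcodes, and the real getFauxShuffleMask is considerably more general):

// Illustrative sketch only -- not the actual getFauxShuffleMask code.
// Record the real wide input plus a splat-of-element-0 mask, so that
// callers reason about the node they will actually be handed back.
static bool decodeBroadcastOfExtract(SDValue N, MVT VT,
                                     SmallVectorImpl<SDValue> &Ops,
                                     SmallVectorImpl<int> &Mask) {
  if (N.getOpcode() != X86ISD::VBROADCAST)
    return false;
  SDValue Src = N.getOperand(0);
  // Peek through EXTRACT_SUBVECTOR(X, 0) when X already has the result type.
  if (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      Src.getOperand(0).getValueType() == VT &&
      Src.getConstantOperandVal(1) == 0)
    Src = Src.getOperand(0);
  if (Src.getValueType() != VT)
    return false;
  Ops.push_back(Src);
  Mask.append(VT.getVectorNumElements(), 0); // splat of element 0
  return true;
}

The difference from the removed getTargetShuffleMask code is in the caller contract rather than the decode itself: a faux-shuffle decode hands back peeked-through inputs by design, while the SimplifyDemanded folds that call getTargetShuffleMask assume each entry of Ops is a real operand of N that is safe to simplify in place.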
38 changes: 19 additions & 19 deletions llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -1092,12 +1092,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX2-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
@@ -1110,12 +1110,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512F-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512F-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; AVX512F-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512F-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512F-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512F-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512F-NEXT: vmovdqa %ymm1, %ymm0
@@ -1126,12 +1126,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VL-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; AVX512VL-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VL-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VL-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VL-NEXT: vptestnmd %ymm2, %ymm2, %k1
; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
; AVX512VL-NEXT: vmovdqa %ymm1, %ymm0
@@ -1144,12 +1144,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512BW-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512BW-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; AVX512BW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512BW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512BW-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512BW-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512BW-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa %ymm1, %ymm0
@@ -1162,12 +1162,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VBMI2-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX512VBMI2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VBMI2-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; AVX512VBMI2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VBMI2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VBMI2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VBMI2-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512VBMI2-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VBMI2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VBMI2-NEXT: vptestnmd %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; AVX512VBMI2-NEXT: vmovdqa %ymm1, %ymm0
@@ -1178,12 +1178,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VLBW-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX512VLBW-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VLBW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; AVX512VLBW-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX512VLBW-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VLBW-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX512VLBW-NEXT: vptestnmd %ymm2, %ymm2, %k1
; AVX512VLBW-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa %ymm1, %ymm0
@@ -1224,12 +1224,12 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; XOPAVX2-NEXT: vpand %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; XOPAVX2-NEXT: vpslld %xmm3, %ymm0, %ymm3
-; XOPAVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [32,32,32,32]
; XOPAVX2-NEXT: vpsubd %xmm2, %xmm4, %xmm4
; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; XOPAVX2-NEXT: vpsrld %xmm4, %ymm1, %ymm1
; XOPAVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
@@ -1271,12 +1271,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1287,12 +1287,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1303,12 +1303,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1320,12 +1320,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; AVX512BW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512BW-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512BW-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa %ymm1, %ymm0
@@ -1337,12 +1337,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VBMI2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VBMI2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; AVX512VBMI2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VBMI2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VBMI2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VBMI2-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VBMI2-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VBMI2-NEXT: vptestnmw %zmm2, %zmm2, %k1
; AVX512VBMI2-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
; AVX512VBMI2-NEXT: vmovdqa %ymm1, %ymm0
@@ -1353,12 +1353,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VLBW-NEXT: vpbroadcastw %xmm2, %ymm2
; AVX512VLBW-NEXT: vptestnmw %ymm2, %ymm2, %k1
; AVX512VLBW-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
; AVX512VLBW-NEXT: vmovdqa %ymm1, %ymm0
@@ -1399,12 +1399,12 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; XOPAVX2-NEXT: vpsllw %xmm3, %ymm0, %ymm3
-; XOPAVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm4, %xmm4
; XOPAVX2-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; XOPAVX2-NEXT: vpsrlw %xmm4, %ymm1, %ymm1
; XOPAVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
+; XOPAVX2-NEXT: vpbroadcastw %xmm2, %ymm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpcmpeqw %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1458,6 +1458,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX2-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@@ -1467,7 +1468,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1482,6 +1482,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512F-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@@ -1491,7 +1492,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
@@ -1506,6 +1506,7 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX512VL-NEXT: vpbroadcastb %xmm3, %ymm3
; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm2, %xmm4, %xmm4
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
@@ -1514,9 +1515,8 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX512VL-NEXT: vpbroadcastb %xmm4, %ymm4
; AVX512VL-NEXT: vpternlogq $236, %ymm1, %ymm3, %ymm4
-; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm1
-; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpcmpeqb %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: vpblendvb %ymm1, %ymm0, %ymm4, %ymm0
; AVX512VL-NEXT: retq
;
