[X86][SSE] Combine unary shuffle(HORIZOP,HORIZOP) -> HORIZOP
We had previously limited the shuffle(HORIZOP,HORIZOP) combine to binary shuffles, but we can often merge unary shuffles just as well, folding UNDEF/ZERO values into the 64-bit half lanes.

For the (P)HADD/HSUB cases this is limited to targets where horizontal ops are fast, but the PACKSS/PACKUS combine applies in all cases.
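For illustration, here is a scalar model of the fold (invented names, not the actual DAG combine): a unary shuffle that repeats one 64-bit half of HADD(a,b) computes the same thing as HADD of the corresponding input with itself, which is what the combine now emits directly.

#include <array>
#include <cassert>

using V4 = std::array<float, 4>;

// haddps semantics: pairwise sums of 'a' in the low half, 'b' in the high.
static V4 hadd(const V4 &a, const V4 &b) {
  return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
}

int main() {
  V4 a{1, 2, 3, 4}, b{5, 6, 7, 8};
  V4 t = hadd(a, b);
  V4 shuffled{t[0], t[1], t[0], t[1]};  // unary shuffle, mask <0,1,0,1>
  assert(shuffled == hadd(a, a));       // -> a single HADD(a, a)
  return 0;
}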
RKSimon committed Apr 5, 2020
1 parent 34392b5 commit 4431a29
Showing 11 changed files with 296 additions and 193 deletions.
43 changes: 28 additions & 15 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -35347,38 +35347,51 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
   SmallVector<int, 64> TargetMask;
   SmallVector<SDValue, 2> TargetOps;
   if (isTargetShuffle(Opcode))
-    getTargetShuffleMask(N.getNode(), VT, false, TargetOps, TargetMask, IsUnary);
+    getTargetShuffleMask(N.getNode(), VT, true, TargetOps, TargetMask, IsUnary);
 
   // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
   // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
   // represents the LHS/RHS inputs for the lower/upper halves.
   SmallVector<int, 16> TargetMask128;
-  if (!TargetMask.empty() && TargetOps.size() == 2 &&
-      is128BitLaneRepeatedShuffleMask(VT, TargetMask, TargetMask128)) {
+  if (!TargetMask.empty() && 0 < TargetOps.size() && TargetOps.size() <= 2 &&
+      isRepeatedTargetShuffleMask(128, VT, TargetMask, TargetMask128)) {
     SmallVector<int, 16> WidenedMask128 = TargetMask128;
     while (WidenedMask128.size() > 2) {
       SmallVector<int, 16> WidenedMask;
       if (!canWidenShuffleElements(WidenedMask128, WidenedMask))
         break;
       WidenedMask128 = std::move(WidenedMask);
     }
-    if (WidenedMask128.size() == 2 && isInRange(WidenedMask128, 0, 4)) {
-      SDValue BC0 = peekThroughBitcasts(TargetOps[0]);
-      SDValue BC1 = peekThroughBitcasts(TargetOps[1]);
+    if (WidenedMask128.size() == 2) {
+      assert(isUndefOrZeroOrInRange(WidenedMask128, 0, 4) && "Illegal shuffle");
+      SDValue BC0 = peekThroughBitcasts(TargetOps.front());
+      SDValue BC1 = peekThroughBitcasts(TargetOps.back());
       EVT VT0 = BC0.getValueType();
       EVT VT1 = BC1.getValueType();
       unsigned Opcode0 = BC0.getOpcode();
       unsigned Opcode1 = BC1.getOpcode();
+      bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
+                      Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
       if (Opcode0 == Opcode1 && VT0 == VT1 &&
-          (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
-           Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
-           Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
-        SDValue Lo = isInRange(WidenedMask128[0], 0, 2) ? BC0 : BC1;
-        SDValue Hi = isInRange(WidenedMask128[1], 0, 2) ? BC0 : BC1;
-        Lo = Lo.getOperand(WidenedMask128[0] & 1);
-        Hi = Hi.getOperand(WidenedMask128[1] & 1);
-        SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
-        return DAG.getBitcast(VT, Horiz);
+          (isHoriz || Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
+        bool SingleOp = (TargetOps.size() == 1);
+        if (!isHoriz || shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
+          SDValue Lo = isInRange(WidenedMask128[0], 0, 2) ? BC0 : BC1;
+          SDValue Hi = isInRange(WidenedMask128[1], 0, 2) ? BC0 : BC1;
+          Lo = Lo.getOperand(WidenedMask128[0] & 1);
+          Hi = Hi.getOperand(WidenedMask128[1] & 1);
+          if (SingleOp) {
+            MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
+            SDValue Undef = DAG.getUNDEF(SrcVT);
+            SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
+            Lo = (WidenedMask128[0] == SM_SentinelZero ? Zero : Lo);
+            Hi = (WidenedMask128[1] == SM_SentinelZero ? Zero : Hi);
+            Lo = (WidenedMask128[0] == SM_SentinelUndef ? Undef : Lo);
+            Hi = (WidenedMask128[1] == SM_SentinelUndef ? Undef : Hi);
+          }
+          SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
+          return DAG.getBitcast(VT, Horiz);
+        }
       }
     }
   }
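To make the selection logic concrete, here is a standalone scalar model (plain C++, not LLVM code; pickHalf and the sentinel constants are invented, mirroring SM_SentinelUndef = -1 and SM_SentinelZero = -2). Each widened v2X64 mask element either selects one of the four 64-bit input halves (M/2 picks BC0 vs BC1, M&1 picks that op's LHS vs RHS operand) or, for a unary shuffle, folds to an UNDEF/zero operand:

#include <cassert>
#include <string>

// Invented constants mirroring LLVM's shuffle-mask sentinels.
constexpr int SentinelUndef = -1;  // SM_SentinelUndef
constexpr int SentinelZero = -2;   // SM_SentinelZero

// Returns which value the combine feeds into the new horizontal op for
// one widened v2X64 mask element M.
static std::string pickHalf(int M) {
  if (M == SentinelUndef)
    return "UNDEF";                          // DAG.getUNDEF(SrcVT)
  if (M == SentinelZero)
    return "ZERO";                           // getZeroVector(SrcVT, ...)
  assert(0 <= M && M < 4 && "Illegal shuffle");
  std::string Op = M < 2 ? "BC0" : "BC1";    // isInRange(M, 0, 2) ? BC0 : BC1
  return Op + ".getOperand(" + std::to_string(M & 1) + ")";
}

int main() {
  // A unary shuffle keeping only the low half of HADD(x, y) and zeroing
  // the rest, mask <0, SentinelZero>, becomes HADD(x, zero-vector).
  assert(pickHalf(0) == "BC0.getOperand(0)");
  assert(pickHalf(SentinelZero) == "ZERO");
  return 0;
}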
264 changes: 180 additions & 84 deletions llvm/test/CodeGen/X86/haddsub-shuf.ll
@@ -27,21 +27,37 @@ define <4 x float> @hadd_v4f32(<4 x float> %a) {
 }
 
 define <8 x float> @hadd_v8f32a(<8 x float> %a) {
-; SSSE3-LABEL: hadd_v8f32a:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movaps %xmm0, %xmm2
-; SSSE3-NEXT: haddps %xmm1, %xmm2
-; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
-; SSSE3-NEXT: movaps %xmm2, %xmm1
-; SSSE3-NEXT: retq
+; SSSE3_SLOW-LABEL: hadd_v8f32a:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movaps %xmm0, %xmm2
+; SSSE3_SLOW-NEXT: haddps %xmm1, %xmm2
+; SSSE3_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
+; SSSE3_SLOW-NEXT: movaps %xmm2, %xmm1
+; SSSE3_SLOW-NEXT: retq
 ;
-; AVX1-LABEL: hadd_v8f32a:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vhaddps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
+; SSSE3_FAST-LABEL: hadd_v8f32a:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movaps %xmm0, %xmm2
+; SSSE3_FAST-NEXT: haddps %xmm1, %xmm2
+; SSSE3_FAST-NEXT: haddps %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movaps %xmm2, %xmm1
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hadd_v8f32a:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hadd_v8f32a:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm1
+; AVX1_FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1_FAST-NEXT: retq
 ;
 ; AVX2-LABEL: hadd_v8f32a:
 ; AVX2: # %bb.0:
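As a sanity check on the new AVX1_FAST output above (a scalar model, illustration only; the invented hadd helper mirrors vhaddps lane semantics), the two-hadd sequence computes the same <8 x float> as the AVX1_SLOW hadd+vmovddup sequence:

#include <array>
#include <cassert>

using V4 = std::array<float, 4>;
using V8 = std::array<float, 8>;

static V4 hadd(const V4 &a, const V4 &b) {
  return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
}

int main() {
  V8 x{1, 2, 3, 4, 5, 6, 7, 8};
  V4 lo{x[0], x[1], x[2], x[3]};        // low 128 bits of %ymm0
  V4 hi{x[4], x[5], x[6], x[7]};        // vextractf128 $1, %ymm0, %xmm1

  // AVX1_SLOW: one hadd, then duplicate its low 64 bits (vmovddup).
  V4 h = hadd(lo, hi);
  V8 slow{h[0], h[1], h[0], h[1], h[0], h[1], h[2], h[3]};

  // AVX1_FAST: two hadds, no shuffle needed.
  V4 f0 = hadd(lo, lo);                 // vhaddps %xmm0, %xmm0, %xmm0
  V4 f1 = hadd(lo, hi);                 // vhaddps %xmm1, %xmm0, %xmm1
  V8 fast{f0[0], f0[1], f0[2], f0[3], f1[0], f1[1], f1[2], f1[3]};

  assert(slow == fast);
  return 0;
}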
@@ -92,21 +108,37 @@ define <4 x float> @hsub_v4f32(<4 x float> %a) {
 }
 
 define <8 x float> @hsub_v8f32a(<8 x float> %a) {
-; SSSE3-LABEL: hsub_v8f32a:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movaps %xmm0, %xmm2
-; SSSE3-NEXT: hsubps %xmm1, %xmm2
-; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
-; SSSE3-NEXT: movaps %xmm2, %xmm1
-; SSSE3-NEXT: retq
+; SSSE3_SLOW-LABEL: hsub_v8f32a:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movaps %xmm0, %xmm2
+; SSSE3_SLOW-NEXT: hsubps %xmm1, %xmm2
+; SSSE3_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
+; SSSE3_SLOW-NEXT: movaps %xmm2, %xmm1
+; SSSE3_SLOW-NEXT: retq
 ;
-; AVX1-LABEL: hsub_v8f32a:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vhsubps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
+; SSSE3_FAST-LABEL: hsub_v8f32a:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movaps %xmm0, %xmm2
+; SSSE3_FAST-NEXT: hsubps %xmm1, %xmm2
+; SSSE3_FAST-NEXT: hsubps %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movaps %xmm2, %xmm1
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hsub_v8f32a:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_SLOW-NEXT: vhsubps %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hsub_v8f32a:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_FAST-NEXT: vhsubps %xmm1, %xmm0, %xmm1
+; AVX1_FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1_FAST-NEXT: retq
 ;
 ; AVX2-LABEL: hsub_v8f32a:
 ; AVX2: # %bb.0:
@@ -477,21 +509,37 @@ define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
 }
 
 define <8 x i32> @hadd_v8i32a(<8 x i32> %a) {
-; SSSE3-LABEL: hadd_v8i32a:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: phaddd %xmm1, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
-; SSSE3-NEXT: retq
+; SSSE3_SLOW-LABEL: hadd_v8i32a:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_SLOW-NEXT: phaddd %xmm1, %xmm2
+; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
+; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_SLOW-NEXT: retq
 ;
-; AVX1-LABEL: hadd_v8i32a:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
+; SSSE3_FAST-LABEL: hadd_v8i32a:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_FAST-NEXT: phaddd %xmm1, %xmm2
+; SSSE3_FAST-NEXT: phaddd %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hadd_v8i32a:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hadd_v8i32a:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm1
+; AVX1_FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1_FAST-NEXT: retq
 ;
 ; AVX2-LABEL: hadd_v8i32a:
 ; AVX2: # %bb.0:
@@ -551,21 +599,37 @@ define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
 }
 
 define <8 x i32> @hsub_v8i32a(<8 x i32> %a) {
-; SSSE3-LABEL: hsub_v8i32a:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: phsubd %xmm1, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
-; SSSE3-NEXT: retq
+; SSSE3_SLOW-LABEL: hsub_v8i32a:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_SLOW-NEXT: phsubd %xmm1, %xmm2
+; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
+; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_SLOW-NEXT: retq
 ;
-; AVX1-LABEL: hsub_v8i32a:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vphsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
+; SSSE3_FAST-LABEL: hsub_v8i32a:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_FAST-NEXT: phsubd %xmm1, %xmm2
+; SSSE3_FAST-NEXT: phsubd %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hsub_v8i32a:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_SLOW-NEXT: vphsubd %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hsub_v8i32a:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_FAST-NEXT: vphsubd %xmm1, %xmm0, %xmm1
+; AVX1_FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1_FAST-NEXT: retq
 ;
 ; AVX2-LABEL: hsub_v8i32a:
 ; AVX2: # %bb.0:
@@ -625,21 +689,37 @@ define <8 x i16> @hadd_v8i16(<8 x i16> %a) {
 }
 
 define <16 x i16> @hadd_v16i16a(<16 x i16> %a) {
-; SSSE3-LABEL: hadd_v16i16a:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: phaddw %xmm1, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
-; SSSE3-NEXT: retq
+; SSSE3_SLOW-LABEL: hadd_v16i16a:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_SLOW-NEXT: phaddw %xmm1, %xmm2
+; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
+; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_SLOW-NEXT: retq
 ;
-; AVX1-LABEL: hadd_v16i16a:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vphaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
+; SSSE3_FAST-LABEL: hadd_v16i16a:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_FAST-NEXT: phaddw %xmm1, %xmm2
+; SSSE3_FAST-NEXT: phaddw %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hadd_v16i16a:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_SLOW-NEXT: vphaddw %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hadd_v16i16a:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_FAST-NEXT: vphaddw %xmm1, %xmm0, %xmm1
+; AVX1_FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1_FAST-NEXT: retq
 ;
 ; AVX2-LABEL: hadd_v16i16a:
 ; AVX2: # %bb.0:
@@ -699,21 +779,37 @@ define <8 x i16> @hsub_v8i16(<8 x i16> %a) {
 }
 
 define <16 x i16> @hsub_v16i16a(<16 x i16> %a) {
-; SSSE3-LABEL: hsub_v16i16a:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: phsubw %xmm1, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
-; SSSE3-NEXT: retq
+; SSSE3_SLOW-LABEL: hsub_v16i16a:
+; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_SLOW-NEXT: phsubw %xmm1, %xmm2
+; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
+; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_SLOW-NEXT: retq
 ;
-; AVX1-LABEL: hsub_v16i16a:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vphsubw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
+; SSSE3_FAST-LABEL: hsub_v16i16a:
+; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
+; SSSE3_FAST-NEXT: phsubw %xmm1, %xmm2
+; SSSE3_FAST-NEXT: phsubw %xmm0, %xmm0
+; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
+; SSSE3_FAST-NEXT: retq
+;
+; AVX1_SLOW-LABEL: hsub_v16i16a:
+; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_SLOW-NEXT: vphsubw %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1_SLOW-NEXT: retq
+;
+; AVX1_FAST-LABEL: hsub_v16i16a:
+; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1_FAST-NEXT: vphsubw %xmm1, %xmm0, %xmm1
+; AVX1_FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1_FAST-NEXT: retq
 ;
 ; AVX2-LABEL: hsub_v16i16a:
 ; AVX2: # %bb.0:
5 changes: 2 additions & 3 deletions llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -284,9 +284,9 @@ define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
 ;
 ; SSE-FAST-LABEL: test11_undef:
 ; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: movaps %xmm3, %xmm1
 ; SSE-FAST-NEXT: haddps %xmm0, %xmm0
-; SSE-FAST-NEXT: haddps %xmm3, %xmm3
-; SSE-FAST-NEXT: movddup {{.*#+}} xmm1 = xmm3[0,0]
+; SSE-FAST-NEXT: haddps %xmm3, %xmm1
 ; SSE-FAST-NEXT: retq
 ;
 ; AVX-LABEL: test11_undef:
@@ -490,7 +490,6 @@ define <2 x double> @add_pd_010(<2 x double> %x) {
 ; AVX-FAST-LABEL: add_pd_010:
 ; AVX-FAST: # %bb.0:
 ; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
-; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
 ; AVX-FAST-NEXT: retq
   %l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 undef, i32 0>
   %add = fadd <2 x double> %l, %x
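The removed vpermilpd is a genuine no-op here (a scalar sketch based on the visible IR, which leaves lane 0 of %l undef): vhaddpd %xmm0, %xmm0, %xmm0 writes x0+x1 into both lanes, so swapping the lanes changes nothing.

#include <cassert>

int main() {
  double x0 = 3.0, x1 = 4.0;
  double h[2] = {x0 + x1, x0 + x1};      // vhaddpd %xmm0, %xmm0, %xmm0
  double p[2] = {h[1], h[0]};            // vpermilpd xmm0 = xmm0[1,0]
  assert(p[0] == h[0] && p[1] == h[1]);  // identical -> permute removable
  return 0;
}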
