[X86] When applying the shuffle-to-zero-extend transformation on floating point, bitcast to integer first.

Fix issue described in PR34577.

Differential Revision: https://reviews.llvm.org/D37803

llvm-svn: 313256
aymanmusa committed Sep 14, 2017
1 parent 5891060 commit ab68449
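
As a rough illustration of the pattern this combine targets (a hedged sketch, not taken from the commit; the function name and mask below are made up), consider a shuffle that interleaves float lanes with zeros. Its lane contents match a per-element zero extension, but the zero-extend nodes are built with integer element types, which is why the patch derives the source type from integer scalars and bitcasts the floating-point input first:

; Illustrative only, not part of this commit.
define <8 x float> @zext_like_float_shuffle(<4 x float> %v) {
entry:
  ; Result lanes are <v0, 0.0, v1, 0.0, v2, 0.0, v3, 0.0>: even lanes come from %v,
  ; odd lanes come from the zero vector.
  %interleaved = shufflevector <4 x float> %v, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x float> %interleaved
}

Reinterpreted as <4 x i64>, each result lane holds the 32-bit pattern of one input element in its low half and zeros in its high half, which is the vpmovzxdq form checked in the AVX2 test added below.
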
Showing 2 changed files with 71 additions and 4 deletions.
13 changes: 9 additions & 4 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -27153,13 +27153,18 @@ static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
     }
     if (Match) {
       unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
-      SrcVT = MVT::getVectorVT(MaskVT.getScalarType(), SrcSize / MaskEltSize);
-      if (SrcVT != MaskVT)
+      MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
+                                          MVT::getIntegerVT(MaskEltSize);
+      SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
+
+      if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits()) {
         V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
+        Shuffle = unsigned(X86ISD::VZEXT);
+      } else
+        Shuffle = unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
+
       DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
       DstVT = MVT::getVectorVT(DstVT, NumDstElts);
-      Shuffle = SrcVT != MaskVT ? unsigned(X86ISD::VZEXT)
-                                : unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
       return true;
     }
   }
62 changes: 62 additions & 0 deletions llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -897,3 +897,65 @@ define <32 x i8> @PR27320(<8 x i32> %a0) {
%3 = shufflevector <32 x i8> %2, <32 x i8> undef, <32 x i32> <i32 0, i32 1, i32 1, i32 2, i32 3, i32 4, i32 4, i32 5, i32 6, i32 7, i32 7, i32 8, i32 9, i32 10, i32 10, i32 11, i32 16, i32 17, i32 17, i32 18, i32 19, i32 20, i32 20, i32 21, i32 22, i32 23, i32 23, i32 24, i32 25, i32 26, i32 26, i32 27>
ret <32 x i8> %3
}

define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1, <8 x float> %inp2) {
; X32-AVX2-LABEL: PR34577:
; X32-AVX2: # BB#0: # %entry
; X32-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; X32-AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <u,u,7,2,u,u,3,2>
; X32-AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
; X32-AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; X32-AVX2-NEXT: retl
;
; X32-AVX512-LABEL: PR34577:
; X32-AVX512: # BB#0: # %entry
; X32-AVX512-NEXT: vmovaps {{.*#+}} ymm3 = <1,u,u,u,2,u,5,0>
; X32-AVX512-NEXT: vpermps %ymm0, %ymm3, %ymm0
; X32-AVX512-NEXT: vmovaps {{.*#+}} ymm3 = <u,2,3,5,u,5,u,u>
; X32-AVX512-NEXT: vpermps %ymm2, %ymm3, %ymm2
; X32-AVX512-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5],ymm0[6,7]
; X32-AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-AVX512-NEXT: movb $86, %al
; X32-AVX512-NEXT: kmovw %eax, %k1
; X32-AVX512-NEXT: vblendmps %zmm0, %zmm2, %zmm0 {%k1}
; X32-AVX512-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
; X32-AVX512-NEXT: vmovaps {{.*#+}} ymm2 = <u,u,7,2,u,u,3,2>
; X32-AVX512-NEXT: vpermps %ymm1, %ymm2, %ymm1
; X32-AVX512-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; X32-AVX512-NEXT: retl
;
; X64-AVX2-LABEL: PR34577:
; X64-AVX2: # BB#0: # %entry
; X64-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; X64-AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <u,u,7,2,u,u,3,2>
; X64-AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
; X64-AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: PR34577:
; X64-AVX512: # BB#0: # %entry
; X64-AVX512-NEXT: vmovaps {{.*#+}} ymm3 = <1,u,u,u,2,u,5,0>
; X64-AVX512-NEXT: vpermps %ymm0, %ymm3, %ymm0
; X64-AVX512-NEXT: vmovaps {{.*#+}} ymm3 = <u,2,3,5,u,5,u,u>
; X64-AVX512-NEXT: vpermps %ymm2, %ymm3, %ymm2
; X64-AVX512-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5],ymm0[6,7]
; X64-AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-AVX512-NEXT: movb $86, %al
; X64-AVX512-NEXT: kmovw %eax, %k1
; X64-AVX512-NEXT: vblendmps %zmm0, %zmm2, %zmm0 {%k1}
; X64-AVX512-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
; X64-AVX512-NEXT: vmovaps {{.*#+}} ymm2 = <u,u,7,2,u,u,3,2>
; X64-AVX512-NEXT: vpermps %ymm1, %ymm2, %ymm1
; X64-AVX512-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; X64-AVX512-NEXT: retq
entry:
%shuf0 = shufflevector <8 x float> %inp0, <8 x float> %inp2, <8 x i32> <i32 1, i32 10, i32 11, i32 13, i32 2, i32 13, i32 5, i32 0>
%sel = select <8 x i1> <i1 false, i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x float> %shuf0, <8 x float> zeroinitializer
%shuf1 = shufflevector <8 x float> zeroinitializer, <8 x float> %sel, <8 x i32> <i32 6, i32 11, i32 6, i32 15, i32 12, i32 11, i32 1, i32 3>
%shuf2 = shufflevector <8 x float> %inp1, <8 x float> %shuf1, <8 x i32> <i32 15, i32 10, i32 7, i32 2, i32 12, i32 undef, i32 3, i32 2>
ret <8 x float> %shuf2
}
