diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e1a4a7fb5d15e..77bb87d1d35d1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38836,7 +38836,9 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
   SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
   for (unsigned I = 0; I != NumOps; ++I)
     if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
-                                       RawBitsOps[I]))
+                                       RawBitsOps[I],
+                                       /*AllowWholeUndefs*/ true,
+                                       /*AllowPartialUndefs*/ true))
       return SDValue();
 
   // If we're optimizing for size, only fold if at least one of the constants is
@@ -39263,7 +39265,9 @@ static SDValue combineX86ShufflesRecursively(
         SmallVector<APInt> RawBits;
         unsigned EltSizeInBits = RootSizeInBits / Mask.size();
         return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
-                                             RawBits);
+                                             RawBits,
+                                             /*AllowWholeUndefs*/ true,
+                                             /*AllowPartialUndefs*/ true);
       })) {
     return SDValue();
   }
@@ -42981,7 +42985,9 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
     // Detect MMX constant vectors.
     APInt UndefElts;
     SmallVector<APInt, 1> EltBits;
-    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
+    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits,
+                                      /*AllowWholeUndefs*/ true,
+                                      /*AllowPartialUndefs*/ true)) {
       SDLoc DL(N0);
       // Handle zero-extension of i32 with MOVD.
       if (EltBits[0].countl_zero() >= 32)
@@ -47305,8 +47311,12 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
   SmallVector<APInt, 32> EltBits0, EltBits1;
   if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
       (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
-      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
-      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
+      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0,
+                                    /*AllowWholeUndefs*/ true,
+                                    /*AllowPartialUndefs*/ true) &&
+      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1,
+                                    /*AllowWholeUndefs*/ true,
+                                    /*AllowPartialUndefs*/ true)) {
     unsigned NumLanes = VT.getSizeInBits() / 128;
     unsigned NumSrcElts = NumDstElts / 2;
     unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
@@ -47613,7 +47623,9 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
   auto TryConstantFold = [&](SDValue V) {
     APInt UndefElts;
     SmallVector<APInt, 32> EltBits;
-    if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits))
+    if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits,
+                                       /*AllowWholeUndefs*/ true,
+                                       /*AllowPartialUndefs*/ true))
       return SDValue();
     assert(EltBits.size() == VT.getVectorNumElements() &&
            "Unexpected shift value type");
@@ -52233,8 +52245,12 @@ static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
   // Constant Folding
   APInt Undefs0, Undefs1;
   SmallVector<APInt> EltBits0, EltBits1;
-  if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
-    if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
+  if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0,
+                                    /*AllowWholeUndefs*/ true,
+                                    /*AllowPartialUndefs*/ true)) {
+    if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1,
+                                      /*AllowWholeUndefs*/ true,
+                                      /*AllowPartialUndefs*/ true)) {
       SmallVector<APInt> ResultBits;
       for (int I = 0; I != NumElts; ++I)
         ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
@@ -53175,7 +53191,9 @@ static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
   // Perform constant folding.
   APInt UndefElts;
   SmallVector<APInt, 32> EltBits;
-  if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
+  if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits,
+                                    /*AllowWholeUndefs*/ true,
+                                    /*AllowPartialUndefs*/ true)) {
     APInt Imm(32, 0);
     for (unsigned Idx = 0; Idx != NumElts; ++Idx)
       if (!UndefElts[Idx] && EltBits[Idx].isNegative())
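
Note: the standalone sketch below is not LLVM code; it only illustrates the pattern this patch applies at every call site. Assuming the declaration's trailing flags default to true (as the /*AllowWholeUndefs*/ true, /*AllowPartialUndefs*/ true argument comments indicate), each hunk is behavior-preserving: it spells out the defaults so the intended undef handling stays visible at the call site if the defaults ever change. The names extractConstantBits and demo are hypothetical stand-ins for getTargetConstantBitsFromNode and its callers.

#include <cstdint>
#include <vector>

// Stand-in for getTargetConstantBitsFromNode: pretends every element is a
// constant; the two trailing flags mirror AllowWholeUndefs/AllowPartialUndefs.
static bool extractConstantBits(const std::vector<uint64_t> &Node,
                                unsigned EltSizeInBits,
                                std::vector<bool> &UndefElts,
                                std::vector<uint64_t> &EltBits,
                                bool AllowWholeUndefs = true,
                                bool AllowPartialUndefs = true) {
  (void)EltSizeInBits;
  (void)AllowWholeUndefs;
  (void)AllowPartialUndefs;
  UndefElts.assign(Node.size(), false);
  EltBits = Node;
  return !Node.empty();
}

void demo(const std::vector<uint64_t> &Node) {
  std::vector<bool> UndefElts;
  std::vector<uint64_t> EltBits;
  // Before the patch: both flags are implied by the declaration's defaults.
  (void)extractConstantBits(Node, 64, UndefElts, EltBits);
  // After the patch: identical behavior, but the undef policy is explicit at
  // the call site, in the same style as the diff above.
  (void)extractConstantBits(Node, 64, UndefElts, EltBits,
                            /*AllowWholeUndefs=*/true,
                            /*AllowPartialUndefs=*/true);
}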