diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fc6ab0630cb38..64057de11af57 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40532,22 +40532,19 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
   unsigned NumOps = Ops.size();
 
   // Extract constant bits from each source op.
-  bool OneUseConstantOp = false;
   SmallVector<APInt, 16> UndefEltsOps(NumOps);
   SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
-  for (unsigned i = 0; i != NumOps; ++i) {
-    SDValue SrcOp = Ops[i];
-    OneUseConstantOp |= SrcOp.hasOneUse();
-    if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
-                                       RawBitsOps[i]))
+  for (unsigned I = 0; I != NumOps; ++I)
+    if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
+                                       RawBitsOps[I]))
       return SDValue();
-  }
 
   // If we're optimizing for size, only fold if at least one of the constants is
   // only used once or the combined shuffle has included a variable mask
   // shuffle, this is to avoid constant pool bloat.
   bool IsOptimizingSize = DAG.shouldOptForSize();
-  if (IsOptimizingSize && !OneUseConstantOp && !HasVariableMask)
+  if (IsOptimizingSize && !HasVariableMask &&
+      llvm::none_of(Ops, [](SDValue SrcOp) { return SrcOp->hasOneUse(); }))
     return SDValue();
 
   // Shuffle the constant bits according to the mask.