diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 1de3b87d4a2994..8e9d4f7d12ecfb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3075,6 +3075,39 @@ static Instruction *sinkNotIntoXor(BinaryOperator &I,
   return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
 }
 
+/// Canonicalize a shifty way to code absolute value to the more common pattern
+/// that uses negation and select.
+static Instruction *canonicalizeAbs(BinaryOperator &Xor,
+                                    InstCombiner::BuilderTy &Builder) {
+  assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");
+
+  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
+  // We're relying on the fact that we only do this transform when the shift has
+  // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
+  // instructions).
+  Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1);
+  if (Op0->hasNUses(2))
+    std::swap(Op0, Op1);
+
+  Type *Ty = Xor.getType();
+  Value *A;
+  const APInt *ShAmt;
+  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
+      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
+      match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
+    // Op1 = ashr i32 A, 31   ; smear the sign bit
+    // xor (add A, Op1), Op1  ; add -1 and flip bits if negative
+    // --> (A < 0) ? -A : A
+    Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
+    // Copy the nuw/nsw flags from the add to the negate.
+    auto *Add = cast<BinaryOperator>(Op0);
+    Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
+                                   Add->hasNoSignedWrap());
+    return SelectInst::Create(Cmp, Neg, A);
+  }
+  return nullptr;
+}
+
 // Transform
 // z = (~x) &/| y
 // into:
@@ -3413,29 +3446,6 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
     return CastedXor;
 
-  // Canonicalize a shifty way to code absolute value to the common pattern.
-  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
-  // We're relying on the fact that we only do this transform when the shift has
-  // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
-  // instructions).
-  if (Op0->hasNUses(2))
-    std::swap(Op0, Op1);
-
-  const APInt *ShAmt;
-  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
-      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
-      match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
-    // B = ashr i32 A, 31   ; smear the sign bit
-    // xor (add A, B), B    ; add -1 and flip bits if negative
-    // --> (A < 0) ? -A : A
-    Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
-    // Copy the nuw/nsw flags from the add to the negate.
-    auto *Add = cast<BinaryOperator>(Op0);
-    Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
-                                   Add->hasNoSignedWrap());
-    return SelectInst::Create(Cmp, Neg, A);
-  }
-
   // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
   //
   // %notx = xor i32 %x, -1
@@ -3512,6 +3522,9 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *NewXor = sinkNotIntoXor(I, Builder))
     return NewXor;
 
+  if (Instruction *Abs = canonicalizeAbs(I, Builder))
+    return Abs;
+
   // Otherwise, if all else failed, try to hoist the xor-by-constant:
   // (X ^ C) ^ Y --> (X ^ Y) ^ C
   // Just like we do in other places, we completely avoid the fold
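
For reference, a minimal before/after sketch of the transform in LLVM IR (illustrative only, not part of the patch; the value names and the i32 width are assumptions):

  ; before: the shift-based abs idiom (shift amount is bitwidth - 1)
  %s = ashr i32 %x, 31                  ; 0 if %x is non-negative, -1 if negative
  %a = add i32 %x, %s                   ; subtract 1 when negative
  %r = xor i32 %a, %s                   ; flip bits when negative --> |%x|

  ; after: the canonical compare/negate/select form
  %cmp = icmp slt i32 %x, 0
  %neg = sub i32 0, %x                  ; nuw/nsw copied from the add, if present
  %r   = select i1 %cmp, i32 %neg, i32 %x

The helper fires only when the ashr has exactly two uses (the add and the xor) and the add has exactly one use (the xor), so the rewrite replaces the three-instruction idiom with three instructions and never increases the instruction count.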