diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index c42f113feca37..944e7c4b13255 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1983,6 +1983,10 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     return SelectInst::Create(NewICmpInst, X, ConstantInt::getNullValue(Ty));
   }
 
+  // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
+  if (sinkNotIntoOtherHandOfAnd(I))
+    return &I;
+
   return nullptr;
 }
 
@@ -3089,6 +3093,45 @@ static Instruction *sinkNotIntoXor(BinaryOperator &I,
   return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
 }
 
+// Transform
+//   z = (~x) & y
+// into:
+//   z = ~(x | (~y))
+// iff y is free to invert and all uses of z can be freely updated.
+bool InstCombinerImpl::sinkNotIntoOtherHandOfAnd(BinaryOperator &I) {
+  Instruction::BinaryOps NewOpc;
+  switch (I.getOpcode()) {
+  case Instruction::And:
+    NewOpc = Instruction::Or;
+    break;
+  default:
+    return false;
+  }
+
+  Value *X, *Y;
+  if (!match(&I, m_c_BinOp(m_Not(m_Value(X)), m_Value(Y))))
+    return false;
+
+  // Will we be able to fold the `not` into Y eventually?
+  if (!InstCombiner::isFreeToInvert(Y, Y->hasOneUse()))
+    return false;
+
+  // And can our users be adapted?
+  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
+    return false;
+
+  Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
+  Value *NewBinOp =
+      BinaryOperator::Create(NewOpc, X, NotY, I.getName() + ".not");
+  Builder.Insert(NewBinOp);
+  replaceInstUsesWith(I, NewBinOp);
+  // We cannot just create an outer `not`: it would most likely be immediately
+  // folded back, reconstructing our initial pattern and causing an
+  // infinite combine loop, so immediately manually fold it away.
+  freelyInvertAllUsersOf(NewBinOp);
+  return true;
+}
+
 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
 // here. We should standardize that construct where it is needed or choose some
 // other way to ensure that commutated variants of patterns are not missed.
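For intuition: the new fold is De Morgan's law read in the direction that sinks the `not`, (~x) & y == ~(x | (~y)), and it is profitable exactly when both inversions then vanish (y is free to invert, and every user of z can absorb an inversion). A minimal standalone C++ sketch, not part of the patch, that exhaustively checks the underlying bitwise identity on all 8-bit operand pairs:

#include <cassert>
#include <cstdint>

int main() {
  // Verify (~x) & y == ~(x | (~y)) for every pair of 8-bit values.
  for (unsigned x = 0; x < 256; ++x) {
    for (unsigned y = 0; y < 256; ++y) {
      uint8_t lhs = static_cast<uint8_t>(~x & y);
      uint8_t rhs = static_cast<uint8_t>(~(x | ~y));
      assert(lhs == rhs && "De Morgan rewrite must be an identity");
    }
  }
  return 0;
}

The identity itself is unconditional; the isFreeToInvert / canFreelyInvertAllUsersOf checks in the patch only gate whether applying it actually removes inversions rather than shuffling them around.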
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 16bc26520c189..c56b31bd227b8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -105,6 +105,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
   Instruction *visitAnd(BinaryOperator &I);
   Instruction *visitOr(BinaryOperator &I);
+  bool sinkNotIntoOtherHandOfAnd(BinaryOperator &I);
   Instruction *visitXor(BinaryOperator &I);
   Instruction *visitShl(BinaryOperator &I);
   Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
index 14c889b23e39c..17c37c836bb38 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-another-hand-of-and.ll
@@ -12,10 +12,9 @@ declare void @use1(i1)
 ; Most basic positive test
 define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT:    [[I3:%.*]] = and i1 [[I1]], [[I2]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or i1 [[I1]], [[I0:%.*]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[TMP1]], i32 [[V3:%.*]], i32 [[V2:%.*]]
 ; CHECK-NEXT:    ret i32 [[I4]]
 ;
   %i1 = icmp eq i32 %v0, %v1
@@ -27,11 +26,10 @@ define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
 define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
 ; CHECK-LABEL: @t1(
 ; CHECK-NEXT:    [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT:    [[I1:%.*]] = icmp eq i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT:    [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
 ; CHECK-NEXT:    call void @use1(i1 [[I0]])
-; CHECK-NEXT:    [[I2:%.*]] = xor i1 [[I0]], true
-; CHECK-NEXT:    [[I3:%.*]] = and i1 [[I1]], [[I2]]
-; CHECK-NEXT:    [[I4:%.*]] = select i1 [[I3]], i32 [[V4:%.*]], i32 [[V5:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or i1 [[I0]], [[I1]]
+; CHECK-NEXT:    [[I4:%.*]] = select i1 [[TMP1]], i32 [[V5:%.*]], i32 [[V4:%.*]]
 ; CHECK-NEXT:    ret i32 [[I4]]
 ;
   %i0 = icmp eq i32 %v0, %v1
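The updated CHECK lines show where the two inversions go: the icmp eq (the free-to-invert hand) flips to icmp ne, the and plus explicit xor ..., true collapse into a single or, and the select user absorbs the outer not by swapping its arms. A standalone C++ sketch, not part of the patch, mirroring @t0's before/after shapes and checking they agree on all boolean inputs (function names and the 10/20 arm values are mine, purely illustrative):

#include <cassert>

int before(bool i0, int v0, int v1, int v2, int v3) {
  bool i1 = (v0 == v1); // icmp eq
  bool i3 = i1 && !i0;  // (~i0) & i1
  return i3 ? v2 : v3;  // select i1 %i3, v2, v3
}

int after(bool i0, int v0, int v1, int v2, int v3) {
  bool i1 = (v0 != v1); // icmp ne: the free inversion of Y
  bool t = i1 || i0;    // inverted condition, expressed as an 'or'
  return t ? v3 : v2;   // select arms swapped to absorb the outer not
}

int main() {
  for (int i0 = 0; i0 <= 1; ++i0)
    for (int v0 = 0; v0 <= 1; ++v0)
      for (int v1 = 0; v1 <= 1; ++v1)
        assert(before(i0, v0, v1, 10, 20) == after(i0, v0, v1, 10, 20));
  return 0;
}

Net effect in both test functions: one instruction fewer, with no xor/not left in the output.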