
[InstCombine] Resolve FIXME: Use commutative matchers #81060

Closed · wants to merge 1 commit

Conversation

AtariDreams (Contributor)

No description provided.

@llvmbot (Collaborator) commented Feb 8, 2024

@llvm/pr-subscribers-llvm-transforms

Author: AtariDreams (AtariDreams)

Changes

Patch is 26.55 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/81060.diff

2 Files Affected:

  • (modified) llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp (+20-19)
  • (modified) llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (+61-66)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index cfff5df9ff5005..1fadaac82eba92 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1516,7 +1516,7 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
   // ~B + (A + 1) --> A - B
   // (~B + A) + 1 --> A - B
   // (A + ~B) + 1 --> A - B
-  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
+  if (match(&I, m_c_BinOp(m_c_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
       match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
     return BinaryOperator::CreateSub(A, B);
 
@@ -1531,7 +1531,7 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
   {
     // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2)
     Constant *C1, *C2;
-    if (match(&I, m_c_Add(m_Add(m_Value(A), m_ImmConstant(C1)),
+    if (match(&I, m_c_Add(m_c_Add(m_Value(A), m_ImmConstant(C1)),
                           m_Sub(m_ImmConstant(C2), m_Value(B)))) &&
         (LHS->hasOneUse() || RHS->hasOneUse())) {
       Value *Sub = Builder.CreateSub(A, B);
@@ -1596,7 +1596,7 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
 
   // (add (or A, B) (and A, B)) --> (add A, B)
   // (add (and A, B) (or A, B)) --> (add A, B)
-  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
+  if (match(&I, m_c_BinOp(m_c_Or(m_Value(A), m_Value(B)),
                           m_c_And(m_Deferred(A), m_Deferred(B))))) {
     // Replacing operands in-place to preserve nuw/nsw flags.
     replaceOperand(I, 0, A);
@@ -1618,9 +1618,9 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
 
   // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A)
   // Forms all commutable operations, and simplifies ctpop -> cttz folds.
-  if (match(&I,
-            m_Add(m_OneUse(m_c_And(m_Value(A), m_OneUse(m_Neg(m_Deferred(A))))),
-                  m_AllOnes()))) {
+  if (match(&I, m_c_Add(m_OneUse(m_c_And(m_Value(A),
+                                         m_OneUse(m_Neg(m_Deferred(A))))),
+                        m_AllOnes()))) {
     Constant *AllOnes = ConstantInt::getAllOnesValue(RHS->getType());
     Value *Dec = Builder.CreateAdd(A, AllOnes);
     Value *Not = Builder.CreateXor(A, AllOnes);
@@ -1633,7 +1633,7 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
   // ((A * -C1) + A) - 1
   // (A * (1 - C1)) - 1
   if (match(&I,
-            m_c_Add(m_OneUse(m_Not(m_OneUse(m_Mul(m_Value(A), m_APInt(C1))))),
+            m_c_Add(m_OneUse(m_Not(m_OneUse(m_c_Mul(m_Value(A), m_APInt(C1))))),
                     m_Deferred(A)))) {
     Type *Ty = I.getType();
     Constant *NewMulC = ConstantInt::get(Ty, 1 - *C1);
@@ -1643,8 +1643,9 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
 
   // (A * -2**C) + B --> B - (A << C)
   const APInt *NegPow2C;
-  if (match(&I, m_c_Add(m_OneUse(m_Mul(m_Value(A), m_NegatedPower2(NegPow2C))),
-                        m_Value(B)))) {
+  if (match(&I,
+            m_c_Add(m_OneUse(m_c_Mul(m_Value(A), m_NegatedPower2(NegPow2C))),
+                    m_Value(B)))) {
     Constant *ShiftAmtC = ConstantInt::get(Ty, NegPow2C->countr_zero());
     Value *Shl = Builder.CreateShl(A, ShiftAmtC);
     return BinaryOperator::CreateSub(B, Shl);
@@ -2223,7 +2224,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
 
   // (X + -1) - Y --> ~Y + X
   Value *X, *Y;
-  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
+  if (match(Op0, m_OneUse(m_c_Add(m_Value(X), m_AllOnes()))))
     return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
 
   // Reassociate sub/add sequences to create more add instructions and
@@ -2255,7 +2256,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
     // This is done in other passes, but we want to be able to consume this
     // pattern in InstCombine so we can generate it without creating infinite
     // loops.
-    if (match(Op0, m_Add(m_Value(X), m_Value(Z))) &&
+    if (match(Op0, m_c_Add(m_Value(X), m_Value(Z))) &&
         match(Op1, m_c_Add(m_Value(Y), m_Specific(Z))))
       return BinaryOperator::CreateSub(X, Y);
 
@@ -2345,7 +2346,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
     // C2 is negative pow2 || sub nuw
     const APInt *C2, *C3;
     BinaryOperator *InnerSub;
-    if (match(Op1, m_OneUse(m_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
+    if (match(Op1, m_OneUse(m_c_And(m_BinOp(InnerSub), m_APInt(C2)))) &&
         match(InnerSub, m_Sub(m_APInt(C3), m_Value(X))) &&
         (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) {
       APInt C2AndC3 = *C2 & *C3;
@@ -2374,7 +2375,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (sub (or A, B) (and A, B)) --> (xor A, B)
   {
     Value *A, *B;
-    if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
+    if (match(Op1, m_c_And(m_Value(A), m_Value(B))) &&
         match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
       return BinaryOperator::CreateXor(A, B);
   }
@@ -2382,7 +2383,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (sub (add A, B) (or A, B)) --> (and A, B)
   {
     Value *A, *B;
-    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
+    if (match(Op0, m_c_And(m_Value(A), m_Value(B))) &&
         match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
       return BinaryOperator::CreateAnd(A, B);
   }
@@ -2390,7 +2391,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (sub (add A, B) (and A, B)) --> (or A, B)
   {
     Value *A, *B;
-    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
+    if (match(Op0, m_c_And(m_Value(A), m_Value(B))) &&
         match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
       return BinaryOperator::CreateOr(A, B);
   }
@@ -2398,7 +2399,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (sub (and A, B) (or A, B)) --> neg (xor A, B)
   {
     Value *A, *B;
-    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
+    if (match(Op0, m_c_And(m_Value(A), m_Value(B))) &&
         match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
         (Op0->hasOneUse() || Op1->hasOneUse()))
       return BinaryOperator::CreateNeg(Builder.CreateXor(A, B));
@@ -2407,7 +2408,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (sub (or A, B), (xor A, B)) --> (and A, B)
   {
     Value *A, *B;
-    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
+    if (match(Op1, m_c_Xor(m_Value(A), m_Value(B))) &&
         match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
       return BinaryOperator::CreateAnd(A, B);
   }
@@ -2415,7 +2416,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // (sub (xor A, B) (or A, B)) --> neg (and A, B)
   {
     Value *A, *B;
-    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
+    if (match(Op0, m_c_Xor(m_Value(A), m_Value(B))) &&
         match(Op1, m_c_Or(m_Specific(A), m_Specific(B))) &&
         (Op0->hasOneUse() || Op1->hasOneUse()))
       return BinaryOperator::CreateNeg(Builder.CreateAnd(A, B));
@@ -2442,7 +2443,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   {
     // (sub (and Op1, C), Op1) --> neg (and Op1, ~C)
     Constant *C;
-    if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_Constant(C))))) {
+    if (match(Op0, m_OneUse(m_c_And(m_Specific(Op1), m_Constant(C))))) {
       return BinaryOperator::CreateNeg(
           Builder.CreateAnd(Op1, Builder.CreateNot(C)));
     }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index aa3b9da924aa0b..94e61530fc7cb2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2260,9 +2260,6 @@ static Value *simplifyAndOrWithOpReplaced(Value *X, Value *Y, bool IsAnd,
   return nullptr;
 }
 
-// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
-// here. We should standardize that construct where it is needed or choose some
-// other way to ensure that commutated variants of patterns are not missed.
 Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
   Type *Ty = I.getType();
 
@@ -2331,7 +2328,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
 
   if (match(Op1, m_APInt(C))) {
     const APInt *XorC;
-    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
+    if (match(Op0, m_OneUse(m_c_Xor(m_Value(X), m_APInt(XorC))))) {
       // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
       Constant *NewC = ConstantInt::get(Ty, *C & *XorC);
       Value *And = Builder.CreateAnd(X, Op1);
@@ -2340,7 +2337,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     }
 
     const APInt *OrC;
-    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
+    if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_APInt(OrC))))) {
       // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
       // NOTE: This reduces the number of bits set in the & mask, which
       // can expose opportunities for store narrowing for scalars.
@@ -2373,7 +2370,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
       return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC));
 
     const APInt *AddC;
-    if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) {
+    if (match(Op0, m_c_Add(m_Value(X), m_APInt(AddC)))) {
       // If we are masking the result of the add down to exactly one bit and
       // the constant we are adding has no bits set below that bit, then the
       // add is flipping a single bit. Example:
@@ -2447,8 +2444,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     // This is intentionally placed after the narrowing transforms for
     // efficiency (transform directly to the narrow logic op if possible).
     // If the mask is only needed on one incoming arm, push the 'and' op up.
-    if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
-        match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
+    if (match(Op0, m_OneUse(m_c_Xor(m_Value(X), m_Value(Y)))) ||
+        match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Value(Y))))) {
       APInt NotAndMask(~(*C));
       BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
       if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
@@ -2542,8 +2539,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     }
   }
 
-  if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))),
-                      m_SignMask())) &&
+  if (match(&I, m_c_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))),
+                        m_SignMask())) &&
       match(Y, m_SpecificInt_ICMP(
                    ICmpInst::Predicate::ICMP_EQ,
                    APInt(Ty->getScalarSizeInBits(),
@@ -2593,8 +2590,9 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
       return BinaryOperator::CreateAnd(Op1, B);
 
     // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
-    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
-        match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
+    if (match(Op0, m_c_Xor(m_Value(A), m_Value(B))) &&
+        match(Op1,
+              m_c_Xor(m_c_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
       Value *NotC = Op1->hasOneUse()
                         ? Builder.CreateNot(C)
                         : getFreelyInverted(C, C->hasOneUse(), &Builder);
@@ -2603,8 +2601,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     }
 
     // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
-    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))) &&
-        match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) {
+    if (match(Op0, m_c_Xor(m_c_Xor(m_Value(A), m_Value(C)), m_Value(B))) &&
+        match(Op1, m_c_Xor(m_Specific(B), m_Specific(A)))) {
       Value *NotC = Op0->hasOneUse()
                         ? Builder.CreateNot(C)
                         : getFreelyInverted(C, C->hasOneUse(), &Builder);
@@ -2727,8 +2725,9 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
                               Constant::getNullValue(Ty));
 
   // (-1 + A) & B --> A ? 0 : B where A is 0/1.
-  if (match(&I, m_c_And(m_OneUse(m_Add(m_ZExtOrSelf(m_Value(A)), m_AllOnes())),
-                        m_Value(B)))) {
+  if (match(&I,
+            m_c_And(m_OneUse(m_c_Add(m_ZExtOrSelf(m_Value(A)), m_AllOnes())),
+                    m_Value(B)))) {
     if (A->getType()->isIntOrIntVectorTy(1))
       return SelectInst::Create(A, Constant::getNullValue(Ty), B);
     if (computeKnownBits(A, /* Depth */ 0, &I).countMaxActiveBits() <= 1) {
@@ -2883,24 +2882,24 @@ static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC,
       // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
       Value *X;
       unsigned Mask = Width - 1;
-      if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
-          match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
+      if (match(L, m_c_And(m_Value(X), m_SpecificInt(Mask))) &&
+          match(R, m_c_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
         return X;
 
       // (shl ShVal, X) | (lshr ShVal, ((-X) & (Width - 1)))
-      if (match(R, m_And(m_Neg(m_Specific(L)), m_SpecificInt(Mask))))
+      if (match(R, m_c_And(m_Neg(m_Specific(L)), m_SpecificInt(Mask))))
         return L;
 
       // Similar to above, but the shift amount may be extended after masking,
       // so return the extended value as the parameter for the intrinsic.
-      if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
-          match(R,
-                m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
-                      m_SpecificInt(Mask))))
+      if (match(L, m_ZExt(m_c_And(m_Value(X), m_SpecificInt(Mask)))) &&
+          match(R, m_And(m_Neg(m_ZExt(
+                             m_c_And(m_Specific(X), m_SpecificInt(Mask)))),
+                         m_SpecificInt(Mask))))
         return L;
 
-      if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
-          match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
+      if (match(L, m_ZExt(m_c_And(m_Value(X), m_SpecificInt(Mask)))) &&
+          match(R, m_ZExt(m_c_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
         return L;
 
       return nullptr;
@@ -3416,9 +3415,6 @@ Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
   return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
 }
 
-// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
-// here. We should standardize that construct where it is needed or choose some
-// other way to ensure that commutated variants of patterns are not missed.
 Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
   if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
@@ -3485,7 +3481,8 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
 
   Value *X, *Y;
   const APInt *CV;
-  if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
+  if (match(&I,
+            m_c_Or(m_OneUse(m_c_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
       !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) {
     // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
     // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
@@ -3495,7 +3492,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
 
   // If the operands have no common bits set:
   // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
-  if (match(&I, m_c_DisjointOr(m_OneUse(m_Mul(m_Value(X), m_Value(Y))),
+  if (match(&I, m_c_DisjointOr(m_OneUse(m_c_Mul(m_Value(X), m_Value(Y))),
                                m_Deferred(X)))) {
     Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1));
     return BinaryOperator::CreateMul(X, IncrementY);
@@ -3593,13 +3590,13 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
   }
 
   // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
-  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
-    if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
+  if (match(Op0, m_c_Xor(m_Value(A), m_Value(B))))
+    if (match(Op1, m_c_Xor(m_c_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
       return BinaryOperator::CreateOr(Op0, C);
 
   // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
-  if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
-    if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
+  if (match(Op0, m_c_Xor(m_c_Xor(m_Value(A), m_Value(C)), m_Value(B))))
+    if (match(Op1, m_c_Xor(m_Specific(B), m_Specific(A))))
       return BinaryOperator::CreateOr(Op1, C);
 
   // ((A & B) ^ C) | B -> C | B
@@ -3615,12 +3612,12 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
 
   // Canonicalize xor to the RHS.
   bool SwappedForXor = false;
-  if (match(Op0, m_Xor(m_Value(), m_Value()))) {
+  if (match(Op0, m_c_Xor(m_Value(), m_Value()))) {
     std::swap(Op0, Op1);
     SwappedForXor = true;
   }
 
-  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
+  if (match(Op1, m_c_Xor(m_Value(A), m_Value(B)))) {
     // (A | ?) | (A ^ B) --> (A | ?) | B
     // (B | ?) | (A ^ B) --> (B | ?) | A
     if (match(Op0, m_c_Or(m_Specific(A), m_Value())))
@@ -3630,8 +3627,8 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
 
     // (A & B) | (A ^ B) --> A | B
     // (B & A) | (A ^ B) --> A | B
-    if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
-        match(Op0, m_And(m_Specific(B), m_Specific(A))))
+    if (match(Op0, m_c_And(m_Specific(A), m_Specific(B))) ||
+        match(Op0, m_c_And(m_Specific(B), m_Specific(A))))
       return BinaryOperator::CreateOr(A, B);
 
     // ~A | (A ^ B) --> ~(A & B)
@@ -3690,7 +3687,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
     // TODO: Make this recursive; it's a little tricky because an arbitrary
     // number of 'or' instructions might have to be created.
     Value *X, *Y;
-    if (LHS && match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
+    if (LHS && match(Op1, m_OneUse(m_c_LogicalOr(m_Value(X), m_Value(Y))))) {
       bool IsLogical = isa<SelectInst>(Op1);
       // LHS | (X || Y) --> (LHS || X) || Y
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
@@ -3707,7 +3704,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
                                             ? Builder.CreateLogicalOr(X, Res)
                                             : Builder.CreateOr(X, Res));
     }
-    if (RHS && match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
+    if (RHS && match(Op0, m_OneUse(m_c_LogicalOr(m_Value(X), m_Value(Y))))) {
       bool IsLogical = isa<SelectInst>(Op0);
       // (X || Y) | RHS --> (X || RHS) || Y
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
@@ -3756,7 +3753,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
   // (X|C) | V --> (X|V) | C
   ConstantInt *CI;
   if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) &&
-      match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
+      match(Op0, m_c_Or(m_Value(A), m_ConstantInt(CI)))) {
     Value *Inner = Builder.CreateOr(A, Op1);
     Inner->takeName(Op0);
     return BinaryOperator::CreateOr(Inner, CI);
@@ -3795,9 +3792,9 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
     // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
     // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
     const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
-      if (match(Lhs, m_c_Xor(m_And(m_Value(A), m_Value(B)), m_Deferred(A))) &&
-          match(Rhs,
-                m_c_Xor(m_And(m_Specific(A), m_Specific(B)), m_Deferred(B)))) {
+      if (match(Lhs, m_c_Xor(m_c_And(m_Value(A), m_Value(B)), m_Deferred(A))) &&
+          match(Rhs, m_c_Xor(m_c_And(m_Specific(A), m_Specific(B)),
+                             m_Deferred(B)))) {
         return BinaryOperator::CreateXor(A, B);
       }
       return nullptr;
@@ -3874,7 +3871,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
 
   // Improve "get low bit mask up to and including bit X" pattern:
   //   (1 << X) | ((1 << X) + -1)  -->  -1 l>> (bitwidth(x) - 1 - X)
-  if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
+  if (match(&I, m_c_Or(m_c_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
             ...
[truncated]
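For context on what the patch is changing: the strict matchers in llvm/IR/PatternMatch.h (m_Add, m_And, ...) only match when the operands appear in the written order, while the commutative variants (m_c_Add, m_c_And, ...) also try the swapped order. The following is a minimal standalone sketch of that behavior; it is illustrative only, and the demo function and IR built here are assumptions, not code from this patch.

// Demonstrates m_And vs. m_c_And on an instruction whose operands are
// written in the "swapped" order. Link against LLVMCore to build.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include <cassert>

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> Builder(Ctx);
  Type *I32 = Builder.getInt32Ty();
  Function *F = Function::Create(FunctionType::get(I32, {I32, I32}, false),
                                 Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  Builder.SetInsertPoint(BB);

  Value *X = F->getArg(0), *Y = F->getArg(1);
  // Build "and i32 %y, %x" -- note the operand order.
  Value *AndYX = Builder.CreateAnd(Y, X);
  Builder.CreateRet(AndYX);

  Value *Bound = nullptr;
  // Strict matcher: operand 0 must be X, so "and %y, %x" does not match.
  bool Strict = match(AndYX, m_And(m_Specific(X), m_Value(Bound)));
  // Commutative matcher: also tries the swapped order, so it matches and
  // binds Bound to %y.
  bool Commut = match(AndYX, m_c_And(m_Specific(X), m_Value(Bound)));
  assert(!Strict && Commut && Bound == Y);
  (void)Strict; (void)Commut; (void)Bound;
  return 0;
}

Note that InstCombine separately canonicalizes commutative instructions (constants, for example, are moved to the right-hand operand), so a strict matcher that expects a constant on the right may already see every form that survives canonicalization; that is part of why each m_* to m_c_* change needs a test showing an observable difference.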


github-actions bot commented Feb 8, 2024

⚠️ We detected that you are using a GitHub private e-mail address to contribute to the repo.
Please turn off the "Keep my email addresses private" setting in your account.
See LLVM Discourse for more information.

@AtariDreams changed the title from "Resolve FIXME: Use commutative matchers" to "[InstCombine] Resolve FIXME: Use commutative matchers" on Feb 8, 2024

github-actions bot commented Feb 8, 2024

✅ With the latest revision this PR passed the C/C++ code formatter.

@AtariDreams force-pushed the cuminlative branch 2 times, most recently from bcb18c6 to 5bc6a44, on February 8, 2024 at 00:37
@AtariDreams force-pushed the cuminlative branch 7 times, most recently from 2be8ee9 to 39dfa1c, on February 8, 2024 at 00:49
@AtariDreams force-pushed the cuminlative branch 3 times, most recently from a06e68d to b5e6555, on February 8, 2024 at 01:11
@dtcxzyw (Member) commented Feb 8, 2024

Please add more tests if the motivation of this patch is to handle more commuted patterns.
If this patch is NFC, I am confused why we cannot benefit from removing some duplicate logic.
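The project's usual vehicle for demonstrating such coverage is a FileCheck regression test under llvm/test/Transforms/InstCombine. Purely as an illustration, and not the lit test format the repository uses, the standalone C++ harness sketched below (all file, function, and variable names are hypothetical, and the IR string is just one example of a commuted form) runs InstCombine over a hand-written commuted pattern to see whether it already folds without the patch:

// Feeds a commuted "(or %b, %a) - (and %a, %b)" through InstCombine and
// prints the result. Link against the standard LLVM libraries.
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include <memory>

using namespace llvm;

static const char *SrcIR = R"IR(
define i8 @src(i8 %a, i8 %b) {
  %or = or i8 %b, %a
  %and = and i8 %a, %b
  %sub = sub i8 %or, %and
  ret i8 %sub
}
)IR";

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(SrcIR, Err, Ctx);
  if (!M) {
    Err.print("commuted-check", errs());
    return 1;
  }

  // Minimal new-pass-manager setup so InstCombine's analyses are available.
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PassBuilder PB;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  FunctionPassManager FPM;
  FPM.addPass(InstCombinePass());
  FPM.run(*M->getFunction("src"), FAM);

  // Print the transformed function; a single "xor" body would mean the
  // commuted form was already handled before the patch.
  M->getFunction("src")->print(outs());
  return 0;
}

If the printed body is already a single xor before applying the patch, the corresponding m_* to m_c_* change is not observable for that pattern and would need a different motivating test.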

dtcxzyw added a commit to dtcxzyw/llvm-opt-benchmark that referenced this pull request Feb 8, 2024
@nikic (Contributor) left a comment

Each change should be submitted separately, with appropriate test coverage. Do not submit changes for which you cannot demonstrate test changes. While some of it may be right, most of your changes in here are nonsensical.

@nikic closed this Feb 8, 2024