[InstCombine] Fold unfolded masked merge pattern with variable mask!
Summary:
Finally fixes [[ https://bugs.llvm.org/show_bug.cgi?id=6773 | PR6773 ]].

Now that the backend is all done, we can finally fold it!

The canonical unfolded masked merge pattern is
```(x & m) | (y & ~m)```
There is a second, equivalent variant:
```(x | ~m) & (y | m)```
Only one of the two (the or-of-and's, I believe) is the canonical form.
If the mask is not a constant, we should fold either variant to:
```((x ^ y) & m) ^ y```

https://rise4fun.com/Alive/ndQw
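
As a quick sanity check alongside the Alive proof, here is a minimal standalone C++ sketch (illustrative only, not part of this commit) that exhaustively verifies the identity for 8-bit integers:

```cpp
// Standalone check: for every 8-bit x, y, m, both unfolded masked-merge
// variants must equal the folded form ((x ^ y) & m) ^ y that InstCombine
// now produces.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned y = 0; y < 256; ++y)
      for (unsigned m = 0; m < 256; ++m) {
        uint8_t Folded   = ((x ^ y) & m) ^ y;    // ((x ^ y) & m) ^ y
        uint8_t OrOfAnds = (x & m) | (y & ~m);   // (x & m) | (y & ~m)
        uint8_t AndOfOrs = (x | ~m) & (y | m);   // (x | ~m) & (y | m)
        assert(Folded == OrOfAnds && "or-of-and's variant mismatch");
        assert(Folded == AndOfOrs && "and-of-or's variant mismatch");
      }
  std::printf("masked-merge identity holds for all 8-bit inputs\n");
}
```

The folded form needs one fewer IR instruction than either unfolded variant and, in particular, does not require materializing ~m.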

Reviewers: spatel, craig.topper

Reviewed By: spatel

Subscribers: nicholas, RKSimon, llvm-commits

Differential Revision: https://reviews.llvm.org/D46814

llvm-svn: 333106
LebedevRI committed May 23, 2018
1 parent 356d606 commit 6b6c553
Showing 7 changed files with 168 additions and 175 deletions.
36 changes: 36 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -748,6 +748,9 @@ static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
return nullptr;
}

static Instruction *foldMaskedMerge(BinaryOperator &I,
InstCombiner::BuilderTy &Builder);

/// Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
@@ -1648,6 +1651,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
A->getType()->isIntOrIntVectorTy(1))
return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType()));

if (Instruction *MM = foldMaskedMerge(I, Builder))
return MM;

return Changed ? &I : nullptr;
}

@@ -2287,6 +2293,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
}

if (Instruction *MM = foldMaskedMerge(I, Builder))
return MM;

return Changed ? &I : nullptr;
}

@@ -2422,6 +2431,33 @@ Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
return nullptr;
}

/// Bitwise masked merge (bitwise select) is typically coded as:
/// (x & m) | (y & ~m)
/// Another variant is:
/// (x | ~m) & (y | m)
/// Canonicalize those to a form with one less IR instruction:
/// ((x ^ y) & m) ^ y
static Instruction *foldMaskedMerge(BinaryOperator &I,
InstCombiner::BuilderTy &Builder) {
Value *X, *Y;

Value *M;
if (match(&I, m_c_Or(m_OneUse(m_c_And(m_Value(Y), m_Not(m_Value(M)))),
m_OneUse(m_c_And(m_Value(X), m_Deferred(M))))) ||
match(&I, m_c_And(m_OneUse(m_c_Or(m_Value(X), m_Not(m_Value(M)))),
m_OneUse(m_c_Or(m_Value(Y), m_Deferred(M)))))) {
assert(!isa<Constant>(M) && "Shouldn't have matched a constant.");

Value *D = Builder.CreateXor(X, Y);
Value *A = Builder.CreateAnd(D, M);
return BinaryOperator::CreateXor(A, Y);
}

// FIXME: we still want to canonicalize the patterns with constants somewhat.

return nullptr;
}

/// If we have a masked merge, in the canonical form of:
/// (assuming that A only has one use.)
/// | A | |B|
7 changes: 3 additions & 4 deletions llvm/test/Transforms/InstCombine/and-or-not.ll
@@ -502,11 +502,10 @@ define i32 @xor_to_xor12(float %fa, float %fb) {

define i64 @PR32830(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @PR32830(
; CHECK-NEXT: [[NOTA:%.*]] = xor i64 [[A:%.*]], -1
; CHECK-NEXT: [[NOTB:%.*]] = xor i64 [[B:%.*]], -1
; CHECK-NEXT: [[OR1:%.*]] = or i64 [[NOTB]], [[A]]
; CHECK-NEXT: [[OR2:%.*]] = or i64 [[NOTA]], [[C:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i64 [[OR1]], [[OR2]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[NOTB]], [[C:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
; CHECK-NEXT: [[AND:%.*]] = xor i64 [[TMP2]], [[NOTB]]
; CHECK-NEXT: ret i64 [[AND]]
;
%nota = xor i64 %a, -1
70 changes: 30 additions & 40 deletions llvm/test/Transforms/InstCombine/masked-merge-add.ll
@@ -18,10 +18,9 @@

define i32 @p(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, %m
@@ -33,10 +32,9 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {

define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor <2 x i32> [[TMP2]], [[Y]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
%and = and <2 x i32> %x, %m
@@ -48,10 +46,9 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {

define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
; CHECK-LABEL: @p_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <3 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[TMP2]], [[Y]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
%and = and <3 x i32> %x, %m
@@ -182,10 +179,9 @@ declare i32 @gen32()

define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %m, %x ; swapped order
@@ -198,10 +194,9 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
define i32 @p_commutative1(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -214,10 +209,9 @@ define i32 @p_commutative1(i32 %x, i32 %m) {

define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, %m
@@ -230,10 +224,9 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
define i32 @p_commutative3(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -246,10 +239,9 @@ define i32 @p_commutative3(i32 %x, i32 %m) {

define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %m, %x ; swapped order
@@ -262,10 +254,9 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
define i32 @p_commutative5(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -279,10 +270,9 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
define i32 @p_commutative6(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
76 changes: 33 additions & 43 deletions llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll
@@ -16,10 +16,9 @@

define i32 @p(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%neg = xor i32 %m, -1
@@ -31,10 +30,9 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {

define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or <2 x i32> [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor <2 x i32> [[TMP2]], [[Y]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
%neg = xor <2 x i32> %m, <i32 -1, i32 -1>
@@ -46,10 +44,9 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {

define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
; CHECK-LABEL: @p_vec_undef(
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M:%.*]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT: [[OR:%.*]] = or <3 x i32> [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or <3 x i32> [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and <3 x i32> [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <3 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[TMP2]], [[Y]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
%neg = xor <3 x i32> %m, <i32 -1, i32 undef, i32 -1>
@@ -124,10 +121,9 @@ declare i32 @gen32()

define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%neg = xor i32 %m, -1
@@ -140,10 +136,9 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
define i32 @p_commutative1(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -156,10 +151,9 @@ define i32 @p_commutative1(i32 %x, i32 %m) {

define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%neg = xor i32 %m, -1
@@ -172,10 +166,9 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
define i32 @p_commutative3(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -188,10 +181,9 @@ define i32 @p_commutative3(i32 %x, i32 %m) {

define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%neg = xor i32 %m, -1
@@ -204,10 +196,9 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
define i32 @p_commutative5(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -221,10 +212,9 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
define i32 @p_commutative6(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y]], [[X:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@@ -259,9 +249,9 @@ declare void @use32(i32)
define i32 @n0_oneuse_of_neg_is_ok_0(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_of_neg_is_ok_0(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[TMP2]], [[Y]]
; CHECK-NEXT: call void @use32(i32 [[NEG]])
; CHECK-NEXT: ret i32 [[RET]]
;
