diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index e2671734d09350..51bdbb6206a8d2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1076,6 +1076,87 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
   return nullptr;
 }
 
+struct IntPart {
+  Value *From;
+  unsigned StartBit;
+  unsigned NumBits;
+};
+
+/// Match an extraction of bits from an integer.
+static Optional<IntPart> matchIntPart(Value *V) {
+  Value *X;
+  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
+    return None;
+
+  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
+  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
+  Value *Y;
+  const APInt *Shift;
+  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
+  // from Y, not any shifted-in zeroes.
+  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
+      Shift->ule(NumOriginalBits - NumExtractedBits))
+    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
+  return {{X, 0, NumExtractedBits}};
+}
+
+/// Materialize an extraction of bits from an integer in IR.
+static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
+  Value *V = P.From;
+  if (P.StartBit)
+    V = Builder.CreateLShr(V, P.StartBit);
+  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
+  if (TruncTy != V->getType())
+    V = Builder.CreateTrunc(V, TruncTy);
+  return V;
+}
+
+/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
+/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
+/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
+static Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
+                            InstCombiner::BuilderTy &Builder) {
+  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
+    return nullptr;
+
+  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
+  if (Cmp0->getPredicate() != Pred || Cmp1->getPredicate() != Pred)
+    return nullptr;
+
+  Optional<IntPart> L0 = matchIntPart(Cmp0->getOperand(0));
+  Optional<IntPart> R0 = matchIntPart(Cmp0->getOperand(1));
+  Optional<IntPart> L1 = matchIntPart(Cmp1->getOperand(0));
+  Optional<IntPart> R1 = matchIntPart(Cmp1->getOperand(1));
+  if (!L0 || !R0 || !L1 || !R1)
+    return nullptr;
+
+  // Make sure the LHS/RHS compare a part of the same value, possibly after
+  // an operand swap.
+  if (L0->From != L1->From || R0->From != R1->From) {
+    if (L0->From != R1->From || R0->From != L1->From)
+      return nullptr;
+    std::swap(L1, R1);
+  }
+
+  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
+  // the low part and L1/R1 being the high part.
+  if (L0->StartBit + L0->NumBits != L1->StartBit ||
+      R0->StartBit + R0->NumBits != R1->StartBit) {
+    if (L1->StartBit + L1->NumBits != L0->StartBit ||
+        R1->StartBit + R1->NumBits != R0->StartBit)
+      return nullptr;
+    std::swap(L0, L1);
+    std::swap(R0, R1);
+  }
+
+  // We can simplify to a comparison of these larger parts of the integers.
+  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
+  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
+  Value *LValue = extractIntPart(L, Builder);
+  Value *RValue = extractIntPart(R, Builder);
+  return Builder.CreateICmp(Pred, LValue, RValue);
+}
+
 /// Reduce logic-of-compares with equality to a constant by substituting a
 /// common operand with the constant. Callers are expected to call this with
 /// Cmp0/Cmp1 switched to handle logic op commutativity.
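Note (not part of the patch): a minimal C++ sketch of the kind of source-level pattern the new foldEqOfParts() transform targets; the function and variable names below are illustrative only. Two adjacent single-byte compares joined by a bitwise AND lower to the (icmp eq) & (icmp eq) form matched above, and with this fold InstCombine can rewrite them as one 16-bit compare, as the @eq_10 test further down shows.

#include <cstdint>

// Illustrative only -- compares byte 0 and byte 1 of a and b separately.
// The bitwise '&' keeps both compares as a single 'and i1' of two icmps,
// matching the (icmp eq X0, Y0) & (icmp eq X1, Y1) pattern handled above.
bool low_two_bytes_equal(uint32_t a, uint32_t b) {
  bool byte0 = (uint8_t)a == (uint8_t)b;
  bool byte1 = (uint8_t)(a >> 8) == (uint8_t)(b >> 8);
  return byte0 & byte1; // foldable to: (uint16_t)a == (uint16_t)b
}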
@@ -1181,6 +1262,9 @@ Value *InstCombinerImpl::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
           foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/true, Q, Builder))
     return X;
 
+  if (Value *X = foldEqOfParts(LHS, RHS, /*IsAnd=*/true, Builder))
+    return X;
+
   // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
   Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
 
@@ -2411,6 +2495,9 @@ Value *InstCombinerImpl::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
           foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/false, Q, Builder))
     return X;
 
+  if (Value *X = foldEqOfParts(LHS, RHS, /*IsAnd=*/false, Builder))
+    return X;
+
   // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
   // TODO: Remove this when foldLogOpOfMaskedICmps can handle vectors.
   if (PredL == ICmpInst::ICMP_NE && match(LHS1, m_Zero()) &&
diff --git a/llvm/test/Transforms/InstCombine/eq-of-parts.ll b/llvm/test/Transforms/InstCombine/eq-of-parts.ll
index 5e8d70914dc87b..3e7ac275e38478 100644
--- a/llvm/test/Transforms/InstCombine/eq-of-parts.ll
+++ b/llvm/test/Transforms/InstCombine/eq-of-parts.ll
@@ -10,16 +10,10 @@ declare void @use.i1(i1)
 
 define i1 @eq_10(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_10(
-; CHECK-NEXT:    [[X_0:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[Y_0:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[C_0:%.*]] = icmp eq i8 [[X_0]], [[Y_0]]
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_10:%.*]] = and i1 [[C_0]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_10]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[Y:%.*]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i16 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[TMP3]]
 ;
   %x.0 = trunc i32 %x to i8
   %x.321 = lshr i32 %x, 8
@@ -35,22 +29,10 @@ define i1 @eq_10(i32 %x, i32 %y) {
 
 define i1 @eq_210(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_210(
-; CHECK-NEXT:    [[X_0:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_0:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_0:%.*]] = icmp eq i8 [[X_0]], [[Y_0]]
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_10:%.*]] = and i1 [[C_0]], [[C_1]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_10]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i24
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[Y:%.*]] to i24
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i24 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[TMP3]]
 ;
   %x.0 = trunc i32 %x to i8
   %x.321 = lshr i32 %x, 8
@@ -72,28 +54,8 @@ define i1 @eq_210(i32 %x, i32 %y) {
 
 define i1 @eq_3210(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_3210(
-; CHECK-NEXT:    [[X_0:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[X_3_EXT:%.*]] = lshr i32 [[X]], 24
-; CHECK-NEXT:    [[X_3:%.*]] = trunc i32 [[X_3_EXT]] to i8
-; CHECK-NEXT:    [[Y_0:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[Y_3_EXT:%.*]] = lshr i32 [[Y]], 24
-; CHECK-NEXT:    [[Y_3:%.*]] = trunc i32 [[Y_3_EXT]] to i8
-; CHECK-NEXT:    [[C_0:%.*]] = icmp eq i8 [[X_0]], [[Y_0]]
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_3:%.*]] = icmp eq i8 [[X_3]], [[Y_3]]
-; CHECK-NEXT:    [[C_10:%.*]] = and i1 [[C_0]], [[C_1]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_10]]
-; CHECK-NEXT:    [[C_3210:%.*]] = and i1 [[C_3]], [[C_210]]
-; CHECK-NEXT:    ret i1 [[C_3210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %x.0 = trunc i32 %x to i8
   %x.321 = lshr i32 %x, 8
@@ -121,18 +83,12 @@ define i1 @eq_3210(i32 %x, i32 %y) {
 
 define i1 @eq_21(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_21(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -152,18 +108,12 @@ define i1 @eq_21(i32 %x, i32 %y) {
 
 define i1 @eq_21_comm_and(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_21_comm_and(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_1]], [[C_2]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -181,18 +131,12 @@ define i1 @eq_21_comm_and(i32 %x, i32 %y) {
 
 define i1 @eq_21_comm_eq(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_21_comm_eq(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i8 [[Y_2]], [[X_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -210,18 +154,12 @@ define i1 @eq_21_comm_eq(i32 %x, i32 %y) {
 
 define i1 @eq_21_comm_eq2(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_21_comm_eq2(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[Y_1]], [[X_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -241,18 +179,12 @@ define i1 @eq_21_comm_eq2(i32 %x, i32 %y) {
 
 define <2 x i1> @eq_21_vector(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @eq_21_vector(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 8, i32 8>
-; CHECK-NEXT:    [[X_1:%.*]] = trunc <2 x i32> [[X_321]] to <2 x i8>
-; CHECK-NEXT:    [[X_32:%.*]] = lshr <2 x i32> [[X]], <i32 16, i32 16>
-; CHECK-NEXT:    [[X_2:%.*]] = trunc <2 x i32> [[X_32]] to <2 x i8>
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr <2 x i32> [[Y:%.*]], <i32 8, i32 8>
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc <2 x i32> [[Y_321]] to <2 x i8>
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr <2 x i32> [[Y]], <i32 16, i32 16>
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc <2 x i32> [[Y_32]] to <2 x i8>
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq <2 x i8> [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq <2 x i8> [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and <2 x i1> [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret <2 x i1> [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 8, i32 8>
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr <2 x i32> [[Y:%.*]], <i32 8, i32 8>
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc <2 x i32> [[TMP3]] to <2 x i16>
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq <2 x i16> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret <2 x i1> [[TMP5]]
 ;
   %x.321 = lshr <2 x i32> %x, <i32 8, i32 8>
   %x.1 = trunc <2 x i32> %x.321 to <2 x i8>
@@ -273,18 +205,12 @@ define <2 x i1> @eq_21_vector(<2 x i32> %x, <2 x i32> %y) {
 
 define i1 @eq_irregular_bit_widths(i31 %x, i31 %y) {
 ; CHECK-LABEL: @eq_irregular_bit_widths(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i31 [[X:%.*]], 7
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i31 [[X_321]] to i6
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i31 [[X]], 13
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i31 [[X_32]] to i5
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i31 [[Y:%.*]], 7
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i31 [[Y_321]] to i6
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i31 [[Y]], 13
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i31 [[Y_32]] to i5
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i6 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i5 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i31 [[X:%.*]], 7
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i31 [[TMP1]] to i11
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i31 [[Y:%.*]], 7
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i31 [[TMP3]] to i11
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i11 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i31 %x, 7
   %x.1 = trunc i31 %x.321 to i6
@@ -724,16 +650,10 @@ define i1 @eq_21_wrong_pred2(i32 %x, i32 %y) {
 
 define i1 @ne_10(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_10(
-; CHECK-NEXT:    [[X_0:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[Y_0:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[C_0:%.*]] = icmp ne i8 [[X_0]], [[Y_0]]
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_10:%.*]] = or i1 [[C_0]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_10]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[Y:%.*]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i16 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[TMP3]]
 ;
   %x.0 = trunc i32 %x to i8
   %x.321 = lshr i32 %x, 8
@@ -749,22 +669,10 @@ define i1 @ne_10(i32 %x, i32 %y) {
 
 define i1 @ne_210(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_210(
-; CHECK-NEXT:    [[X_0:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_0:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_0:%.*]] = icmp ne i8 [[X_0]], [[Y_0]]
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_10:%.*]] = or i1 [[C_0]], [[C_1]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_10]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i24
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[Y:%.*]] to i24
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i24 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i1 [[TMP3]]
 ;
   %x.0 = trunc i32 %x to i8
   %x.321 = lshr i32 %x, 8
@@ -786,28 +694,8 @@ define i1 @ne_210(i32 %x, i32 %y) {
 
 define i1 @ne_3210(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_3210(
-; CHECK-NEXT:    [[X_0:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[X_3_EXT:%.*]] = lshr i32 [[X]], 24
-; CHECK-NEXT:    [[X_3:%.*]] = trunc i32 [[X_3_EXT]] to i8
-; CHECK-NEXT:    [[Y_0:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[Y_3_EXT:%.*]] = lshr i32 [[Y]], 24
-; CHECK-NEXT:    [[Y_3:%.*]] = trunc i32 [[Y_3_EXT]] to i8
-; CHECK-NEXT:    [[C_0:%.*]] = icmp ne i8 [[X_0]], [[Y_0]]
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_3:%.*]] = icmp ne i8 [[X_3]], [[Y_3]]
-; CHECK-NEXT:    [[C_10:%.*]] = or i1 [[C_0]], [[C_1]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_10]]
-; CHECK-NEXT:    [[C_3210:%.*]] = or i1 [[C_3]], [[C_210]]
-; CHECK-NEXT:    ret i1 [[C_3210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %x.0 = trunc i32 %x to i8
   %x.321 = lshr i32 %x, 8
@@ -835,18 +723,12 @@ define i1 @ne_3210(i32 %x, i32 %y) {
 
 define i1 @ne_21(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_21(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -866,18 +748,12 @@ define i1 @ne_21(i32 %x, i32 %y) {
 
 define i1 @ne_21_comm_or(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_21_comm_or(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_1]], [[C_2]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -895,18 +771,12 @@ define i1 @ne_21_comm_or(i32 %x, i32 %y) {
 
 define i1 @ne_21_comm_ne(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_21_comm_ne(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i8 [[Y_2]], [[X_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -924,18 +794,12 @@ define i1 @ne_21_comm_ne(i32 %x, i32 %y) {
 
 define i1 @ne_21_comm_ne2(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_21_comm_ne2(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i8
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i8
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[Y_1]], [[X_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i8 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[Y:%.*]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i16 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i32 %x, 8
   %x.1 = trunc i32 %x.321 to i8
@@ -955,18 +819,12 @@ define i1 @ne_21_comm_ne2(i32 %x, i32 %y) {
 
 define <2 x i1> @ne_21_vector(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @ne_21_vector(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 8, i32 8>
-; CHECK-NEXT:    [[X_1:%.*]] = trunc <2 x i32> [[X_321]] to <2 x i8>
-; CHECK-NEXT:    [[X_32:%.*]] = lshr <2 x i32> [[X]], <i32 16, i32 16>
-; CHECK-NEXT:    [[X_2:%.*]] = trunc <2 x i32> [[X_32]] to <2 x i8>
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr <2 x i32> [[Y:%.*]], <i32 8, i32 8>
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc <2 x i32> [[Y_321]] to <2 x i8>
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr <2 x i32> [[Y]], <i32 16, i32 16>
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc <2 x i32> [[Y_32]] to <2 x i8>
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne <2 x i8> [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne <2 x i8> [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or <2 x i1> [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret <2 x i1> [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 8, i32 8>
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr <2 x i32> [[Y:%.*]], <i32 8, i32 8>
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc <2 x i32> [[TMP3]] to <2 x i16>
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne <2 x i16> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret <2 x i1> [[TMP5]]
 ;
   %x.321 = lshr <2 x i32> %x, <i32 8, i32 8>
   %x.1 = trunc <2 x i32> %x.321 to <2 x i8>
@@ -987,18 +845,12 @@ define <2 x i1> @ne_21_vector(<2 x i32> %x, <2 x i32> %y) {
 
 define i1 @ne_irregular_bit_widths(i31 %x, i31 %y) {
 ; CHECK-LABEL: @ne_irregular_bit_widths(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i31 [[X:%.*]], 7
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i31 [[X_321]] to i6
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i31 [[X]], 13
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i31 [[X_32]] to i5
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i31 [[Y:%.*]], 7
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i31 [[Y_321]] to i6
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i31 [[Y]], 13
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i31 [[Y_32]] to i5
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i6 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i5 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_1]]
-; CHECK-NEXT:    ret i1 [[C_210]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i31 [[X:%.*]], 7
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i31 [[TMP1]] to i11
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i31 [[Y:%.*]], 7
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i31 [[TMP3]] to i11
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i11 [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
   %x.321 = lshr i31 %x, 7
   %x.1 = trunc i31 %x.321 to i6