diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 92cb79d54afc2..98cc0e5037698 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -345,7 +345,7 @@ template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
 
 /// This helper class is used to match constant scalars, vector splats,
 /// and fixed width vectors that satisfy a specified predicate.
-/// For fixed width vector constants, undefined elements are ignored.
+/// For fixed width vector constants, poison elements are ignored.
 template <typename Predicate, typename ConstantVal>
 struct cstval_pred_ty : public Predicate {
   template <typename ITy> bool match(ITy *V) {
@@ -364,19 +364,19 @@ struct cstval_pred_ty : public Predicate {
         // Non-splat vector constant: check each element for a match.
         unsigned NumElts = FVTy->getNumElements();
         assert(NumElts != 0 && "Constant vector with no elements?");
-        bool HasNonUndefElements = false;
+        bool HasNonPoisonElements = false;
         for (unsigned i = 0; i != NumElts; ++i) {
           Constant *Elt = C->getAggregateElement(i);
           if (!Elt)
             return false;
-          if (isa<UndefValue>(Elt))
+          if (isa<PoisonValue>(Elt))
            continue;
          auto *CV = dyn_cast<ConstantVal>(Elt);
          if (!CV || !this->isValue(CV->getValue()))
            return false;
-          HasNonUndefElements = true;
+          HasNonPoisonElements = true;
        }
-        return HasNonUndefElements;
+        return HasNonPoisonElements;
      }
    }
    return false;
@@ -2587,31 +2587,6 @@ m_Not(const ValTy &V) {
   return m_c_Xor(m_AllOnes(), V);
 }
 
-template <typename ValTy> struct NotForbidUndef_match {
-  ValTy Val;
-  NotForbidUndef_match(const ValTy &V) : Val(V) {}
-
-  template <typename OpTy> bool match(OpTy *V) {
-    // We do not use m_c_Xor because that could match an arbitrary APInt that is
-    // not -1 as C and then fail to match the other operand if it is -1.
-    // This code should still work even when both operands are constants.
-    Value *X;
-    const APInt *C;
-    if (m_Xor(m_Value(X), m_APIntForbidUndef(C)).match(V) && C->isAllOnes())
-      return Val.match(X);
-    if (m_Xor(m_APIntForbidUndef(C), m_Value(X)).match(V) && C->isAllOnes())
-      return Val.match(X);
-    return false;
-  }
-};
-
-/// Matches a bitwise 'not' as 'xor V, -1' or 'xor -1, V'. For vectors, the
-/// constant value must be composed of only -1 scalar elements.
-template <typename ValTy>
-inline NotForbidUndef_match<ValTy> m_NotForbidUndef(const ValTy &V) {
-  return NotForbidUndef_match<ValTy>(V);
-}
-
 /// Matches an SMin with LHS and RHS in either order.
 template <typename LHS, typename RHS>
 inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 8955de6375dec..06ba5ca4c6b35 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1513,7 +1513,7 @@ static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
 
   // -1 >>a X --> -1
   // (-1 << X) a>> X --> -1
-  // Do not return Op0 because it may contain undef elements if it's a vector.
+  // We could return the original -1 constant to preserve poison elements.
   if (match(Op0, m_AllOnes()) ||
       match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
     return Constant::getAllOnesValue(Op0->getType());
@@ -2281,7 +2281,7 @@ static Value *simplifyOrLogic(Value *X, Value *Y) {
   // (B ^ ~A) | (A & B) --> B ^ ~A
   // (~A ^ B) | (B & A) --> ~A ^ B
   // (B ^ ~A) | (B & A) --> B ^ ~A
-  if (match(X, m_c_Xor(m_NotForbidUndef(m_Value(A)), m_Value(B))) &&
+  if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
     return X;
 
@@ -2298,31 +2298,29 @@ static Value *simplifyOrLogic(Value *X, Value *Y) {
   // (B & ~A) | ~(A | B) --> ~A
   // (B & ~A) | ~(B | A) --> ~A
   Value *NotA;
-  if (match(X,
-            m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
-                    m_Value(B))) &&
+  if (match(X, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
+                       m_Value(B))) &&
       match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
     return NotA;
   // The same is true of Logical And
   // TODO: This could share the logic of the version above if there was a
   // version of LogicalAnd that allowed more than just i1 types.
-  if (match(X, m_c_LogicalAnd(
-                   m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
-                   m_Value(B))) &&
+  if (match(X, m_c_LogicalAnd(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
+                              m_Value(B))) &&
       match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
     return NotA;
 
   // ~(A ^ B) | (A & B) --> ~(A ^ B)
   // ~(A ^ B) | (B & A) --> ~(A ^ B)
   Value *NotAB;
-  if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
+  if (match(X, m_CombineAnd(m_Not(m_Xor(m_Value(A), m_Value(B))),
                             m_Value(NotAB))) &&
       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
     return NotAB;
 
   // ~(A & B) | (A ^ B) --> ~(A & B)
   // ~(A & B) | (B ^ A) --> ~(A & B)
-  if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
+  if (match(X, m_CombineAnd(m_Not(m_And(m_Value(A), m_Value(B))),
                             m_Value(NotAB))) &&
       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
     return NotAB;
@@ -2552,9 +2550,8 @@ static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
   // The 'not' op must contain a complete -1 operand (no undef elements for
   // vector) for the transform to be safe.
   Value *NotA;
-  if (match(X,
-            m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
-                   m_Value(B))) &&
+  if (match(X, m_c_Or(m_CombineAnd(m_Not(m_Value(A)), m_Value(NotA)),
+                      m_Value(B))) &&
       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
     return NotA;
 
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index a5fb497f54ed1..45b359a94b3ab 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -316,7 +316,7 @@ bool Constant::isElementWiseEqual(Value *Y) const {
   Constant *C0 = ConstantExpr::getBitCast(const_cast<Constant *>(this), IntTy);
   Constant *C1 = ConstantExpr::getBitCast(cast<Constant>(Y), IntTy);
   Constant *CmpEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, C0, C1);
-  return isa<UndefValue>(CmpEq) || match(CmpEq, m_One());
+  return isa<PoisonValue>(CmpEq) || match(CmpEq, m_One());
 }
 
 static bool
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index d311690be64f1..0f4fbf5bbfbbd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2538,6 +2538,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
     }
   }
 
+  // and(shl(zext(X), Y), SignMask) -> and(sext(X), SignMask)
+  // where Y is a valid shift amount.
if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))), m_SignMask())) && match(Y, m_SpecificInt_ICMP( @@ -2546,15 +2548,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) { Ty->getScalarSizeInBits() - X->getType()->getScalarSizeInBits())))) { auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext"); - auto *SanitizedSignMask = cast(Op1); - // We must be careful with the undef elements of the sign bit mask, however: - // the mask elt can be undef iff the shift amount for that lane was undef, - // otherwise we need to sanitize undef masks to zero. - SanitizedSignMask = Constant::replaceUndefsWith( - SanitizedSignMask, ConstantInt::getNullValue(Ty->getScalarType())); - SanitizedSignMask = - Constant::mergeUndefsWith(SanitizedSignMask, cast(Y)); - return BinaryOperator::CreateAnd(SExt, SanitizedSignMask); + return BinaryOperator::CreateAnd(SExt, Op1); } if (Instruction *Z = narrowMaskedBinOp(I)) diff --git a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll index 4600a6654a362..b1e5fa4f9e1c9 100644 --- a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll +++ b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll @@ -2032,23 +2032,23 @@ define <4 x i64> @avx2_psrlv_q_256_allbig(<4 x i64> %v) { ret <4 x i64> %1 } -; The shift amount is 0 (the undef lane could be 0), so we return the unshifted input. +; The shift amount is 0 (the poison lane could be 0), so we return the unshifted input. -define <2 x i64> @avx2_psrlv_q_128_undef(<2 x i64> %v) { -; CHECK-LABEL: @avx2_psrlv_q_128_undef( +define <2 x i64> @avx2_psrlv_q_128_poison(<2 x i64> %v) { +; CHECK-LABEL: @avx2_psrlv_q_128_poison( ; CHECK-NEXT: ret <2 x i64> [[V:%.*]] ; - %1 = insertelement <2 x i64> , i64 undef, i64 1 + %1 = insertelement <2 x i64> , i64 poison, i64 1 %2 = tail call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %v, <2 x i64> %1) ret <2 x i64> %2 } -define <4 x i64> @avx2_psrlv_q_256_undef(<4 x i64> %v) { -; CHECK-LABEL: @avx2_psrlv_q_256_undef( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <4 x i64> [[V:%.*]], +define <4 x i64> @avx2_psrlv_q_256_poison(<4 x i64> %v) { +; CHECK-LABEL: @avx2_psrlv_q_256_poison( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <4 x i64> [[V:%.*]], ; CHECK-NEXT: ret <4 x i64> [[TMP1]] ; - %1 = insertelement <4 x i64> , i64 undef, i64 0 + %1 = insertelement <4 x i64> , i64 poison, i64 0 %2 = tail call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %v, <4 x i64> %1) ret <4 x i64> %2 } @@ -2435,21 +2435,21 @@ define <4 x i64> @avx2_psllv_q_256_allbig(<4 x i64> %v) { ; The shift amount is 0 (the undef lane could be 0), so we return the unshifted input. 
-define <2 x i64> @avx2_psllv_q_128_undef(<2 x i64> %v) { -; CHECK-LABEL: @avx2_psllv_q_128_undef( +define <2 x i64> @avx2_psllv_q_128_poison(<2 x i64> %v) { +; CHECK-LABEL: @avx2_psllv_q_128_poison( ; CHECK-NEXT: ret <2 x i64> [[V:%.*]] ; - %1 = insertelement <2 x i64> , i64 undef, i64 1 + %1 = insertelement <2 x i64> , i64 poison, i64 1 %2 = tail call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %v, <2 x i64> %1) ret <2 x i64> %2 } -define <4 x i64> @avx2_psllv_q_256_undef(<4 x i64> %v) { -; CHECK-LABEL: @avx2_psllv_q_256_undef( -; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i64> [[V:%.*]], +define <4 x i64> @avx2_psllv_q_256_poison(<4 x i64> %v) { +; CHECK-LABEL: @avx2_psllv_q_256_poison( +; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i64> [[V:%.*]], ; CHECK-NEXT: ret <4 x i64> [[TMP1]] ; - %1 = insertelement <4 x i64> , i64 undef, i64 0 + %1 = insertelement <4 x i64> , i64 poison, i64 0 %2 = tail call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %v, <4 x i64> %1) ret <4 x i64> %2 } diff --git a/llvm/test/Transforms/InstCombine/abs-1.ll b/llvm/test/Transforms/InstCombine/abs-1.ll index 7355c560c820b..32bd7a37053ed 100644 --- a/llvm/test/Transforms/InstCombine/abs-1.ll +++ b/llvm/test/Transforms/InstCombine/abs-1.ll @@ -63,14 +63,14 @@ define <2 x i8> @abs_canonical_2(<2 x i8> %x) { ret <2 x i8> %abs } -; Even if a constant has undef elements. +; Even if a constant has poison elements. -define <2 x i8> @abs_canonical_2_vec_undef_elts(<2 x i8> %x) { -; CHECK-LABEL: @abs_canonical_2_vec_undef_elts( +define <2 x i8> @abs_canonical_2_vec_poison_elts(<2 x i8> %x) { +; CHECK-LABEL: @abs_canonical_2_vec_poison_elts( ; CHECK-NEXT: [[ABS:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false) ; CHECK-NEXT: ret <2 x i8> [[ABS]] ; - %cmp = icmp sgt <2 x i8> %x, + %cmp = icmp sgt <2 x i8> %x, %neg = sub <2 x i8> zeroinitializer, %x %abs = select <2 x i1> %cmp, <2 x i8> %x, <2 x i8> %neg ret <2 x i8> %abs @@ -208,15 +208,15 @@ define <2 x i8> @nabs_canonical_2(<2 x i8> %x) { ret <2 x i8> %abs } -; Even if a constant has undef elements. +; Even if a constant has poison elements. 
-define <2 x i8> @nabs_canonical_2_vec_undef_elts(<2 x i8> %x) { -; CHECK-LABEL: @nabs_canonical_2_vec_undef_elts( +define <2 x i8> @nabs_canonical_2_vec_poison_elts(<2 x i8> %x) { +; CHECK-LABEL: @nabs_canonical_2_vec_poison_elts( ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false) ; CHECK-NEXT: [[ABS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP1]] ; CHECK-NEXT: ret <2 x i8> [[ABS]] ; - %cmp = icmp sgt <2 x i8> %x, + %cmp = icmp sgt <2 x i8> %x, %neg = sub <2 x i8> zeroinitializer, %x %abs = select <2 x i1> %cmp, <2 x i8> %neg, <2 x i8> %x ret <2 x i8> %abs diff --git a/llvm/test/Transforms/InstCombine/add-mask-neg.ll b/llvm/test/Transforms/InstCombine/add-mask-neg.ll index 5fad6155d348e..0e579f3097607 100644 --- a/llvm/test/Transforms/InstCombine/add-mask-neg.ll +++ b/llvm/test/Transforms/InstCombine/add-mask-neg.ll @@ -89,8 +89,8 @@ define <2 x i32> @dec_mask_neg_v2i32(<2 x i32> %X) { ret <2 x i32> %dec } -define <2 x i32> @dec_mask_neg_v2i32_undef(<2 x i32> %X) { -; CHECK-LABEL: @dec_mask_neg_v2i32_undef( +define <2 x i32> @dec_mask_neg_v2i32_poison(<2 x i32> %X) { +; CHECK-LABEL: @dec_mask_neg_v2i32_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[X]], ; CHECK-NEXT: [[DEC:%.*]] = and <2 x i32> [[TMP1]], [[TMP2]] @@ -98,7 +98,7 @@ define <2 x i32> @dec_mask_neg_v2i32_undef(<2 x i32> %X) { ; %neg = sub <2 x i32> zeroinitializer, %X %mask = and <2 x i32> %neg, %X - %dec = add <2 x i32> %mask, + %dec = add <2 x i32> %mask, ret <2 x i32> %dec } diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll index 408b0c6559b00..39b4ad8055088 100644 --- a/llvm/test/Transforms/InstCombine/add.ll +++ b/llvm/test/Transforms/InstCombine/add.ll @@ -150,24 +150,24 @@ define i32 @test5_add_nsw(i32 %A, i32 %B) { ret i32 %D } -define <2 x i8> @neg_op0_vec_undef_elt(<2 x i8> %a, <2 x i8> %b) { -; CHECK-LABEL: @neg_op0_vec_undef_elt( +define <2 x i8> @neg_op0_vec_poison_elt(<2 x i8> %a, <2 x i8> %b) { +; CHECK-LABEL: @neg_op0_vec_poison_elt( ; CHECK-NEXT: [[R:%.*]] = sub <2 x i8> [[B:%.*]], [[A:%.*]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; - %nega = sub <2 x i8> , %a + %nega = sub <2 x i8> , %a %r = add <2 x i8> %nega, %b ret <2 x i8> %r } -define <2 x i8> @neg_neg_vec_undef_elt(<2 x i8> %a, <2 x i8> %b) { -; CHECK-LABEL: @neg_neg_vec_undef_elt( +define <2 x i8> @neg_neg_vec_poison_elt(<2 x i8> %a, <2 x i8> %b) { +; CHECK-LABEL: @neg_neg_vec_poison_elt( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[R:%.*]] = sub <2 x i8> zeroinitializer, [[TMP1]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; - %nega = sub <2 x i8> , %a - %negb = sub <2 x i8> , %b + %nega = sub <2 x i8> , %a + %negb = sub <2 x i8> , %b %r = add <2 x i8> %nega, %negb ret <2 x i8> %r } @@ -1196,14 +1196,14 @@ define <2 x i32> @test44_vec_non_matching(<2 x i32> %A) { ret <2 x i32> %C } -define <2 x i32> @test44_vec_undef(<2 x i32> %A) { -; CHECK-LABEL: @test44_vec_undef( -; CHECK-NEXT: [[B:%.*]] = or <2 x i32> [[A:%.*]], -; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], +define <2 x i32> @test44_vec_poison(<2 x i32> %A) { +; CHECK-LABEL: @test44_vec_poison( +; CHECK-NEXT: [[B:%.*]] = or <2 x i32> [[A:%.*]], +; CHECK-NEXT: [[C:%.*]] = add nsw <2 x i32> [[B]], ; CHECK-NEXT: ret <2 x i32> [[C]] ; - %B = or <2 x i32> %A, - %C = add <2 x i32> %B, + %B = or <2 x i32> %A, + %C = add <2 x i32> %B, ret <2 x i32> %C } @@ -2983,7 +2983,7 @@ define i8 @signum_i8_i8_use3(i8 %x) { ret i8 %r } -; poison/undef is ok 
to propagate in shift amount +; poison is ok to propagate in shift amount ; complexity canonicalization guarantees that shift is op0 of add define <2 x i5> @signum_v2i5_v2i5(<2 x i5> %x) { diff --git a/llvm/test/Transforms/InstCombine/and-or-icmps.ll b/llvm/test/Transforms/InstCombine/and-or-icmps.ll index 63b11d0c0bc08..c20f48a985b3e 100644 --- a/llvm/test/Transforms/InstCombine/and-or-icmps.ll +++ b/llvm/test/Transforms/InstCombine/and-or-icmps.ll @@ -952,8 +952,8 @@ define i1 @substitute_constant_or_ne_uge_commute_logical(i8 %x, i8 %y) { ; Negative test - not safe to substitute vector constant with undef element -define <2 x i1> @substitute_constant_or_ne_slt_swap_vec(<2 x i8> %x, <2 x i8> %y) { -; CHECK-LABEL: @substitute_constant_or_ne_slt_swap_vec( +define <2 x i1> @substitute_constant_or_ne_slt_swap_vec_undef(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @substitute_constant_or_ne_slt_swap_vec_undef( ; CHECK-NEXT: [[C1:%.*]] = icmp ne <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[C2:%.*]] = icmp slt <2 x i8> [[Y:%.*]], [[X]] ; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[C1]], [[C2]] @@ -965,14 +965,29 @@ define <2 x i1> @substitute_constant_or_ne_slt_swap_vec(<2 x i8> %x, <2 x i8> %y ret <2 x i1> %r } +; TODO: The poison case would be valid to fold. + +define <2 x i1> @substitute_constant_or_ne_slt_swap_vec_poison(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @substitute_constant_or_ne_slt_swap_vec_poison( +; CHECK-NEXT: [[C1:%.*]] = icmp ne <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[C2:%.*]] = icmp slt <2 x i8> [[Y:%.*]], [[X]] +; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[C1]], [[C2]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %c1 = icmp ne <2 x i8> %x, + %c2 = icmp slt <2 x i8> %y, %x + %r = or <2 x i1> %c1, %c2 + ret <2 x i1> %r +} + define <2 x i1> @substitute_constant_or_ne_slt_swap_vec_logical(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @substitute_constant_or_ne_slt_swap_vec_logical( -; CHECK-NEXT: [[C1:%.*]] = icmp ne <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[C1:%.*]] = icmp ne <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[C2:%.*]] = icmp slt <2 x i8> [[Y:%.*]], [[X]] ; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C1]], <2 x i1> , <2 x i1> [[C2]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %c1 = icmp ne <2 x i8> %x, + %c1 = icmp ne <2 x i8> %x, %c2 = icmp slt <2 x i8> %y, %x %r = select <2 x i1> %c1, <2 x i1> , <2 x i1> %c2 ret <2 x i1> %r @@ -2497,29 +2512,29 @@ define <2 x i1> @icmp_eq_m1_and_eq_m1(<2 x i8> %x, <2 x i8> %y) { ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[TMP1]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %rx = icmp eq <2 x i8> %x, - %ry = icmp eq <2 x i8> %y, + %rx = icmp eq <2 x i8> %x, + %ry = icmp eq <2 x i8> %y, %r = and <2 x i1> %rx, %ry ret <2 x i1> %r } -define <2 x i1> @icmp_eq_m1_and_eq_undef_m1(<2 x i8> %x, <2 x i8> %y) { -; CHECK-LABEL: @icmp_eq_m1_and_eq_undef_m1( +define <2 x i1> @icmp_eq_m1_and_eq_poison_m1(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @icmp_eq_m1_and_eq_poison_m1( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[TMP1]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %rx = icmp eq <2 x i8> %x, - %ry = icmp eq <2 x i8> %y, + %rx = icmp eq <2 x i8> %x, + %ry = icmp eq <2 x i8> %y, %r = and <2 x i1> %rx, %ry ret <2 x i1> %r } -define <2 x i1> @icmp_eq_undef_and_eq_m1_m2(<2 x i8> %x, <2 x i8> %y) { -; CHECK-LABEL: @icmp_eq_undef_and_eq_m1_m2( -; CHECK-NEXT: ret <2 x i1> zeroinitializer +define <2 x i1> @icmp_eq_poison_and_eq_m1_m2(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @icmp_eq_poison_and_eq_m1_m2( +; CHECK-NEXT: ret <2 x i1> poison ; - %rx = 
icmp eq <2 x i8> %x, + %rx = icmp eq <2 x i8> %x, %ry = icmp eq <2 x i8> %y, %r = and <2 x i1> %rx, %ry ret <2 x i1> %r @@ -2527,13 +2542,13 @@ define <2 x i1> @icmp_eq_undef_and_eq_m1_m2(<2 x i8> %x, <2 x i8> %y) { define <2 x i1> @icmp_ne_m1_and_ne_m1_fail(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @icmp_ne_m1_and_ne_m1_fail( -; CHECK-NEXT: [[RX:%.*]] = icmp ne <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[RY:%.*]] = icmp ne <2 x i8> [[Y:%.*]], +; CHECK-NEXT: [[RX:%.*]] = icmp ne <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[RY:%.*]] = icmp ne <2 x i8> [[Y:%.*]], ; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[RX]], [[RY]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %rx = icmp ne <2 x i8> %x, - %ry = icmp ne <2 x i8> %y, + %rx = icmp ne <2 x i8> %x, + %ry = icmp ne <2 x i8> %y, %r = and <2 x i1> %rx, %ry ret <2 x i1> %r } @@ -2541,13 +2556,13 @@ define <2 x i1> @icmp_ne_m1_and_ne_m1_fail(<2 x i8> %x, <2 x i8> %y) { define <2 x i1> @icmp_eq_m1_or_eq_m1_fail(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @icmp_eq_m1_or_eq_m1_fail( -; CHECK-NEXT: [[RX:%.*]] = icmp eq <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[RY:%.*]] = icmp eq <2 x i8> [[Y:%.*]], +; CHECK-NEXT: [[RX:%.*]] = icmp eq <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[RY:%.*]] = icmp eq <2 x i8> [[Y:%.*]], ; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[RX]], [[RY]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %rx = icmp eq <2 x i8> %x, - %ry = icmp eq <2 x i8> %y, + %rx = icmp eq <2 x i8> %x, + %ry = icmp eq <2 x i8> %y, %r = or <2 x i1> %rx, %ry ret <2 x i1> %r } @@ -2560,7 +2575,7 @@ define <2 x i1> @icmp_ne_m1_or_ne_m1(<2 x i8> %x, <2 x i8> %y) { ; CHECK-NEXT: ret <2 x i1> [[R]] ; %rx = icmp ne <2 x i8> %x, - %ry = icmp ne <2 x i8> %y, + %ry = icmp ne <2 x i8> %y, %r = or <2 x i1> %rx, %ry ret <2 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/and-xor-or.ll b/llvm/test/Transforms/InstCombine/and-xor-or.ll index d072dc15cbb2c..b26d6e16c2db2 100644 --- a/llvm/test/Transforms/InstCombine/and-xor-or.ll +++ b/llvm/test/Transforms/InstCombine/and-xor-or.ll @@ -843,7 +843,7 @@ define <2 x i6> @not_or_or_not_2i6(<2 x i6> %a0, <2 x i6> %b, <2 x i6> %c) { ; %a = sdiv <2 x i6> , %a0 ; thwart complexity-based canonicalization %not1 = xor <2 x i6> %b, - %not2 = xor <2 x i6> %c, + %not2 = xor <2 x i6> %c, %or1 = or <2 x i6> %a, %not1 %or2 = or <2 x i6> %or1, %not2 ret <2 x i6> %or2 @@ -4018,7 +4018,7 @@ define <2 x i4> @and_orn_xor_commute1(<2 x i4> %a, <2 x i4> %b) { ; CHECK-NEXT: ret <2 x i4> [[R]] ; %xor = xor <2 x i4> %a, %b - %nota = xor <2 x i4> %a, + %nota = xor <2 x i4> %a, %or = or <2 x i4> %nota, %b %r = and <2 x i4> %xor, %or ret <2 x i4> %r diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll index ffd8c2a06c86e..b5250fc1a7849 100644 --- a/llvm/test/Transforms/InstCombine/and.ll +++ b/llvm/test/Transforms/InstCombine/and.ll @@ -752,16 +752,16 @@ define <2 x i64> @test36_uniform(<2 x i32> %X) { ret <2 x i64> %res } -define <2 x i64> @test36_undef(<2 x i32> %X) { -; CHECK-LABEL: @test36_undef( +define <2 x i64> @test36_poison(<2 x i32> %X) { +; CHECK-LABEL: @test36_poison( ; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64> -; CHECK-NEXT: [[ZSUB:%.*]] = add <2 x i64> [[ZEXT]], -; CHECK-NEXT: [[RES:%.*]] = and <2 x i64> [[ZSUB]], +; CHECK-NEXT: [[ZSUB:%.*]] = add nuw nsw <2 x i64> [[ZEXT]], +; CHECK-NEXT: [[RES:%.*]] = and <2 x i64> [[ZSUB]], ; CHECK-NEXT: ret <2 x i64> [[RES]] ; %zext = zext <2 x i32> %X to <2 x i64> - %zsub = add <2 x i64> %zext, - %res = and <2 x i64> %zsub, + %zsub = add <2 x i64> %zext, + %res = and <2 x i64> 
%zsub, ret <2 x i64> %res } @@ -1630,16 +1630,16 @@ define <2 x i8> @lowmask_add_splat(<2 x i8> %x, ptr %p) { ret <2 x i8> %r } -define <2 x i8> @lowmask_add_splat_undef(<2 x i8> %x, ptr %p) { -; CHECK-LABEL: @lowmask_add_splat_undef( -; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], +define <2 x i8> @lowmask_add_splat_poison(<2 x i8> %x, ptr %p) { +; CHECK-LABEL: @lowmask_add_splat_poison( +; CHECK-NEXT: [[A:%.*]] = add <2 x i8> [[X:%.*]], ; CHECK-NEXT: store <2 x i8> [[A]], ptr [[P:%.*]], align 2 -; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[A]], +; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[X]], ; CHECK-NEXT: ret <2 x i8> [[R]] ; - %a = add <2 x i8> %x, ; 0xc0 + %a = add <2 x i8> %x, ; 0xc0 store <2 x i8> %a, ptr %p - %r = and <2 x i8> %a, ; 0x20 + %r = and <2 x i8> %a, ; 0x20 ret <2 x i8> %r } @@ -1679,14 +1679,14 @@ define <2 x i8> @flip_masked_bit_uniform(<2 x i8> %A) { ret <2 x i8> %C } -define <2 x i8> @flip_masked_bit_undef(<2 x i8> %A) { -; CHECK-LABEL: @flip_masked_bit_undef( +define <2 x i8> @flip_masked_bit_poison(<2 x i8> %A) { +; CHECK-LABEL: @flip_masked_bit_poison( ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[A:%.*]], -; CHECK-NEXT: [[C:%.*]] = and <2 x i8> [[TMP1]], +; CHECK-NEXT: [[C:%.*]] = and <2 x i8> [[TMP1]], ; CHECK-NEXT: ret <2 x i8> [[C]] ; - %B = add <2 x i8> %A, - %C = and <2 x i8> %B, + %B = add <2 x i8> %A, + %C = and <2 x i8> %B, ret <2 x i8> %C } @@ -2004,7 +2004,7 @@ define i16 @invert_signbit_splat_mask_use2(i8 %x, i16 %y) { ret i16 %r } -; extra use of sext is ok +; extra use of sext is ok define i16 @invert_signbit_splat_mask_use3(i8 %x, i16 %y) { ; CHECK-LABEL: @invert_signbit_splat_mask_use3( @@ -2120,41 +2120,40 @@ define <3 x i16> @shl_lshr_pow2_const_case1_non_uniform_vec_negative(<3 x i16> % ret <3 x i16> %r } -define <3 x i16> @shl_lshr_pow2_const_case1_undef1_vec(<3 x i16> %x) { -; CHECK-LABEL: @shl_lshr_pow2_const_case1_undef1_vec( +define <3 x i16> @shl_lshr_pow2_const_case1_poison1_vec(<3 x i16> %x) { +; CHECK-LABEL: @shl_lshr_pow2_const_case1_poison1_vec( ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], ; CHECK-NEXT: [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> , <3 x i16> zeroinitializer ; CHECK-NEXT: ret <3 x i16> [[R]] ; - %shl = shl <3 x i16> , %x + %shl = shl <3 x i16> , %x %lshr = lshr <3 x i16> %shl, %r = and <3 x i16> %lshr, ret <3 x i16> %r } -define <3 x i16> @shl_lshr_pow2_const_case1_undef2_vec(<3 x i16> %x) { -; CHECK-LABEL: @shl_lshr_pow2_const_case1_undef2_vec( -; CHECK-NEXT: [[SHL:%.*]] = shl <3 x i16> , [[X:%.*]] -; CHECK-NEXT: [[LSHR:%.*]] = lshr <3 x i16> [[SHL]], -; CHECK-NEXT: [[R:%.*]] = and <3 x i16> [[LSHR]], +define <3 x i16> @shl_lshr_pow2_const_case1_poison2_vec(<3 x i16> %x) { +; CHECK-LABEL: @shl_lshr_pow2_const_case1_poison2_vec( +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> , <3 x i16> zeroinitializer ; CHECK-NEXT: ret <3 x i16> [[R]] ; %shl = shl <3 x i16> , %x - %lshr = lshr <3 x i16> %shl, + %lshr = lshr <3 x i16> %shl, %r = and <3 x i16> %lshr, ret <3 x i16> %r } -define <3 x i16> @shl_lshr_pow2_const_case1_undef3_vec(<3 x i16> %x) { -; CHECK-LABEL: @shl_lshr_pow2_const_case1_undef3_vec( +define <3 x i16> @shl_lshr_pow2_const_case1_poison3_vec(<3 x i16> %x) { +; CHECK-LABEL: @shl_lshr_pow2_const_case1_poison3_vec( ; CHECK-NEXT: [[SHL:%.*]] = shl <3 x i16> , [[X:%.*]] ; CHECK-NEXT: [[LSHR:%.*]] = lshr <3 x i16> [[SHL]], -; CHECK-NEXT: [[R:%.*]] = and <3 x i16> [[LSHR]], +; CHECK-NEXT: [[R:%.*]] = and <3 x i16> [[LSHR]], ; 
CHECK-NEXT: ret <3 x i16> [[R]] ; %shl = shl <3 x i16> , %x %lshr = lshr <3 x i16> %shl, - %r = and <3 x i16> %lshr, + %r = and <3 x i16> %lshr, ret <3 x i16> %r } @@ -2417,40 +2416,41 @@ define <3 x i16> @lshr_shl_pow2_const_case1_non_uniform_vec_negative(<3 x i16> % ret <3 x i16> %r } -define <3 x i16> @lshr_shl_pow2_const_case1_undef1_vec(<3 x i16> %x) { -; CHECK-LABEL: @lshr_shl_pow2_const_case1_undef1_vec( +define <3 x i16> @lshr_shl_pow2_const_case1_poison1_vec(<3 x i16> %x) { +; CHECK-LABEL: @lshr_shl_pow2_const_case1_poison1_vec( ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], ; CHECK-NEXT: [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> , <3 x i16> zeroinitializer ; CHECK-NEXT: ret <3 x i16> [[R]] ; - %lshr = lshr <3 x i16> , %x + %lshr = lshr <3 x i16> , %x %shl = shl <3 x i16> %lshr, %r = and <3 x i16> %shl, ret <3 x i16> %r } -define <3 x i16> @lshr_shl_pow2_const_case1_undef2_vec(<3 x i16> %x) { -; CHECK-LABEL: @lshr_shl_pow2_const_case1_undef2_vec( -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <3 x i16> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = select <3 x i1> [[TMP1]], <3 x i16> , <3 x i16> zeroinitializer +define <3 x i16> @lshr_shl_pow2_const_case1_poison2_vec(<3 x i16> %x) { +; CHECK-LABEL: @lshr_shl_pow2_const_case1_poison2_vec( +; CHECK-NEXT: [[LSHR:%.*]] = lshr <3 x i16> , [[X:%.*]] +; CHECK-NEXT: [[SHL:%.*]] = shl <3 x i16> [[LSHR]], +; CHECK-NEXT: [[R:%.*]] = and <3 x i16> [[SHL]], ; CHECK-NEXT: ret <3 x i16> [[R]] ; %lshr = lshr <3 x i16> , %x - %shl = shl <3 x i16> %lshr, + %shl = shl <3 x i16> %lshr, %r = and <3 x i16> %shl, ret <3 x i16> %r } -define <3 x i16> @lshr_shl_pow2_const_case1_undef3_vec(<3 x i16> %x) { -; CHECK-LABEL: @lshr_shl_pow2_const_case1_undef3_vec( +define <3 x i16> @lshr_shl_pow2_const_case1_poison3_vec(<3 x i16> %x) { +; CHECK-LABEL: @lshr_shl_pow2_const_case1_poison3_vec( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <3 x i16> , [[X:%.*]] ; CHECK-NEXT: [[SHL:%.*]] = shl <3 x i16> [[LSHR]], -; CHECK-NEXT: [[R:%.*]] = and <3 x i16> [[SHL]], +; CHECK-NEXT: [[R:%.*]] = and <3 x i16> [[SHL]], ; CHECK-NEXT: ret <3 x i16> [[R]] ; %lshr = lshr <3 x i16> , %x %shl = shl <3 x i16> %lshr, - %r = and <3 x i16> %shl, + %r = and <3 x i16> %shl, ret <3 x i16> %r } diff --git a/llvm/test/Transforms/InstCombine/and2.ll b/llvm/test/Transforms/InstCombine/and2.ll index 73bdadc86710e..104486e7638f5 100644 --- a/llvm/test/Transforms/InstCombine/and2.ll +++ b/llvm/test/Transforms/InstCombine/and2.ll @@ -168,14 +168,14 @@ define <2 x i8> @and1_shl1_is_cmp_eq_0_vec(<2 x i8> %x) { ret <2 x i8> %and } -define <2 x i8> @and1_shl1_is_cmp_eq_0_vec_undef(<2 x i8> %x) { -; CHECK-LABEL: @and1_shl1_is_cmp_eq_0_vec_undef( +define <2 x i8> @and1_shl1_is_cmp_eq_0_vec_poison(<2 x i8> %x) { +; CHECK-LABEL: @and1_shl1_is_cmp_eq_0_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> [[X:%.*]], zeroinitializer ; CHECK-NEXT: [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8> ; CHECK-NEXT: ret <2 x i8> [[AND]] ; - %sh = shl <2 x i8> , %x - %and = and <2 x i8> %sh, + %sh = shl <2 x i8> , %x + %and = and <2 x i8> %sh, ret <2 x i8> %and } @@ -215,14 +215,13 @@ define <2 x i8> @and1_lshr1_is_cmp_eq_0_vec(<2 x i8> %x) { ret <2 x i8> %and } -define <2 x i8> @and1_lshr1_is_cmp_eq_0_vec_undef(<2 x i8> %x) { -; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_vec_undef( -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> [[X:%.*]], zeroinitializer -; CHECK-NEXT: [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8> +define <2 x i8> @and1_lshr1_is_cmp_eq_0_vec_poison(<2 x i8> %x) { +; CHECK-LABEL: 
@and1_lshr1_is_cmp_eq_0_vec_poison( +; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i8> , [[X:%.*]] ; CHECK-NEXT: ret <2 x i8> [[AND]] ; - %sh = lshr <2 x i8> , %x - %and = and <2 x i8> %sh, + %sh = lshr <2 x i8> , %x + %and = and <2 x i8> %sh, ret <2 x i8> %and } diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll index 60fa5b2597ba9..ac206dc7999dd 100644 --- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll +++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll @@ -229,24 +229,24 @@ define <2 x i32> @ashr_lshr_inv_nonsplat_vec(<2 x i32> %x, <2 x i32> %y) { ret <2 x i32> %ret } -define <2 x i32> @ashr_lshr_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @ashr_lshr_vec_undef( +define <2 x i32> @ashr_lshr_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @ashr_lshr_vec_poison( ; CHECK-NEXT: [[CMP12:%.*]] = ashr <2 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[CMP12]] ; - %cmp = icmp sgt <2 x i32> %x, + %cmp = icmp sgt <2 x i32> %x, %l = lshr <2 x i32> %x, %y %r = ashr exact <2 x i32> %x, %y %ret = select <2 x i1> %cmp, <2 x i32> %l, <2 x i32> %r ret <2 x i32> %ret } -define <2 x i32> @ashr_lshr_vec_undef2(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @ashr_lshr_vec_undef2( +define <2 x i32> @ashr_lshr_vec_poison2(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @ashr_lshr_vec_poison2( ; CHECK-NEXT: [[CMP1:%.*]] = ashr exact <2 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[CMP1]] ; - %cmp = icmp slt <2 x i32> %x, + %cmp = icmp slt <2 x i32> %x, %l = lshr exact <2 x i32> %x, %y %r = ashr exact <2 x i32> %x, %y %ret = select <2 x i1> %cmp, <2 x i32> %r, <2 x i32> %l @@ -498,14 +498,14 @@ define <3 x i42> @lshr_sub_nsw_splat(<3 x i42> %x, <3 x i42> %y) { ret <3 x i42> %shr } -define <3 x i42> @lshr_sub_nsw_splat_undef(<3 x i42> %x, <3 x i42> %y) { -; CHECK-LABEL: @lshr_sub_nsw_splat_undef( +define <3 x i42> @lshr_sub_nsw_splat_poison(<3 x i42> %x, <3 x i42> %y) { +; CHECK-LABEL: @lshr_sub_nsw_splat_poison( ; CHECK-NEXT: [[SUB:%.*]] = sub nsw <3 x i42> [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[SHR:%.*]] = lshr <3 x i42> [[SUB]], +; CHECK-NEXT: [[SHR:%.*]] = lshr <3 x i42> [[SUB]], ; CHECK-NEXT: ret <3 x i42> [[SHR]] ; %sub = sub nsw <3 x i42> %x, %y - %shr = lshr <3 x i42> %sub, + %shr = lshr <3 x i42> %sub, ret <3 x i42> %shr } @@ -572,14 +572,14 @@ define <3 x i43> @ashr_sub_nsw_splat(<3 x i43> %x, <3 x i43> %y) { ret <3 x i43> %shr } -define <3 x i43> @ashr_sub_nsw_splat_undef(<3 x i43> %x, <3 x i43> %y) { -; CHECK-LABEL: @ashr_sub_nsw_splat_undef( +define <3 x i43> @ashr_sub_nsw_splat_poison(<3 x i43> %x, <3 x i43> %y) { +; CHECK-LABEL: @ashr_sub_nsw_splat_poison( ; CHECK-NEXT: [[SUB:%.*]] = sub nsw <3 x i43> [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[SHR:%.*]] = ashr <3 x i43> [[SUB]], +; CHECK-NEXT: [[SHR:%.*]] = ashr <3 x i43> [[SUB]], ; CHECK-NEXT: ret <3 x i43> [[SHR]] ; %sub = sub nsw <3 x i43> %x, %y - %shr = ashr <3 x i43> %sub, + %shr = ashr <3 x i43> %sub, ret <3 x i43> %shr } diff --git a/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll b/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll index 3cf312e426edf..46a7f2f1189e2 100644 --- a/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll +++ b/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll @@ -62,13 +62,13 @@ define <4 x i32> @ashr_or_mul_to_abs_vec2(<4 x i32> %X) { ret <4 x i32> %i2 } -define <4 x i32> @ashr_or_mul_to_abs_vec3_undef(<4 x i32> %X) { -; CHECK-LABEL: @ashr_or_mul_to_abs_vec3_undef( +define <4 x i32> @ashr_or_mul_to_abs_vec3_poison(<4 x i32> 
%X) { +; CHECK-LABEL: @ashr_or_mul_to_abs_vec3_poison( ; CHECK-NEXT: [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 false) ; CHECK-NEXT: ret <4 x i32> [[I2]] ; - %i = ashr <4 x i32> %X, - %i1 = or <4 x i32> %i, + %i = ashr <4 x i32> %X, + %i1 = or <4 x i32> %i, %i2 = mul <4 x i32> %i1, %X ret <4 x i32> %i2 } diff --git a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll index 148963894b89f..f776dc13bb4e5 100644 --- a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll +++ b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll @@ -178,27 +178,27 @@ define <2 x i8> @shl_xor_and(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @shl_xor_and( ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] -; CHECK-NEXT: [[BW1:%.*]] = shl <2 x i8> [[TMP2]], +; CHECK-NEXT: [[BW1:%.*]] = shl <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; - %shift1 = shl <2 x i8> %x, - %shift2 = shl <2 x i8> %y, - %bw2 = xor <2 x i8> %shift2, + %shift1 = shl <2 x i8> %x, + %shift2 = shl <2 x i8> %y, + %bw2 = xor <2 x i8> %shift2, %bw1 = and <2 x i8> %bw2, %shift1 ret <2 x i8> %bw1 } define <2 x i8> @shl_xor_and_fail(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @shl_xor_and_fail( -; CHECK-NEXT: [[SHIFT1:%.*]] = shl <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[SHIFT2:%.*]] = shl <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[BW2:%.*]] = xor <2 x i8> [[SHIFT2]], +; CHECK-NEXT: [[SHIFT1:%.*]] = shl <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[SHIFT2:%.*]] = shl <2 x i8> [[Y:%.*]], +; CHECK-NEXT: [[BW2:%.*]] = xor <2 x i8> [[SHIFT2]], ; CHECK-NEXT: [[BW1:%.*]] = and <2 x i8> [[SHIFT1]], [[BW2]] ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; - %shift1 = shl <2 x i8> %x, - %shift2 = shl <2 x i8> %y, - %bw2 = xor <2 x i8> %shift2, + %shift1 = shl <2 x i8> %x, + %shift2 = shl <2 x i8> %y, + %bw2 = xor <2 x i8> %shift2, %bw1 = and <2 x i8> %shift1, %bw2 ret <2 x i8> %bw1 } @@ -321,13 +321,13 @@ define <2 x i8> @lshr_add_and(<2 x i8> %x, <2 x i8> %y) { define <2 x i8> @lshr_add_or_fail_dif_masks(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @lshr_add_or_fail_dif_masks( ; CHECK-NEXT: [[SHIFT1:%.*]] = lshr <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[SHIFT2:%.*]] = lshr <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[BW2:%.*]] = add <2 x i8> [[SHIFT2]], +; CHECK-NEXT: [[SHIFT2:%.*]] = lshr <2 x i8> [[Y:%.*]], +; CHECK-NEXT: [[BW2:%.*]] = add nsw <2 x i8> [[SHIFT2]], ; CHECK-NEXT: [[BW1:%.*]] = and <2 x i8> [[SHIFT1]], [[BW2]] ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; %shift1 = lshr <2 x i8> %x, - %shift2 = lshr <2 x i8> %y, + %shift2 = lshr <2 x i8> %y, %bw2 = add <2 x i8> %shift2, %bw1 = and <2 x i8> %shift1, %bw2 ret <2 x i8> %bw1 @@ -659,8 +659,8 @@ define <4 x i8> @and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s ret <4 x i8> %and } -define <4 x i8> @and_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { -; CHECK-LABEL: @and_ashr_not_vec_undef_1( +define <4 x i8> @and_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { +; CHECK-LABEL: @and_ashr_not_vec_poison_1( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] @@ -668,18 +668,18 @@ define <4 x i8> @and_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh ; %x.shift = ashr <4 x i8> %x, %shamt %y.shift = ashr <4 x i8> %y, %shamt - %y.shift.not = xor <4 x i8> %y.shift, + %y.shift.not = xor <4 x i8> %y.shift, %and = and <4 x i8> 
%x.shift, %y.shift.not ret <4 x i8> %and } -define <4 x i8> @and_ashr_not_vec_undef_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { -; CHECK-LABEL: @and_ashr_not_vec_undef_2( -; CHECK-NEXT: ret <4 x i8> zeroinitializer +define <4 x i8> @and_ashr_not_vec_poison_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { +; CHECK-LABEL: @and_ashr_not_vec_poison_2( +; CHECK-NEXT: ret <4 x i8> poison ; %x.shift = ashr <4 x i8> %x, %shamt %y.shift = ashr <4 x i8> %y, %shamt - %y.shift.not = xor <4 x i8> %y.shift, + %y.shift.not = xor <4 x i8> %y.shift, %and = and <4 x i8> %x.shift, %y.shift.not ret <4 x i8> %and } @@ -793,8 +793,8 @@ define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh ret <4 x i8> %or } -define <4 x i8> @or_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { -; CHECK-LABEL: @or_ashr_not_vec_undef_1( +define <4 x i8> @or_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { +; CHECK-LABEL: @or_ashr_not_vec_poison_1( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] @@ -802,18 +802,18 @@ define <4 x i8> @or_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sha ; %x.shift = ashr <4 x i8> %x, %shamt %y.shift = ashr <4 x i8> %y, %shamt - %y.shift.not = xor <4 x i8> %y.shift, + %y.shift.not = xor <4 x i8> %y.shift, %or = or <4 x i8> %x.shift, %y.shift.not ret <4 x i8> %or } -define <4 x i8> @or_ashr_not_vec_undef_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { -; CHECK-LABEL: @or_ashr_not_vec_undef_2( -; CHECK-NEXT: ret <4 x i8> +define <4 x i8> @or_ashr_not_vec_poison_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { +; CHECK-LABEL: @or_ashr_not_vec_poison_2( +; CHECK-NEXT: ret <4 x i8> poison ; %x.shift = ashr <4 x i8> %x, %shamt %y.shift = ashr <4 x i8> %y, %shamt - %y.shift.not = xor <4 x i8> %y.shift, + %y.shift.not = xor <4 x i8> %y.shift, %or = or <4 x i8> %x.shift, %y.shift.not ret <4 x i8> %or } @@ -926,8 +926,8 @@ define <4 x i8> @xor_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s ret <4 x i8> %xor } -define <4 x i8> @xor_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { -; CHECK-LABEL: @xor_ashr_not_vec_undef_1( +define <4 x i8> @xor_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { +; CHECK-LABEL: @xor_ashr_not_vec_poison_1( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: [[DOTNOT:%.*]] = ashr <4 x i8> [[TMP1]], [[SHAMT:%.*]] ; CHECK-NEXT: [[XOR:%.*]] = xor <4 x i8> [[DOTNOT]], @@ -935,18 +935,18 @@ define <4 x i8> @xor_ashr_not_vec_undef_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh ; %x.shift = ashr <4 x i8> %x, %shamt %y.shift = ashr <4 x i8> %y, %shamt - %y.shift.not = xor <4 x i8> %y.shift, + %y.shift.not = xor <4 x i8> %y.shift, %xor = xor <4 x i8> %x.shift, %y.shift.not ret <4 x i8> %xor } -define <4 x i8> @xor_ashr_not_vec_undef_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { -; CHECK-LABEL: @xor_ashr_not_vec_undef_2( -; CHECK-NEXT: ret <4 x i8> undef +define <4 x i8> @xor_ashr_not_vec_poison_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { +; CHECK-LABEL: @xor_ashr_not_vec_poison_2( +; CHECK-NEXT: ret <4 x i8> poison ; %x.shift = ashr <4 x i8> %x, %shamt %y.shift = ashr <4 x i8> %y, %shamt - %y.shift.not = xor <4 x i8> %y.shift, + %y.shift.not = xor <4 x i8> %y.shift, %xor = xor <4 x i8> %x.shift, %y.shift.not ret <4 x i8> %xor } diff --git a/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll 
b/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll index 27a3c8743368a..a16ad4ddb806f 100644 --- a/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll +++ b/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll @@ -202,41 +202,41 @@ define <2 x i8> @shl_or_non_splat(<2 x i8> %x) { ret <2 x i8> %binop } -define <2 x i8> @shl_or_undef_in_add(<2 x i8> %x) { -; CHECK-LABEL: define <2 x i8> @shl_or_undef_in_add +define <2 x i8> @shl_or_poison_in_add(<2 x i8> %x) { +; CHECK-LABEL: define <2 x i8> @shl_or_poison_in_add ; CHECK-SAME: (<2 x i8> [[X:%.*]]) { ; CHECK-NEXT: [[BINOP:%.*]] = shl <2 x i8> , [[X]] ; CHECK-NEXT: ret <2 x i8> [[BINOP]] ; %shift = shl <2 x i8> , %x - %add = add <2 x i8> %x, + %add = add <2 x i8> %x, %shift2 = shl <2 x i8> , %add %binop = or <2 x i8> %shift, %shift2 ret <2 x i8> %binop } -define <2 x i8> @shl_or_undef_in_shift1(<2 x i8> %x) { -; CHECK-LABEL: define <2 x i8> @shl_or_undef_in_shift1 +define <2 x i8> @shl_or_poison_in_shift1(<2 x i8> %x) { +; CHECK-LABEL: define <2 x i8> @shl_or_poison_in_shift1 ; CHECK-SAME: (<2 x i8> [[X:%.*]]) { -; CHECK-NEXT: [[BINOP:%.*]] = shl <2 x i8> , [[X]] +; CHECK-NEXT: [[BINOP:%.*]] = shl <2 x i8> , [[X]] ; CHECK-NEXT: ret <2 x i8> [[BINOP]] ; - %shift = shl <2 x i8> , %x + %shift = shl <2 x i8> , %x %add = add <2 x i8> %x, %shift2 = shl <2 x i8> , %add %binop = or <2 x i8> %shift, %shift2 ret <2 x i8> %binop } -define <2 x i8> @shl_or_undef_in_shift2(<2 x i8> %x) { -; CHECK-LABEL: define <2 x i8> @shl_or_undef_in_shift2 +define <2 x i8> @shl_or_poison_in_shift2(<2 x i8> %x) { +; CHECK-LABEL: define <2 x i8> @shl_or_poison_in_shift2 ; CHECK-SAME: (<2 x i8> [[X:%.*]]) { -; CHECK-NEXT: [[BINOP:%.*]] = shl <2 x i8> , [[X]] +; CHECK-NEXT: [[BINOP:%.*]] = shl <2 x i8> , [[X]] ; CHECK-NEXT: ret <2 x i8> [[BINOP]] ; %shift = shl <2 x i8> , %x %add = add <2 x i8> %x, - %shift2 = shl <2 x i8> , %add + %shift2 = shl <2 x i8> , %add %binop = or <2 x i8> %shift, %shift2 ret <2 x i8> %binop } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-clamp-like-pattern-between-zero-and-positive-threshold.ll b/llvm/test/Transforms/InstCombine/canonicalize-clamp-like-pattern-between-zero-and-positive-threshold.ll index 4547008b76093..c555970ea4348 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-clamp-like-pattern-between-zero-and-positive-threshold.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-clamp-like-pattern-between-zero-and-positive-threshold.ll @@ -338,22 +338,22 @@ define <2 x i32> @t18_ult_slt_vec_nonsplat(<2 x i32> %x, <2 x i32> %replacement_ ret <2 x i32> %r } -define <3 x i32> @t19_ult_slt_vec_undef0(<3 x i32> %x, <3 x i32> %replacement_low, <3 x i32> %replacement_high) { -; CHECK-LABEL: @t19_ult_slt_vec_undef0( +define <3 x i32> @t19_ult_slt_vec_poison0(<3 x i32> %x, <3 x i32> %replacement_low, <3 x i32> %replacement_high) { +; CHECK-LABEL: @t19_ult_slt_vec_poison0( ; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <3 x i32> [[X:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <3 x i32> [[X]], ; CHECK-NEXT: [[TMP3:%.*]] = select <3 x i1> [[TMP1]], <3 x i32> [[REPLACEMENT_LOW:%.*]], <3 x i32> [[X]] ; CHECK-NEXT: [[R:%.*]] = select <3 x i1> [[TMP2]], <3 x i32> [[REPLACEMENT_HIGH:%.*]], <3 x i32> [[TMP3]] ; CHECK-NEXT: ret <3 x i32> [[R]] ; - %t0 = icmp slt <3 x i32> %x, + %t0 = icmp slt <3 x i32> %x, %t1 = select <3 x i1> %t0, <3 x i32> %replacement_low, <3 x i32> %replacement_high %t2 = icmp ult <3 x i32> %x, %r = select <3 x i1> %t2, <3 x i32> %x, <3 x i32> %t1 ret <3 x i32> %r } 
-define <3 x i32> @t20_ult_slt_vec_undef1(<3 x i32> %x, <3 x i32> %replacement_low, <3 x i32> %replacement_high) { -; CHECK-LABEL: @t20_ult_slt_vec_undef1( +define <3 x i32> @t20_ult_slt_vec_poison1(<3 x i32> %x, <3 x i32> %replacement_low, <3 x i32> %replacement_high) { +; CHECK-LABEL: @t20_ult_slt_vec_poison1( ; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <3 x i32> [[X:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <3 x i32> [[X]], ; CHECK-NEXT: [[TMP3:%.*]] = select <3 x i1> [[TMP1]], <3 x i32> [[REPLACEMENT_LOW:%.*]], <3 x i32> [[X]] @@ -362,21 +362,21 @@ define <3 x i32> @t20_ult_slt_vec_undef1(<3 x i32> %x, <3 x i32> %replacement_lo ; %t0 = icmp slt <3 x i32> %x, %t1 = select <3 x i1> %t0, <3 x i32> %replacement_low, <3 x i32> %replacement_high - %t2 = icmp ult <3 x i32> %x, + %t2 = icmp ult <3 x i32> %x, %r = select <3 x i1> %t2, <3 x i32> %x, <3 x i32> %t1 ret <3 x i32> %r } -define <3 x i32> @t21_ult_slt_vec_undef2(<3 x i32> %x, <3 x i32> %replacement_low, <3 x i32> %replacement_high) { -; CHECK-LABEL: @t21_ult_slt_vec_undef2( +define <3 x i32> @t21_ult_slt_vec_poison2(<3 x i32> %x, <3 x i32> %replacement_low, <3 x i32> %replacement_high) { +; CHECK-LABEL: @t21_ult_slt_vec_poison2( ; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <3 x i32> [[X:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <3 x i32> [[X]], ; CHECK-NEXT: [[TMP3:%.*]] = select <3 x i1> [[TMP1]], <3 x i32> [[REPLACEMENT_LOW:%.*]], <3 x i32> [[X]] ; CHECK-NEXT: [[R:%.*]] = select <3 x i1> [[TMP2]], <3 x i32> [[REPLACEMENT_HIGH:%.*]], <3 x i32> [[TMP3]] ; CHECK-NEXT: ret <3 x i32> [[R]] ; - %t0 = icmp slt <3 x i32> %x, + %t0 = icmp slt <3 x i32> %x, %t1 = select <3 x i1> %t0, <3 x i32> %replacement_low, <3 x i32> %replacement_high - %t2 = icmp ult <3 x i32> %x, + %t2 = icmp ult <3 x i32> %x, %r = select <3 x i1> %t2, <3 x i32> %x, <3 x i32> %t1 ret <3 x i32> %r } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll index 5b7a99d53c308..759770688cf20 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll @@ -79,12 +79,12 @@ define <2 x i1> @p2_vec_nonsplat_edgecase1(<2 x i8> %x) { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp eq <3 x i8> %tmp0, %x ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll index 160d968b9ac4c..95e6d5ac6a5f8 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll @@ -79,22 +79,22 @@ define <2 x i1> @p2_vec_nonsplat_edgecase1(<2 x i8> %x) { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: 
[[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ne <3 x i8> %tmp0, %x ret <3 x i1> %ret } -define <3 x i1> @p3_vec_nonsplat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_nonsplat_undef( +define <3 x i1> @p3_vec_nonsplat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_nonsplat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ne <3 x i8> %tmp0, %x ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll index 60921042d5243..ae503bfb1cfe2 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll @@ -58,12 +58,12 @@ define <2 x i1> @p2_vec_nonsplat_edgecase(<2 x i8> %x) { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp slt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sge <3 x i8> %tmp0, %x ret <3 x i1> %ret } @@ -175,11 +175,11 @@ define <2 x i1> @n3_vec(<2 x i8> %x) { define <3 x i1> @n4_vec(<3 x i8> %x) { ; CHECK-LABEL: @n4_vec( -; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], +; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], ; CHECK-NEXT: [[RET:%.*]] = icmp sge <3 x i8> [[TMP0]], [[X]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sge <3 x i8> %tmp0, %x ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll index 6345e70d7220e..f1333fed2c517 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll @@ -72,26 +72,26 @@ define <2 x i1> @p2_vec_nonsplat_edgecase() { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef() { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison() { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() ; CHECK-NEXT: [[RET:%.*]] = icmp sgt <3 x i8> [[X]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sgt <3 x i8> %x, %tmp0 ret <3 x i1> %ret } -define <3 x i1> @p3_vec_nonsplat_undef() { -; CHECK-LABEL: @p3_vec_nonsplat_undef( +define <3 x i1> @p3_vec_nonsplat_poison() { +; CHECK-LABEL: @p3_vec_nonsplat_poison( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() ; CHECK-NEXT: [[RET:%.*]] = icmp sgt <3 x i8> [[X]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sgt <3 x i8> %x, %tmp0 ret <3 x i1> %ret } @@ -212,12 +212,12 @@ define <2 x i1> @n3_vec() { define <3 x i1> @n4_vec() { ; CHECK-LABEL: @n4_vec( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() -; CHECK-NEXT: 
[[TMP0:%.*]] = and <3 x i8> [[X]], +; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X]], ; CHECK-NEXT: [[RET:%.*]] = icmp sgt <3 x i8> [[X]], [[TMP0]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sgt <3 x i8> %x, %tmp0 ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll index b7aec53fed676..4bed21a525f05 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll @@ -72,14 +72,14 @@ define <2 x i1> @p2_vec_nonsplat_edgecase() { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef() { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison() { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() ; CHECK-NEXT: [[RET:%.*]] = icmp slt <3 x i8> [[X]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sle <3 x i8> %x, %tmp0 ret <3 x i1> %ret } @@ -200,12 +200,12 @@ define <2 x i1> @n3_vec() { define <3 x i1> @n4_vec() { ; CHECK-LABEL: @n4_vec( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() -; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X]], +; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X]], ; CHECK-NEXT: [[RET:%.*]] = icmp sle <3 x i8> [[X]], [[TMP0]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp sle <3 x i8> %x, %tmp0 ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll index 56661d335c4f6..be6e3d0306bcd 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll @@ -58,22 +58,22 @@ define <2 x i1> @p2_vec_nonsplat_edgecase(<2 x i8> %x) { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp sgt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp slt <3 x i8> %tmp0, %x ret <3 x i1> %ret } -define <3 x i1> @p3_vec_nonsplat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_nonsplat_undef( +define <3 x i1> @p3_vec_nonsplat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_nonsplat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp sgt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp slt <3 x i8> %tmp0, %x ret <3 x i1> %ret } @@ -185,11 +185,11 @@ define <2 x i1> @n3(<2 x i8> %x) { define <3 x i1> @n4(<3 x i8> %x) { ; CHECK-LABEL: @n4( -; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], +; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], ; CHECK-NEXT: [[RET:%.*]] = icmp slt <3 x i8> [[TMP0]], [[X]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp slt <3 x i8> %tmp0, %x ret <3 x i1> %ret } diff --git 
a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll index a93e8f779435f..cfd48821b2c1d 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll @@ -79,12 +79,12 @@ define <2 x i1> @p2_vec_nonsplat_edgecase1(<2 x i8> %x) { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp uge <3 x i8> %tmp0, %x ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ugt-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ugt-to-icmp-ugt.ll index 73ea4d456d246..6f6ba95a81c76 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ugt-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ugt-to-icmp-ugt.ll @@ -95,26 +95,26 @@ define <2 x i1> @p2_vec_nonsplat_edgecase1() { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef() { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison() { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() ; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ugt <3 x i8> %x, %tmp0 ret <3 x i1> %ret } -define <3 x i1> @p3_vec_nonsplat_undef() { -; CHECK-LABEL: @p3_vec_nonsplat_undef( +define <3 x i1> @p3_vec_nonsplat_poison() { +; CHECK-LABEL: @p3_vec_nonsplat_poison( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() ; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ugt <3 x i8> %x, %tmp0 ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ule-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ule-to-icmp-ule.ll index 53886b5f2dc9c..54f00321c4cf0 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ule-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ule-to-icmp-ule.ll @@ -95,14 +95,14 @@ define <2 x i1> @p2_vec_nonsplat_edgecase1() { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef() { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison() { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[X:%.*]] = call <3 x i8> @gen3x8() ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[X]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %x = call <3 x i8> @gen3x8() - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ule <3 x i8> %x, %tmp0 ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll index 
d66be571008c2..008fc6d2d6eda 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll @@ -80,22 +80,22 @@ define <2 x i1> @p2_vec_nonsplat_edgecase1(<2 x i8> %x) { ret <2 x i1> %ret } -define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_splat_undef( +define <3 x i1> @p3_vec_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_splat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ult <3 x i8> %tmp0, %x ret <3 x i1> %ret } -define <3 x i1> @p3_vec_nonsplat_undef(<3 x i8> %x) { -; CHECK-LABEL: @p3_vec_nonsplat_undef( +define <3 x i1> @p3_vec_nonsplat_poison(<3 x i8> %x) { +; CHECK-LABEL: @p3_vec_nonsplat_poison( ; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = and <3 x i8> %x, + %tmp0 = and <3 x i8> %x, %ret = icmp ult <3 x i8> %tmp0, %x ret <3 x i1> %ret } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll index 38611d8b53a98..dc5658d302d99 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll @@ -40,13 +40,13 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %ret } -define <3 x i1> @p2_vec_undef(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p2_vec_undef( -; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]] +define <3 x i1> @p2_vec_poison(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p2_vec_poison( +; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[TMP0]], [[X:%.*]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = lshr <3 x i8> , %y + %tmp0 = lshr <3 x i8> , %y %tmp1 = and <3 x i8> %tmp0, %x %ret = icmp eq <3 x i8> %tmp1, %x ret <3 x i1> %ret diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll index 37d317b695f60..8fbbd2bb9907d 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll @@ -40,13 +40,13 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %ret } -define <3 x i1> @p2_vec_undef(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p2_vec_undef( -; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]] +define <3 x i1> @p2_vec_poison(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p2_vec_poison( +; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[TMP0]], [[X:%.*]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %tmp0 = lshr <3 x i8> , %y + %tmp0 = lshr <3 x i8> , %y %tmp1 = and <3 x i8> %tmp0, %x %ret = icmp ne <3 x i8> %tmp1, %x ret <3 x i1> %ret diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll index dfd67eae8aafd..88487b38e2c70 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll +++ 
b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll @@ -44,40 +44,40 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %ret } -define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p2_vec_undef0( +define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p2_vec_poison0( ; CHECK-NEXT: [[X_HIGHBITS:%.*]] = lshr <3 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq <3 x i8> [[X_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %t0 = shl <3 x i8> , %y + %t0 = shl <3 x i8> , %y %t1 = xor <3 x i8> %t0, %t2 = and <3 x i8> %t1, %x %ret = icmp eq <3 x i8> %t2, %x ret <3 x i1> %ret } -define <3 x i1> @p3_vec_undef0(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p3_vec_undef0( +define <3 x i1> @p3_vec_poison0(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p3_vec_poison0( ; CHECK-NEXT: [[X_HIGHBITS:%.*]] = lshr <3 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq <3 x i8> [[X_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %t0 = shl <3 x i8> , %y - %t1 = xor <3 x i8> %t0, + %t1 = xor <3 x i8> %t0, %t2 = and <3 x i8> %t1, %x %ret = icmp eq <3 x i8> %t2, %x ret <3 x i1> %ret } -define <3 x i1> @p4_vec_undef2(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p4_vec_undef2( +define <3 x i1> @p4_vec_poison2(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p4_vec_poison2( ; CHECK-NEXT: [[X_HIGHBITS:%.*]] = lshr <3 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq <3 x i8> [[X_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %t0 = shl <3 x i8> , %y - %t1 = xor <3 x i8> %t0, + %t0 = shl <3 x i8> , %y + %t1 = xor <3 x i8> %t0, %t2 = and <3 x i8> %t1, %x %ret = icmp eq <3 x i8> %t2, %x ret <3 x i1> %ret diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll index 608e133ec7f73..b717925fd644f 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll @@ -44,40 +44,40 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %ret } -define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p2_vec_undef0( +define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p2_vec_poison0( ; CHECK-NEXT: [[X_HIGHBITS:%.*]] = lshr <3 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne <3 x i8> [[X_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %t0 = shl <3 x i8> , %y + %t0 = shl <3 x i8> , %y %t1 = xor <3 x i8> %t0, %t2 = and <3 x i8> %t1, %x %ret = icmp ne <3 x i8> %t2, %x ret <3 x i1> %ret } -define <3 x i1> @p3_vec_undef0(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p3_vec_undef0( +define <3 x i1> @p3_vec_poison0(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p3_vec_poison0( ; CHECK-NEXT: [[X_HIGHBITS:%.*]] = lshr <3 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne <3 x i8> [[X_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %t0 = shl <3 x i8> , %y - %t1 = xor <3 x i8> %t0, + %t1 = xor <3 x i8> %t0, %t2 = and <3 x i8> %t1, %x %ret = icmp ne <3 x i8> %t2, %x ret <3 x i1> %ret } -define <3 x i1> @p4_vec_undef2(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p4_vec_undef2( +define <3 x i1> @p4_vec_poison2(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p4_vec_poison2( ; CHECK-NEXT: 
[[X_HIGHBITS:%.*]] = lshr <3 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne <3 x i8> [[X_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %t0 = shl <3 x i8> , %y - %t1 = xor <3 x i8> %t0, + %t0 = shl <3 x i8> , %y + %t1 = xor <3 x i8> %t0, %t2 = and <3 x i8> %t1, %x %ret = icmp ne <3 x i8> %t2, %x ret <3 x i1> %ret diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll index d13129c1248a4..f48d284e085bc 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll @@ -54,15 +54,15 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %ret } -define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p2_vec_undef0( -; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> , [[Y:%.*]] +define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p2_vec_poison0( +; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i8> , [[Y:%.*]] ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> , [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %t0 = shl <3 x i8> , %y + %t0 = shl <3 x i8> , %y call void @use3i8(<3 x i8> %t0) %t1 = lshr <3 x i8> %t0, %y %t2 = and <3 x i8> %t1, %x diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll index a1517b36d0b9d..f4b3c67164e49 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll @@ -54,15 +54,15 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %ret } -define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @p2_vec_undef0( -; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> , [[Y:%.*]] +define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @p2_vec_poison0( +; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i8> , [[Y:%.*]] ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> , [[Y]] ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; - %t0 = shl <3 x i8> , %y + %t0 = shl <3 x i8> , %y call void @use3i8(<3 x i8> %t0) %t1 = lshr <3 x i8> %t0, %y %t2 = and <3 x i8> %t1, %x diff --git a/llvm/test/Transforms/InstCombine/cast-int-icmp-eq-0.ll b/llvm/test/Transforms/InstCombine/cast-int-icmp-eq-0.ll index 9b51a7649992f..7b6d07a14a30e 100644 --- a/llvm/test/Transforms/InstCombine/cast-int-icmp-eq-0.ll +++ b/llvm/test/Transforms/InstCombine/cast-int-icmp-eq-0.ll @@ -603,7 +603,7 @@ define i1 @i16_cast_cmp_sgt_int_m1_sitofp_half(i16 %i) { ret i1 %cmp } -; Verify that vector types and vector constants including undef elements are transformed too. +; Verify that vector types and vector constants including poison elements are transformed too. define <3 x i1> @i32_cast_cmp_ne_int_0_sitofp_double_vec(<3 x i32> %i) { ; CHECK-LABEL: @i32_cast_cmp_ne_int_0_sitofp_double_vec( @@ -616,38 +616,38 @@ define <3 x i1> @i32_cast_cmp_ne_int_0_sitofp_double_vec(<3 x i32> %i) { ret <3 x i1> %cmp } -; TODO: Can we propagate the constant vector with undef element? 
+; TODO: Can we propagate the constant vector with poison element? -define <3 x i1> @i32_cast_cmp_eq_int_0_sitofp_float_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_eq_int_0_sitofp_float_vec_undef( +define <3 x i1> @i32_cast_cmp_eq_int_0_sitofp_float_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_eq_int_0_sitofp_float_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = sitofp <3 x i32> %i to <3 x float> %b = bitcast <3 x float> %f to <3 x i32> - %cmp = icmp eq <3 x i32> %b, + %cmp = icmp eq <3 x i32> %b, ret <3 x i1> %cmp } -define <3 x i1> @i64_cast_cmp_slt_int_1_sitofp_half_vec_undef(<3 x i64> %i) { -; CHECK-LABEL: @i64_cast_cmp_slt_int_1_sitofp_half_vec_undef( +define <3 x i1> @i64_cast_cmp_slt_int_1_sitofp_half_vec_poison(<3 x i64> %i) { +; CHECK-LABEL: @i64_cast_cmp_slt_int_1_sitofp_half_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp slt <3 x i64> [[I:%.*]], ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = sitofp <3 x i64> %i to <3 x half> %b = bitcast <3 x half> %f to <3 x i16> - %cmp = icmp slt <3 x i16> %b, + %cmp = icmp slt <3 x i16> %b, ret <3 x i1> %cmp } -define <3 x i1> @i16_cast_cmp_sgt_int_m1_sitofp_float_vec_undef(<3 x i16> %i) { -; CHECK-LABEL: @i16_cast_cmp_sgt_int_m1_sitofp_float_vec_undef( +define <3 x i1> @i16_cast_cmp_sgt_int_m1_sitofp_float_vec_poison(<3 x i16> %i) { +; CHECK-LABEL: @i16_cast_cmp_sgt_int_m1_sitofp_float_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <3 x i16> [[I:%.*]], ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = sitofp <3 x i16> %i to <3 x float> %b = bitcast <3 x float> %f to <3 x i32> - %cmp = icmp sgt <3 x i32> %b, + %cmp = icmp sgt <3 x i32> %b, ret <3 x i1> %cmp } diff --git a/llvm/test/Transforms/InstCombine/cast-unsigned-icmp-eqcmp-0.ll b/llvm/test/Transforms/InstCombine/cast-unsigned-icmp-eqcmp-0.ll index 0752576fad45f..1565fb7c0a6a9 100644 --- a/llvm/test/Transforms/InstCombine/cast-unsigned-icmp-eqcmp-0.ll +++ b/llvm/test/Transforms/InstCombine/cast-unsigned-icmp-eqcmp-0.ll @@ -27,14 +27,14 @@ define <2 x i1> @i32_cast_cmp_eq_int_0_uitofp_float_vec(<2 x i32> %i) { ret <2 x i1> %cmp } -define <3 x i1> @i32_cast_cmp_eq_int_0_uitofp_float_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_eq_int_0_uitofp_float_vec_undef( +define <3 x i1> @i32_cast_cmp_eq_int_0_uitofp_float_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_eq_int_0_uitofp_float_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = uitofp <3 x i32> %i to <3 x float> %b = bitcast <3 x float> %f to <3 x i32> - %cmp = icmp eq <3 x i32> %b, + %cmp = icmp eq <3 x i32> %b, ret <3 x i1> %cmp } @@ -60,14 +60,14 @@ define <2 x i1> @i32_cast_cmp_ne_int_0_uitofp_float_vec(<2 x i32> %i) { ret <2 x i1> %cmp } -define <3 x i1> @i32_cast_cmp_ne_int_0_uitofp_float_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_ne_int_0_uitofp_float_vec_undef( +define <3 x i1> @i32_cast_cmp_ne_int_0_uitofp_float_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_ne_int_0_uitofp_float_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = uitofp <3 x i32> %i to <3 x float> %b = bitcast <3 x float> %f to <3 x i32> - %cmp = icmp ne <3 x i32> %b, + %cmp = icmp ne <3 x i32> %b, ret <3 x i1> %cmp } @@ -93,14 +93,14 @@ define <2 x i1> @i32_cast_cmp_eq_int_0_uitofp_double_vec(<2 x i32> %i) { ret <2 x i1> %cmp } -define <3 x i1> 
@i32_cast_cmp_eq_int_0_uitofp_double_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_eq_int_0_uitofp_double_vec_undef( +define <3 x i1> @i32_cast_cmp_eq_int_0_uitofp_double_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_eq_int_0_uitofp_double_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = uitofp <3 x i32> %i to <3 x double> %b = bitcast <3 x double> %f to <3 x i64> - %cmp = icmp eq <3 x i64> %b, + %cmp = icmp eq <3 x i64> %b, ret <3 x i1> %cmp } @@ -126,14 +126,14 @@ define <2 x i1> @i32_cast_cmp_ne_int_0_uitofp_double_vec(<2 x i32> %i) { ret <2 x i1> %cmp } -define <3 x i1> @i32_cast_cmp_ne_int_0_uitofp_double_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_ne_int_0_uitofp_double_vec_undef( +define <3 x i1> @i32_cast_cmp_ne_int_0_uitofp_double_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_ne_int_0_uitofp_double_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = uitofp <3 x i32> %i to <3 x double> %b = bitcast <3 x double> %f to <3 x i64> - %cmp = icmp ne <3 x i64> %b, + %cmp = icmp ne <3 x i64> %b, ret <3 x i1> %cmp } @@ -159,14 +159,14 @@ define <2 x i1> @i32_cast_cmp_eq_int_0_uitofp_half_vec(<2 x i32> %i) { ret <2 x i1> %cmp } -define <3 x i1> @i32_cast_cmp_eq_int_0_uitofp_half_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_eq_int_0_uitofp_half_vec_undef( +define <3 x i1> @i32_cast_cmp_eq_int_0_uitofp_half_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_eq_int_0_uitofp_half_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = uitofp <3 x i32> %i to <3 x half> %b = bitcast <3 x half> %f to <3 x i16> - %cmp = icmp eq <3 x i16> %b, + %cmp = icmp eq <3 x i16> %b, ret <3 x i1> %cmp } @@ -192,13 +192,13 @@ define <2 x i1> @i32_cast_cmp_ne_int_0_uitofp_half_vec(<2 x i32> %i) { ret <2 x i1> %cmp } -define <3 x i1> @i32_cast_cmp_ne_int_0_uitofp_half_vec_undef(<3 x i32> %i) { -; CHECK-LABEL: @i32_cast_cmp_ne_int_0_uitofp_half_vec_undef( +define <3 x i1> @i32_cast_cmp_ne_int_0_uitofp_half_vec_poison(<3 x i32> %i) { +; CHECK-LABEL: @i32_cast_cmp_ne_int_0_uitofp_half_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <3 x i32> [[I:%.*]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[CMP]] ; %f = uitofp <3 x i32> %i to <3 x half> %b = bitcast <3 x half> %f to <3 x i16> - %cmp = icmp ne <3 x i16> %b, + %cmp = icmp ne <3 x i16> %b, ret <3 x i1> %cmp } diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll index d9c93ba277295..04a3e8931e62c 100644 --- a/llvm/test/Transforms/InstCombine/cast.ll +++ b/llvm/test/Transforms/InstCombine/cast.ll @@ -508,18 +508,16 @@ define <2 x i16> @test40vec_nonuniform(<2 x i16> %a) { ret <2 x i16> %r } -define <2 x i16> @test40vec_undef(<2 x i16> %a) { -; ALL-LABEL: @test40vec_undef( -; ALL-NEXT: [[T:%.*]] = zext <2 x i16> [[A:%.*]] to <2 x i32> -; ALL-NEXT: [[T21:%.*]] = lshr <2 x i32> [[T]], -; ALL-NEXT: [[T5:%.*]] = shl <2 x i32> [[T]], -; ALL-NEXT: [[T32:%.*]] = or <2 x i32> [[T21]], [[T5]] -; ALL-NEXT: [[R:%.*]] = trunc <2 x i32> [[T32]] to <2 x i16> +define <2 x i16> @test40vec_poison(<2 x i16> %a) { +; ALL-LABEL: @test40vec_poison( +; ALL-NEXT: [[T21:%.*]] = lshr <2 x i16> [[A:%.*]], +; ALL-NEXT: [[T5:%.*]] = shl <2 x i16> [[A]], +; ALL-NEXT: [[R:%.*]] = or disjoint <2 x i16> [[T21]], [[T5]] ; ALL-NEXT: ret <2 x i16> [[R]] ; %t = zext <2 x i16> %a 
to <2 x i32> - %t21 = lshr <2 x i32> %t, - %t5 = shl <2 x i32> %t, + %t21 = lshr <2 x i32> %t, + %t5 = shl <2 x i32> %t, %t32 = or <2 x i32> %t21, %t5 %r = trunc <2 x i32> %t32 to <2 x i16> ret <2 x i16> %r @@ -1452,7 +1450,7 @@ define i32 @test89() { ; LE-LABEL: @test89( ; LE-NEXT: ret i32 6 ; - ret i32 bitcast (<2 x i16> to i32) + ret i32 bitcast (<2 x i16> to i32) } define <2 x i32> @test90() { @@ -1462,7 +1460,7 @@ define <2 x i32> @test90() { ; LE-LABEL: @test90( ; LE-NEXT: ret <2 x i32> ; - %t6 = bitcast <4 x half> to <2 x i32> + %t6 = bitcast <4 x half> to <2 x i32> ret <2 x i32> %t6 } @@ -1537,13 +1535,13 @@ define <2 x i8> @trunc_lshr_sext_uniform(<2 x i8> %A) { ret <2 x i8> %D } -define <2 x i8> @trunc_lshr_sext_uniform_undef(<2 x i8> %A) { -; ALL-LABEL: @trunc_lshr_sext_uniform_undef( -; ALL-NEXT: [[D:%.*]] = ashr <2 x i8> [[A:%.*]], +define <2 x i8> @trunc_lshr_sext_uniform_poison(<2 x i8> %A) { +; ALL-LABEL: @trunc_lshr_sext_uniform_poison( +; ALL-NEXT: [[D:%.*]] = ashr <2 x i8> [[A:%.*]], ; ALL-NEXT: ret <2 x i8> [[D]] ; %B = sext <2 x i8> %A to <2 x i32> - %C = lshr <2 x i32> %B, + %C = lshr <2 x i32> %B, %D = trunc <2 x i32> %C to <2 x i8> ret <2 x i8> %D } @@ -1559,13 +1557,13 @@ define <2 x i8> @trunc_lshr_sext_nonuniform(<2 x i8> %A) { ret <2 x i8> %D } -define <3 x i8> @trunc_lshr_sext_nonuniform_undef(<3 x i8> %A) { -; ALL-LABEL: @trunc_lshr_sext_nonuniform_undef( -; ALL-NEXT: [[D:%.*]] = ashr <3 x i8> [[A:%.*]], +define <3 x i8> @trunc_lshr_sext_nonuniform_poison(<3 x i8> %A) { +; ALL-LABEL: @trunc_lshr_sext_nonuniform_poison( +; ALL-NEXT: [[D:%.*]] = ashr <3 x i8> [[A:%.*]], ; ALL-NEXT: ret <3 x i8> [[D]] ; %B = sext <3 x i8> %A to <3 x i32> - %C = lshr <3 x i32> %B, + %C = lshr <3 x i32> %B, %D = trunc <3 x i32> %C to <3 x i8> ret <3 x i8> %D } @@ -2014,15 +2012,13 @@ define <2 x i8> @trunc_lshr_zext_uniform(<2 x i8> %A) { ret <2 x i8> %D } -define <2 x i8> @trunc_lshr_zext_uniform_undef(<2 x i8> %A) { -; ALL-LABEL: @trunc_lshr_zext_uniform_undef( -; ALL-NEXT: [[B:%.*]] = zext <2 x i8> [[A:%.*]] to <2 x i32> -; ALL-NEXT: [[C:%.*]] = lshr <2 x i32> [[B]], -; ALL-NEXT: [[D:%.*]] = trunc nuw <2 x i32> [[C]] to <2 x i8> +define <2 x i8> @trunc_lshr_zext_uniform_poison(<2 x i8> %A) { +; ALL-LABEL: @trunc_lshr_zext_uniform_poison( +; ALL-NEXT: [[D:%.*]] = lshr <2 x i8> [[A:%.*]], ; ALL-NEXT: ret <2 x i8> [[D]] ; %B = zext <2 x i8> %A to <2 x i32> - %C = lshr <2 x i32> %B, + %C = lshr <2 x i32> %B, %D = trunc <2 x i32> %C to <2 x i8> ret <2 x i8> %D } @@ -2038,15 +2034,13 @@ define <2 x i8> @trunc_lshr_zext_nonuniform(<2 x i8> %A) { ret <2 x i8> %D } -define <3 x i8> @trunc_lshr_zext_nonuniform_undef(<3 x i8> %A) { -; ALL-LABEL: @trunc_lshr_zext_nonuniform_undef( -; ALL-NEXT: [[B:%.*]] = zext <3 x i8> [[A:%.*]] to <3 x i32> -; ALL-NEXT: [[C:%.*]] = lshr <3 x i32> [[B]], -; ALL-NEXT: [[D:%.*]] = trunc nuw <3 x i32> [[C]] to <3 x i8> +define <3 x i8> @trunc_lshr_zext_nonuniform_poison(<3 x i8> %A) { +; ALL-LABEL: @trunc_lshr_zext_nonuniform_poison( +; ALL-NEXT: [[D:%.*]] = lshr <3 x i8> [[A:%.*]], ; ALL-NEXT: ret <3 x i8> [[D]] ; %B = zext <3 x i8> %A to <3 x i32> - %C = lshr <3 x i32> %B, + %C = lshr <3 x i32> %B, %D = trunc <3 x i32> %C to <3 x i8> ret <3 x i8> %D } diff --git a/llvm/test/Transforms/InstCombine/ctpop-cttz.ll b/llvm/test/Transforms/InstCombine/ctpop-cttz.ll index 5d27f374d8921..70868554bdc1b 100644 --- a/llvm/test/Transforms/InstCombine/ctpop-cttz.ll +++ b/llvm/test/Transforms/InstCombine/ctpop-cttz.ll @@ -116,14 +116,14 @@ define <2 x i32> @ctpop3v(<2 x 
i32> %0) { ret <2 x i32> %5 } -define <2 x i32> @ctpop3v_undef(<2 x i32> %0) { -; CHECK-LABEL: @ctpop3v_undef( +define <2 x i32> @ctpop3v_poison(<2 x i32> %0) { +; CHECK-LABEL: @ctpop3v_poison( ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[TMP0:%.*]], i1 false), !range [[RNG0]] ; CHECK-NEXT: ret <2 x i32> [[TMP2]] ; %2 = sub <2 x i32> zeroinitializer, %0 %3 = and <2 x i32> %2, %0 - %4 = add <2 x i32> %3, + %4 = add <2 x i32> %3, %5 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %4) ret <2 x i32> %5 } diff --git a/llvm/test/Transforms/InstCombine/ctpop.ll b/llvm/test/Transforms/InstCombine/ctpop.ll index 27194724b7d83..b3653e5071ba2 100644 --- a/llvm/test/Transforms/InstCombine/ctpop.ll +++ b/llvm/test/Transforms/InstCombine/ctpop.ll @@ -155,28 +155,27 @@ define <2 x i32> @_parity_of_not_vec(<2 x i32> %x) { ret <2 x i32> %r } -define <2 x i32> @_parity_of_not_undef(<2 x i32> %x) { -; CHECK-LABEL: @_parity_of_not_undef( +define <2 x i32> @_parity_of_not_poison(<2 x i32> %x) { +; CHECK-LABEL: @_parity_of_not_poison( ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[X:%.*]]), !range [[RNG1]] ; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[TMP1]], ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %neg = xor <2 x i32> %x, + %neg = xor <2 x i32> %x, %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %neg) %r = and <2 x i32> %cnt, ret <2 x i32> %r } -define <2 x i32> @_parity_of_not_undef2(<2 x i32> %x) { -; CHECK-LABEL: @_parity_of_not_undef2( -; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[CNT:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[NEG]]), !range [[RNG1]] -; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[CNT]], +define <2 x i32> @_parity_of_not_poison2(<2 x i32> %x) { +; CHECK-LABEL: @_parity_of_not_poison2( +; CHECK-NEXT: [[CNT:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[X:%.*]]), !range [[RNG1]] +; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[CNT]], ; CHECK-NEXT: ret <2 x i32> [[R]] ; %neg = xor <2 x i32> %x, %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %neg) - %r = and <2 x i32> %cnt, + %r = and <2 x i32> %cnt, ret <2 x i32> %r } diff --git a/llvm/test/Transforms/InstCombine/fabs-as-int.ll b/llvm/test/Transforms/InstCombine/fabs-as-int.ll index f32c00e453f22..4e49ff159f875 100644 --- a/llvm/test/Transforms/InstCombine/fabs-as-int.ll +++ b/llvm/test/Transforms/InstCombine/fabs-as-int.ll @@ -137,15 +137,15 @@ define <2 x i32> @not_fabs_as_int_v2f32_nonsplat(<2 x float> %x) { ret <2 x i32> %and } -define <3 x i32> @fabs_as_int_v3f32_undef(<3 x float> %x) { -; CHECK-LABEL: define <3 x i32> @fabs_as_int_v3f32_undef +define <3 x i32> @fabs_as_int_v3f32_poison(<3 x float> %x) { +; CHECK-LABEL: define <3 x i32> @fabs_as_int_v3f32_poison ; CHECK-SAME: (<3 x float> [[X:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = call <3 x float> @llvm.fabs.v3f32(<3 x float> [[X]]) ; CHECK-NEXT: [[AND:%.*]] = bitcast <3 x float> [[TMP1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[AND]] ; %bc = bitcast <3 x float> %x to <3 x i32> - %and = and <3 x i32> %bc, + %and = and <3 x i32> %bc, ret <3 x i32> %and } diff --git a/llvm/test/Transforms/InstCombine/fabs.ll b/llvm/test/Transforms/InstCombine/fabs.ll index 7e380c2e4590a..5ec65784e7a34 100644 --- a/llvm/test/Transforms/InstCombine/fabs.ll +++ b/llvm/test/Transforms/InstCombine/fabs.ll @@ -321,7 +321,7 @@ define <2 x float> @select_fcmp_nnan_ole_negzero(<2 x float> %x) { ; CHECK-NEXT: ret <2 x float> [[FABS]] ; %lezero = fcmp ole <2 x float> %x, - %negx = fsub nnan <2 x float> , %x + %negx = 
fsub nnan <2 x float> , %x %fabs = select <2 x i1> %lezero, <2 x float> %negx, <2 x float> %x ret <2 x float> %fabs } @@ -332,7 +332,7 @@ define <2 x float> @select_nnan_fcmp_nnan_ole_negzero(<2 x float> %x) { ; CHECK-NEXT: ret <2 x float> [[FABS]] ; %lezero = fcmp ole <2 x float> %x, - %negx = fsub nnan <2 x float> , %x + %negx = fsub nnan <2 x float> , %x %fabs = select nnan <2 x i1> %lezero, <2 x float> %negx, <2 x float> %x ret <2 x float> %fabs } diff --git a/llvm/test/Transforms/InstCombine/fast-math.ll b/llvm/test/Transforms/InstCombine/fast-math.ll index 916955e34efac..83f2091244e52 100644 --- a/llvm/test/Transforms/InstCombine/fast-math.ll +++ b/llvm/test/Transforms/InstCombine/fast-math.ll @@ -541,12 +541,12 @@ define float @fneg2(float %x) { ret float %sub } -define <2 x float> @fneg2_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @fneg2_vec_undef( +define <2 x float> @fneg2_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @fneg2_vec_poison( ; CHECK-NEXT: [[SUB:%.*]] = fneg nsz <2 x float> [[X:%.*]] ; CHECK-NEXT: ret <2 x float> [[SUB]] ; - %sub = fsub nsz <2 x float> , %x + %sub = fsub nsz <2 x float> , %x ret <2 x float> %sub } diff --git a/llvm/test/Transforms/InstCombine/fcmp-special.ll b/llvm/test/Transforms/InstCombine/fcmp-special.ll index 88bfe930ffdd6..64bc86f4266c7 100644 --- a/llvm/test/Transforms/InstCombine/fcmp-special.ll +++ b/llvm/test/Transforms/InstCombine/fcmp-special.ll @@ -144,21 +144,21 @@ define <2 x i1> @uno_vec_with_nan(<2 x double> %x) { ret <2 x i1> %f } -define <2 x i1> @uno_vec_with_undef(<2 x double> %x) { -; CHECK-LABEL: @uno_vec_with_undef( +define <2 x i1> @uno_vec_with_poison(<2 x double> %x) { +; CHECK-LABEL: @uno_vec_with_poison( ; CHECK-NEXT: [[F:%.*]] = fcmp uno <2 x double> [[X:%.*]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[F]] ; - %f = fcmp uno <2 x double> %x, + %f = fcmp uno <2 x double> %x, ret <2 x i1> %f } -define <2 x i1> @ord_vec_with_undef(<2 x double> %x) { -; CHECK-LABEL: @ord_vec_with_undef( -; CHECK-NEXT: [[F:%.*]] = fcmp ord <2 x double> [[X:%.*]], +define <2 x i1> @ord_vec_with_poison(<2 x double> %x) { +; CHECK-LABEL: @ord_vec_with_poison( +; CHECK-NEXT: [[F:%.*]] = fcmp ord <2 x double> [[X:%.*]], ; CHECK-NEXT: ret <2 x i1> [[F]] ; - %f = fcmp ord <2 x double> %x, + %f = fcmp ord <2 x double> %x, ret <2 x i1> %f } @@ -224,12 +224,12 @@ define <2 x i1> @negative_zero_olt_vec(<2 x float> %x) { ret <2 x i1> %r } -define <2 x i1> @negative_zero_une_vec_undef(<2 x double> %x) { -; CHECK-LABEL: @negative_zero_une_vec_undef( +define <2 x i1> @negative_zero_une_vec_poison(<2 x double> %x) { +; CHECK-LABEL: @negative_zero_une_vec_poison( ; CHECK-NEXT: [[R:%.*]] = fcmp nnan une <2 x double> [[X:%.*]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %r = fcmp nnan une <2 x double> %x, + %r = fcmp nnan une <2 x double> %x, ret <2 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll index 069512b0f2d8e..389264e2f7075 100644 --- a/llvm/test/Transforms/InstCombine/fcmp.ll +++ b/llvm/test/Transforms/InstCombine/fcmp.ll @@ -102,12 +102,12 @@ define <2 x i1> @unary_fneg_constant_swap_pred_vec(<2 x float> %x) { ret <2 x i1> %cmp } -define <2 x i1> @fneg_constant_swap_pred_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @fneg_constant_swap_pred_vec_undef( +define <2 x i1> @fneg_constant_swap_pred_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @fneg_constant_swap_pred_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = fcmp olt <2 x float> [[X:%.*]], ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; - %neg = fsub <2 x 
float> , %x + %neg = fsub <2 x float> , %x %cmp = fcmp ogt <2 x float> %neg, ret <2 x i1> %cmp } @@ -234,34 +234,34 @@ define <2 x i1> @fneg_unary_fneg_swap_pred_vec(<2 x float> %x, <2 x float> %y) { ret <2 x i1> %cmp } -define <2 x i1> @fneg_fneg_swap_pred_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @fneg_fneg_swap_pred_vec_undef( +define <2 x i1> @fneg_fneg_swap_pred_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @fneg_fneg_swap_pred_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; - %neg1 = fsub <2 x float> , %x - %neg2 = fsub <2 x float> , %y + %neg1 = fsub <2 x float> , %x + %neg2 = fsub <2 x float> , %y %cmp = fcmp olt <2 x float> %neg1, %neg2 ret <2 x i1> %cmp } -define <2 x i1> @unary_fneg_fneg_swap_pred_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @unary_fneg_fneg_swap_pred_vec_undef( +define <2 x i1> @unary_fneg_fneg_swap_pred_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @unary_fneg_fneg_swap_pred_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %neg1 = fneg <2 x float> %x - %neg2 = fsub <2 x float> , %y + %neg2 = fsub <2 x float> , %y %cmp = fcmp olt <2 x float> %neg1, %neg2 ret <2 x i1> %cmp } -define <2 x i1> @fneg_unary_fneg_swap_pred_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @fneg_unary_fneg_swap_pred_vec_undef( +define <2 x i1> @fneg_unary_fneg_swap_pred_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @fneg_unary_fneg_swap_pred_vec_poison( ; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; - %neg1 = fsub <2 x float> , %x + %neg1 = fsub <2 x float> , %x %neg2 = fneg <2 x float> %y %cmp = fcmp olt <2 x float> %neg1, %neg2 ret <2 x i1> %cmp diff --git a/llvm/test/Transforms/InstCombine/fdiv.ll b/llvm/test/Transforms/InstCombine/fdiv.ll index a0710c2bb0484..ca11685c98417 100644 --- a/llvm/test/Transforms/InstCombine/fdiv.ll +++ b/llvm/test/Transforms/InstCombine/fdiv.ll @@ -141,12 +141,12 @@ define <2 x float> @not_exact_inverse_vec_arcp(<2 x float> %x) { ret <2 x float> %div } -define <2 x float> @not_exact_inverse_vec_arcp_with_undef_elt(<2 x float> %x) { -; CHECK-LABEL: @not_exact_inverse_vec_arcp_with_undef_elt( -; CHECK-NEXT: [[DIV:%.*]] = fdiv arcp <2 x float> [[X:%.*]], +define <2 x float> @not_exact_inverse_vec_arcp_with_poison_elt(<2 x float> %x) { +; CHECK-LABEL: @not_exact_inverse_vec_arcp_with_poison_elt( +; CHECK-NEXT: [[DIV:%.*]] = fdiv arcp <2 x float> [[X:%.*]], ; CHECK-NEXT: ret <2 x float> [[DIV]] ; - %div = fdiv arcp <2 x float> %x, + %div = fdiv arcp <2 x float> %x, ret <2 x float> %div } @@ -333,13 +333,13 @@ define <2 x float> @unary_fneg_fneg_vec(<2 x float> %x, <2 x float> %y) { ret <2 x float> %div } -define <2 x float> @fneg_fneg_vec_undef_elts(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @fneg_fneg_vec_undef_elts( +define <2 x float> @fneg_fneg_vec_poison_elts(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @fneg_fneg_vec_poison_elts( ; CHECK-NEXT: [[DIV:%.*]] = fdiv <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x float> [[DIV]] ; - %xneg = fsub <2 x float> , %x - %yneg = fsub <2 x float> , %y + %xneg = fsub <2 x float> , %x + %yneg = fsub <2 x float> , %y %div = fdiv <2 x float> %xneg, %yneg ret <2 x float> %div } @@ -404,12 +404,12 @@ define <2 x float> @unary_fneg_dividend_constant_divisor_vec(<2 x float> %x) { ret <2 x float> %div } -define <2 x 
float> @fneg_dividend_constant_divisor_vec_undef_elt(<2 x float> %x) { -; CHECK-LABEL: @fneg_dividend_constant_divisor_vec_undef_elt( +define <2 x float> @fneg_dividend_constant_divisor_vec_poison_elt(<2 x float> %x) { +; CHECK-LABEL: @fneg_dividend_constant_divisor_vec_poison_elt( ; CHECK-NEXT: [[DIV:%.*]] = fdiv ninf <2 x float> [[X:%.*]], ; CHECK-NEXT: ret <2 x float> [[DIV]] ; - %neg = fsub <2 x float> , %x + %neg = fsub <2 x float> , %x %div = fdiv ninf <2 x float> %neg, ret <2 x float> %div } diff --git a/llvm/test/Transforms/InstCombine/fma.ll b/llvm/test/Transforms/InstCombine/fma.ll index 8b413ae6f664b..cf3d7f3c525a5 100644 --- a/llvm/test/Transforms/InstCombine/fma.ll +++ b/llvm/test/Transforms/InstCombine/fma.ll @@ -60,13 +60,13 @@ define <2 x float> @fma_unary_fneg_x_unary_fneg_y_vec(<2 x float> %x, <2 x float ret <2 x float> %fma } -define <2 x float> @fma_fneg_x_fneg_y_vec_undef(<2 x float> %x, <2 x float> %y, <2 x float> %z) { -; CHECK-LABEL: @fma_fneg_x_fneg_y_vec_undef( +define <2 x float> @fma_fneg_x_fneg_y_vec_poison(<2 x float> %x, <2 x float> %y, <2 x float> %z) { +; CHECK-LABEL: @fma_fneg_x_fneg_y_vec_poison( ; CHECK-NEXT: [[FMA:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> [[X:%.*]], <2 x float> [[Y:%.*]], <2 x float> [[Z:%.*]]) ; CHECK-NEXT: ret <2 x float> [[FMA]] ; - %xn = fsub <2 x float> , %x - %yn = fsub <2 x float> , %y + %xn = fsub <2 x float> , %x + %yn = fsub <2 x float> , %y %fma = call <2 x float> @llvm.fma.v2f32(<2 x float> %xn, <2 x float> %yn, <2 x float> %z) ret <2 x float> %fma } diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll index 39f9e74f899d1..e9c86a1270493 100644 --- a/llvm/test/Transforms/InstCombine/fmul.ll +++ b/llvm/test/Transforms/InstCombine/fmul.ll @@ -42,12 +42,12 @@ define <2 x float> @unary_neg_constant_vec(<2 x float> %x) { ret <2 x float> %mul } -define <2 x float> @neg_constant_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @neg_constant_vec_undef( +define <2 x float> @neg_constant_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @neg_constant_vec_poison( ; CHECK-NEXT: [[MUL:%.*]] = fmul ninf <2 x float> [[X:%.*]], ; CHECK-NEXT: ret <2 x float> [[MUL]] ; - %sub = fsub <2 x float> , %x + %sub = fsub <2 x float> , %x %mul = fmul ninf <2 x float> %sub, ret <2 x float> %mul } @@ -162,34 +162,34 @@ define <2 x float> @neg_unary_neg_vec(<2 x float> %x, <2 x float> %y) { ret <2 x float> %mul } -define <2 x float> @neg_neg_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @neg_neg_vec_undef( +define <2 x float> @neg_neg_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @neg_neg_vec_poison( ; CHECK-NEXT: [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x float> [[MUL]] ; - %sub1 = fsub <2 x float> , %x - %sub2 = fsub <2 x float> , %y + %sub1 = fsub <2 x float> , %x + %sub2 = fsub <2 x float> , %y %mul = fmul arcp <2 x float> %sub1, %sub2 ret <2 x float> %mul } -define <2 x float> @unary_neg_neg_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @unary_neg_neg_vec_undef( +define <2 x float> @unary_neg_neg_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @unary_neg_neg_vec_poison( ; CHECK-NEXT: [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x float> [[MUL]] ; %neg = fneg <2 x float> %x - %sub = fsub <2 x float> , %y + %sub = fsub <2 x float> , %y %mul = fmul arcp <2 x float> %neg, %sub ret <2 x float> %mul } -define <2 x float> @neg_unary_neg_vec_undef(<2 x float> %x, <2 x float> %y) { -; 
CHECK-LABEL: @neg_unary_neg_vec_undef( +define <2 x float> @neg_unary_neg_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @neg_unary_neg_vec_poison( ; CHECK-NEXT: [[MUL:%.*]] = fmul arcp <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x float> [[MUL]] ; - %sub = fsub <2 x float> , %x + %sub = fsub <2 x float> , %x %neg = fneg <2 x float> %y %mul = fmul arcp <2 x float> %sub, %neg ret <2 x float> %mul @@ -322,13 +322,13 @@ define <2 x float> @unary_neg_mul_vec(<2 x float> %x, <2 x float> %y) { ret <2 x float> %mul } -define <2 x float> @neg_mul_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @neg_mul_vec_undef( +define <2 x float> @neg_mul_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @neg_mul_vec_poison( ; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x float> [[MUL]] ; - %sub = fsub <2 x float> , %x + %sub = fsub <2 x float> , %x %mul = fmul <2 x float> %sub, %y ret <2 x float> %mul } @@ -388,9 +388,9 @@ define void @test8(ptr %inout, i1 %c1) { entry: %0 = load i32, ptr %inout, align 4 %conv = uitofp i32 %0 to float - %vecinit = insertelement <4 x float> , float %conv, i32 3 + %vecinit = insertelement <4 x float> , float %conv, i32 3 %sub = fsub <4 x float> , %vecinit - %1 = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> + %1 = shufflevector <4 x float> %sub, <4 x float> poison, <4 x i32> %mul = fmul <4 x float> zeroinitializer, %1 br label %for.cond @@ -742,7 +742,7 @@ define <4 x float> @fdiv_constant_denominator_fmul_vec_constexpr(<4 x float> %x) ; CHECK-NEXT: [[T3:%.*]] = fmul reassoc <4 x float> [[X:%.*]], ; CHECK-NEXT: ret <4 x float> [[T3]] ; - %constExprMul = bitcast i128 trunc (i160 bitcast (<5 x float> to i160) to i128) to <4 x float> + %constExprMul = bitcast i128 trunc (i160 bitcast (<5 x float> to i160) to i128) to <4 x float> %t1 = fdiv reassoc <4 x float> %x, %t3 = fmul reassoc <4 x float> %t1, %constExprMul ret <4 x float> %t3 @@ -1270,7 +1270,7 @@ define @mul_scalable_splat_zero( %z) { ; CHECK-LABEL: @mul_scalable_splat_zero( ; CHECK-NEXT: ret zeroinitializer ; - %shuf = shufflevector insertelement ( undef, float 0.0, i32 0), undef, zeroinitializer + %shuf = shufflevector insertelement ( poison, float 0.0, i32 0), poison, zeroinitializer %t3 = fmul fast %shuf, %z ret %t3 } @@ -1393,7 +1393,7 @@ define <3 x float> @mul_neg_zero_nnan_ninf_vec(<3 x float> nofpclass(inf nan) %a ; CHECK-NEXT: ret <3 x float> [[RET]] ; entry: - %ret = fmul <3 x float> %a, + %ret = fmul <3 x float> %a, ret <3 x float> %ret } diff --git a/llvm/test/Transforms/InstCombine/fneg-as-int.ll b/llvm/test/Transforms/InstCombine/fneg-as-int.ll index d28e599cacf36..e3067b0d02461 100644 --- a/llvm/test/Transforms/InstCombine/fneg-as-int.ll +++ b/llvm/test/Transforms/InstCombine/fneg-as-int.ll @@ -139,15 +139,15 @@ define <2 x i32> @not_fneg_as_int_v2f32_nonsplat(<2 x float> %x) { ret <2 x i32> %xor } -define <3 x i32> @fneg_as_int_v3f32_undef(<3 x float> %x) { -; CHECK-LABEL: define <3 x i32> @fneg_as_int_v3f32_undef +define <3 x i32> @fneg_as_int_v3f32_poison(<3 x float> %x) { +; CHECK-LABEL: define <3 x i32> @fneg_as_int_v3f32_poison ; CHECK-SAME: (<3 x float> [[X:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = fneg <3 x float> [[X]] ; CHECK-NEXT: [[XOR:%.*]] = bitcast <3 x float> [[TMP1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[XOR]] ; %bc = bitcast <3 x float> %x to <3 x i32> - %xor = xor <3 x i32> %bc, + %xor = xor <3 x i32> %bc, ret <3 x i32> %xor } diff 
--git a/llvm/test/Transforms/InstCombine/fneg-fabs-as-int.ll b/llvm/test/Transforms/InstCombine/fneg-fabs-as-int.ll index 9aa8d4944e39a..8c3e6958fe083 100644 --- a/llvm/test/Transforms/InstCombine/fneg-fabs-as-int.ll +++ b/llvm/test/Transforms/InstCombine/fneg-fabs-as-int.ll @@ -158,8 +158,8 @@ define <2 x i32> @not_fneg_fabs_as_int_v2f32_nonsplat(<2 x float> %x) { ret <2 x i32> %or } -define <3 x i32> @fneg_fabs_as_int_v3f32_undef(<3 x float> %x) { -; CHECK-LABEL: define <3 x i32> @fneg_fabs_as_int_v3f32_undef +define <3 x i32> @fneg_fabs_as_int_v3f32_poison(<3 x float> %x) { +; CHECK-LABEL: define <3 x i32> @fneg_fabs_as_int_v3f32_poison ; CHECK-SAME: (<3 x float> [[X:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = call <3 x float> @llvm.fabs.v3f32(<3 x float> [[X]]) ; CHECK-NEXT: [[TMP2:%.*]] = fneg <3 x float> [[TMP1]] @@ -167,7 +167,7 @@ define <3 x i32> @fneg_fabs_as_int_v3f32_undef(<3 x float> %x) { ; CHECK-NEXT: ret <3 x i32> [[OR]] ; %bc = bitcast <3 x float> %x to <3 x i32> - %or = or <3 x i32> %bc, + %or = or <3 x i32> %bc, ret <3 x i32> %or } diff --git a/llvm/test/Transforms/InstCombine/fneg.ll b/llvm/test/Transforms/InstCombine/fneg.ll index ed68ba50d36ee..7c9289c447113 100644 --- a/llvm/test/Transforms/InstCombine/fneg.ll +++ b/llvm/test/Transforms/InstCombine/fneg.ll @@ -87,24 +87,24 @@ define float @fmul_fneg_extra_use(float %x) { ret float %r } -; Try a vector. Use special constants (NaN, INF, undef) because they don't change anything. +; Try a vector. Use special constants (NaN, INF, poison) because they don't change anything. define <4 x double> @fmul_fsub_vec(<4 x double> %x) { ; CHECK-LABEL: @fmul_fsub_vec( -; CHECK-NEXT: [[R:%.*]] = fmul <4 x double> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = fmul <4 x double> [[X:%.*]], ; CHECK-NEXT: ret <4 x double> [[R]] ; - %m = fmul <4 x double> %x, + %m = fmul <4 x double> %x, %r = fsub <4 x double> , %m ret <4 x double> %r } define <4 x double> @fmul_fneg_vec(<4 x double> %x) { ; CHECK-LABEL: @fmul_fneg_vec( -; CHECK-NEXT: [[R:%.*]] = fmul <4 x double> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = fmul <4 x double> [[X:%.*]], ; CHECK-NEXT: ret <4 x double> [[R]] ; - %m = fmul <4 x double> %x, + %m = fmul <4 x double> %x, %r = fneg <4 x double> %m ret <4 x double> %r } @@ -181,24 +181,24 @@ define float @fdiv_op1_constant_fneg_extra_use(float %x) { ret float %r } -; Try a vector. Use special constants (NaN, INF, undef) because they don't change anything. +; Try a vector. Use special constants (NaN, INF, poison) because they don't change anything. define <4 x double> @fdiv_op1_constant_fsub_vec(<4 x double> %x) { ; CHECK-LABEL: @fdiv_op1_constant_fsub_vec( -; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> [[X:%.*]], ; CHECK-NEXT: ret <4 x double> [[R]] ; - %d = fdiv <4 x double> %x, + %d = fdiv <4 x double> %x, %r = fsub <4 x double> , %d ret <4 x double> %r } define <4 x double> @fdiv_op1_constant_fneg_vec(<4 x double> %x) { ; CHECK-LABEL: @fdiv_op1_constant_fneg_vec( -; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> [[X:%.*]], ; CHECK-NEXT: ret <4 x double> [[R]] ; - %d = fdiv <4 x double> %x, + %d = fdiv <4 x double> %x, %r = fneg <4 x double> %d ret <4 x double> %r } @@ -335,24 +335,24 @@ define float @fdiv_op0_constant_fneg_extra_use(float %x) { ret float %r } -; Try a vector. Use special constants (NaN, INF, undef) because they don't change anything. +; Try a vector. Use special constants (NaN, INF, poison) because they don't change anything. 
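A minimal sketch of the fold these vector tests exercise, using hypothetical lane values (the real constants are elided above): negating an fdiv whose numerator is a constant simply negates each lane, and NaN, INF, and poison lanes negate trivially, so they do not block the fold.

; Hypothetical IR, not from the patch; feed to opt -passes=instcombine to observe the fold.
define <4 x double> @fdiv_op0_constant_fneg_vec_sketch(<4 x double> %x) {
  ; the numerator mixes a normal value, a quiet NaN, +infinity, and a poison lane
  %d = fdiv <4 x double> <double 42.0, double 0x7FF8000000000000, double 0x7FF0000000000000, double poison>, %x
  %r = fneg <4 x double> %d
  ret <4 x double> %r
}
; Expected shape of the result (sketch): the fneg is folded into the constant numerator, e.g.
;   %r = fdiv <4 x double> <double -42.0, double 0xFFF8000000000000, double 0xFFF0000000000000, double poison>, %x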
define <4 x double> @fdiv_op0_constant_fsub_vec(<4 x double> %x) { ; CHECK-LABEL: @fdiv_op0_constant_fsub_vec( -; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> , [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> , [[X:%.*]] ; CHECK-NEXT: ret <4 x double> [[R]] ; - %d = fdiv <4 x double> , %x + %d = fdiv <4 x double> , %x %r = fsub <4 x double> , %d ret <4 x double> %r } define <4 x double> @fdiv_op0_constant_fneg_vec(<4 x double> %x) { ; CHECK-LABEL: @fdiv_op0_constant_fneg_vec( -; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> , [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = fdiv <4 x double> , [[X:%.*]] ; CHECK-NEXT: ret <4 x double> [[R]] ; - %d = fdiv <4 x double> , %x + %d = fdiv <4 x double> , %x %r = fneg <4 x double> %d ret <4 x double> %r } @@ -584,11 +584,11 @@ define <2 x float> @fneg_nsz_fadd_constant_vec(<2 x float> %x) { define <2 x float> @fake_fneg_nsz_fadd_constant_vec(<2 x float> %x) { ; CHECK-LABEL: @fake_fneg_nsz_fadd_constant_vec( -; CHECK-NEXT: [[R:%.*]] = fsub nsz <2 x float> , [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = fsub nsz <2 x float> , [[X:%.*]] ; CHECK-NEXT: ret <2 x float> [[R]] ; - %a = fadd <2 x float> %x, - %r = fsub nsz <2 x float> , %a + %a = fadd <2 x float> %x, + %r = fsub nsz <2 x float> , %a ret <2 x float> %r } diff --git a/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll b/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll index b482cfdfde197..1fd570bf2635b 100644 --- a/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll +++ b/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll @@ -36,36 +36,36 @@ define <4 x i32> @t1_vec_splat(<4 x i32> %x, <4 x i32> %y) { ret <4 x i32> %t2 } -define <4 x i32> @t2_vec_undef0(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @t2_vec_undef0( +define <4 x i32> @t2_vec_poison0(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @t2_vec_poison0( ; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <4 x i32> [[T2]] ; - %t0 = xor <4 x i32> %x, + %t0 = xor <4 x i32> %x, %t1 = add <4 x i32> %t0, %y %t2 = add <4 x i32> %t1, ret <4 x i32> %t2 } -define <4 x i32> @t3_vec_undef1(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @t3_vec_undef1( +define <4 x i32> @t3_vec_poison1(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @t3_vec_poison1( ; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <4 x i32> [[T2]] ; %t0 = xor <4 x i32> %x, %t1 = add <4 x i32> %t0, %y - %t2 = add <4 x i32> %t1, + %t2 = add <4 x i32> %t1, ret <4 x i32> %t2 } -define <4 x i32> @t4_vec_undef2(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @t4_vec_undef2( +define <4 x i32> @t4_vec_poison2(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @t4_vec_poison2( ; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <4 x i32> [[T2]] ; - %t0 = xor <4 x i32> %x, + %t0 = xor <4 x i32> %x, %t1 = add <4 x i32> %t0, %y - %t2 = add <4 x i32> %t1, + %t2 = add <4 x i32> %t1, ret <4 x i32> %t2 } diff --git a/llvm/test/Transforms/InstCombine/fold-sub-of-not-to-inc-of-add.ll b/llvm/test/Transforms/InstCombine/fold-sub-of-not-to-inc-of-add.ll index 6f311f05fb017..af580ba57513c 100644 --- a/llvm/test/Transforms/InstCombine/fold-sub-of-not-to-inc-of-add.ll +++ b/llvm/test/Transforms/InstCombine/fold-sub-of-not-to-inc-of-add.ll @@ -50,13 +50,13 @@ define <4 x i32> @p1_vector_splat(<4 x i32> %x, <4 x i32> %y) { ret <4 x i32> %t1 } -define <4 x i32> @p2_vector_undef(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: 
@p2_vector_undef( +define <4 x i32> @p2_vector_poison(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @p2_vector_poison( ; CHECK-NEXT: [[T0_NEG:%.*]] = add <4 x i32> [[X:%.*]], ; CHECK-NEXT: [[T1:%.*]] = add <4 x i32> [[T0_NEG]], [[Y:%.*]] ; CHECK-NEXT: ret <4 x i32> [[T1]] ; - %t0 = xor <4 x i32> %x, + %t0 = xor <4 x i32> %x, %t1 = sub <4 x i32> %y, %t0 ret <4 x i32> %t1 } diff --git a/llvm/test/Transforms/InstCombine/fpcast.ll b/llvm/test/Transforms/InstCombine/fpcast.ll index d2c932ba447e4..69daac773a645 100644 --- a/llvm/test/Transforms/InstCombine/fpcast.ll +++ b/llvm/test/Transforms/InstCombine/fpcast.ll @@ -51,13 +51,13 @@ define half @unary_fneg_fptrunc(float %a) { ret half %c } -define <2 x half> @fneg_fptrunc_vec_undef(<2 x float> %a) { -; CHECK-LABEL: @fneg_fptrunc_vec_undef( +define <2 x half> @fneg_fptrunc_vec_poison(<2 x float> %a) { +; CHECK-LABEL: @fneg_fptrunc_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x float> [[A:%.*]] to <2 x half> ; CHECK-NEXT: [[C:%.*]] = fneg <2 x half> [[TMP1]] ; CHECK-NEXT: ret <2 x half> [[C]] ; - %b = fsub <2 x float> , %a + %b = fsub <2 x float> , %a %c = fptrunc <2 x float> %b to <2 x half> ret <2 x half> %c } diff --git a/llvm/test/Transforms/InstCombine/fsub.ll b/llvm/test/Transforms/InstCombine/fsub.ll index 6e13c33b126d5..f1e7086e697e8 100644 --- a/llvm/test/Transforms/InstCombine/fsub.ll +++ b/llvm/test/Transforms/InstCombine/fsub.ll @@ -153,12 +153,12 @@ define <2 x float> @constant_op1_vec(<2 x float> %x, <2 x float> %y) { ret <2 x float> %r } -define <2 x float> @constant_op1_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @constant_op1_vec_undef( -; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], +define <2 x float> @constant_op1_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @constant_op1_vec_poison( +; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], ; CHECK-NEXT: ret <2 x float> [[R]] ; - %r = fsub <2 x float> %x, + %r = fsub <2 x float> %x, ret <2 x float> %r } @@ -204,12 +204,12 @@ define <2 x float> @unary_neg_op1_vec(<2 x float> %x, <2 x float> %y) { ret <2 x float> %r } -define <2 x float> @neg_op1_vec_undef(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @neg_op1_vec_undef( +define <2 x float> @neg_op1_vec_poison(<2 x float> %x, <2 x float> %y) { +; CHECK-LABEL: @neg_op1_vec_poison( ; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x float> [[R]] ; - %negy = fsub <2 x float> , %y + %negy = fsub <2 x float> , %y %r = fsub <2 x float> %x, %negy ret <2 x float> %r } diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll index 162519e648f3e..a54e6e4642b75 100644 --- a/llvm/test/Transforms/InstCombine/funnel.ll +++ b/llvm/test/Transforms/InstCombine/funnel.ll @@ -43,24 +43,24 @@ define <2 x i16> @fshl_v2i16_constant_splat(<2 x i16> %x, <2 x i16> %y) { ret <2 x i16> %r } -define <2 x i16> @fshl_v2i16_constant_splat_undef0(<2 x i16> %x, <2 x i16> %y) { -; CHECK-LABEL: @fshl_v2i16_constant_splat_undef0( +define <2 x i16> @fshl_v2i16_constant_splat_poison0(<2 x i16> %x, <2 x i16> %y) { +; CHECK-LABEL: @fshl_v2i16_constant_splat_poison0( ; CHECK-NEXT: [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[Y:%.*]], <2 x i16> ) ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %shl = shl <2 x i16> %x, + %shl = shl <2 x i16> %x, %shr = lshr <2 x i16> %y, %r = or <2 x i16> %shl, %shr ret <2 x i16> %r } -define <2 x i16> @fshl_v2i16_constant_splat_undef1(<2 x i16> %x, <2 x i16> %y) { -; CHECK-LABEL: 
@fshl_v2i16_constant_splat_undef1( +define <2 x i16> @fshl_v2i16_constant_splat_poison1(<2 x i16> %x, <2 x i16> %y) { +; CHECK-LABEL: @fshl_v2i16_constant_splat_poison1( ; CHECK-NEXT: [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[Y:%.*]], <2 x i16> ) ; CHECK-NEXT: ret <2 x i16> [[R]] ; %shl = shl <2 x i16> %x, - %shr = lshr <2 x i16> %y, + %shr = lshr <2 x i16> %y, %r = or <2 x i16> %shl, %shr ret <2 x i16> %r } @@ -78,30 +78,30 @@ define <2 x i17> @fshr_v2i17_constant_splat(<2 x i17> %x, <2 x i17> %y) { ret <2 x i17> %r } -define <2 x i17> @fshr_v2i17_constant_splat_undef0(<2 x i17> %x, <2 x i17> %y) { -; CHECK-LABEL: @fshr_v2i17_constant_splat_undef0( +define <2 x i17> @fshr_v2i17_constant_splat_poison0(<2 x i17> %x, <2 x i17> %y) { +; CHECK-LABEL: @fshr_v2i17_constant_splat_poison0( ; CHECK-NEXT: [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[Y:%.*]], <2 x i17> [[X:%.*]], <2 x i17> ) ; CHECK-NEXT: ret <2 x i17> [[R]] ; - %shr = lshr <2 x i17> %x, - %shl = shl <2 x i17> %y, + %shr = lshr <2 x i17> %x, + %shl = shl <2 x i17> %y, %r = or <2 x i17> %shr, %shl ret <2 x i17> %r } -define <2 x i17> @fshr_v2i17_constant_splat_undef1(<2 x i17> %x, <2 x i17> %y) { -; CHECK-LABEL: @fshr_v2i17_constant_splat_undef1( +define <2 x i17> @fshr_v2i17_constant_splat_poison1(<2 x i17> %x, <2 x i17> %y) { +; CHECK-LABEL: @fshr_v2i17_constant_splat_poison1( ; CHECK-NEXT: [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[Y:%.*]], <2 x i17> [[X:%.*]], <2 x i17> ) ; CHECK-NEXT: ret <2 x i17> [[R]] ; - %shr = lshr <2 x i17> %x, - %shl = shl <2 x i17> %y, + %shr = lshr <2 x i17> %x, + %shl = shl <2 x i17> %y, %r = or <2 x i17> %shr, %shl ret <2 x i17> %r } ; Allow arbitrary shift constants. -; Support undef elements. +; Support poison elements. 
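A minimal sketch of the pattern the nonsplat tests below cover, with hypothetical shift amounts (the real ones are elided above): each lane's shl and lshr amounts must sum to the bit width, and a poison lane in one of the shift constants does not block the match.

; Hypothetical IR, not from the patch; 7 + 25 == 32 in the fully defined lane.
define <2 x i32> @funnel_nonsplat_poison_sketch(<2 x i32> %x, <2 x i32> %y) {
  %shl = shl <2 x i32> %y, <i32 7, i32 18>
  %shr = lshr <2 x i32> %x, <i32 25, i32 poison>
  %r = or <2 x i32> %shl, %shr
  ret <2 x i32> %r
}
; Expected shape of the result (sketch): a single funnel-shift intrinsic call such as
;   %r = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %y, <2 x i32> %x, <2 x i32> <i32 7, i32 18>)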
define <2 x i32> @fshr_v2i32_constant_nonsplat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @fshr_v2i32_constant_nonsplat( @@ -114,24 +114,24 @@ define <2 x i32> @fshr_v2i32_constant_nonsplat(<2 x i32> %x, <2 x i32> %y) { ret <2 x i32> %r } -define <2 x i32> @fshr_v2i32_constant_nonsplat_undef0(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @fshr_v2i32_constant_nonsplat_undef0( +define <2 x i32> @fshr_v2i32_constant_nonsplat_poison0(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @fshr_v2i32_constant_nonsplat_poison0( ; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> [[X:%.*]], <2 x i32> ) ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %shr = lshr <2 x i32> %x, + %shr = lshr <2 x i32> %x, %shl = shl <2 x i32> %y, %r = or <2 x i32> %shl, %shr ret <2 x i32> %r } -define <2 x i32> @fshr_v2i32_constant_nonsplat_undef1(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @fshr_v2i32_constant_nonsplat_undef1( -; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> [[X:%.*]], <2 x i32> ) +define <2 x i32> @fshr_v2i32_constant_nonsplat_poison1(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @fshr_v2i32_constant_nonsplat_poison1( +; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> [[X:%.*]], <2 x i32> ) ; CHECK-NEXT: ret <2 x i32> [[R]] ; %shr = lshr <2 x i32> %x, - %shl = shl <2 x i32> %y, + %shl = shl <2 x i32> %y, %r = or <2 x i32> %shl, %shr ret <2 x i32> %r } @@ -147,13 +147,13 @@ define <2 x i36> @fshl_v2i36_constant_nonsplat(<2 x i36> %x, <2 x i36> %y) { ret <2 x i36> %r } -define <3 x i36> @fshl_v3i36_constant_nonsplat_undef0(<3 x i36> %x, <3 x i36> %y) { -; CHECK-LABEL: @fshl_v3i36_constant_nonsplat_undef0( -; CHECK-NEXT: [[R:%.*]] = call <3 x i36> @llvm.fshl.v3i36(<3 x i36> [[X:%.*]], <3 x i36> [[Y:%.*]], <3 x i36> ) +define <3 x i36> @fshl_v3i36_constant_nonsplat_poison0(<3 x i36> %x, <3 x i36> %y) { +; CHECK-LABEL: @fshl_v3i36_constant_nonsplat_poison0( +; CHECK-NEXT: [[R:%.*]] = call <3 x i36> @llvm.fshl.v3i36(<3 x i36> [[X:%.*]], <3 x i36> [[Y:%.*]], <3 x i36> ) ; CHECK-NEXT: ret <3 x i36> [[R]] ; - %shl = shl <3 x i36> %x, - %shr = lshr <3 x i36> %y, + %shl = shl <3 x i36> %x, + %shr = lshr <3 x i36> %y, %r = or <3 x i36> %shl, %shr ret <3 x i36> %r } diff --git a/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll b/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll index 12a81f0cd2f0f..40caa57891369 100644 --- a/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll +++ b/llvm/test/Transforms/InstCombine/get-lowbitmask-upto-and-including-bit.ll @@ -41,36 +41,36 @@ define <2 x i8> @t2_vec(<2 x i8> %x) { %mask = or <2 x i8> %lowbitmask, %bitmask ret <2 x i8> %mask } -define <3 x i8> @t3_vec_undef0(<3 x i8> %x) { -; CHECK-LABEL: @t3_vec_undef0( +define <3 x i8> @t3_vec_poison0(<3 x i8> %x) { +; CHECK-LABEL: @t3_vec_poison0( ; CHECK-NEXT: [[TMP1:%.*]] = sub <3 x i8> , [[X:%.*]] ; CHECK-NEXT: [[MASK:%.*]] = lshr <3 x i8> , [[TMP1]] ; CHECK-NEXT: ret <3 x i8> [[MASK]] ; - %bitmask = shl <3 x i8> , %x + %bitmask = shl <3 x i8> , %x %lowbitmask = add <3 x i8> %bitmask, %mask = or <3 x i8> %lowbitmask, %bitmask ret <3 x i8> %mask } -define <3 x i8> @t4_vec_undef1(<3 x i8> %x) { -; CHECK-LABEL: @t4_vec_undef1( +define <3 x i8> @t4_vec_poison1(<3 x i8> %x) { +; CHECK-LABEL: @t4_vec_poison1( ; CHECK-NEXT: [[TMP1:%.*]] = sub <3 x i8> , [[X:%.*]] ; CHECK-NEXT: [[MASK:%.*]] = lshr <3 x i8> , [[TMP1]] ; CHECK-NEXT: ret <3 x i8> [[MASK]] ; 
%bitmask = shl <3 x i8> , %x - %lowbitmask = add <3 x i8> %bitmask, + %lowbitmask = add <3 x i8> %bitmask, %mask = or <3 x i8> %lowbitmask, %bitmask ret <3 x i8> %mask } -define <3 x i8> @t5_vec_undef2(<3 x i8> %x) { -; CHECK-LABEL: @t5_vec_undef2( +define <3 x i8> @t5_vec_poison2(<3 x i8> %x) { +; CHECK-LABEL: @t5_vec_poison2( ; CHECK-NEXT: [[TMP1:%.*]] = sub <3 x i8> , [[X:%.*]] ; CHECK-NEXT: [[MASK:%.*]] = lshr <3 x i8> , [[TMP1]] ; CHECK-NEXT: ret <3 x i8> [[MASK]] ; - %bitmask = shl <3 x i8> , %x - %lowbitmask = add <3 x i8> %bitmask, + %bitmask = shl <3 x i8> , %x + %lowbitmask = add <3 x i8> %bitmask, %mask = or <3 x i8> %lowbitmask, %bitmask ret <3 x i8> %mask } diff --git a/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll b/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll index c8f14595ea673..e4cae13519783 100644 --- a/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll +++ b/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll @@ -55,14 +55,14 @@ define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i8> %negbias } -define <2 x i8> @t3_vec_undef(<2 x i8> %x, <2 x i8> %y) { -; CHECK-LABEL: @t3_vec_undef( +define <2 x i8> @t3_vec_poison(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @t3_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP2]] ; CHECK-NEXT: ret <2 x i8> [[NEGBIAS]] ; - %negy = sub <2 x i8> , %y + %negy = sub <2 x i8> , %y %unbiasedx = and <2 x i8> %negy, %x %negbias = sub <2 x i8> %unbiasedx, %x ret <2 x i8> %negbias diff --git a/llvm/test/Transforms/InstCombine/hoist-not-from-ashr-operand.ll b/llvm/test/Transforms/InstCombine/hoist-not-from-ashr-operand.ll index e0242855e2683..2217666f0f49a 100644 --- a/llvm/test/Transforms/InstCombine/hoist-not-from-ashr-operand.ll +++ b/llvm/test/Transforms/InstCombine/hoist-not-from-ashr-operand.ll @@ -41,14 +41,14 @@ define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) { %ashr = ashr <2 x i8> %not_x, %y ret <2 x i8> %ashr } -; Note that we must sanitize undef elts of -1 constant to -1 or 0. -define <2 x i8> @t3_vec_undef(<2 x i8> %x, <2 x i8> %y) { -; CHECK-LABEL: @t3_vec_undef( +; Note that we must sanitize poison elts of -1 constant to -1 or 0. 
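A minimal sketch of what this note means, with a hypothetical constant (the real one is elided above): ashr of a bitwise not is the bitwise not of the ashr, so the xor is hoisted past the shift; if the all-ones operand had a poison lane, the rebuilt constant must use -1 (or 0) in that lane rather than keep the poison.

; Hypothetical IR, not from the patch.
define <2 x i8> @hoist_not_from_ashr_sketch(<2 x i8> %x, <2 x i8> %y) {
  %not_x = xor <2 x i8> %x, <i8 -1, i8 poison>   ; treated as a 'not' even with the poison lane
  %ashr = ashr <2 x i8> %not_x, %y
  ret <2 x i8> %ashr
}
; Expected shape of the result (sketch): the not is hoisted and the constant is sanitized, e.g.
;   %s = ashr <2 x i8> %x, %y
;   %r = xor <2 x i8> %s, <i8 -1, i8 -1>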
+define <2 x i8> @t3_vec_poison(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @t3_vec_poison( ; CHECK-NEXT: [[NOT_X_NOT:%.*]] = ashr <2 x i8> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[ASHR:%.*]] = xor <2 x i8> [[NOT_X_NOT]], ; CHECK-NEXT: ret <2 x i8> [[ASHR]] ; - %not_x = xor <2 x i8> %x, + %not_x = xor <2 x i8> %x, %ashr = ashr <2 x i8> %not_x, %y ret <2 x i8> %ashr } diff --git a/llvm/test/Transforms/InstCombine/icmp-uge-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-uge-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll index 5adf476f7a79f..32ef6267cdf8b 100644 --- a/llvm/test/Transforms/InstCombine/icmp-uge-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll +++ b/llvm/test/Transforms/InstCombine/icmp-uge-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll @@ -56,8 +56,8 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %val, <3 x i8> %bits) { ; CHECK-LABEL: @p2_vec_undef0( ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> , [[BITS:%.*]] ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]]) -; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS]] -; CHECK-NEXT: [[R:%.*]] = icmp eq <3 x i8> [[VAL_HIGHBITS]], zeroinitializer +; CHECK-NEXT: [[T1:%.*]] = add <3 x i8> [[T0]], +; CHECK-NEXT: [[R:%.*]] = icmp uge <3 x i8> [[T1]], [[VAL:%.*]] ; CHECK-NEXT: ret <3 x i1> [[R]] ; %t0 = shl <3 x i8> , %bits diff --git a/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll index 7f4603881f23c..27b02c8c6e936 100644 --- a/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll +++ b/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll @@ -40,38 +40,38 @@ define <2 x i1> @p1_vec(<2 x i8> %val, <2 x i8> %bits) { ret <2 x i1> %r } -define <3 x i1> @p2_vec_undef0(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef0( +define <3 x i1> @p2_vec_poison0(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison0( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; - %t0 = shl <3 x i8> , %bits + %t0 = shl <3 x i8> , %bits %t1 = xor <3 x i8> %t0, %r = icmp uge <3 x i8> %t1, %val ret <3 x i1> %r } -define <3 x i1> @p2_vec_undef1(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef1( +define <3 x i1> @p2_vec_poison1(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison1( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; %t0 = shl <3 x i8> , %bits - %t1 = xor <3 x i8> %t0, + %t1 = xor <3 x i8> %t0, %r = icmp uge <3 x i8> %t1, %val ret <3 x i1> %r } -define <3 x i1> @p2_vec_undef2(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef2( +define <3 x i1> @p2_vec_poison2(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison2( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; - 
%t0 = shl <3 x i8> , %bits - %t1 = xor <3 x i8> %t0, + %t0 = shl <3 x i8> , %bits + %t1 = xor <3 x i8> %t0, %r = icmp uge <3 x i8> %t1, %val ret <3 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/icmp-ugt-of-shl-1-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-ugt-of-shl-1-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll index 550e8bb17229f..72cfb5a9f8bd0 100644 --- a/llvm/test/Transforms/InstCombine/icmp-ugt-of-shl-1-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll +++ b/llvm/test/Transforms/InstCombine/icmp-ugt-of-shl-1-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll @@ -38,13 +38,13 @@ define <2 x i1> @p1_vec(<2 x i8> %val, <2 x i8> %bits) { ret <2 x i1> %r } -define <3 x i1> @p2_vec_undef(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef( +define <3 x i1> @p2_vec_poison(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; - %t0 = shl <3 x i8> , %bits + %t0 = shl <3 x i8> , %bits %r = icmp ugt <3 x i8> %t0, %val ret <3 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/icmp-ule-of-shl-1-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-ule-of-shl-1-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll index 26b667d36728a..79e6914f09531 100644 --- a/llvm/test/Transforms/InstCombine/icmp-ule-of-shl-1-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll +++ b/llvm/test/Transforms/InstCombine/icmp-ule-of-shl-1-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll @@ -38,13 +38,13 @@ define <2 x i1> @p1_vec(<2 x i8> %val, <2 x i8> %bits) { ret <2 x i1> %r } -define <3 x i1> @p2_vec_undef(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef( +define <3 x i1> @p2_vec_poison(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; - %t0 = shl <3 x i8> , %bits + %t0 = shl <3 x i8> , %bits %r = icmp ule <3 x i8> %t0, %val ret <3 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/icmp-ult-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-ult-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll index dd353d44218bf..25894a22f0075 100644 --- a/llvm/test/Transforms/InstCombine/icmp-ult-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll +++ b/llvm/test/Transforms/InstCombine/icmp-ult-of-add-of-shl-one-by-bits-to-allones-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll @@ -56,8 +56,8 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %val, <3 x i8> %bits) { ; CHECK-LABEL: @p2_vec_undef0( ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> , [[BITS:%.*]] ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]]) -; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS]] -; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i8> [[VAL_HIGHBITS]], zeroinitializer +; CHECK-NEXT: [[T1:%.*]] = add <3 x i8> [[T0]], +; CHECK-NEXT: [[R:%.*]] = icmp ult <3 x i8> [[T1]], [[VAL:%.*]] ; CHECK-NEXT: ret <3 x i1> [[R]] ; %t0 = shl <3 x i8> , %bits diff --git 
a/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll index c7a45c5cdc11a..8441033d4857e 100644 --- a/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll +++ b/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll @@ -40,38 +40,38 @@ define <2 x i1> @p1_vec(<2 x i8> %val, <2 x i8> %bits) { ret <2 x i1> %r } -define <3 x i1> @p2_vec_undef0(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef0( +define <3 x i1> @p2_vec_poison0(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison0( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; - %t0 = shl <3 x i8> , %bits + %t0 = shl <3 x i8> , %bits %t1 = xor <3 x i8> %t0, %r = icmp ult <3 x i8> %t1, %val ret <3 x i1> %r } -define <3 x i1> @p2_vec_undef1(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef1( +define <3 x i1> @p2_vec_poison1(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison1( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; %t0 = shl <3 x i8> , %bits - %t1 = xor <3 x i8> %t0, + %t1 = xor <3 x i8> %t0, %r = icmp ult <3 x i8> %t1, %val ret <3 x i1> %r } -define <3 x i1> @p2_vec_undef2(<3 x i8> %val, <3 x i8> %bits) { -; CHECK-LABEL: @p2_vec_undef2( +define <3 x i1> @p2_vec_poison2(<3 x i8> %val, <3 x i8> %bits) { +; CHECK-LABEL: @p2_vec_poison2( ; CHECK-NEXT: [[VAL_HIGHBITS:%.*]] = lshr <3 x i8> [[VAL:%.*]], [[BITS:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i8> [[VAL_HIGHBITS]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[R]] ; - %t0 = shl <3 x i8> , %bits - %t1 = xor <3 x i8> %t0, + %t0 = shl <3 x i8> , %bits + %t1 = xor <3 x i8> %t0, %r = icmp ult <3 x i8> %t1, %val ret <3 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll index 10ab1fe118348..31093c7ca1036 100644 --- a/llvm/test/Transforms/InstCombine/icmp.ll +++ b/llvm/test/Transforms/InstCombine/icmp.ll @@ -1790,14 +1790,14 @@ define <2 x i1> @icmp_add20_eq_add57_splat(<2 x i32> %x, <2 x i32> %y) { ret <2 x i1> %cmp } -define <2 x i1> @icmp_add20_eq_add57_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @icmp_add20_eq_add57_undef( +define <2 x i1> @icmp_add20_eq_add57_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @icmp_add20_eq_add57_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], ; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add <2 x i32> %x, - %2 = add <2 x i32> %y, + %2 = add <2 x i32> %y, %cmp = icmp eq <2 x i32> %1, %2 ret <2 x i1> %cmp } @@ -1838,14 +1838,14 @@ define <2 x i1> @icmp_sub57_ne_sub20_splat(<2 x i32> %x, <2 x i32> %y) { ret <2 x i1> %cmp } -define <2 x i1> @icmp_sub57_ne_sub20_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @icmp_sub57_ne_sub20_vec_undef( +define <2 x i1> @icmp_sub57_ne_sub20_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @icmp_sub57_ne_sub20_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> 
[[TMP1]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; - %1 = add <2 x i32> %x, - %2 = add <2 x i32> %y, + %1 = add <2 x i32> %x, + %2 = add <2 x i32> %y, %cmp = icmp ne <2 x i32> %1, %2 ret <2 x i1> %cmp } @@ -1926,14 +1926,14 @@ define <2 x i1> @icmp_add20_sge_add57_splat(<2 x i32> %x, <2 x i32> %y) { ret <2 x i1> %cmp } -define <2 x i1> @icmp_add20_sge_add57_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @icmp_add20_sge_add57_undef( +define <2 x i1> @icmp_add20_sge_add57_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @icmp_add20_sge_add57_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[Y:%.*]], ; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i32> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add nsw <2 x i32> %x, - %2 = add nsw <2 x i32> %y, + %2 = add nsw <2 x i32> %y, %cmp = icmp sge <2 x i32> %1, %2 ret <2 x i1> %cmp } @@ -1975,14 +1975,14 @@ define <2 x i1> @icmp_sub57_sge_sub20_splat(<2 x i32> %x, <2 x i32> %y) { ret <2 x i1> %cmp } -define <2 x i1> @icmp_sub57_sge_sub20_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @icmp_sub57_sge_sub20_vec_undef( +define <2 x i1> @icmp_sub57_sge_sub20_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @icmp_sub57_sge_sub20_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[CMP:%.*]] = icmp sge <2 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; - %1 = add nsw <2 x i32> %x, - %2 = add nsw <2 x i32> %y, + %1 = add nsw <2 x i32> %x, + %2 = add nsw <2 x i32> %y, %cmp = icmp sge <2 x i32> %1, %2 ret <2 x i1> %cmp } @@ -2557,13 +2557,13 @@ define <2 x i1> @or_icmp_eq_B_0_icmp_ult_A_B_uniform(<2 x i64> %a, <2 x i64> %b) ret <2 x i1> %3 } -define <2 x i1> @or_icmp_eq_B_0_icmp_ult_A_B_undef(<2 x i64> %a, <2 x i64> %b) { -; CHECK-LABEL: @or_icmp_eq_B_0_icmp_ult_A_B_undef( +define <2 x i1> @or_icmp_eq_B_0_icmp_ult_A_B_poison(<2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: @or_icmp_eq_B_0_icmp_ult_A_B_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i64> [[B:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = icmp uge <2 x i64> [[TMP1]], [[A:%.*]] ; CHECK-NEXT: ret <2 x i1> [[TMP2]] ; - %1 = icmp eq <2 x i64> %b, + %1 = icmp eq <2 x i64> %b, %2 = icmp ult <2 x i64> %a, %b %3 = or <2 x i1> %1, %2 ret <2 x i1> %3 @@ -2606,14 +2606,14 @@ define <2 x i1> @or_icmp_ne_A_0_icmp_ne_B_0_uniform(<2 x i64> %a, <2 x i64> %b) ret <2 x i1> %3 } -define <2 x i1> @or_icmp_ne_A_0_icmp_ne_B_0_undef(<2 x i64> %a, <2 x i64> %b) { -; CHECK-LABEL: @or_icmp_ne_A_0_icmp_ne_B_0_undef( +define <2 x i1> @or_icmp_ne_A_0_icmp_ne_B_0_poison(<2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: @or_icmp_ne_A_0_icmp_ne_B_0_poison( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i64> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <2 x i64> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[TMP2]] ; - %1 = icmp ne <2 x i64> %a, - %2 = icmp ne <2 x i64> %b, + %1 = icmp ne <2 x i64> %a, + %2 = icmp ne <2 x i64> %b, %3 = or <2 x i1> %1, %2 ret <2 x i1> %3 } @@ -2803,13 +2803,13 @@ define <2 x i1> @and_icmp_ne_B_0_icmp_uge_A_B_uniform(<2 x i64> %a, <2 x i64> %b ret <2 x i1> %3 } -define <2 x i1> @and_icmp_ne_B_0_icmp_uge_A_B_undef(<2 x i64> %a, <2 x i64> %b) { -; CHECK-LABEL: @and_icmp_ne_B_0_icmp_uge_A_B_undef( +define <2 x i1> @and_icmp_ne_B_0_icmp_uge_A_B_poison(<2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: @and_icmp_ne_B_0_icmp_uge_A_B_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i64> [[B:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <2 x i64> [[TMP1]], [[A:%.*]] ; CHECK-NEXT: ret <2 x i1> [[TMP2]] ; - %1 = icmp ne <2 x 
i64> %b, + %1 = icmp ne <2 x i64> %b, %2 = icmp uge <2 x i64> %a, %b %3 = and <2 x i1> %1, %2 ret <2 x i1> %3 @@ -3272,13 +3272,13 @@ define <2 x i1> @icmp_and_or_lshr_cst_vec_nonuniform(<2 x i32> %x) { ret <2 x i1> %ret } -define <2 x i1> @icmp_and_or_lshr_cst_vec_undef(<2 x i32> %x) { -; CHECK-LABEL: @icmp_and_or_lshr_cst_vec_undef( +define <2 x i1> @icmp_and_or_lshr_cst_vec_poison(<2 x i32> %x) { +; CHECK-LABEL: @icmp_and_or_lshr_cst_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[RET:%.*]] = icmp ne <2 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[RET]] ; - %shf = lshr <2 x i32> %x, + %shf = lshr <2 x i32> %x, %or = or <2 x i32> %shf, %x %and = and <2 x i32> %or, %ret = icmp ne <2 x i32> %and, zeroinitializer @@ -3315,15 +3315,15 @@ define <2 x i1> @icmp_and_or_lshr_cst_vec_nonuniform_commute(<2 x i32> %xp) { ret <2 x i1> %ret } -define <2 x i1> @icmp_and_or_lshr_cst_vec_undef_commute(<2 x i32> %xp) { -; CHECK-LABEL: @icmp_and_or_lshr_cst_vec_undef_commute( +define <2 x i1> @icmp_and_or_lshr_cst_vec_poison_commute(<2 x i32> %xp) { +; CHECK-LABEL: @icmp_and_or_lshr_cst_vec_poison_commute( ; CHECK-NEXT: [[X:%.*]] = srem <2 x i32> [[XP:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[X]], ; CHECK-NEXT: [[RET:%.*]] = icmp ne <2 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[RET]] ; %x = srem <2 x i32> %xp, ; prevent complexity-based canonicalization - %shf = lshr <2 x i32> %x, + %shf = lshr <2 x i32> %x, %or = or <2 x i32> %x, %shf %and = and <2 x i32> %or, %ret = icmp ne <2 x i32> %and, zeroinitializer @@ -4360,7 +4360,7 @@ define <2 x i1> @signbit_false_logic(<2 x i5> %x) { ; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i5> [[X:%.*]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %dec = add <2 x i5> %x, + %dec = add <2 x i5> %x, %not = xor <2 x i5> %x, %and = and <2 x i5> %dec, %not %r = icmp sgt <2 x i5> %and, diff --git a/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll b/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll index 7cef922eaf0ce..c7e0553992b90 100644 --- a/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll +++ b/llvm/test/Transforms/InstCombine/integer-round-up-pow2-alignment.ll @@ -86,9 +86,9 @@ define <2 x i8> @t4_splat(<2 x i8> %x) { ret <2 x i8> %x.roundedup } -; Splat-with-undef -define <2 x i8> @t5_splat_undef_0b0001(<2 x i8> %x) { -; CHECK-LABEL: @t5_splat_undef_0b0001( +; Splat-with-poison +define <2 x i8> @t5_splat_poison_0b0001(<2 x i8> %x) { +; CHECK-LABEL: @t5_splat_poison_0b0001( ; CHECK-NEXT: [[X_BIASED1:%.*]] = add <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED1]], ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] @@ -96,43 +96,43 @@ define <2 x i8> @t5_splat_undef_0b0001(<2 x i8> %x) { %x.lowbits = and <2 x i8> %x, %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, %x.biased = add <2 x i8> %x, - %x.biased.highbits = and <2 x i8> %x.biased, + %x.biased.highbits = and <2 x i8> %x.biased, %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i8> %x, <2 x i8> %x.biased.highbits ret <2 x i8> %x.roundedup } -define <2 x i8> @t5_splat_undef_0b0010(<2 x i8> %x) { -; CHECK-LABEL: @t5_splat_undef_0b0010( +define <2 x i8> @t5_splat_poison_0b0010(<2 x i8> %x) { +; CHECK-LABEL: @t5_splat_poison_0b0010( ; CHECK-NEXT: [[X_BIASED1:%.*]] = add <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED1]], ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; %x.lowbits = and <2 x i8> %x, %x.lowbits.are.zero = icmp 
eq <2 x i8> %x.lowbits, - %x.biased = add <2 x i8> %x, + %x.biased = add <2 x i8> %x, %x.biased.highbits = and <2 x i8> %x.biased, %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i8> %x, <2 x i8> %x.biased.highbits ret <2 x i8> %x.roundedup } -define <2 x i8> @t5_splat_undef_0b0100(<2 x i8> %x) { -; CHECK-LABEL: @t5_splat_undef_0b0100( +define <2 x i8> @t5_splat_poison_0b0100(<2 x i8> %x) { +; CHECK-LABEL: @t5_splat_poison_0b0100( ; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED]], ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; %x.lowbits = and <2 x i8> %x, - %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, + %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, %x.biased = add <2 x i8> %x, %x.biased.highbits = and <2 x i8> %x.biased, %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i8> %x, <2 x i8> %x.biased.highbits ret <2 x i8> %x.roundedup } -define <2 x i8> @t5_splat_undef_0b1000(<2 x i8> %x) { -; CHECK-LABEL: @t5_splat_undef_0b1000( +define <2 x i8> @t5_splat_poison_0b1000(<2 x i8> %x) { +; CHECK-LABEL: @t5_splat_poison_0b1000( ; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = and <2 x i8> [[X_BIASED]], ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; - %x.lowbits = and <2 x i8> %x, + %x.lowbits = and <2 x i8> %x, %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, %x.biased = add <2 x i8> %x, %x.biased.highbits = and <2 x i8> %x.biased, @@ -177,64 +177,64 @@ define <2 x i8> @t7_nonsplat_bias(<2 x i8> %x) { } ; Splat-in-disguise vector tests -define <2 x i8> @t8_nonsplat_masked_by_undef_0b0001(<2 x i8> %x) { -; CHECK-LABEL: @t8_nonsplat_masked_by_undef_0b0001( +define <2 x i8> @t8_nonsplat_masked_by_poison_0b0001(<2 x i8> %x) { +; CHECK-LABEL: @t8_nonsplat_masked_by_poison_0b0001( ; CHECK-NEXT: [[X_LOWBITS:%.*]] = and <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq <2 x i8> [[X_LOWBITS]], zeroinitializer ; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X]], -; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i8> [[X_BIASED]], +; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i8> [[X_BIASED]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select <2 x i1> [[X_LOWBITS_ARE_ZERO]], <2 x i8> [[X]], <2 x i8> [[X_BIASED_HIGHBITS]] ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; %x.lowbits = and <2 x i8> %x, %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, %x.biased = add <2 x i8> %x, - %x.biased.highbits = and <2 x i8> %x.biased, + %x.biased.highbits = and <2 x i8> %x.biased, %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i8> %x, <2 x i8> %x.biased.highbits ret <2 x i8> %x.roundedup } -define <2 x i8> @t8_nonsplat_masked_by_undef_0b0010(<2 x i8> %x) { -; CHECK-LABEL: @t8_nonsplat_masked_by_undef_0b0010( +define <2 x i8> @t8_nonsplat_masked_by_poison_0b0010(<2 x i8> %x) { +; CHECK-LABEL: @t8_nonsplat_masked_by_poison_0b0010( ; CHECK-NEXT: [[X_LOWBITS:%.*]] = and <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq <2 x i8> [[X_LOWBITS]], zeroinitializer -; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X]], +; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X]], ; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i8> [[X_BIASED]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select <2 x i1> [[X_LOWBITS_ARE_ZERO]], <2 x i8> [[X]], <2 x i8> [[X_BIASED_HIGHBITS]] ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; %x.lowbits = and <2 x i8> %x, %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, - %x.biased = add <2 x i8> %x, + %x.biased = add <2 x i8> %x, 
%x.biased.highbits = and <2 x i8> %x.biased, %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i8> %x, <2 x i8> %x.biased.highbits ret <2 x i8> %x.roundedup } -define <2 x i8> @t8_nonsplat_masked_by_undef_0b0100(<2 x i8> %x) { -; CHECK-LABEL: @t8_nonsplat_masked_by_undef_0b0100( +define <2 x i8> @t8_nonsplat_masked_by_poison_0b0100(<2 x i8> %x) { +; CHECK-LABEL: @t8_nonsplat_masked_by_poison_0b0100( ; CHECK-NEXT: [[X_LOWBITS:%.*]] = and <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq <2 x i8> [[X_LOWBITS]], +; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq <2 x i8> [[X_LOWBITS]], ; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X]], ; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i8> [[X_BIASED]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select <2 x i1> [[X_LOWBITS_ARE_ZERO]], <2 x i8> [[X]], <2 x i8> [[X_BIASED_HIGHBITS]] ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; %x.lowbits = and <2 x i8> %x, - %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, + %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, %x.biased = add <2 x i8> %x, %x.biased.highbits = and <2 x i8> %x.biased, %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i8> %x, <2 x i8> %x.biased.highbits ret <2 x i8> %x.roundedup } -define <2 x i8> @t8_nonsplat_masked_by_undef_0b1000(<2 x i8> %x) { -; CHECK-LABEL: @t8_nonsplat_masked_by_undef_0b1000( -; CHECK-NEXT: [[X_LOWBITS:%.*]] = and <2 x i8> [[X:%.*]], +define <2 x i8> @t8_nonsplat_masked_by_poison_0b1000(<2 x i8> %x) { +; CHECK-LABEL: @t8_nonsplat_masked_by_poison_0b1000( +; CHECK-NEXT: [[X_LOWBITS:%.*]] = and <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq <2 x i8> [[X_LOWBITS]], zeroinitializer ; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i8> [[X]], ; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i8> [[X_BIASED]], ; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select <2 x i1> [[X_LOWBITS_ARE_ZERO]], <2 x i8> [[X]], <2 x i8> [[X_BIASED_HIGHBITS]] ; CHECK-NEXT: ret <2 x i8> [[X_ROUNDEDUP]] ; - %x.lowbits = and <2 x i8> %x, + %x.lowbits = and <2 x i8> %x, %x.lowbits.are.zero = icmp eq <2 x i8> %x.lowbits, %x.biased = add <2 x i8> %x, %x.biased.highbits = and <2 x i8> %x.biased, @@ -442,28 +442,28 @@ define i8 @t17_oneuse(i8 %x) { define <2 x i4> @t18_replacement_0b0001(<2 x i4> %x) { ; CHECK-LABEL: @t18_replacement_0b0001( ; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i4> [[X:%.*]], -; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i4> [[X_BIASED]], +; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i4> [[X_BIASED]], ; CHECK-NEXT: call void @use.v2i4(<2 x i4> [[X_BIASED_HIGHBITS]]) ; CHECK-NEXT: ret <2 x i4> [[X_BIASED_HIGHBITS]] ; %x.lowbits = and <2 x i4> %x, %x.lowbits.are.zero = icmp eq <2 x i4> %x.lowbits, %x.biased = add <2 x i4> %x, - %x.biased.highbits = and <2 x i4> %x.biased, + %x.biased.highbits = and <2 x i4> %x.biased, call void @use.v2i4(<2 x i4> %x.biased.highbits) %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i4> %x, <2 x i4> %x.biased.highbits ret <2 x i4> %x.roundedup } define <2 x i4> @t18_replacement_0b0010(<2 x i4> %x) { ; CHECK-LABEL: @t18_replacement_0b0010( -; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i4> [[X:%.*]], +; CHECK-NEXT: [[X_BIASED:%.*]] = add <2 x i4> [[X:%.*]], ; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and <2 x i4> [[X_BIASED]], ; CHECK-NEXT: call void @use.v2i4(<2 x i4> [[X_BIASED_HIGHBITS]]) ; CHECK-NEXT: ret <2 x i4> [[X_BIASED_HIGHBITS]] ; %x.lowbits = and <2 x i4> %x, %x.lowbits.are.zero = icmp eq <2 x i4> %x.lowbits, - %x.biased = add <2 x i4> %x, + %x.biased = add <2 x i4> %x, 
%x.biased.highbits = and <2 x i4> %x.biased, call void @use.v2i4(<2 x i4> %x.biased.highbits) %x.roundedup = select <2 x i1> %x.lowbits.are.zero, <2 x i4> %x, <2 x i4> %x.biased.highbits @@ -477,7 +477,7 @@ define <2 x i4> @t18_replacement_0b0100(<2 x i4> %x) { ; CHECK-NEXT: ret <2 x i4> [[X_BIASED_HIGHBITS]] ; %x.lowbits = and <2 x i4> %x, - %x.lowbits.are.zero = icmp eq <2 x i4> %x.lowbits, + %x.lowbits.are.zero = icmp eq <2 x i4> %x.lowbits, %x.biased = add <2 x i4> %x, %x.biased.highbits = and <2 x i4> %x.biased, call void @use.v2i4(<2 x i4> %x.biased.highbits) @@ -491,7 +491,7 @@ define <2 x i4> @t18_replacement_0b1000(<2 x i4> %x) { ; CHECK-NEXT: call void @use.v2i4(<2 x i4> [[X_BIASED_HIGHBITS]]) ; CHECK-NEXT: ret <2 x i4> [[X_BIASED_HIGHBITS]] ; - %x.lowbits = and <2 x i4> %x, + %x.lowbits = and <2 x i4> %x, %x.lowbits.are.zero = icmp eq <2 x i4> %x.lowbits, %x.biased = add <2 x i4> %x, %x.biased.highbits = and <2 x i4> %x.biased, diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll index 486113202ddd7..a76662c4bc439 100644 --- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll +++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll @@ -20,14 +20,14 @@ define <2 x i4> @vector (<2 x i4> %x, <2 x i4> %y, <2 x i4> %m) { ret <2 x i4> %r } -define <3 x i4> @vector_undef (<3 x i4> %x, <3 x i4> %y, <3 x i4> %m) { -; CHECK-LABEL: @vector_undef( +define <3 x i4> @vector_poison (<3 x i4> %x, <3 x i4> %y, <3 x i4> %m) { +; CHECK-LABEL: @vector_poison( ; CHECK-NEXT: [[N0:%.*]] = xor <3 x i4> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i4> [[N0]], [[M:%.*]] ; CHECK-NEXT: [[R:%.*]] = xor <3 x i4> [[TMP1]], [[X]] ; CHECK-NEXT: ret <3 x i4> [[R]] ; - %im = xor <3 x i4> %m, + %im = xor <3 x i4> %m, %n0 = xor <3 x i4> %x, %y %n1 = and <3 x i4> %n0, %im %r = xor <3 x i4> %n1, %y @@ -78,17 +78,17 @@ define <2 x i4> @in_constant_varx_6_invmask_nonsplat(<2 x i4> %x, <2 x i4> %mask ret <2 x i4> %r } -define <3 x i4> @in_constant_varx_6_invmask_undef(<3 x i4> %x, <3 x i4> %mask) { -; CHECK-LABEL: @in_constant_varx_6_invmask_undef( -; CHECK-NEXT: [[N0:%.*]] = xor <3 x i4> [[X:%.*]], +define <3 x i4> @in_constant_varx_6_invmask_poison(<3 x i4> %x, <3 x i4> %mask) { +; CHECK-LABEL: @in_constant_varx_6_invmask_poison( +; CHECK-NEXT: [[N0:%.*]] = xor <3 x i4> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i4> [[N0]], [[MASK:%.*]] ; CHECK-NEXT: [[R:%.*]] = xor <3 x i4> [[TMP1]], [[X]] ; CHECK-NEXT: ret <3 x i4> [[R]] ; - %notmask = xor <3 x i4> %mask, - %n0 = xor <3 x i4> %x, ; %x + %notmask = xor <3 x i4> %mask, + %n0 = xor <3 x i4> %x, ; %x %n1 = and <3 x i4> %n0, %notmask - %r = xor <3 x i4> %n1, + %r = xor <3 x i4> %n1, ret <3 x i4> %r } @@ -133,15 +133,15 @@ define <2 x i4> @in_constant_6_vary_invmask_nonsplat(<2 x i4> %y, <2 x i4> %mask ret <2 x i4> %r } -define <3 x i4> @in_constant_6_vary_invmask_undef(<3 x i4> %y, <3 x i4> %mask) { -; CHECK-LABEL: @in_constant_6_vary_invmask_undef( -; CHECK-NEXT: [[N0:%.*]] = xor <3 x i4> [[Y:%.*]], +define <3 x i4> @in_constant_6_vary_invmask_poison(<3 x i4> %y, <3 x i4> %mask) { +; CHECK-LABEL: @in_constant_6_vary_invmask_poison( +; CHECK-NEXT: [[N0:%.*]] = xor <3 x i4> [[Y:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i4> [[N0]], [[MASK:%.*]] -; CHECK-NEXT: [[R:%.*]] = xor <3 x i4> [[TMP1]], +; CHECK-NEXT: [[R:%.*]] = xor <3 x i4> [[TMP1]], ; CHECK-NEXT: ret <3 x i4> 
[[R]] ; - %notmask = xor <3 x i4> %mask, - %n0 = xor <3 x i4> %y, ; %x + %notmask = xor <3 x i4> %mask, + %n0 = xor <3 x i4> %y, ; %x %n1 = and <3 x i4> %n0, %notmask %r = xor <3 x i4> %n1, %y ret <3 x i4> %r diff --git a/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll b/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll index 847a7940bad8c..5d058b20be720 100644 --- a/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll +++ b/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll @@ -81,39 +81,39 @@ define <4 x i1> @vec_4xi32_lshr_and_negC_eq(<4 x i32> %x, <4 x i32> %y) { ret <4 x i1> %r } -define <4 x i1> @vec_lshr_and_negC_eq_undef1(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_lshr_and_negC_eq_undef1( +define <4 x i1> @vec_lshr_and_negC_eq_poison1(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_lshr_and_negC_eq_poison1( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ult <4 x i32> [[LSHR]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %lshr = lshr <4 x i32> %x, %y - %and = and <4 x i32> %lshr, ; ~7 + %and = and <4 x i32> %lshr, ; ~7 %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_lshr_and_negC_eq_undef2(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_lshr_and_negC_eq_undef2( +define <4 x i1> @vec_lshr_and_negC_eq_poison2(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_lshr_and_negC_eq_poison2( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ult <4 x i32> [[LSHR]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %lshr = lshr <4 x i32> %x, %y %and = and <4 x i32> %lshr, ; ~7 - %r = icmp eq <4 x i32> %and, + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_lshr_and_negC_eq_undef3(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_lshr_and_negC_eq_undef3( +define <4 x i1> @vec_lshr_and_negC_eq_poison3(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_lshr_and_negC_eq_poison3( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ult <4 x i32> [[LSHR]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %lshr = lshr <4 x i32> %x, %y - %and = and <4 x i32> %lshr, ; ~7 - %r = icmp eq <4 x i32> %and, + %and = and <4 x i32> %lshr, ; ~7 + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll b/llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll index 39f4e58b25dc8..0166680309ea8 100644 --- a/llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll +++ b/llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll @@ -81,39 +81,39 @@ define <4 x i1> @vec_4xi32_lshr_and_signbit_eq(<4 x i32> %x, <4 x i32> %y) { ret <4 x i1> %r } -define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_undef1(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_undef1( +define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_poison1(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_poison1( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <4 x i32> [[LSHR]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %lshr = lshr <4 x i32> %x, %y - %and = and <4 x i32> %lshr, + %and = and <4 x i32> %lshr, %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_undef2(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_undef2( +define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_poison2(<4 x i32> %x, <4 x i32> %y) { 
+; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_poison2( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <4 x i32> [[LSHR]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %lshr = lshr <4 x i32> %x, %y %and = and <4 x i32> %lshr, - %r = icmp eq <4 x i32> %and, + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_undef3(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_undef3( +define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_poison3(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_poison3( ; CHECK-NEXT: [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <4 x i32> [[LSHR]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %lshr = lshr <4 x i32> %x, %y - %and = and <4 x i32> %lshr, - %r = icmp eq <4 x i32> %and, + %and = and <4 x i32> %lshr, + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/masked-merge-add.ll b/llvm/test/Transforms/InstCombine/masked-merge-add.ll index f655153108a43..0484369e99d6a 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-add.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-add.ll @@ -51,7 +51,7 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = add <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %and = and <3 x i32> %x, %m @@ -61,6 +61,21 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ret <3 x i32> %ret } +define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) { +; CHECK-LABEL: @p_vec_poison( +; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] +; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], +; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: ret <3 x i32> [[RET]] +; + %and = and <3 x i32> %x, %m + %neg = xor <3 x i32> %m, + %and1 = and <3 x i32> %neg, %y + %ret = add <3 x i32> %and, %and1 + ret <3 x i32> %ret +} + ; ============================================================================ ; ; Constant mask. 
; ============================================================================ ; diff --git a/llvm/test/Transforms/InstCombine/masked-merge-or.ll b/llvm/test/Transforms/InstCombine/masked-merge-or.ll index b49ec07706e28..0531a532fc7e0 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-or.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-or.ll @@ -51,7 +51,7 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %and = and <3 x i32> %x, %m @@ -61,6 +61,21 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ret <3 x i32> %ret } +define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) { +; CHECK-LABEL: @p_vec_poison( +; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] +; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], +; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: ret <3 x i32> [[RET]] +; + %and = and <3 x i32> %x, %m + %neg = xor <3 x i32> %m, + %and1 = and <3 x i32> %neg, %y + %ret = or <3 x i32> %and, %and1 + ret <3 x i32> %ret +} + ; ============================================================================ ; ; Constant mask. ; ============================================================================ ; diff --git a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll index a6d201be68cee..74cc7625aebff 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll @@ -51,7 +51,7 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %and = and <3 x i32> %x, %m @@ -61,6 +61,21 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ret <3 x i32> %ret } +define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) { +; CHECK-LABEL: @p_vec_poison( +; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] +; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], +; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: ret <3 x i32> [[RET]] +; + %and = and <3 x i32> %x, %m + %neg = xor <3 x i32> %m, + %and1 = and <3 x i32> %neg, %y + %ret = xor <3 x i32> %and, %and1 + ret <3 x i32> %ret +} + ; ============================================================================ ; ; Constant mask. 
; ============================================================================ ; diff --git a/llvm/test/Transforms/InstCombine/min-positive.ll b/llvm/test/Transforms/InstCombine/min-positive.ll index 1fb212b738725..d2c2e9018792b 100644 --- a/llvm/test/Transforms/InstCombine/min-positive.ll +++ b/llvm/test/Transforms/InstCombine/min-positive.ll @@ -67,16 +67,16 @@ define <2 x i1> @smin_commute_vec(<2 x i32> %x, <2 x i32> %other) { ret <2 x i1> %test } -define <2 x i1> @smin_commute_vec_undef_elts(<2 x i32> %x, <2 x i32> %other) { -; CHECK-LABEL: @smin_commute_vec_undef_elts( -; CHECK-NEXT: [[TEST:%.*]] = icmp sgt <2 x i32> [[OTHER:%.*]], +define <2 x i1> @smin_commute_vec_poison_elts(<2 x i32> %x, <2 x i32> %other) { +; CHECK-LABEL: @smin_commute_vec_poison_elts( +; CHECK-NEXT: [[TEST:%.*]] = icmp sgt <2 x i32> [[OTHER:%.*]], ; CHECK-NEXT: ret <2 x i1> [[TEST]] ; %notneg = and <2 x i32> %x, %positive = or <2 x i32> %notneg, %cmp = icmp slt <2 x i32> %other, %positive %sel = select <2 x i1> %cmp, <2 x i32> %other, <2 x i32> %positive - %test = icmp sgt <2 x i32> %sel, + %test = icmp sgt <2 x i32> %sel, ret <2 x i1> %test } ; %positive might be zero diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll index bbbbf9eb6eafe..8b47dc7a28079 100644 --- a/llvm/test/Transforms/InstCombine/minmax-fold.ll +++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll @@ -1360,14 +1360,15 @@ define i8 @PR14613_smax(i8 %x) { define i8 @PR46271(<2 x i8> %x) { ; CHECK-LABEL: @PR46271( -; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.smax.v2i8(<2 x i8> [[X:%.*]], <2 x i8> ) +; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[A_INV:%.*]] = icmp slt <2 x i8> [[X]], zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[A_INV]], <2 x i8> , <2 x i8> [[TMP3]] ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i64 1 -; CHECK-NEXT: [[R:%.*]] = xor i8 [[TMP2]], -1 -; CHECK-NEXT: ret i8 [[R]] +; CHECK-NEXT: ret i8 [[TMP2]] ; %a = icmp sgt <2 x i8> %x, - %b = select <2 x i1> %a, <2 x i8> %x, <2 x i8> - %not = xor <2 x i8> %b, + %b = select <2 x i1> %a, <2 x i8> %x, <2 x i8> + %not = xor <2 x i8> %b, %r = extractelement <2 x i8> %not, i32 1 ret i8 %r } diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll index bd1a47bbfcc19..a76f0f84ba340 100644 --- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll +++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll @@ -393,7 +393,7 @@ define i8 @smax_of_nots(i8 %x, i8 %y) { ret i8 %m } -; Vectors are ok (including undef lanes of not ops) +; Vectors are ok (including poison lanes of not ops) define <3 x i8> @smin_of_nots(<3 x i8> %x, <3 x i8> %y) { ; CHECK-LABEL: @smin_of_nots( @@ -401,8 +401,8 @@ define <3 x i8> @smin_of_nots(<3 x i8> %x, <3 x i8> %y) { ; CHECK-NEXT: [[M:%.*]] = xor <3 x i8> [[TMP1]], ; CHECK-NEXT: ret <3 x i8> [[M]] ; - %notx = xor <3 x i8> %x, - %noty = xor <3 x i8> %y, + %notx = xor <3 x i8> %x, + %noty = xor <3 x i8> %y, %m = call <3 x i8> @llvm.smin.v3i8(<3 x i8> %notx, <3 x i8> %noty) ret <3 x i8> %m } @@ -473,16 +473,16 @@ define i8 @smax_of_not_and_const(i8 %x) { ret i8 %m } -; Vectors are ok (including undef lanes of not ops and min/max constant operand) +; Vectors are ok (including poison lanes of not ops and min/max constant operand) define <3 x i8> @smin_of_not_and_const(<3 x i8> %x) { ; CHECK-LABEL: @smin_of_not_and_const( -; CHECK-NEXT: [[TMP1:%.*]] = call <3 x i8> 
@llvm.smax.v3i8(<3 x i8> [[X:%.*]], <3 x i8> ) +; CHECK-NEXT: [[TMP1:%.*]] = call <3 x i8> @llvm.smax.v3i8(<3 x i8> [[X:%.*]], <3 x i8> ) ; CHECK-NEXT: [[M:%.*]] = xor <3 x i8> [[TMP1]], ; CHECK-NEXT: ret <3 x i8> [[M]] ; - %notx = xor <3 x i8> %x, - %m = call <3 x i8> @llvm.smin.v3i8(<3 x i8> , <3 x i8> %notx) + %notx = xor <3 x i8> %x, + %m = call <3 x i8> @llvm.smin.v3i8(<3 x i8> , <3 x i8> %notx) ret <3 x i8> %m } @@ -706,7 +706,7 @@ define <3 x i8> @smax_negation_vec(<3 x i8> %x) { ; CHECK-NEXT: [[R:%.*]] = call <3 x i8> @llvm.abs.v3i8(<3 x i8> [[X:%.*]], i1 false) ; CHECK-NEXT: ret <3 x i8> [[R]] ; - %s = sub <3 x i8> , %x + %s = sub <3 x i8> , %x %r = call <3 x i8> @llvm.smax.v3i8(<3 x i8> %x, <3 x i8> %s) ret <3 x i8> %r } @@ -912,7 +912,7 @@ define <3 x i8> @umin_non_zero_idiom4(<3 x i8> %a) { ; CHECK-NEXT: [[RES:%.*]] = zext <3 x i1> [[TMP1]] to <3 x i8> ; CHECK-NEXT: ret <3 x i8> [[RES]] ; - %res = call <3 x i8> @llvm.umin.v3i8(<3 x i8> %a, <3 x i8> ) + %res = call <3 x i8> @llvm.umin.v3i8(<3 x i8> %a, <3 x i8> ) ret <3 x i8> %res } @@ -2118,15 +2118,15 @@ define i8 @umin_offset_uses(i8 %x) { ret i8 %m } -; TODO: This could transform, but undef element must not propagate to the new add. +; TODO: This could transform -define <3 x i8> @umax_vector_splat_undef(<3 x i8> %x) { -; CHECK-LABEL: @umax_vector_splat_undef( -; CHECK-NEXT: [[A:%.*]] = add nuw <3 x i8> [[X:%.*]], +define <3 x i8> @umax_vector_splat_poison(<3 x i8> %x) { +; CHECK-LABEL: @umax_vector_splat_poison( +; CHECK-NEXT: [[A:%.*]] = add nuw <3 x i8> [[X:%.*]], ; CHECK-NEXT: [[R:%.*]] = call <3 x i8> @llvm.umax.v3i8(<3 x i8> [[A]], <3 x i8> ) ; CHECK-NEXT: ret <3 x i8> [[R]] ; - %a = add nuw <3 x i8> %x, + %a = add nuw <3 x i8> %x, %r = call <3 x i8> @llvm.umax.v3i8(<3 x i8> %a, <3 x i8> ) ret <3 x i8> %r } @@ -2506,8 +2506,8 @@ entry: ret i8 %val } -define <3 x i8> @fold_umax_with_knownbits_info_undef_in_splat(<3 x i8> %a, <3 x i8> %b) { -; CHECK-LABEL: @fold_umax_with_knownbits_info_undef_in_splat( +define <3 x i8> @fold_umax_with_knownbits_info_poison_in_splat(<3 x i8> %a, <3 x i8> %b) { +; CHECK-LABEL: @fold_umax_with_knownbits_info_poison_in_splat( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A1:%.*]] = or <3 x i8> [[A:%.*]], ; CHECK-NEXT: [[A2:%.*]] = shl <3 x i8> [[B:%.*]], @@ -2518,7 +2518,7 @@ entry: %a1 = or <3 x i8> %a, %a2 = shl <3 x i8> %b, %sub = sub <3 x i8> %a1, %a2 - %val = call <3 x i8> @llvm.umax.v3i8(<3 x i8> %sub, <3 x i8> ) + %val = call <3 x i8> @llvm.umax.v3i8(<3 x i8> %sub, <3 x i8> ) ret <3 x i8> %val } @@ -2535,8 +2535,8 @@ entry: ret i8 %val } -define <3 x i8> @fold_umin_with_knownbits_info_undef_in_splat(<3 x i8> %a, <3 x i8> %b) { -; CHECK-LABEL: @fold_umin_with_knownbits_info_undef_in_splat( +define <3 x i8> @fold_umin_with_knownbits_info_poison_in_splat(<3 x i8> %a, <3 x i8> %b) { +; CHECK-LABEL: @fold_umin_with_knownbits_info_poison_in_splat( ; CHECK-NEXT: entry: ; CHECK-NEXT: ret <3 x i8> ; @@ -2544,7 +2544,7 @@ entry: %a1 = or <3 x i8> %a, %a2 = shl <3 x i8> %b, %sub = sub <3 x i8> %a1, %a2 - %val = call <3 x i8> @llvm.umin.v3i8(<3 x i8> %sub, <3 x i8> ) + %val = call <3 x i8> @llvm.umin.v3i8(<3 x i8> %sub, <3 x i8> ) ret <3 x i8> %val } diff --git a/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll b/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll index 8fe4261bbf009..f47c5577075cb 100644 --- a/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/mul-inseltpoison.ll @@ -784,7 +784,7 @@ define <2 x i8> @negate_if_false_commute(<2 x i8> %px, <2 x 
i1> %cond) { ; CHECK-NEXT: ret <2 x i8> [[R]] ; %x = sdiv <2 x i8> , %px ; thwart complexity-based canonicalization - %sel = select <2 x i1> %cond, <2 x i8> , <2 x i8> + %sel = select <2 x i1> %cond, <2 x i8> , <2 x i8> %r = mul <2 x i8> %x, %sel ret <2 x i8> %r } @@ -931,7 +931,7 @@ define @mul_scalable_splat_zero( %z) { ; CHECK-LABEL: @mul_scalable_splat_zero( ; CHECK-NEXT: ret zeroinitializer ; - %shuf = shufflevector insertelement ( undef, i64 0, i32 0), poison, zeroinitializer + %shuf = shufflevector insertelement ( poison, i64 0, i32 0), poison, zeroinitializer %t3 = mul %shuf, %z ret %t3 } @@ -973,14 +973,14 @@ define <2 x i32> @mulsub1_vec_nonuniform(<2 x i32> %a0, <2 x i32> %a1) { ret <2 x i32> %mul } -define <2 x i32> @mulsub1_vec_nonuniform_undef(<2 x i32> %a0, <2 x i32> %a1) { -; CHECK-LABEL: @mulsub1_vec_nonuniform_undef( +define <2 x i32> @mulsub1_vec_nonuniform_poison(<2 x i32> %a0, <2 x i32> %a1) { +; CHECK-LABEL: @mulsub1_vec_nonuniform_poison( ; CHECK-NEXT: [[SUB_NEG:%.*]] = sub <2 x i32> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i32> [[SUB_NEG]], ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %sub = sub <2 x i32> %a1, %a0 - %mul = mul <2 x i32> %sub, + %mul = mul <2 x i32> %sub, ret <2 x i32> %mul } @@ -1017,14 +1017,14 @@ define <2 x i32> @mulsub2_vec_nonuniform(<2 x i32> %a0) { ret <2 x i32> %mul } -define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) { -; CHECK-LABEL: @mulsub2_vec_nonuniform_undef( +define <2 x i32> @mulsub2_vec_nonuniform_poison(<2 x i32> %a0) { +; CHECK-LABEL: @mulsub2_vec_nonuniform_poison( ; CHECK-NEXT: [[SUB_NEG:%.*]] = add <2 x i32> [[A0:%.*]], ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i32> [[SUB_NEG]], ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %sub = sub <2 x i32> , %a0 - %mul = mul <2 x i32> %sub, + %mul = mul <2 x i32> %sub, ret <2 x i32> %mul } @@ -1061,14 +1061,14 @@ define <2 x i32> @muladd2_vec_nonuniform(<2 x i32> %a0) { ret <2 x i32> %mul } -define <2 x i32> @muladd2_vec_nonuniform_undef(<2 x i32> %a0) { -; CHECK-LABEL: @muladd2_vec_nonuniform_undef( +define <2 x i32> @muladd2_vec_nonuniform_poison(<2 x i32> %a0) { +; CHECK-LABEL: @muladd2_vec_nonuniform_poison( ; CHECK-NEXT: [[ADD_NEG:%.*]] = sub <2 x i32> , [[A0:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i32> [[ADD_NEG]], ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %add = add <2 x i32> %a0, - %mul = mul <2 x i32> %add, + %mul = mul <2 x i32> %add, ret <2 x i32> %mul } diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll index d4a689c60786e..227ca4a6d5cfa 100644 --- a/llvm/test/Transforms/InstCombine/mul.ll +++ b/llvm/test/Transforms/InstCombine/mul.ll @@ -1496,7 +1496,7 @@ define <2 x i8> @negate_if_false_commute(<2 x i8> %px, <2 x i1> %cond) { ; CHECK-NEXT: ret <2 x i8> [[R]] ; %x = sdiv <2 x i8> , %px ; thwart complexity-based canonicalization - %sel = select <2 x i1> %cond, <2 x i8> , <2 x i8> + %sel = select <2 x i1> %cond, <2 x i8> , <2 x i8> %r = mul <2 x i8> %x, %sel ret <2 x i8> %r } @@ -1643,7 +1643,7 @@ define @mul_scalable_splat_zero( %z) { ; CHECK-LABEL: @mul_scalable_splat_zero( ; CHECK-NEXT: ret zeroinitializer ; - %shuf = shufflevector insertelement ( undef, i64 0, i32 0), undef, zeroinitializer + %shuf = shufflevector insertelement ( poison, i64 0, i32 0), poison, zeroinitializer %t3 = mul %shuf, %z ret %t3 } @@ -1752,14 +1752,14 @@ define <2 x i32> @mulsub1_vec_nonuniform(<2 x i32> %a0, <2 x i32> %a1) { ret <2 x i32> %mul } -define <2 x i32> @mulsub1_vec_nonuniform_undef(<2 x i32> %a0, <2 x i32> %a1) { -; CHECK-LABEL: 
@mulsub1_vec_nonuniform_undef( +define <2 x i32> @mulsub1_vec_nonuniform_poison(<2 x i32> %a0, <2 x i32> %a1) { +; CHECK-LABEL: @mulsub1_vec_nonuniform_poison( ; CHECK-NEXT: [[SUB_NEG:%.*]] = sub <2 x i32> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i32> [[SUB_NEG]], ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %sub = sub <2 x i32> %a1, %a0 - %mul = mul <2 x i32> %sub, + %mul = mul <2 x i32> %sub, ret <2 x i32> %mul } @@ -1796,14 +1796,14 @@ define <2 x i32> @mulsub2_vec_nonuniform(<2 x i32> %a0) { ret <2 x i32> %mul } -define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) { -; CHECK-LABEL: @mulsub2_vec_nonuniform_undef( +define <2 x i32> @mulsub2_vec_nonuniform_poison(<2 x i32> %a0) { +; CHECK-LABEL: @mulsub2_vec_nonuniform_poison( ; CHECK-NEXT: [[SUB_NEG:%.*]] = add <2 x i32> [[A0:%.*]], ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i32> [[SUB_NEG]], ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %sub = sub <2 x i32> , %a0 - %mul = mul <2 x i32> %sub, + %mul = mul <2 x i32> %sub, ret <2 x i32> %mul } @@ -1819,15 +1819,15 @@ define i8 @mulsub_nsw(i8 %a1, i8 %a2) { } ; It would be safe to keep the nsw on the shl here, but only because the mul -; to shl transform happens to replace undef with 0. -define <2 x i8> @mulsub_nsw_undef(<2 x i8> %a1, <2 x i8> %a2) { -; CHECK-LABEL: @mulsub_nsw_undef( +; to shl transform happens to replace poison with 0. +define <2 x i8> @mulsub_nsw_poison(<2 x i8> %a1, <2 x i8> %a2) { +; CHECK-LABEL: @mulsub_nsw_poison( ; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw <2 x i8> [[A2:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i8> [[A_NEG]], ; CHECK-NEXT: ret <2 x i8> [[MUL]] ; %a = sub nsw <2 x i8> %a1, %a2 - %mul = mul nsw <2 x i8> %a, + %mul = mul nsw <2 x i8> %a, ret <2 x i8> %mul } @@ -1864,14 +1864,14 @@ define <2 x i32> @muladd2_vec_nonuniform(<2 x i32> %a0) { ret <2 x i32> %mul } -define <2 x i32> @muladd2_vec_nonuniform_undef(<2 x i32> %a0) { -; CHECK-LABEL: @muladd2_vec_nonuniform_undef( +define <2 x i32> @muladd2_vec_nonuniform_poison(<2 x i32> %a0) { +; CHECK-LABEL: @muladd2_vec_nonuniform_poison( ; CHECK-NEXT: [[ADD_NEG:%.*]] = sub <2 x i32> , [[A0:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i32> [[ADD_NEG]], ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %add = add <2 x i32> %a0, - %mul = mul <2 x i32> %add, + %mul = mul <2 x i32> %add, ret <2 x i32> %mul } diff --git a/llvm/test/Transforms/InstCombine/not-add.ll b/llvm/test/Transforms/InstCombine/not-add.ll index 877f558ffd503..9ba37b6bba39e 100644 --- a/llvm/test/Transforms/InstCombine/not-add.ll +++ b/llvm/test/Transforms/InstCombine/not-add.ll @@ -115,26 +115,26 @@ define <4 x i32> @vector_test(<4 x i32> %x, <4 x i32> %y) { ret <4 x i32> %nota } -define <4 x i32> @vector_test_undef(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vector_test_undef( +define <4 x i32> @vector_test_poison(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vector_test_poison( ; CHECK-NEXT: [[NOTA:%.*]] = sub <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <4 x i32> [[NOTA]] ; - %notx = xor <4 x i32> %x, + %notx = xor <4 x i32> %x, %a = add <4 x i32> %notx, %y - %nota = xor <4 x i32> %a, + %nota = xor <4 x i32> %a, ret <4 x i32> %nota } -define <4 x i32> @vector_test_undef_nsw_nuw(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vector_test_undef_nsw_nuw( +define <4 x i32> @vector_test_poison_nsw_nuw(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vector_test_poison_nsw_nuw( ; CHECK-NEXT: [[NOTA:%.*]] = sub nuw nsw <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <4 x i32> [[NOTA]] ; - %notx = xor <4 x i32> %x, + %notx = xor <4 x i32> %x, 
%a = add nsw nuw <4 x i32> %notx, %y - %nota = xor <4 x i32> %a, + %nota = xor <4 x i32> %a, ret <4 x i32> %nota } diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll index 98b5d98041560..0c2c6195e3240 100644 --- a/llvm/test/Transforms/InstCombine/not.ll +++ b/llvm/test/Transforms/InstCombine/not.ll @@ -430,9 +430,9 @@ define <3 x i5> @not_or_neg_commute_vec(<3 x i5> %x, <3 x i5> %p) { ; CHECK-NEXT: ret <3 x i5> [[NOT]] ; %y = mul <3 x i5> %p, ; thwart complexity-based-canonicalization - %s = sub <3 x i5> , %x + %s = sub <3 x i5> , %x %o = or <3 x i5> %y, %s - %not = xor <3 x i5> %o, + %not = xor <3 x i5> %o, ret <3 x i5> %not } diff --git a/llvm/test/Transforms/InstCombine/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll b/llvm/test/Transforms/InstCombine/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll index c16633efe4ce3..3fd4a17d972af 100644 --- a/llvm/test/Transforms/InstCombine/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll +++ b/llvm/test/Transforms/InstCombine/omit-urem-of-power-of-two-or-zero-when-comparing-with-zero.ll @@ -95,41 +95,41 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) { ret <4 x i1> %t2 } -define <4 x i1> @p6_vector_urem_by_const__nonsplat_undef0(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @p6_vector_urem_by_const__nonsplat_undef0( -; CHECK-NEXT: [[T0:%.*]] = and <4 x i32> [[X:%.*]], -; CHECK-NEXT: [[T1:%.*]] = urem <4 x i32> [[T0]], -; CHECK-NEXT: [[T2:%.*]] = icmp eq <4 x i32> [[T1]], zeroinitializer +; The poison value in the vector makes the whole function UB. + +define <4 x i1> @p6_vector_urem_by_const__nonsplat_poison0(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @p6_vector_urem_by_const__nonsplat_poison0( +; CHECK-NEXT: [[T0:%.*]] = and <4 x i32> [[X:%.*]], +; CHECK-NEXT: [[T2:%.*]] = icmp eq <4 x i32> [[T0]], zeroinitializer ; CHECK-NEXT: ret <4 x i1> [[T2]] ; - %t0 = and <4 x i32> %x, + %t0 = and <4 x i32> %x, %t1 = urem <4 x i32> %t0, ; '6' is clearly not a power of two %t2 = icmp eq <4 x i32> %t1, ret <4 x i1> %t2 } -define <4 x i1> @p7_vector_urem_by_const__nonsplat_undef2(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @p7_vector_urem_by_const__nonsplat_undef2( +define <4 x i1> @p7_vector_urem_by_const__nonsplat_poison2(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @p7_vector_urem_by_const__nonsplat_poison2( ; CHECK-NEXT: [[T0:%.*]] = and <4 x i32> [[X:%.*]], -; CHECK-NEXT: [[T2:%.*]] = icmp eq <4 x i32> [[T0]], +; CHECK-NEXT: [[T2:%.*]] = icmp eq <4 x i32> [[T0]], ; CHECK-NEXT: ret <4 x i1> [[T2]] ; %t0 = and <4 x i32> %x, ; clearly a power-of-two or zero %t1 = urem <4 x i32> %t0, ; '6' is clearly not a power of two - %t2 = icmp eq <4 x i32> %t1, + %t2 = icmp eq <4 x i32> %t1, ret <4 x i1> %t2 } -define <4 x i1> @p8_vector_urem_by_const__nonsplat_undef3(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @p8_vector_urem_by_const__nonsplat_undef3( -; CHECK-NEXT: [[T0:%.*]] = and <4 x i32> [[X:%.*]], -; CHECK-NEXT: [[T1:%.*]] = urem <4 x i32> [[T0]], -; CHECK-NEXT: [[T2:%.*]] = icmp eq <4 x i32> [[T1]], +define <4 x i1> @p8_vector_urem_by_const__nonsplat_poison3(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @p8_vector_urem_by_const__nonsplat_poison3( +; CHECK-NEXT: [[T0:%.*]] = and <4 x i32> [[X:%.*]], +; CHECK-NEXT: [[T2:%.*]] = icmp eq <4 x i32> [[T0]], ; CHECK-NEXT: ret <4 x i1> [[T2]] ; - %t0 = and <4 x i32> %x, + %t0 = and <4 x i32> %x, %t1 = urem <4 x i32> %t0, ; '6' is clearly not a power of two - %t2 = icmp eq <4 x i32> %t1, + %t2 = 
icmp eq <4 x i32> %t1, ret <4 x i1> %t2 } diff --git a/llvm/test/Transforms/InstCombine/operand-complexity.ll b/llvm/test/Transforms/InstCombine/operand-complexity.ll index 62cfc76d9d24e..541a15275b617 100644 --- a/llvm/test/Transforms/InstCombine/operand-complexity.ll +++ b/llvm/test/Transforms/InstCombine/operand-complexity.ll @@ -29,15 +29,15 @@ define <2 x i8> @neg_vec(<2 x i8> %x) { ret <2 x i8> %r } -define <2 x i8> @neg_vec_undef(<2 x i8> %x) { -; CHECK-LABEL: @neg_vec_undef( +define <2 x i8> @neg_vec_poison(<2 x i8> %x) { +; CHECK-LABEL: @neg_vec_poison( ; CHECK-NEXT: [[BO:%.*]] = udiv <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[NEGX:%.*]] = sub <2 x i8> , [[X]] +; CHECK-NEXT: [[NEGX:%.*]] = sub <2 x i8> , [[X]] ; CHECK-NEXT: [[R:%.*]] = xor <2 x i8> [[BO]], [[NEGX]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %bo = udiv <2 x i8> %x, - %negx = sub <2 x i8> , %x + %negx = sub <2 x i8> , %x %r = xor <2 x i8> %negx, %bo ret <2 x i8> %r } @@ -70,15 +70,15 @@ define <2 x i8> @not_vec(<2 x i8> %x) { ret <2 x i8> %r } -define <2 x i8> @not_vec_undef(<2 x i8> %x) { -; CHECK-LABEL: @not_vec_undef( +define <2 x i8> @not_vec_poison(<2 x i8> %x) { +; CHECK-LABEL: @not_vec_poison( ; CHECK-NEXT: [[BO:%.*]] = udiv <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[NOTX:%.*]] = xor <2 x i8> [[X]], +; CHECK-NEXT: [[NOTX:%.*]] = xor <2 x i8> [[X]], ; CHECK-NEXT: [[R:%.*]] = mul <2 x i8> [[BO]], [[NOTX]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %bo = udiv <2 x i8> %x, - %notx = xor <2 x i8> , %x + %notx = xor <2 x i8> , %x %r = mul <2 x i8> %notx, %bo ret <2 x i8> %r } @@ -134,8 +134,8 @@ define <2 x float> @fneg_vec(<2 x float> %x) { ret <2 x float> %r } -define <2 x float> @fneg_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @fneg_vec_undef( +define <2 x float> @fneg_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @fneg_vec_poison( ; CHECK-NEXT: [[BO:%.*]] = fdiv <2 x float> [[X:%.*]], ; CHECK-NEXT: [[FNEGX:%.*]] = fneg <2 x float> [[X]] ; CHECK-NEXT: [[R:%.*]] = fmul <2 x float> [[BO]], [[FNEGX]] @@ -143,7 +143,7 @@ define <2 x float> @fneg_vec_undef(<2 x float> %x) { ; CHECK-NEXT: ret <2 x float> [[R]] ; %bo = fdiv <2 x float> %x, - %fnegx = fsub <2 x float> , %x + %fnegx = fsub <2 x float> , %x %r = fmul <2 x float> %fnegx, %bo call void @use_vec(<2 x float> %fnegx) ret <2 x float> %r diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll index 1b1a6ffbf0f2d..6e2085a8bb6c7 100644 --- a/llvm/test/Transforms/InstCombine/or.ll +++ b/llvm/test/Transforms/InstCombine/or.ll @@ -262,26 +262,26 @@ define <2 x i1> @and_icmp_eq_0_vector(<2 x i32> %A, <2 x i32> %B) { ret <2 x i1> %D } -define <2 x i1> @and_icmp_eq_0_vector_undef1(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @and_icmp_eq_0_vector_undef1( +define <2 x i1> @and_icmp_eq_0_vector_poison1(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @and_icmp_eq_0_vector_poison1( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[D:%.*]] = icmp eq <2 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[D]] ; - %C1 = icmp eq <2 x i32> %A, - %C2 = icmp eq <2 x i32> %B, + %C1 = icmp eq <2 x i32> %A, + %C2 = icmp eq <2 x i32> %B, %D = and <2 x i1> %C1, %C2 ret <2 x i1> %D } -define <2 x i1> @and_icmp_eq_0_vector_undef2(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @and_icmp_eq_0_vector_undef2( +define <2 x i1> @and_icmp_eq_0_vector_poison2(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @and_icmp_eq_0_vector_poison2( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[D:%.*]] = icmp eq <2 x i32> [[TMP1]], 
zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[D]] ; - %C1 = icmp eq <2 x i32> %A, - %C2 = icmp eq <2 x i32> %B, + %C1 = icmp eq <2 x i32> %A, + %C2 = icmp eq <2 x i32> %B, %D = and <2 x i1> %C1, %C2 ret <2 x i1> %D } @@ -566,17 +566,17 @@ define <2 x i1> @test37_uniform(<2 x i32> %x) { ret <2 x i1> %ret1 } -define <2 x i1> @test37_undef(<2 x i32> %x) { -; CHECK-LABEL: @test37_undef( -; CHECK-NEXT: [[ADD1:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> [[ADD1]], -; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <2 x i32> [[X]], +define <2 x i1> @test37_poison(<2 x i32> %x) { +; CHECK-LABEL: @test37_poison( +; CHECK-NEXT: [[ADD1:%.*]] = add <2 x i32> [[X:%.*]], +; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> [[ADD1]], +; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <2 x i32> [[X]], ; CHECK-NEXT: [[RET1:%.*]] = or <2 x i1> [[CMP1]], [[CMP2]] ; CHECK-NEXT: ret <2 x i1> [[RET1]] ; - %add1 = add <2 x i32> %x, - %cmp1 = icmp ult <2 x i32> %add1, - %cmp2 = icmp eq <2 x i32> %x, + %add1 = add <2 x i32> %x, + %cmp1 = icmp ult <2 x i32> %add1, + %cmp2 = icmp eq <2 x i32> %x, %ret1 = or <2 x i1> %cmp1, %cmp2 ret <2 x i1> %ret1 } @@ -874,19 +874,19 @@ define <2 x i1> @test46_uniform(<2 x i8> %c) { ret <2 x i1> %or } -define <2 x i1> @test46_undef(<2 x i8> %c) { -; CHECK-LABEL: @test46_undef( -; CHECK-NEXT: [[C_OFF:%.*]] = add <2 x i8> [[C:%.*]], -; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i8> [[C_OFF]], -; CHECK-NEXT: [[C_OFF17:%.*]] = add <2 x i8> [[C]], -; CHECK-NEXT: [[CMP2:%.*]] = icmp ult <2 x i8> [[C_OFF17]], +define <2 x i1> @test46_poison(<2 x i8> %c) { +; CHECK-LABEL: @test46_poison( +; CHECK-NEXT: [[C_OFF:%.*]] = add <2 x i8> [[C:%.*]], +; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i8> [[C_OFF]], +; CHECK-NEXT: [[C_OFF17:%.*]] = add <2 x i8> [[C]], +; CHECK-NEXT: [[CMP2:%.*]] = icmp ult <2 x i8> [[C_OFF17]], ; CHECK-NEXT: [[OR:%.*]] = or <2 x i1> [[CMP1]], [[CMP2]] ; CHECK-NEXT: ret <2 x i1> [[OR]] ; - %c.off = add <2 x i8> %c, - %cmp1 = icmp ult <2 x i8> %c.off, - %c.off17 = add <2 x i8> %c, - %cmp2 = icmp ult <2 x i8> %c.off17, + %c.off = add <2 x i8> %c, + %cmp1 = icmp ult <2 x i8> %c.off, + %c.off17 = add <2 x i8> %c, + %cmp2 = icmp ult <2 x i8> %c.off17, %or = or <2 x i1> %cmp1, %cmp2 ret <2 x i1> %or } diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll index f0c2f129e3df3..5ed7d641df65b 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll @@ -89,13 +89,13 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ret <8 x i32> %t7 } -define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( -; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], +define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( +; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], ; CHECK-NEXT: [[T1:%.*]] = zext <8 x i32> [[T0]] to <8 x i64> -; CHECK-NEXT: [[T2:%.*]] = shl <8 x i64> , [[T1]] -; CHECK-NEXT: [[T3:%.*]] = xor <8 x i64> [[T2]], -; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> , [[NBITS]] +; CHECK-NEXT: [[T2:%.*]] = shl nsw <8 x i64> , [[T1]] +; CHECK-NEXT: [[T3:%.*]] = xor <8 x i64> [[T2]], +; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> 
, [[NBITS]] ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T2]]) @@ -106,11 +106,11 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: [[T7:%.*]] = and <8 x i32> [[TMP2]], ; CHECK-NEXT: ret <8 x i32> [[T7]] ; - %t0 = add <8 x i32> %nbits, + %t0 = add <8 x i32> %nbits, %t1 = zext <8 x i32> %t0 to <8 x i64> - %t2 = shl <8 x i64> , %t1 ; shifting by nbits-1 - %t3 = xor <8 x i64> %t2, - %t4 = sub <8 x i32> , %nbits + %t2 = shl <8 x i64> , %t1 ; shifting by nbits-1 + %t3 = xor <8 x i64> %t2, + %t4 = sub <8 x i32> , %nbits call void @use8xi32(<8 x i32> %t0) call void @use8xi64(<8 x i64> %t1) diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll index 46d1de5781b71..1a711e58c333b 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-c.ll @@ -73,11 +73,11 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ret <8 x i32> %t5 } -define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( +define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] +; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]]) @@ -87,8 +87,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T5]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = lshr <8 x i64> , %t0 - %t2 = add <8 x i32> %nbits, + %t1 = lshr <8 x i64> , %t0 + %t2 = add <8 x i32> %nbits, call void @use8xi64(<8 x i64> %t0) call void @use8xi64(<8 x i64> %t1) @@ -103,8 +103,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] +; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]]) @@ -114,8 +114,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T5]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = lshr <8 x i64> , %t0 - %t2 = add <8 x i32> %nbits, + %t1 = lshr <8 x i64> , %t0 + %t2 = add <8 x i32> %nbits, call void @use8xi64(<8 x i64> %t0) call void @use8xi64(<8 x i64> %t1) diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll 
index 48873852cfc7c..cd0098ecdb0a6 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll @@ -81,12 +81,12 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ret <8 x i32> %t6 } -define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( +define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> , [[T0]] +; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> , [[T0]] ; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T2]]) @@ -97,9 +97,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T6]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = shl <8 x i64> , %t0 + %t1 = shl <8 x i64> , %t0 %t2 = lshr <8 x i64> %t1, %t0 - %t3 = add <8 x i32> %nbits, + %t3 = add <8 x i32> %nbits, call void @use8xi64(<8 x i64> %t0) call void @use8xi64(<8 x i64> %t1) @@ -115,9 +115,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> , [[T0]] +; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> , [[T0]] ; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T2]]) @@ -128,9 +128,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T6]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = shl <8 x i64> , %t0 + %t1 = shl <8 x i64> , %t0 %t2 = lshr <8 x i64> %t1, %t0 - %t3 = add <8 x i32> %nbits, + %t3 = add <8 x i32> %nbits, call void @use8xi64(<8 x i64> %t0) call void @use8xi64(<8 x i64> %t1) diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll index 8b3f01bcb7691..1debf111b18cd 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll @@ -71,12 +71,12 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) { ret <8 x i32> %t5 } -define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t1_vec_splat_undef( -; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], -; CHECK-NEXT: [[T1:%.*]] = shl <8 x i32> , [[T0]] -; CHECK-NEXT: [[T2:%.*]] = xor <8 x i32> [[T1]], -; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> , [[NBITS]] +define <8 x i32> @t1_vec_splat_poison(<8 x i32> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t1_vec_splat_poison( +; CHECK-NEXT: [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], +; 
CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i32> , [[T0]] +; CHECK-NEXT: [[T2:%.*]] = xor <8 x i32> [[T1]], +; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> , [[NBITS]] ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]]) @@ -85,11 +85,11 @@ define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { ; CHECK-NEXT: [[T5:%.*]] = and <8 x i32> [[TMP1]], ; CHECK-NEXT: ret <8 x i32> [[T5]] ; - %t0 = add <8 x i32> %nbits, - %t1 = shl <8 x i32> , %t0 - %t2 = xor <8 x i32> %t1, + %t0 = add <8 x i32> %nbits, + %t1 = shl <8 x i32> , %t0 + %t2 = xor <8 x i32> %t1, %t3 = and <8 x i32> %t2, %x - %t4 = sub <8 x i32> , %nbits + %t4 = sub <8 x i32> , %nbits call void @use8xi32(<8 x i32> %t0) call void @use8xi32(<8 x i32> %t1) call void @use8xi32(<8 x i32> %t2) diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll index 58a905063fac4..55d0b3f80a519 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll @@ -55,19 +55,19 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) { ret <8 x i32> %t3 } -define <8 x i32> @t1_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t1_vec_splat_undef( -; CHECK-NEXT: [[T0:%.*]] = lshr <8 x i32> , [[NBITS:%.*]] -; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], +define <8 x i32> @t1_vec_splat_poison(<8 x i32> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t1_vec_splat_poison( +; CHECK-NEXT: [[T0:%.*]] = lshr <8 x i32> , [[NBITS:%.*]] +; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T2]]) ; CHECK-NEXT: [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T2]] ; CHECK-NEXT: [[T3:%.*]] = and <8 x i32> [[TMP1]], ; CHECK-NEXT: ret <8 x i32> [[T3]] ; - %t0 = lshr <8 x i32> , %nbits + %t0 = lshr <8 x i32> , %nbits %t1 = and <8 x i32> %t0, %x - %t2 = add <8 x i32> %nbits, + %t2 = add <8 x i32> %nbits, call void @use8xi32(<8 x i32> %t0) call void @use8xi32(<8 x i32> %t2) %t3 = shl <8 x i32> %t1, %t2 ; shift is smaller than mask diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll index 9c096d1418a5b..7ad99a6bb0a38 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll @@ -63,11 +63,11 @@ define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) { ret <8 x i32> %t4 } -define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( -; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> , [[NBITS:%.*]] +define <8 x i32> @t2_vec_splat_poison(<8 x i32> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( +; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> , [[NBITS]] -; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]]) ; CHECK-NEXT: call void @use8xi32(<8 x i32> 
[[T3]]) @@ -75,10 +75,10 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) { ; CHECK-NEXT: [[T4:%.*]] = and <8 x i32> [[TMP1]], ; CHECK-NEXT: ret <8 x i32> [[T4]] ; - %t0 = shl <8 x i32> , %nbits + %t0 = shl <8 x i32> , %nbits %t1 = lshr <8 x i32> %t0, %nbits %t2 = and <8 x i32> %t1, %x - %t3 = add <8 x i32> %nbits, + %t3 = add <8 x i32> %nbits, call void @use8xi32(<8 x i32> %t0) call void @use8xi32(<8 x i32> %t1) call void @use8xi32(<8 x i32> %t3) diff --git a/llvm/test/Transforms/InstCombine/pr53357.ll b/llvm/test/Transforms/InstCombine/pr53357.ll index 0a6d2993ce46a..0ae690869c1c4 100644 --- a/llvm/test/Transforms/InstCombine/pr53357.ll +++ b/llvm/test/Transforms/InstCombine/pr53357.ll @@ -30,16 +30,16 @@ define <2 x i32> @src_vec(<2 x i32> noundef %0, <2 x i32> noundef %1) { ret <2 x i32> %6 } -; vector version of src with undef values -define <2 x i32> @src_vec_undef(<2 x i32> noundef %0, <2 x i32> noundef %1) { -; CHECK-LABEL: @src_vec_undef( +; vector version of src with poison values +define <2 x i32> @src_vec_poison(<2 x i32> noundef %0, <2 x i32> noundef %1) { +; CHECK-LABEL: @src_vec_poison( ; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i32> [[TMP1:%.*]], [[TMP0:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i32> [[TMP3]], ; CHECK-NEXT: ret <2 x i32> [[TMP4]] ; %3 = and <2 x i32> %1, %0 %4 = or <2 x i32> %1, %0 - %5 = xor <2 x i32> %4, + %5 = xor <2 x i32> %4, %6 = add <2 x i32> %3, %5 ret <2 x i32> %6 } diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll index d49cfe990d82d..cb6775e689b8c 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll @@ -89,12 +89,12 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ret <8 x i32> %t6 } -define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( +define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> , [[T0]] -; CHECK-NEXT: [[T2:%.*]] = xor <8 x i64> [[T1]], -; CHECK-NEXT: [[T3:%.*]] = sub <8 x i32> , [[NBITS]] +; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> , [[T0]] +; CHECK-NEXT: [[T2:%.*]] = xor <8 x i64> [[T1]], +; CHECK-NEXT: [[T3:%.*]] = sub <8 x i32> , [[NBITS]] ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[NBITS]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) @@ -107,9 +107,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T6]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = shl <8 x i64> , %t0 - %t2 = xor <8 x i64> %t1, - %t3 = sub <8 x i32> , %nbits + %t1 = shl <8 x i64> , %t0 + %t2 = xor <8 x i64> %t1, + %t3 = sub <8 x i32> , %nbits %t4 = and <8 x i64> %t2, %x call void @use8xi32(<8 x i32> %nbits) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll index fbbeffbba630b..a78246781c7f9 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll +++ 
b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-c.ll @@ -77,11 +77,11 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ret <8 x i32> %t5 } -define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( +define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] +; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -92,8 +92,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T5]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = lshr <8 x i64> , %t0 - %t2 = add <8 x i32> %nbits, + %t1 = lshr <8 x i64> , %t0 + %t2 = add <8 x i32> %nbits, %t3 = and <8 x i64> %t1, %x call void @use8xi64(<8 x i64> %t0) @@ -109,8 +109,8 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i64> , [[T0]] +; CHECK-NEXT: [[T2:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: [[T3:%.*]] = and <8 x i64> [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -121,8 +121,8 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T5]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = lshr <8 x i64> , %t0 - %t2 = add <8 x i32> %nbits, + %t1 = lshr <8 x i64> , %t0 + %t2 = add <8 x i32> %nbits, %t3 = and <8 x i64> %t1, %x call void @use8xi64(<8 x i64> %t0) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll index 1a977f67a6a5a..b79ab79097527 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll @@ -85,12 +85,12 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ret <8 x i32> %t6 } -define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { -; CHECK-LABEL: @t2_vec_splat_undef( +define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { +; CHECK-LABEL: @t2_vec_splat_poison( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> , [[T0]] +; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> , [[T0]] ; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -102,9 +102,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x 
i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T6]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = shl <8 x i64> , %t0 + %t1 = shl <8 x i64> , %t0 %t2 = lshr <8 x i64> %t1, %t0 - %t3 = add <8 x i32> %nbits, + %t3 = add <8 x i32> %nbits, %t4 = and <8 x i64> %t2, %x call void @use8xi64(<8 x i64> %t0) @@ -121,9 +121,9 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) { define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_nonsplat( ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64> -; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> , [[T0]] +; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> , [[T0]] ; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> , [[T0]] -; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], +; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -135,9 +135,9 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: ret <8 x i32> [[T6]] ; %t0 = zext <8 x i32> %nbits to <8 x i64> - %t1 = shl <8 x i64> , %t0 + %t1 = shl <8 x i64> , %t0 %t2 = lshr <8 x i64> %t1, %t0 - %t3 = add <8 x i32> %nbits, + %t3 = add <8 x i32> %nbits, %t4 = and <8 x i64> %t2, %x call void @use8xi64(<8 x i64> %t0) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll index ddaef5f4b47c8..4b955a894fcfe 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll @@ -155,12 +155,12 @@ define <3 x i32> @t4_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) { ret <3 x i32> %t5 } -define <3 x i32> @t5_vec_undef(<3 x i32> %x, <3 x i32> %nbits) { -; CHECK-LABEL: @t5_vec_undef( -; CHECK-NEXT: [[T1:%.*]] = shl <3 x i32> , [[NBITS:%.*]] -; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], +define <3 x i32> @t5_vec_poison(<3 x i32> %x, <3 x i32> %nbits) { +; CHECK-LABEL: @t5_vec_poison( +; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> , [[NBITS:%.*]] +; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], ; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]] -; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> , [[NBITS]] +; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> , [[NBITS]] ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[NBITS]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]]) @@ -169,11 +169,11 @@ define <3 x i32> @t5_vec_undef(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]] ; CHECK-NEXT: ret <3 x i32> [[T5]] ; - %t0 = add <3 x i32> %nbits, - %t1 = shl <3 x i32> , %t0 - %t2 = xor <3 x i32> %t1, + %t0 = add <3 x i32> %nbits, + %t1 = shl <3 x i32> , %t0 + %t2 = xor <3 x i32> %t1, %t3 = and <3 x i32> %t2, %x - %t4 = sub <3 x i32> , %nbits + %t4 = sub <3 x i32> , %nbits call void @use3xi32(<3 x i32> %t0) call void @use3xi32(<3 x i32> %t1) call void @use3xi32(<3 x i32> %t2) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll index c7747cfafcff5..8428ef67d6b86 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-c.ll @@ 
-99,20 +99,20 @@ define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) { ret <3 x i32> %t3 } -define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) { -; CHECK-LABEL: @t4_vec_undef( -; CHECK-NEXT: [[T0:%.*]] = lshr <3 x i32> , [[NBITS:%.*]] +define <3 x i32> @t4_vec_poison(<3 x i32> %x, <3 x i32> %nbits) { +; CHECK-LABEL: @t4_vec_poison( +; CHECK-NEXT: [[T0:%.*]] = lshr <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = and <3 x i32> [[T0]], [[X:%.*]] -; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[NBITS]], +; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[NBITS]], ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]]) ; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]] ; CHECK-NEXT: ret <3 x i32> [[T3]] ; - %t0 = lshr <3 x i32> , %nbits + %t0 = lshr <3 x i32> , %nbits %t1 = and <3 x i32> %t0, %x - %t2 = add <3 x i32> %nbits, + %t2 = add <3 x i32> %nbits, call void @use3xi32(<3 x i32> %t0) call void @use3xi32(<3 x i32> %t1) call void @use3xi32(<3 x i32> %t2) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll index 549729fe8b59c..5d8ff9e9fb71b 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-d.ll @@ -115,9 +115,9 @@ define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) { ret <3 x i32> %t4 } -define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) { -; CHECK-LABEL: @t4_vec_undef( -; CHECK-NEXT: [[T0:%.*]] = shl <3 x i32> , [[NBITS:%.*]] +define <3 x i32> @t4_vec_poison(<3 x i32> %x, <3 x i32> %nbits) { +; CHECK-LABEL: @t4_vec_poison( +; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> , [[NBITS]] ; CHECK-NEXT: [[T2:%.*]] = and <3 x i32> [[T1]], [[X:%.*]] ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]]) @@ -127,10 +127,10 @@ define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-NEXT: [[T4:%.*]] = shl <3 x i32> [[X]], [[NBITS]] ; CHECK-NEXT: ret <3 x i32> [[T4]] ; - %t0 = shl <3 x i32> , %nbits + %t0 = shl <3 x i32> , %nbits %t1 = lshr <3 x i32> %t0, %nbits %t2 = and <3 x i32> %t1, %x - %t3 = add <3 x i32> %nbits, + %t3 = add <3 x i32> %nbits, call void @use3xi32(<3 x i32> %t0) call void @use3xi32(<3 x i32> %t1) call void @use3xi32(<3 x i32> %t2) diff --git a/llvm/test/Transforms/InstCombine/reuse-constant-from-select-in-icmp.ll b/llvm/test/Transforms/InstCombine/reuse-constant-from-select-in-icmp.ll index fd0d942ad840b..301ead708a08f 100644 --- a/llvm/test/Transforms/InstCombine/reuse-constant-from-select-in-icmp.ll +++ b/llvm/test/Transforms/InstCombine/reuse-constant-from-select-in-icmp.ll @@ -102,36 +102,36 @@ define <2 x i32> @p7_vec_splat_sgt(<2 x i32> %x, <2 x i32> %y) { ret <2 x i32> %r } -; Vectors with undef +; Vectors with poison -define <2 x i32> @p8_vec_nonsplat_undef0(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @p8_vec_nonsplat_undef0( +define <2 x i32> @p8_vec_nonsplat_poison0(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @p8_vec_nonsplat_poison0( ; CHECK-NEXT: [[T_INV:%.*]] = icmp ugt <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[T_INV]], <2 x i32> , <2 x i32> [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %t = icmp ult <2 x i32> %x, + %t = icmp ult <2 x i32> %x, %r = select <2 x i1> %t, <2 x i32> %y, 
<2 x i32> ret <2 x i32> %r } -define <2 x i32> @p9_vec_nonsplat_undef1(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @p9_vec_nonsplat_undef1( +define <2 x i32> @p9_vec_nonsplat_poison1(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @p9_vec_nonsplat_poison1( ; CHECK-NEXT: [[T_INV:%.*]] = icmp ugt <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[T_INV]], <2 x i32> , <2 x i32> [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[T_INV]], <2 x i32> , <2 x i32> [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; %t = icmp ult <2 x i32> %x, - %r = select <2 x i1> %t, <2 x i32> %y, <2 x i32> + %r = select <2 x i1> %t, <2 x i32> %y, <2 x i32> ret <2 x i32> %r } -define <2 x i32> @p10_vec_nonsplat_undef2(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @p10_vec_nonsplat_undef2( +define <2 x i32> @p10_vec_nonsplat_poison2(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @p10_vec_nonsplat_poison2( ; CHECK-NEXT: [[T_INV:%.*]] = icmp ugt <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[T_INV]], <2 x i32> , <2 x i32> [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[T_INV]], <2 x i32> , <2 x i32> [[Y:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %t = icmp ult <2 x i32> %x, - %r = select <2 x i1> %t, <2 x i32> %y, <2 x i32> + %t = icmp ult <2 x i32> %x, + %r = select <2 x i1> %t, <2 x i32> %y, <2 x i32> ret <2 x i32> %r } diff --git a/llvm/test/Transforms/InstCombine/rotate.ll b/llvm/test/Transforms/InstCombine/rotate.ll index 6c70c791fd881..eec623e2f193a 100644 --- a/llvm/test/Transforms/InstCombine/rotate.ll +++ b/llvm/test/Transforms/InstCombine/rotate.ll @@ -65,24 +65,24 @@ define <2 x i16> @rotl_v2i16_constant_splat(<2 x i16> %x) { ret <2 x i16> %r } -define <2 x i16> @rotl_v2i16_constant_splat_undef0(<2 x i16> %x) { -; CHECK-LABEL: @rotl_v2i16_constant_splat_undef0( +define <2 x i16> @rotl_v2i16_constant_splat_poison0(<2 x i16> %x) { +; CHECK-LABEL: @rotl_v2i16_constant_splat_poison0( ; CHECK-NEXT: [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[X]], <2 x i16> ) ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %shl = shl <2 x i16> %x, + %shl = shl <2 x i16> %x, %shr = lshr <2 x i16> %x, %r = or <2 x i16> %shl, %shr ret <2 x i16> %r } -define <2 x i16> @rotl_v2i16_constant_splat_undef1(<2 x i16> %x) { -; CHECK-LABEL: @rotl_v2i16_constant_splat_undef1( +define <2 x i16> @rotl_v2i16_constant_splat_poison1(<2 x i16> %x) { +; CHECK-LABEL: @rotl_v2i16_constant_splat_poison1( ; CHECK-NEXT: [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[X]], <2 x i16> ) ; CHECK-NEXT: ret <2 x i16> [[R]] ; %shl = shl <2 x i16> %x, - %shr = lshr <2 x i16> %x, + %shr = lshr <2 x i16> %x, %r = or <2 x i16> %shl, %shr ret <2 x i16> %r } @@ -100,30 +100,30 @@ define <2 x i17> @rotr_v2i17_constant_splat(<2 x i17> %x) { ret <2 x i17> %r } -define <2 x i17> @rotr_v2i17_constant_splat_undef0(<2 x i17> %x) { -; CHECK-LABEL: @rotr_v2i17_constant_splat_undef0( +define <2 x i17> @rotr_v2i17_constant_splat_poison0(<2 x i17> %x) { +; CHECK-LABEL: @rotr_v2i17_constant_splat_poison0( ; CHECK-NEXT: [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[X:%.*]], <2 x i17> [[X]], <2 x i17> ) ; CHECK-NEXT: ret <2 x i17> [[R]] ; - %shl = shl <2 x i17> %x, - %shr = lshr <2 x i17> %x, + %shl = shl <2 x i17> %x, + %shr = lshr <2 x i17> %x, %r = or <2 x i17> %shr, %shl ret <2 x i17> %r } -define <2 x i17> @rotr_v2i17_constant_splat_undef1(<2 x i17> %x) { -; CHECK-LABEL: @rotr_v2i17_constant_splat_undef1( +define <2 x i17> @rotr_v2i17_constant_splat_poison1(<2 x i17> %x) { +; 
CHECK-LABEL: @rotr_v2i17_constant_splat_poison1( ; CHECK-NEXT: [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[X:%.*]], <2 x i17> [[X]], <2 x i17> ) ; CHECK-NEXT: ret <2 x i17> [[R]] ; - %shl = shl <2 x i17> %x, - %shr = lshr <2 x i17> %x, + %shl = shl <2 x i17> %x, + %shr = lshr <2 x i17> %x, %r = or <2 x i17> %shr, %shl ret <2 x i17> %r } ; Allow arbitrary shift constants. -; Support undef elements. +; Support poison elements. define <2 x i32> @rotr_v2i32_constant_nonsplat(<2 x i32> %x) { ; CHECK-LABEL: @rotr_v2i32_constant_nonsplat( @@ -136,17 +136,6 @@ define <2 x i32> @rotr_v2i32_constant_nonsplat(<2 x i32> %x) { ret <2 x i32> %r } -define <2 x i32> @rotr_v2i32_constant_nonsplat_undef0(<2 x i32> %x) { -; CHECK-LABEL: @rotr_v2i32_constant_nonsplat_undef0( -; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[X]], <2 x i32> ) -; CHECK-NEXT: ret <2 x i32> [[R]] -; - %shl = shl <2 x i32> %x, - %shr = lshr <2 x i32> %x, - %r = or <2 x i32> %shl, %shr - ret <2 x i32> %r -} - define <2 x i32> @rotr_v2i32_constant_nonsplat_poison0(<2 x i32> %x) { ; CHECK-LABEL: @rotr_v2i32_constant_nonsplat_poison0( ; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[X]], <2 x i32> ) @@ -158,13 +147,13 @@ define <2 x i32> @rotr_v2i32_constant_nonsplat_poison0(<2 x i32> %x) { ret <2 x i32> %r } -define <2 x i32> @rotr_v2i32_constant_nonsplat_undef1(<2 x i32> %x) { -; CHECK-LABEL: @rotr_v2i32_constant_nonsplat_undef1( +define <2 x i32> @rotr_v2i32_constant_nonsplat_poison1(<2 x i32> %x) { +; CHECK-LABEL: @rotr_v2i32_constant_nonsplat_poison1( ; CHECK-NEXT: [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[X]], <2 x i32> ) ; CHECK-NEXT: ret <2 x i32> [[R]] ; %shl = shl <2 x i32> %x, - %shr = lshr <2 x i32> %x, + %shr = lshr <2 x i32> %x, %r = or <2 x i32> %shl, %shr ret <2 x i32> %r } @@ -180,13 +169,13 @@ define <2 x i36> @rotl_v2i36_constant_nonsplat(<2 x i36> %x) { ret <2 x i36> %r } -define <3 x i36> @rotl_v3i36_constant_nonsplat_undef0(<3 x i36> %x) { -; CHECK-LABEL: @rotl_v3i36_constant_nonsplat_undef0( -; CHECK-NEXT: [[R:%.*]] = call <3 x i36> @llvm.fshl.v3i36(<3 x i36> [[X:%.*]], <3 x i36> [[X]], <3 x i36> ) +define <3 x i36> @rotl_v3i36_constant_nonsplat_poison0(<3 x i36> %x) { +; CHECK-LABEL: @rotl_v3i36_constant_nonsplat_poison0( +; CHECK-NEXT: [[R:%.*]] = call <3 x i36> @llvm.fshl.v3i36(<3 x i36> [[X:%.*]], <3 x i36> [[X]], <3 x i36> ) ; CHECK-NEXT: ret <3 x i36> [[R]] ; - %shl = shl <3 x i36> %x, - %shr = lshr <3 x i36> %x, + %shl = shl <3 x i36> %x, + %shr = lshr <3 x i36> %x, %r = or <3 x i36> %shl, %shr ret <3 x i36> %r } diff --git a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll index c1bb6941d4568..57977a72cd08f 100644 --- a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll +++ b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll @@ -559,14 +559,14 @@ define <2 x i8> @test_simplify_decrement_vec(<2 x i8> %a) { ret <2 x i8> %i2 } -define <2 x i8> @test_simplify_decrement_vec_undef(<2 x i8> %a) { -; CHECK-LABEL: @test_simplify_decrement_vec_undef( +define <2 x i8> @test_simplify_decrement_vec_poison(<2 x i8> %a) { +; CHECK-LABEL: @test_simplify_decrement_vec_poison( ; CHECK-NEXT: [[I2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> ) ; CHECK-NEXT: ret <2 x i8> [[I2]] ; %i = icmp eq <2 x i8> %a, %i1 = sub <2 x i8> %a, - %i2 = select <2 x i1> %i, <2 x i8> , <2 x i8> %i1 + %i2 = select <2 x 
i1> %i, <2 x i8> , <2 x i8> %i1 ret <2 x i8> %i2 } @@ -1818,14 +1818,14 @@ define <4 x i32> @uadd_sat_constant_vec_commute(<4 x i32> %x) { define <4 x i32> @uadd_sat_constant_vec_commute_undefs(<4 x i32> %x) { ; CHECK-LABEL: @uadd_sat_constant_vec_commute_undefs( -; CHECK-NEXT: [[A:%.*]] = add <4 x i32> [[X:%.*]], -; CHECK-NEXT: [[C:%.*]] = icmp ult <4 x i32> [[X]], -; CHECK-NEXT: [[R:%.*]] = select <4 x i1> [[C]], <4 x i32> [[A]], <4 x i32> +; CHECK-NEXT: [[A:%.*]] = add <4 x i32> [[X:%.*]], +; CHECK-NEXT: [[C:%.*]] = icmp ult <4 x i32> [[X]], +; CHECK-NEXT: [[R:%.*]] = select <4 x i1> [[C]], <4 x i32> [[A]], <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[R]] ; - %a = add <4 x i32> %x, - %c = icmp ult <4 x i32> %x, - %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> + %a = add <4 x i32> %x, + %c = icmp ult <4 x i32> %x, + %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> ret <4 x i32> %r } diff --git a/llvm/test/Transforms/InstCombine/select-of-bittest.ll b/llvm/test/Transforms/InstCombine/select-of-bittest.ll index a6f14cbfbfadf..e3eb76de459e2 100644 --- a/llvm/test/Transforms/InstCombine/select-of-bittest.ll +++ b/llvm/test/Transforms/InstCombine/select-of-bittest.ll @@ -80,19 +80,18 @@ define <2 x i32> @and_lshr_and_vec_v2(<2 x i32> %arg) { ret <2 x i32> %t4 } -define <3 x i32> @and_lshr_and_vec_undef(<3 x i32> %arg) { -; CHECK-LABEL: @and_lshr_and_vec_undef( +define <3 x i32> @and_lshr_and_vec_poison(<3 x i32> %arg) { +; CHECK-LABEL: @and_lshr_and_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i32> [[ARG:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <3 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[T4:%.*]] = zext <3 x i1> [[TMP2]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[T4]] ; - %t = and <3 x i32> %arg, - %t1 = icmp eq <3 x i32> %t, - %t2 = lshr <3 x i32> %arg, - %t3 = and <3 x i32> %t2, - ; The second element of %t4 is poison because it is (undef ? poison : undef). 
- %t4 = select <3 x i1> %t1, <3 x i32> %t3, <3 x i32> + %t = and <3 x i32> %arg, + %t1 = icmp eq <3 x i32> %t, + %t2 = lshr <3 x i32> %arg, + %t3 = and <3 x i32> %t2, + %t4 = select <3 x i1> %t1, <3 x i32> %t3, <3 x i32> ret <3 x i32> %t4 } @@ -138,17 +137,17 @@ define <2 x i32> @and_and_vec(<2 x i32> %arg) { ret <2 x i32> %t3 } -define <3 x i32> @and_and_vec_undef(<3 x i32> %arg) { -; CHECK-LABEL: @and_and_vec_undef( -; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i32> [[ARG:%.*]], +define <3 x i32> @and_and_vec_poison(<3 x i32> %arg) { +; CHECK-LABEL: @and_and_vec_poison( +; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i32> [[ARG:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <3 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[T3:%.*]] = zext <3 x i1> [[TMP2]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[T3]] ; - %t = and <3 x i32> %arg, - %t1 = icmp eq <3 x i32> %t, - %t2 = and <3 x i32> %arg, - %t3 = select <3 x i1> %t1, <3 x i32> %t2, <3 x i32> + %t = and <3 x i32> %arg, + %t1 = icmp eq <3 x i32> %t, + %t2 = and <3 x i32> %arg, + %t3 = select <3 x i1> %t1, <3 x i32> %t2, <3 x i32> ret <3 x i32> %t3 } @@ -221,8 +220,8 @@ define <2 x i32> @f_var0_vec(<2 x i32> %arg, <2 x i32> %arg1) { ret <2 x i32> %t5 } -define <3 x i32> @f_var0_vec_undef(<3 x i32> %arg, <3 x i32> %arg1) { -; CHECK-LABEL: @f_var0_vec_undef( +define <3 x i32> @f_var0_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) { +; CHECK-LABEL: @f_var0_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = or <3 x i32> [[ARG1:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[ARG:%.*]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer @@ -230,11 +229,11 @@ define <3 x i32> @f_var0_vec_undef(<3 x i32> %arg, <3 x i32> %arg1) { ; CHECK-NEXT: ret <3 x i32> [[T5]] ; %t = and <3 x i32> %arg, %arg1 - %t2 = icmp eq <3 x i32> %t, - %t3 = lshr <3 x i32> %arg, - %t4 = and <3 x i32> %t3, - ; The second element of %t5 is poison because it is (undef ? poison : undef). - %t5 = select <3 x i1> %t2, <3 x i32> %t4, <3 x i32> + %t2 = icmp eq <3 x i32> %t, + %t3 = lshr <3 x i32> %arg, + %t4 = and <3 x i32> %t3, + ; The second element of %t5 is poison because it is (poison ? poison : poison). 
+ %t5 = select <3 x i1> %t2, <3 x i32> %t4, <3 x i32> ret <3 x i32> %t5 } @@ -284,8 +283,8 @@ define <2 x i32> @f_var1_vec(<2 x i32> %arg, <2 x i32> %arg1) { ret <2 x i32> %t4 } -define <3 x i32> @f_var1_vec_undef(<3 x i32> %arg, <3 x i32> %arg1) { -; CHECK-LABEL: @f_var1_vec_undef( +define <3 x i32> @f_var1_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) { +; CHECK-LABEL: @f_var1_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = or <3 x i32> [[ARG1:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[ARG:%.*]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer @@ -293,9 +292,9 @@ define <3 x i32> @f_var1_vec_undef(<3 x i32> %arg, <3 x i32> %arg1) { ; CHECK-NEXT: ret <3 x i32> [[T4]] ; %t = and <3 x i32> %arg, %arg1 - %t2 = icmp eq <3 x i32> %t, - %t3 = and <3 x i32> %arg, - %t4 = select <3 x i1> %t2, <3 x i32> %t3, <3 x i32> + %t2 = icmp eq <3 x i32> %t, + %t3 = and <3 x i32> %arg, + %t4 = select <3 x i1> %t2, <3 x i32> %t3, <3 x i32> ret <3 x i32> %t4 } @@ -354,20 +353,20 @@ define <2 x i32> @f_var2_vec(<2 x i32> %arg, <2 x i32> %arg1) { ret <2 x i32> %t5 } -define <3 x i32> @f_var2_vec_undef(<3 x i32> %arg, <3 x i32> %arg1) { -; CHECK-LABEL: @f_var2_vec_undef( -; CHECK-NEXT: [[T:%.*]] = and <3 x i32> [[ARG:%.*]], -; CHECK-NEXT: [[T2:%.*]] = icmp eq <3 x i32> [[T]], +define <3 x i32> @f_var2_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) { +; CHECK-LABEL: @f_var2_vec_poison( +; CHECK-NEXT: [[T:%.*]] = and <3 x i32> [[ARG:%.*]], +; CHECK-NEXT: [[T2:%.*]] = icmp eq <3 x i32> [[T]], ; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[ARG]], [[ARG1:%.*]] -; CHECK-NEXT: [[T4:%.*]] = and <3 x i32> [[T3]], -; CHECK-NEXT: [[T5:%.*]] = select <3 x i1> [[T2]], <3 x i32> [[T4]], <3 x i32> +; CHECK-NEXT: [[T4:%.*]] = and <3 x i32> [[T3]], +; CHECK-NEXT: [[T5:%.*]] = select <3 x i1> [[T2]], <3 x i32> [[T4]], <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[T5]] ; - %t = and <3 x i32> %arg, - %t2 = icmp eq <3 x i32> %t, + %t = and <3 x i32> %arg, + %t2 = icmp eq <3 x i32> %t, %t3 = lshr <3 x i32> %arg, %arg1 - %t4 = and <3 x i32> %t3, - %t5 = select <3 x i1> %t2, <3 x i32> %t4, <3 x i32> + %t4 = and <3 x i32> %t3, + %t5 = select <3 x i1> %t2, <3 x i32> %t4, <3 x i32> ret <3 x i32> %t5 } @@ -427,20 +426,20 @@ define <2 x i32> @f_var3_splatvec(<2 x i32> %arg, <2 x i32> %arg1, <2 x i32> %ar ret <2 x i32> %t6 } -define <3 x i32> @f_var3_vec_undef(<3 x i32> %arg, <3 x i32> %arg1, <3 x i32> %arg2) { -; CHECK-LABEL: @f_var3_vec_undef( +define <3 x i32> @f_var3_vec_poison(<3 x i32> %arg, <3 x i32> %arg1, <3 x i32> %arg2) { +; CHECK-LABEL: @f_var3_vec_poison( ; CHECK-NEXT: [[T:%.*]] = and <3 x i32> [[ARG:%.*]], [[ARG1:%.*]] -; CHECK-NEXT: [[T3:%.*]] = icmp eq <3 x i32> [[T]], +; CHECK-NEXT: [[T3:%.*]] = icmp eq <3 x i32> [[T]], ; CHECK-NEXT: [[T4:%.*]] = lshr <3 x i32> [[ARG]], [[ARG2:%.*]] -; CHECK-NEXT: [[T5:%.*]] = and <3 x i32> [[T4]], -; CHECK-NEXT: [[T6:%.*]] = select <3 x i1> [[T3]], <3 x i32> [[T5]], <3 x i32> +; CHECK-NEXT: [[T5:%.*]] = and <3 x i32> [[T4]], +; CHECK-NEXT: [[T6:%.*]] = select <3 x i1> [[T3]], <3 x i32> [[T5]], <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[T6]] ; %t = and <3 x i32> %arg, %arg1 - %t3 = icmp eq <3 x i32> %t, + %t3 = icmp eq <3 x i32> %t, %t4 = lshr <3 x i32> %arg, %arg2 - %t5 = and <3 x i32> %t4, - %t6 = select <3 x i1> %t3, <3 x i32> %t5, <3 x i32> + %t5 = and <3 x i32> %t4, + %t6 = select <3 x i1> %t3, <3 x i32> %t5, <3 x i32> ret <3 x i32> %t6 } diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll index 
bd8145ab2a35b..8654691c6f875 100644 --- a/llvm/test/Transforms/InstCombine/select.ll +++ b/llvm/test/Transforms/InstCombine/select.ll @@ -3109,45 +3109,46 @@ define <4 x i32> @mul_select_eq_zero_vector(<4 x i32> %x, <4 x i32> %y) { } ; Check that a select is folded into multiplication if condition's operand -; is a vector consisting of zeros and undefs. -; select ( x == {0, undef, ...}), 0, x * y --> freeze(y) * x -define <2 x i32> @mul_select_eq_undef_vector(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @mul_select_eq_undef_vector( -; CHECK-NEXT: [[Y_FR:%.*]] = freeze <2 x i32> [[Y:%.*]] +; is a vector consisting of zeros and poisons. +; select ( x == {0, poison, ...}), 0, x * y --> freeze(y) * x +define <2 x i32> @mul_select_eq_poison_vector(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @mul_select_eq_poison_vector( +; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[Y_FR:%.*]], ; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[Y_FR]], [[X:%.*]] -; CHECK-NEXT: ret <2 x i32> [[M]] +; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C]], <2 x i32> , <2 x i32> [[M]] +; CHECK-NEXT: ret <2 x i32> [[R]] ; - %c = icmp eq <2 x i32> %x, + %c = icmp eq <2 x i32> %x, %m = mul <2 x i32> %x, %y %r = select <2 x i1> %c, <2 x i32> , <2 x i32> %m ret <2 x i32> %r } ; Check that a select is folded into multiplication if other select's operand -; is a vector consisting of zeros and undefs. -; select ( x == 0), {0, undef, ...}, x * y --> freeze(y) * x -define <2 x i32> @mul_select_eq_zero_sel_undef_vector(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @mul_select_eq_zero_sel_undef_vector( +; is a vector consisting of zeros and poisons. +; select ( x == 0), {0, poison, ...}, x * y --> freeze(y) * x +define <2 x i32> @mul_select_eq_zero_sel_poison_vector(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @mul_select_eq_zero_sel_poison_vector( ; CHECK-NEXT: [[Y_FR:%.*]] = freeze <2 x i32> [[Y:%.*]] ; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[Y_FR]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i32> [[M]] ; %c = icmp eq <2 x i32> %x, zeroinitializer %m = mul <2 x i32> %x, %y - %r = select <2 x i1> %c, <2 x i32> , <2 x i32> %m + %r = select <2 x i1> %c, <2 x i32> , <2 x i32> %m ret <2 x i32> %r } ; Negative test: select should not be folded into mul because ; condition's operand and select's operand do not merge into zero vector. -define <2 x i32> @mul_select_eq_undef_vector_not_merging_to_zero(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @mul_select_eq_undef_vector_not_merging_to_zero( -; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[X:%.*]], +define <2 x i32> @mul_select_eq_poison_vector_not_merging_to_zero(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @mul_select_eq_poison_vector_not_merging_to_zero( +; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[X]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C]], <2 x i32> , <2 x i32> [[M]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %c = icmp eq <2 x i32> %x, + %c = icmp eq <2 x i32> %x, %m = mul <2 x i32> %x, %y %r = select <2 x i1> %c, <2 x i32> , <2 x i32> %m ret <2 x i32> %r diff --git a/llvm/test/Transforms/InstCombine/select_meta.ll b/llvm/test/Transforms/InstCombine/select_meta.ll index aa794e82e0fdc..3898fd9fa1f57 100644 --- a/llvm/test/Transforms/InstCombine/select_meta.ll +++ b/llvm/test/Transforms/InstCombine/select_meta.ll @@ -301,15 +301,15 @@ define <2 x i32> @not_cond_vec(<2 x i1> %c, <2 x i32> %tv, <2 x i32> %fv) { ret <2 x i32> %r } -; Should match vector 'not' with undef element. +; Should match vector 'not' with poison element. 
; The condition is inverted, and the select ops are swapped. The metadata should be swapped. -define <2 x i32> @not_cond_vec_undef(<2 x i1> %c, <2 x i32> %tv, <2 x i32> %fv) { -; CHECK-LABEL: @not_cond_vec_undef( +define <2 x i32> @not_cond_vec_poison(<2 x i1> %c, <2 x i32> %tv, <2 x i32> %fv) { +; CHECK-LABEL: @not_cond_vec_poison( ; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C:%.*]], <2 x i32> [[FV:%.*]], <2 x i32> [[TV:%.*]], !prof [[PROF1]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %notc = xor <2 x i1> %c, + %notc = xor <2 x i1> %c, %r = select <2 x i1> %notc, <2 x i32> %tv, <2 x i32> %fv, !prof !1 ret <2 x i32> %r } diff --git a/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll b/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll index 3ee0224eb1d03..a3c8d3393d04f 100644 --- a/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll +++ b/llvm/test/Transforms/InstCombine/set-lowbits-mask-canonicalize.ll @@ -196,36 +196,36 @@ define <2 x i32> @shl_add_vec(<2 x i32> %NBits) { ret <2 x i32> %ret } -define <3 x i32> @shl_add_vec_undef0(<3 x i32> %NBits) { -; CHECK-LABEL: @shl_add_vec_undef0( +define <3 x i32> @shl_add_vec_poison0(<3 x i32> %NBits) { +; CHECK-LABEL: @shl_add_vec_poison0( ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], ; CHECK-NEXT: ret <3 x i32> [[RET]] ; - %setbit = shl <3 x i32> , %NBits + %setbit = shl <3 x i32> , %NBits %ret = add <3 x i32> %setbit, ret <3 x i32> %ret } -define <3 x i32> @shl_add_vec_undef1(<3 x i32> %NBits) { -; CHECK-LABEL: @shl_add_vec_undef1( +define <3 x i32> @shl_add_vec_poison1(<3 x i32> %NBits) { +; CHECK-LABEL: @shl_add_vec_poison1( ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %setbit = shl <3 x i32> , %NBits - %ret = add <3 x i32> %setbit, + %ret = add <3 x i32> %setbit, ret <3 x i32> %ret } -define <3 x i32> @shl_add_vec_undef2(<3 x i32> %NBits) { -; CHECK-LABEL: @shl_add_vec_undef2( +define <3 x i32> @shl_add_vec_poison2(<3 x i32> %NBits) { +; CHECK-LABEL: @shl_add_vec_poison2( ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], ; CHECK-NEXT: ret <3 x i32> [[RET]] ; - %setbit = shl <3 x i32> , %NBits - %ret = add <3 x i32> %setbit, + %setbit = shl <3 x i32> , %NBits + %ret = add <3 x i32> %setbit, ret <3 x i32> %ret } diff --git a/llvm/test/Transforms/InstCombine/sext.ll b/llvm/test/Transforms/InstCombine/sext.ll index e3b6058ce7f80..6d263cfcda057 100644 --- a/llvm/test/Transforms/InstCombine/sext.ll +++ b/llvm/test/Transforms/InstCombine/sext.ll @@ -167,39 +167,39 @@ define <2 x i32> @test10_vec_nonuniform(<2 x i32> %i) { ret <2 x i32> %D } -define <2 x i32> @test10_vec_undef0(<2 x i32> %i) { -; CHECK-LABEL: @test10_vec_undef0( -; CHECK-NEXT: [[D1:%.*]] = shl <2 x i32> [[I:%.*]], -; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i32> [[D1]], +define <2 x i32> @test10_vec_poison0(<2 x i32> %i) { +; CHECK-LABEL: @test10_vec_poison0( +; CHECK-NEXT: [[D1:%.*]] = shl <2 x i32> [[I:%.*]], +; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i32> [[D1]], ; CHECK-NEXT: ret <2 x i32> [[D]] ; %A = trunc <2 x i32> %i to <2 x i8> %B = shl <2 x i8> %A, - %C = ashr <2 x i8> %B, + %C = ashr <2 x i8> %B, %D = sext <2 x i8> %C to <2 x i32> ret <2 x i32> %D } -define <2 x i32> @test10_vec_undef1(<2 x i32> %i) { -; CHECK-LABEL: @test10_vec_undef1( +define <2 x i32> @test10_vec_poison1(<2 x 
i32> %i) { +; CHECK-LABEL: @test10_vec_poison1( ; CHECK-NEXT: [[D1:%.*]] = shl <2 x i32> [[I:%.*]], ; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i32> [[D1]], ; CHECK-NEXT: ret <2 x i32> [[D]] ; %A = trunc <2 x i32> %i to <2 x i8> - %B = shl <2 x i8> %A, + %B = shl <2 x i8> %A, %C = ashr <2 x i8> %B, %D = sext <2 x i8> %C to <2 x i32> ret <2 x i32> %D } -define <2 x i32> @test10_vec_undef2(<2 x i32> %i) { -; CHECK-LABEL: @test10_vec_undef2( -; CHECK-NEXT: [[D1:%.*]] = shl <2 x i32> [[I:%.*]], -; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i32> [[D1]], +define <2 x i32> @test10_vec_poison2(<2 x i32> %i) { +; CHECK-LABEL: @test10_vec_poison2( +; CHECK-NEXT: [[D1:%.*]] = shl <2 x i32> [[I:%.*]], +; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i32> [[D1]], ; CHECK-NEXT: ret <2 x i32> [[D]] ; %A = trunc <2 x i32> %i to <2 x i8> - %B = shl <2 x i8> %A, - %C = ashr <2 x i8> %B, + %B = shl <2 x i8> %A, + %C = ashr <2 x i8> %B, %D = sext <2 x i8> %C to <2 x i32> ret <2 x i32> %D } diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll index 0262db1a01e5c..96d429c62a88f 100644 --- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll +++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll @@ -143,34 +143,34 @@ define <2 x i1> @t8_const_lshr_shl_ne_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) { %t3 = icmp ne <2 x i32> %t2, ret <2 x i1> %t3 } -define <3 x i1> @t9_const_lshr_shl_ne_vec_undef0(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t9_const_lshr_shl_ne_vec_undef0( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i1> @t9_const_lshr_shl_ne_vec_poison0(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t9_const_lshr_shl_ne_vec_poison0( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[T3]] ; - %t0 = lshr <3 x i32> %x, + %t0 = lshr <3 x i32> %x, %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } -define <3 x i1> @t10_const_lshr_shl_ne_vec_undef1(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t10_const_lshr_shl_ne_vec_undef1( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i1> @t10_const_lshr_shl_ne_vec_poison1(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t10_const_lshr_shl_ne_vec_poison1( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[T3]] ; %t0 = lshr <3 x i32> %x, - %t1 = shl <3 x i32> %y, + %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } -define <3 x i1> @t11_const_lshr_shl_ne_vec_undef2(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t11_const_lshr_shl_ne_vec_undef2( +define <3 x i1> @t11_const_lshr_shl_ne_vec_poison2(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t11_const_lshr_shl_ne_vec_poison2( ; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer @@ -179,59 +179,59 @@ define <3 x i1> @t11_const_lshr_shl_ne_vec_undef2(<3 x i32> %x, <3 x i32> %y) { %t0 = lshr <3 x i32> %x, %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 - %t3 = icmp ne <3 x i32> 
%t2, + %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } -define <3 x i1> @t12_const_lshr_shl_ne_vec_undef3(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t12_const_lshr_shl_ne_vec_undef3( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i1> @t12_const_lshr_shl_ne_vec_poison3(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t12_const_lshr_shl_ne_vec_poison3( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[T3]] ; - %t0 = lshr <3 x i32> %x, - %t1 = shl <3 x i32> %y, + %t0 = lshr <3 x i32> %x, + %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } -define <3 x i1> @t13_const_lshr_shl_ne_vec_undef4(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t13_const_lshr_shl_ne_vec_undef4( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i1> @t13_const_lshr_shl_ne_vec_poison4(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t13_const_lshr_shl_ne_vec_poison4( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[T3]] ; %t0 = lshr <3 x i32> %x, - %t1 = shl <3 x i32> %y, + %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 - %t3 = icmp ne <3 x i32> %t2, + %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } -define <3 x i1> @t14_const_lshr_shl_ne_vec_undef5(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t14_const_lshr_shl_ne_vec_undef5( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i1> @t14_const_lshr_shl_ne_vec_poison5(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t14_const_lshr_shl_ne_vec_poison5( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[T3]] ; - %t0 = lshr <3 x i32> %x, + %t0 = lshr <3 x i32> %x, %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 - %t3 = icmp ne <3 x i32> %t2, + %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } -define <3 x i1> @t15_const_lshr_shl_ne_vec_undef6(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t15_const_lshr_shl_ne_vec_undef6( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i1> @t15_const_lshr_shl_ne_vec_poison6(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t15_const_lshr_shl_ne_vec_poison6( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: [[T3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: ret <3 x i1> [[T3]] ; - %t0 = lshr <3 x i32> %x, - %t1 = shl <3 x i32> %y, + %t0 = lshr <3 x i32> %x, + %t1 = shl <3 x i32> %y, %t2 = and <3 x i32> %t1, %t0 - %t3 = icmp ne <3 x i32> %t2, + %t3 = icmp ne <3 x i32> %t2, ret <3 x i1> %t3 } diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll index 84dd4c57ebc61..9efc30cc9d916 100644 --- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll +++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll @@ -42,13 +42,13 @@ define <2 x i16> @t1_vec_splat(<2 x i32> %x, <2 x i16> %y) { ret <2 x i16> %t5 } -define <3 x i16> 
@t3_vec_nonsplat_undef0(<3 x i32> %x, <3 x i16> %y) { -; CHECK-LABEL: @t3_vec_nonsplat_undef0( -; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], +define <3 x i16> @t3_vec_nonsplat_poison0(<3 x i32> %x, <3 x i16> %y) { +; CHECK-LABEL: @t3_vec_nonsplat_poison0( +; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[T5:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i16> ; CHECK-NEXT: ret <3 x i16> [[T5]] ; - %t0 = sub <3 x i16> , %y + %t0 = sub <3 x i16> , %y %t1 = zext <3 x i16> %t0 to <3 x i32> %t2 = ashr <3 x i32> %x, %t1 %t3 = trunc <3 x i32> %t2 to <3 x i16> @@ -57,9 +57,9 @@ define <3 x i16> @t3_vec_nonsplat_undef0(<3 x i32> %x, <3 x i16> %y) { ret <3 x i16> %t5 } -define <3 x i16> @t4_vec_nonsplat_undef1(<3 x i32> %x, <3 x i16> %y) { -; CHECK-LABEL: @t4_vec_nonsplat_undef1( -; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], +define <3 x i16> @t4_vec_nonsplat_poison1(<3 x i32> %x, <3 x i16> %y) { +; CHECK-LABEL: @t4_vec_nonsplat_poison1( +; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[T5:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i16> ; CHECK-NEXT: ret <3 x i16> [[T5]] ; @@ -67,22 +67,22 @@ define <3 x i16> @t4_vec_nonsplat_undef1(<3 x i32> %x, <3 x i16> %y) { %t1 = zext <3 x i16> %t0 to <3 x i32> %t2 = ashr <3 x i32> %x, %t1 %t3 = trunc <3 x i32> %t2 to <3 x i16> - %t4 = add <3 x i16> %y, + %t4 = add <3 x i16> %y, %t5 = ashr <3 x i16> %t3, %t4 ret <3 x i16> %t5 } -define <3 x i16> @t5_vec_nonsplat_undef1(<3 x i32> %x, <3 x i16> %y) { -; CHECK-LABEL: @t5_vec_nonsplat_undef1( -; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], +define <3 x i16> @t5_vec_nonsplat_poison1(<3 x i32> %x, <3 x i16> %y) { +; CHECK-LABEL: @t5_vec_nonsplat_poison1( +; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], ; CHECK-NEXT: [[T5:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i16> ; CHECK-NEXT: ret <3 x i16> [[T5]] ; - %t0 = sub <3 x i16> , %y + %t0 = sub <3 x i16> , %y %t1 = zext <3 x i16> %t0 to <3 x i32> %t2 = ashr <3 x i32> %x, %t1 %t3 = trunc <3 x i32> %t2 to <3 x i16> - %t4 = add <3 x i16> %y, + %t4 = add <3 x i16> %y, %t5 = ashr <3 x i16> %t3, %t4 ret <3 x i16> %t5 } diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll index 214ec88d2e551..c31b6ed3ea2ba 100644 --- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll +++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll @@ -42,13 +42,13 @@ define <2 x i16> @t1_vec_splat(<2 x i32> %x, <2 x i16> %y) { ret <2 x i16> %t5 } -define <3 x i16> @t3_vec_nonsplat_undef0(<3 x i32> %x, <3 x i16> %y) { -; CHECK-LABEL: @t3_vec_nonsplat_undef0( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], -; CHECK-NEXT: [[T5:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i16> +define <3 x i16> @t3_vec_nonsplat_poison0(<3 x i32> %x, <3 x i16> %y) { +; CHECK-LABEL: @t3_vec_nonsplat_poison0( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +; CHECK-NEXT: [[T5:%.*]] = trunc nuw nsw <3 x i32> [[TMP1]] to <3 x i16> ; CHECK-NEXT: ret <3 x i16> [[T5]] ; - %t0 = sub <3 x i16> , %y + %t0 = sub <3 x i16> , %y %t1 = zext <3 x i16> %t0 to <3 x i32> %t2 = lshr <3 x i32> %x, %t1 %t3 = trunc <3 x i32> %t2 to <3 x i16> @@ -57,32 +57,32 @@ define <3 x i16> @t3_vec_nonsplat_undef0(<3 x i32> %x, <3 x i16> %y) { ret <3 x i16> %t5 } -define <3 x i16> @t4_vec_nonsplat_undef1(<3 x i32> %x, <3 x i16> %y) { -; CHECK-LABEL: @t4_vec_nonsplat_undef1( -; 
CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], -; CHECK-NEXT: [[T5:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i16> +define <3 x i16> @t4_vec_nonsplat_poison1(<3 x i32> %x, <3 x i16> %y) { +; CHECK-LABEL: @t4_vec_nonsplat_poison1( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +; CHECK-NEXT: [[T5:%.*]] = trunc nuw nsw <3 x i32> [[TMP1]] to <3 x i16> ; CHECK-NEXT: ret <3 x i16> [[T5]] ; %t0 = sub <3 x i16> , %y %t1 = zext <3 x i16> %t0 to <3 x i32> %t2 = lshr <3 x i32> %x, %t1 %t3 = trunc <3 x i32> %t2 to <3 x i16> - %t4 = add <3 x i16> %y, + %t4 = add <3 x i16> %y, %t5 = lshr <3 x i16> %t3, %t4 ret <3 x i16> %t5 } -define <3 x i16> @t5_vec_nonsplat_undef1(<3 x i32> %x, <3 x i16> %y) { -; CHECK-LABEL: @t5_vec_nonsplat_undef1( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], -; CHECK-NEXT: [[T5:%.*]] = trunc <3 x i32> [[TMP1]] to <3 x i16> +define <3 x i16> @t5_vec_nonsplat_poison1(<3 x i32> %x, <3 x i16> %y) { +; CHECK-LABEL: @t5_vec_nonsplat_poison1( +; CHECK-NEXT: [[TMP1:%.*]] = lshr <3 x i32> [[X:%.*]], +; CHECK-NEXT: [[T5:%.*]] = trunc nuw nsw <3 x i32> [[TMP1]] to <3 x i16> ; CHECK-NEXT: ret <3 x i16> [[T5]] ; - %t0 = sub <3 x i16> , %y + %t0 = sub <3 x i16> , %y %t1 = zext <3 x i16> %t0 to <3 x i32> %t2 = lshr <3 x i32> %x, %t1 %t3 = trunc <3 x i32> %t2 to <3 x i16> - %t4 = add <3 x i16> %y, + %t4 = add <3 x i16> %y, %t5 = lshr <3 x i16> %t3, %t4 ret <3 x i16> %t5 } diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll index b96bcd6bab4f1..6bbe4c5151e45 100644 --- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll +++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll @@ -48,38 +48,38 @@ define <2 x i32> @t2_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) { ; Basic vector tests -define <3 x i32> @t3_vec_nonsplat_undef0(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t3_vec_nonsplat_undef0( -; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i32> @t3_vec_nonsplat_poison0(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t3_vec_nonsplat_poison0( +; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: ret <3 x i32> [[T3]] ; - %t0 = sub <3 x i32> , %y + %t0 = sub <3 x i32> , %y %t1 = lshr <3 x i32> %x, %t0 %t2 = add <3 x i32> %y, %t3 = lshr <3 x i32> %t1, %t2 ret <3 x i32> %t3 } -define <3 x i32> @t4_vec_nonsplat_undef1(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t4_vec_nonsplat_undef1( -; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i32> @t4_vec_nonsplat_poison1(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t4_vec_nonsplat_poison1( +; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: ret <3 x i32> [[T3]] ; %t0 = sub <3 x i32> , %y %t1 = lshr <3 x i32> %x, %t0 - %t2 = add <3 x i32> %y, + %t2 = add <3 x i32> %y, %t3 = lshr <3 x i32> %t1, %t2 ret <3 x i32> %t3 } -define <3 x i32> @t5_vec_nonsplat_undef1(<3 x i32> %x, <3 x i32> %y) { -; CHECK-LABEL: @t5_vec_nonsplat_undef1( -; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], +define <3 x i32> @t5_vec_nonsplat_poison1(<3 x i32> %x, <3 x i32> %y) { +; CHECK-LABEL: @t5_vec_nonsplat_poison1( +; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: ret <3 x i32> [[T3]] ; - %t0 = sub <3 x i32> , %y + %t0 = sub <3 x i32> , %y %t1 = lshr <3 x i32> %x, %t0 - %t2 = add <3 x i32> %y, + %t2 = add <3 x i32> %y, %t3 = lshr <3 x i32> %t1, %t2 ret <3 x i32> %t3 } diff --git a/llvm/test/Transforms/InstCombine/shift-logic.ll 
b/llvm/test/Transforms/InstCombine/shift-logic.ll index c982b45b504e9..b591400c6a260 100644 --- a/llvm/test/Transforms/InstCombine/shift-logic.ll +++ b/llvm/test/Transforms/InstCombine/shift-logic.ll @@ -44,18 +44,18 @@ define i16 @shl_or(i16 %x, i16 %py) { ret i16 %sh1 } -define <2 x i16> @shl_or_undef(<2 x i16> %x, <2 x i16> %py) { -; CHECK-LABEL: @shl_or_undef( +define <2 x i16> @shl_or_poison(<2 x i16> %x, <2 x i16> %py) { +; CHECK-LABEL: @shl_or_poison( ; CHECK-NEXT: [[Y:%.*]] = srem <2 x i16> [[PY:%.*]], -; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i16> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i16> [[Y]], +; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i16> [[X:%.*]], +; CHECK-NEXT: [[TMP2:%.*]] = shl nsw <2 x i16> [[Y]], ; CHECK-NEXT: [[SH1:%.*]] = or <2 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i16> [[SH1]] ; %y = srem <2 x i16> %py, ; thwart complexity-based canonicalization - %sh0 = shl <2 x i16> %x, + %sh0 = shl <2 x i16> %x, %r = or <2 x i16> %y, %sh0 - %sh1 = shl <2 x i16> %r, + %sh1 = shl <2 x i16> %r, ret <2 x i16> %sh1 } @@ -100,18 +100,18 @@ define i64 @lshr_and(i64 %x, i64 %py) { ret i64 %sh1 } -define <2 x i64> @lshr_and_undef(<2 x i64> %x, <2 x i64> %py) { -; CHECK-LABEL: @lshr_and_undef( +define <2 x i64> @lshr_and_poison(<2 x i64> %x, <2 x i64> %py) { +; CHECK-LABEL: @lshr_and_poison( ; CHECK-NEXT: [[Y:%.*]] = srem <2 x i64> [[PY:%.*]], -; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = lshr <2 x i64> [[Y]], +; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[X:%.*]], +; CHECK-NEXT: [[TMP2:%.*]] = lshr <2 x i64> [[Y]], ; CHECK-NEXT: [[SH1:%.*]] = and <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i64> [[SH1]] ; %y = srem <2 x i64> %py, ; thwart complexity-based canonicalization - %sh0 = lshr <2 x i64> %x, + %sh0 = lshr <2 x i64> %x, %r = and <2 x i64> %y, %sh0 - %sh1 = lshr <2 x i64> %r, + %sh1 = lshr <2 x i64> %r, ret <2 x i64> %sh1 } @@ -212,16 +212,16 @@ define i32 @ashr_overshift_xor(i32 %x, i32 %y) { ret i32 %sh1 } -define <2 x i32> @ashr_undef_undef_xor(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @ashr_undef_undef_xor( -; CHECK-NEXT: [[SH0:%.*]] = ashr <2 x i32> [[X:%.*]], +define <2 x i32> @ashr_poison_poison_xor(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @ashr_poison_poison_xor( +; CHECK-NEXT: [[SH0:%.*]] = ashr <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[R:%.*]] = xor <2 x i32> [[SH0]], [[Y:%.*]] -; CHECK-NEXT: [[SH1:%.*]] = ashr <2 x i32> [[R]], +; CHECK-NEXT: [[SH1:%.*]] = ashr <2 x i32> [[R]], ; CHECK-NEXT: ret <2 x i32> [[SH1]] ; - %sh0 = ashr <2 x i32> %x, + %sh0 = ashr <2 x i32> %x, %r = xor <2 x i32> %y, %sh0 - %sh1 = ashr <2 x i32> %r, + %sh1 = ashr <2 x i32> %r, ret <2 x i32> %sh1 } @@ -390,18 +390,18 @@ define <2 x i8> @shl_add_nonuniform(<2 x i8> %x, <2 x i8> %y) { } -define <2 x i64> @shl_add_undef(<2 x i64> %x, <2 x i64> %py) { -; CHECK-LABEL: @shl_add_undef( +define <2 x i64> @shl_add_poison(<2 x i64> %x, <2 x i64> %py) { +; CHECK-LABEL: @shl_add_poison( ; CHECK-NEXT: [[Y:%.*]] = srem <2 x i64> [[PY:%.*]], -; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i64> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[Y]], +; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i64> [[X:%.*]], +; CHECK-NEXT: [[TMP2:%.*]] = shl nsw <2 x i64> [[Y]], ; CHECK-NEXT: [[SH1:%.*]] = add <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i64> [[SH1]] ; %y = srem <2 x i64> %py, ; thwart complexity-based canonicalization - %sh0 = shl <2 x i64> %x, + %sh0 = shl <2 x i64> %x, %r = add <2 x i64> %y, %sh0 - %sh1 = shl <2 x i64> %r, + %sh1 = shl <2 x i64> 
%r, ret <2 x i64> %sh1 } @@ -432,18 +432,18 @@ define <2 x i8> @lshr_add_nonuniform(<2 x i8> %x, <2 x i8> %y) { ret <2 x i8> %sh1 } -define <2 x i64> @lshr_add_undef(<2 x i64> %x, <2 x i64> %py) { -; CHECK-LABEL: @lshr_add_undef( +define <2 x i64> @lshr_add_poison(<2 x i64> %x, <2 x i64> %py) { +; CHECK-LABEL: @lshr_add_poison( ; CHECK-NEXT: [[Y:%.*]] = srem <2 x i64> [[PY:%.*]], -; CHECK-NEXT: [[SH0:%.*]] = lshr <2 x i64> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = add <2 x i64> [[Y]], [[SH0]] -; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i64> [[R]], +; CHECK-NEXT: [[SH0:%.*]] = lshr <2 x i64> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i64> [[Y]], [[SH0]] +; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i64> [[R]], ; CHECK-NEXT: ret <2 x i64> [[SH1]] ; %y = srem <2 x i64> %py, ; thwart complexity-based canonicalization - %sh0 = lshr <2 x i64> %x, + %sh0 = lshr <2 x i64> %x, %r = add <2 x i64> %y, %sh0 - %sh1 = lshr <2 x i64> %r, + %sh1 = lshr <2 x i64> %r, ret <2 x i64> %sh1 } @@ -488,18 +488,18 @@ define <2 x i8> @shl_sub_nonuniform(<2 x i8> %x, <2 x i8> %y) { } -define <2 x i64> @shl_sub_undef(<2 x i64> %x, <2 x i64> %py) { -; CHECK-LABEL: @shl_sub_undef( +define <2 x i64> @shl_sub_poison(<2 x i64> %x, <2 x i64> %py) { +; CHECK-LABEL: @shl_sub_poison( ; CHECK-NEXT: [[Y:%.*]] = srem <2 x i64> [[PY:%.*]], -; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i64> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[Y]], +; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i64> [[X:%.*]], +; CHECK-NEXT: [[TMP2:%.*]] = shl nsw <2 x i64> [[Y]], ; CHECK-NEXT: [[SH1:%.*]] = sub <2 x i64> [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret <2 x i64> [[SH1]] ; %y = srem <2 x i64> %py, ; thwart complexity-based canonicalization - %sh0 = shl <2 x i64> %x, + %sh0 = shl <2 x i64> %x, %r = sub <2 x i64> %y, %sh0 - %sh1 = shl <2 x i64> %r, + %sh1 = shl <2 x i64> %r, ret <2 x i64> %sh1 } @@ -530,17 +530,17 @@ define <2 x i8> @lshr_sub_nonuniform(<2 x i8> %x, <2 x i8> %y) { ret <2 x i8> %sh1 } -define <2 x i64> @lshr_sub_undef(<2 x i64> %x, <2 x i64> %py) { -; CHECK-LABEL: @lshr_sub_undef( +define <2 x i64> @lshr_sub_poison(<2 x i64> %x, <2 x i64> %py) { +; CHECK-LABEL: @lshr_sub_poison( ; CHECK-NEXT: [[Y:%.*]] = srem <2 x i64> [[PY:%.*]], -; CHECK-NEXT: [[SH0:%.*]] = lshr <2 x i64> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = sub <2 x i64> [[Y]], [[SH0]] -; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i64> [[R]], +; CHECK-NEXT: [[SH0:%.*]] = lshr <2 x i64> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = sub nsw <2 x i64> [[Y]], [[SH0]] +; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i64> [[R]], ; CHECK-NEXT: ret <2 x i64> [[SH1]] ; %y = srem <2 x i64> %py, ; thwart complexity-based canonicalization - %sh0 = lshr <2 x i64> %x, + %sh0 = lshr <2 x i64> %x, %r = sub <2 x i64> %y, %sh0 - %sh1 = lshr <2 x i64> %r, + %sh1 = lshr <2 x i64> %r, ret <2 x i64> %sh1 } diff --git a/llvm/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll b/llvm/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll index 406dc72f2646e..daa4955796594 100644 --- a/llvm/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll +++ b/llvm/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll @@ -81,39 +81,39 @@ define <4 x i1> @vec_4xi32_shl_and_negC_eq(<4 x i32> %x, <4 x i32> %y) { ret <4 x i1> %r } -define <4 x i1> @vec_shl_and_negC_eq_undef1(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_shl_and_negC_eq_undef1( +define <4 x i1> @vec_shl_and_negC_eq_poison1(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_shl_and_negC_eq_poison1( ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: 
[[R:%.*]] = icmp ult <4 x i32> [[SHL]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %shl = shl <4 x i32> %x, %y - %and = and <4 x i32> %shl, ; ~7 + %and = and <4 x i32> %shl, ; ~7 %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_shl_and_negC_eq_undef2(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_shl_and_negC_eq_undef2( +define <4 x i1> @vec_shl_and_negC_eq_poison2(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_shl_and_negC_eq_poison2( ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ult <4 x i32> [[SHL]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %shl = shl <4 x i32> %x, %y %and = and <4 x i32> %shl, ; ~7 - %r = icmp eq <4 x i32> %and, + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_shl_and_negC_eq_undef3(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_shl_and_negC_eq_undef3( +define <4 x i1> @vec_shl_and_negC_eq_poison3(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_shl_and_negC_eq_poison3( ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp ult <4 x i32> [[SHL]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %shl = shl <4 x i32> %x, %y - %and = and <4 x i32> %shl, ; ~7 - %r = icmp eq <4 x i32> %and, + %and = and <4 x i32> %shl, ; ~7 + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll b/llvm/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll index 4c2c876e3925b..dcc181945357d 100644 --- a/llvm/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll +++ b/llvm/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll @@ -81,39 +81,39 @@ define <4 x i1> @vec_4xi32_shl_and_signbit_eq(<4 x i32> %x, <4 x i32> %y) { ret <4 x i1> %r } -define <4 x i1> @vec_4xi32_shl_and_signbit_eq_undef1(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_undef1( +define <4 x i1> @vec_4xi32_shl_and_signbit_eq_poison1(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_poison1( ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <4 x i32> [[SHL]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %shl = shl <4 x i32> %x, %y - %and = and <4 x i32> %shl, + %and = and <4 x i32> %shl, %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_4xi32_shl_and_signbit_eq_undef2(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_undef2( +define <4 x i1> @vec_4xi32_shl_and_signbit_eq_poison2(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_poison2( ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <4 x i32> [[SHL]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %shl = shl <4 x i32> %x, %y %and = and <4 x i32> %shl, - %r = icmp eq <4 x i32> %and, + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } -define <4 x i1> @vec_4xi32_shl_and_signbit_eq_undef3(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_undef3( +define <4 x i1> @vec_4xi32_shl_and_signbit_eq_poison3(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_poison3( ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <4 x i32> [[SHL]], ; CHECK-NEXT: ret <4 x i1> [[R]] ; %shl = shl <4 x i32> %x, %y - %and = and <4 x i32> %shl, - %r = icmp eq <4 x i32> %and, + %and = and <4 x i32> %shl, + %r = icmp eq <4 x i32> %and, ret <4 x i1> %r } diff --git 
a/llvm/test/Transforms/InstCombine/signmask-of-sext-vs-of-shl-of-zext.ll b/llvm/test/Transforms/InstCombine/signmask-of-sext-vs-of-shl-of-zext.ll index aeb4c8bb62cba..e7505721cad60 100644 --- a/llvm/test/Transforms/InstCombine/signmask-of-sext-vs-of-shl-of-zext.ll +++ b/llvm/test/Transforms/InstCombine/signmask-of-sext-vs-of-shl-of-zext.ll @@ -129,40 +129,56 @@ define <2 x i32> @t8(<2 x i16> %x) { %r = and <2 x i32> %i1, ret <2 x i32> %r } + define <2 x i32> @t9(<2 x i16> %x) { ; CHECK-LABEL: @t9( -; CHECK-NEXT: [[X_SIGNEXT:%.*]] = sext <2 x i16> [[X:%.*]] to <2 x i32> -; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[X_SIGNEXT]], +; CHECK-NEXT: [[I1:%.*]] = sext <2 x i16> [[X:%.*]] to <2 x i32> +; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[I1]], ; CHECK-NEXT: ret <2 x i32> [[R]] ; %i0 = zext <2 x i16> %x to <2 x i32> - %i1 = shl <2 x i32> %i0, + %i1 = shl <2 x i32> %i0, %r = and <2 x i32> %i1, - ; Here undef can be propagated into the mask. ret <2 x i32> %r } -define <2 x i32> @t10(<2 x i16> %x) { -; CHECK-LABEL: @t10( -; CHECK-NEXT: [[X_SIGNEXT:%.*]] = sext <2 x i16> [[X:%.*]] to <2 x i32> -; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[X_SIGNEXT]], + +; If we folded this, we wouldn't be able to keep the undef mask. +define <2 x i32> @t10_undef(<2 x i16> %x) { +; CHECK-LABEL: @t10_undef( +; CHECK-NEXT: [[I0:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i32> +; CHECK-NEXT: [[I1:%.*]] = shl nuw <2 x i32> [[I0]], +; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[I1]], ; CHECK-NEXT: ret <2 x i32> [[R]] ; %i0 = zext <2 x i16> %x to <2 x i32> %i1 = shl <2 x i32> %i0, %r = and <2 x i32> %i1, - ; CAREFUL! We can't keep undef mask here, since high bits are no longer zero, + ret <2 x i32> %r +} + +define <2 x i32> @t10_poison(<2 x i16> %x) { +; CHECK-LABEL: @t10_poison( +; CHECK-NEXT: [[I1:%.*]] = sext <2 x i16> [[X:%.*]] to <2 x i32> +; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[I1]], +; CHECK-NEXT: ret <2 x i32> [[R]] +; + %i0 = zext <2 x i16> %x to <2 x i32> + %i1 = shl <2 x i32> %i0, + %r = and <2 x i32> %i1, + ; CAREFUL! We can't keep poison mask here, since high bits are no longer zero, ; we must sanitize it to 0. ret <2 x i32> %r } + define <2 x i32> @t11(<2 x i16> %x) { ; CHECK-LABEL: @t11( ; CHECK-NEXT: [[X_SIGNEXT:%.*]] = sext <2 x i16> [[X:%.*]] to <2 x i32> -; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[X_SIGNEXT]], +; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[X_SIGNEXT]], ; CHECK-NEXT: ret <2 x i32> [[R]] ; %i0 = zext <2 x i16> %x to <2 x i32> - %i1 = shl <2 x i32> %i0, - %r = and <2 x i32> %i1, - ; Here undef mask is fine. + %i1 = shl <2 x i32> %i0, + %r = and <2 x i32> %i1, + ; Here poison mask is fine. 
ret <2 x i32> %r } diff --git a/llvm/test/Transforms/InstCombine/sub-not.ll b/llvm/test/Transforms/InstCombine/sub-not.ll index ec36754d3e9b1..89ccf5aa3c8f4 100644 --- a/llvm/test/Transforms/InstCombine/sub-not.ll +++ b/llvm/test/Transforms/InstCombine/sub-not.ll @@ -34,7 +34,7 @@ define <2 x i8> @sub_not_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-NEXT: ret <2 x i8> [[R]] ; %s = sub <2 x i8> %x, %y - %r = xor <2 x i8> %s, + %r = xor <2 x i8> %s, ret <2 x i8> %r } @@ -69,7 +69,7 @@ define <2 x i8> @dec_sub_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-NEXT: ret <2 x i8> [[R]] ; %s = sub <2 x i8> %x, %y - %r = add <2 x i8> %s, + %r = add <2 x i8> %s, ret <2 x i8> %r } @@ -103,7 +103,7 @@ define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[S_NEG]], [[Y:%.*]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; - %s = add <2 x i8> %x, + %s = add <2 x i8> %x, %r = sub <2 x i8> %y, %s ret <2 x i8> %r } @@ -138,7 +138,7 @@ define <2 x i8> @sub_dec_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; - %s = add <2 x i8> %x, + %s = add <2 x i8> %x, %r = sub <2 x i8> %s, %y ret <2 x i8> %r } diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll index 249b5673c8acf..a84e389f13c3b 100644 --- a/llvm/test/Transforms/InstCombine/sub.ll +++ b/llvm/test/Transforms/InstCombine/sub.ll @@ -130,44 +130,44 @@ define <2 x i32> @neg_nsw_sub_nsw_vec(<2 x i32> %x, <2 x i32> %y) { ret <2 x i32> %r } -define <2 x i32> @neg_sub_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @neg_sub_vec_undef( +define <2 x i32> @neg_sub_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @neg_sub_vec_poison( ; CHECK-NEXT: [[R:%.*]] = add <2 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %neg = sub <2 x i32> , %x + %neg = sub <2 x i32> , %x %r = sub <2 x i32> %y, %neg ret <2 x i32> %r } -define <2 x i32> @neg_nsw_sub_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @neg_nsw_sub_vec_undef( +define <2 x i32> @neg_nsw_sub_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @neg_nsw_sub_vec_poison( ; CHECK-NEXT: [[R:%.*]] = add <2 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %neg = sub nsw <2 x i32> , %x + %neg = sub nsw <2 x i32> , %x %r = sub <2 x i32> %y, %neg ret <2 x i32> %r } -define <2 x i32> @neg_sub_nsw_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @neg_sub_nsw_vec_undef( +define <2 x i32> @neg_sub_nsw_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @neg_sub_nsw_vec_poison( ; CHECK-NEXT: [[R:%.*]] = add <2 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %neg = sub <2 x i32> , %x + %neg = sub <2 x i32> , %x %r = sub nsw <2 x i32> %y, %neg ret <2 x i32> %r } ; This should not drop 'nsw'. 
-define <2 x i32> @neg_nsw_sub_nsw_vec_undef(<2 x i32> %x, <2 x i32> %y) { -; CHECK-LABEL: @neg_nsw_sub_nsw_vec_undef( +define <2 x i32> @neg_nsw_sub_nsw_vec_poison(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @neg_nsw_sub_nsw_vec_poison( ; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; - %neg = sub nsw <2 x i32> , %x + %neg = sub nsw <2 x i32> , %x %r = sub nsw <2 x i32> %y, %neg ret <2 x i32> %r } @@ -205,13 +205,13 @@ define <2 x i8> @notnotsub_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i8> %sub } -define <2 x i8> @notnotsub_vec_undef_elts(<2 x i8> %x, <2 x i8> %y) { -; CHECK-LABEL: @notnotsub_vec_undef_elts( +define <2 x i8> @notnotsub_vec_poison_elts(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @notnotsub_vec_poison_elts( ; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i8> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i8> [[SUB]] ; - %nx = xor <2 x i8> %x, - %ny = xor <2 x i8> %y, + %nx = xor <2 x i8> %x, + %ny = xor <2 x i8> %y, %sub = sub <2 x i8> %nx, %ny ret <2 x i8> %sub } @@ -2351,12 +2351,12 @@ define <2 x i8> @sub_to_and_vector1(<2 x i8> %x) { define <2 x i8> @sub_to_and_vector2(<2 x i8> %x) { ; CHECK-LABEL: @sub_to_and_vector2( -; CHECK-NEXT: [[SUB:%.*]] = sub nuw <2 x i8> , [[X:%.*]] +; CHECK-NEXT: [[SUB:%.*]] = sub nuw <2 x i8> , [[X:%.*]] ; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[SUB]], ; CHECK-NEXT: [[R:%.*]] = sub nsw <2 x i8> , [[AND]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; - %sub = sub nuw <2 x i8> , %x + %sub = sub nuw <2 x i8> , %x %and = and <2 x i8> %sub, %r = sub <2 x i8> , %and ret <2 x i8> %r @@ -2366,12 +2366,12 @@ define <2 x i8> @sub_to_and_vector2(<2 x i8> %x) { define <2 x i8> @sub_to_and_vector3(<2 x i8> %x) { ; CHECK-LABEL: @sub_to_and_vector3( ; CHECK-NEXT: [[SUB:%.*]] = sub nuw <2 x i8> , [[X:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[SUB]], +; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[SUB]], ; CHECK-NEXT: [[R:%.*]] = sub nsw <2 x i8> , [[AND]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %sub = sub nuw <2 x i8> , %x - %and = and <2 x i8> %sub, + %and = and <2 x i8> %sub, %r = sub <2 x i8> , %and ret <2 x i8> %r } @@ -2381,12 +2381,12 @@ define <2 x i8> @sub_to_and_vector4(<2 x i8> %x) { ; CHECK-LABEL: @sub_to_and_vector4( ; CHECK-NEXT: [[SUB:%.*]] = sub nuw <2 x i8> , [[X:%.*]] ; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[SUB]], -; CHECK-NEXT: [[R:%.*]] = sub <2 x i8> , [[AND]] +; CHECK-NEXT: [[R:%.*]] = sub nsw <2 x i8> , [[AND]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %sub = sub nuw <2 x i8> , %x %and = and <2 x i8> %sub, - %r = sub <2 x i8> , %and + %r = sub <2 x i8> , %and ret <2 x i8> %r } diff --git a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll index 4c857125365a9..063006ba5eea8 100644 --- a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll @@ -49,15 +49,15 @@ define <2 x i64> @test1_vec_nonuniform(<2 x i64> %a) { ret <2 x i64> %d } -define <2 x i64> @test1_vec_undef(<2 x i64> %a) { -; CHECK-LABEL: @test1_vec_undef( +define <2 x i64> @test1_vec_poison(<2 x i64> %a) { +; CHECK-LABEL: @test1_vec_poison( ; CHECK-NEXT: [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32> -; CHECK-NEXT: [[D:%.*]] = and <2 x i64> [[A]], +; CHECK-NEXT: [[D:%.*]] = and <2 x i64> [[A]], ; CHECK-NEXT: call void @use_vec(<2 x i32> [[B]]) ; CHECK-NEXT: ret <2 x i64> [[D]] ; %b = trunc <2 x i64> %a to <2 x i32> - %c = and <2 x i32> %b, + %c = and <2 x i32> %b, %d = zext <2 x i32> %c to <2 x i64> call void @use_vec(<2 x i32> 
%b) ret <2 x i64> %d @@ -111,17 +111,17 @@ define <2 x i64> @test2_vec_nonuniform(<2 x i64> %a) { ret <2 x i64> %d } -define <2 x i64> @test2_vec_undef(<2 x i64> %a) { -; CHECK-LABEL: @test2_vec_undef( +define <2 x i64> @test2_vec_poison(<2 x i64> %a) { +; CHECK-LABEL: @test2_vec_poison( ; CHECK-NEXT: [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32> -; CHECK-NEXT: [[D1:%.*]] = shl <2 x i64> [[A]], -; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i64> [[D1]], +; CHECK-NEXT: [[D1:%.*]] = shl <2 x i64> [[A]], +; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i64> [[D1]], ; CHECK-NEXT: call void @use_vec(<2 x i32> [[B]]) ; CHECK-NEXT: ret <2 x i64> [[D]] ; %b = trunc <2 x i64> %a to <2 x i32> - %c = shl <2 x i32> %b, - %q = ashr <2 x i32> %c, + %c = shl <2 x i32> %b, + %q = ashr <2 x i32> %c, %d = sext <2 x i32> %q to <2 x i64> call void @use_vec(<2 x i32> %b) ret <2 x i64> %d @@ -300,18 +300,17 @@ define <2 x i64> @test8_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test8_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test8_vec_undef( -; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = shl <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i128> [[E]], [[C]] -; CHECK-NEXT: [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test8_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test8_vec_poison( +; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64> +; CHECK-NEXT: [[E:%.*]] = shl nuw <2 x i64> [[D]], +; CHECK-NEXT: [[G:%.*]] = or disjoint <2 x i64> [[E]], [[C]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = zext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = shl <2 x i128> %D, + %E = shl <2 x i128> %D, %F = or <2 x i128> %E, %C %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -388,18 +387,17 @@ define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test11_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test11_vec_undef( -; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = and <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = shl <2 x i128> [[C]], [[E]] -; CHECK-NEXT: [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test11_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test11_vec_poison( +; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], +; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> +; CHECK-NEXT: [[G:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = zext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = and <2 x i128> %D, + %E = and <2 x i128> %D, %F = shl <2 x i128> %C, %E %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -453,18 +451,17 @@ define <2 x i64> @test12_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test12_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test12_vec_undef( -; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = and <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = lshr <2 x i128> [[C]], [[E]] -; CHECK-NEXT: [[G:%.*]] = 
trunc nuw nsw <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test12_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test12_vec_poison( +; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], +; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> +; CHECK-NEXT: [[G:%.*]] = lshr <2 x i64> [[C]], [[E]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = zext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = and <2 x i128> %D, + %E = and <2 x i128> %D, %F = lshr <2 x i128> %C, %E %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -518,18 +515,17 @@ define <2 x i64> @test13_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test13_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test13_vec_undef( -; CHECK-NEXT: [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = and <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = ashr <2 x i128> [[C]], [[E]] -; CHECK-NEXT: [[G:%.*]] = trunc nsw <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test13_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test13_vec_poison( +; CHECK-NEXT: [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], +; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> +; CHECK-NEXT: [[G:%.*]] = ashr <2 x i64> [[C]], [[E]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = sext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = and <2 x i128> %D, + %E = and <2 x i128> %D, %F = ashr <2 x i128> %C, %E %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -766,13 +762,13 @@ define <2 x i32> @trunc_shl_v2i32_v2i64_uniform(<2 x i64> %val) { ret <2 x i32> %trunc } -define <2 x i32> @trunc_shl_v2i32_v2i64_undef(<2 x i64> %val) { -; CHECK-LABEL: @trunc_shl_v2i32_v2i64_undef( +define <2 x i32> @trunc_shl_v2i32_v2i64_poison(<2 x i64> %val) { +; CHECK-LABEL: @trunc_shl_v2i32_v2i64_poison( ; CHECK-NEXT: [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32> -; CHECK-NEXT: [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], +; CHECK-NEXT: [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], ; CHECK-NEXT: ret <2 x i32> [[TRUNC]] ; - %shl = shl <2 x i64> %val, + %shl = shl <2 x i64> %val, %trunc = trunc <2 x i64> %shl to <2 x i32> ret <2 x i32> %trunc } @@ -917,7 +913,7 @@ define <4 x i8> @wide_shuf(<4 x i32> %x) { ret <4 x i8> %trunc } -; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask +; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask define <4 x i8> @wide_splat1(<4 x i32> %x) { ; CHECK-LABEL: @wide_splat1( @@ -931,7 +927,7 @@ define <4 x i8> @wide_splat1(<4 x i32> %x) { } ; Test weird types. -; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask +; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask define <3 x i31> @wide_splat2(<3 x i33> %x) { ; CHECK-LABEL: @wide_splat2( @@ -945,8 +941,8 @@ define <3 x i31> @wide_splat2(<3 x i33> %x) { } ; FIXME: -; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask -; A mask with undef elements should still be considered a splat mask. +; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask +; A mask with poison elements should still be considered a splat mask. 
define <3 x i31> @wide_splat3(<3 x i33> %x) { ; CHECK-LABEL: @wide_splat3( @@ -954,7 +950,7 @@ define <3 x i31> @wide_splat3(<3 x i33> %x) { ; CHECK-NEXT: [[TRUNC:%.*]] = trunc <3 x i33> [[SHUF]] to <3 x i31> ; CHECK-NEXT: ret <3 x i31> [[TRUNC]] ; - %shuf = shufflevector <3 x i33> %x, <3 x i33> poison, <3 x i32> + %shuf = shufflevector <3 x i33> %x, <3 x i33> poison, <3 x i32> %trunc = trunc <3 x i33> %shuf to <3 x i31> ret <3 x i31> %trunc } diff --git a/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll b/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll index 2c5f428cf98de..c50a3d06d24b9 100644 --- a/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll +++ b/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll @@ -56,14 +56,14 @@ define <2 x i8> @trunc_lshr_trunc_nonuniform(<2 x i64> %a) { ret <2 x i8> %d } -define <2 x i8> @trunc_lshr_trunc_uniform_undef(<2 x i64> %a) { -; CHECK-LABEL: @trunc_lshr_trunc_uniform_undef( -; CHECK-NEXT: [[C1:%.*]] = lshr <2 x i64> [[A:%.*]], +define <2 x i8> @trunc_lshr_trunc_uniform_poison(<2 x i64> %a) { +; CHECK-LABEL: @trunc_lshr_trunc_uniform_poison( +; CHECK-NEXT: [[C1:%.*]] = lshr <2 x i64> [[A:%.*]], ; CHECK-NEXT: [[D:%.*]] = trunc <2 x i64> [[C1]] to <2 x i8> ; CHECK-NEXT: ret <2 x i8> [[D]] ; %b = trunc <2 x i64> %a to <2 x i32> - %c = lshr <2 x i32> %b, + %c = lshr <2 x i32> %b, %d = trunc <2 x i32> %c to <2 x i8> ret <2 x i8> %d } @@ -142,14 +142,14 @@ define <2 x i8> @trunc_ashr_trunc_nonuniform(<2 x i64> %a) { ret <2 x i8> %d } -define <2 x i8> @trunc_ashr_trunc_uniform_undef(<2 x i64> %a) { -; CHECK-LABEL: @trunc_ashr_trunc_uniform_undef( -; CHECK-NEXT: [[C1:%.*]] = ashr <2 x i64> [[A:%.*]], +define <2 x i8> @trunc_ashr_trunc_uniform_poison(<2 x i64> %a) { +; CHECK-LABEL: @trunc_ashr_trunc_uniform_poison( +; CHECK-NEXT: [[C1:%.*]] = ashr <2 x i64> [[A:%.*]], ; CHECK-NEXT: [[D:%.*]] = trunc <2 x i64> [[C1]] to <2 x i8> ; CHECK-NEXT: ret <2 x i8> [[D]] ; %b = trunc <2 x i64> %a to <2 x i32> - %c = ashr <2 x i32> %b, + %c = ashr <2 x i32> %b, %d = trunc <2 x i32> %c to <2 x i8> ret <2 x i8> %d } diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll index c77d7269f2cf7..e59b2bea6684c 100644 --- a/llvm/test/Transforms/InstCombine/trunc.ll +++ b/llvm/test/Transforms/InstCombine/trunc.ll @@ -49,15 +49,15 @@ define <2 x i64> @test1_vec_nonuniform(<2 x i64> %a) { ret <2 x i64> %d } -define <2 x i64> @test1_vec_undef(<2 x i64> %a) { -; CHECK-LABEL: @test1_vec_undef( +define <2 x i64> @test1_vec_poison(<2 x i64> %a) { +; CHECK-LABEL: @test1_vec_poison( ; CHECK-NEXT: [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32> -; CHECK-NEXT: [[D:%.*]] = and <2 x i64> [[A]], +; CHECK-NEXT: [[D:%.*]] = and <2 x i64> [[A]], ; CHECK-NEXT: call void @use_vec(<2 x i32> [[B]]) ; CHECK-NEXT: ret <2 x i64> [[D]] ; %b = trunc <2 x i64> %a to <2 x i32> - %c = and <2 x i32> %b, + %c = and <2 x i32> %b, %d = zext <2 x i32> %c to <2 x i64> call void @use_vec(<2 x i32> %b) ret <2 x i64> %d @@ -111,17 +111,17 @@ define <2 x i64> @test2_vec_nonuniform(<2 x i64> %a) { ret <2 x i64> %d } -define <2 x i64> @test2_vec_undef(<2 x i64> %a) { -; CHECK-LABEL: @test2_vec_undef( +define <2 x i64> @test2_vec_poison(<2 x i64> %a) { +; CHECK-LABEL: @test2_vec_poison( ; CHECK-NEXT: [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32> -; CHECK-NEXT: [[D1:%.*]] = shl <2 x i64> [[A]], -; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i64> [[D1]], +; CHECK-NEXT: [[D1:%.*]] = shl <2 x i64> [[A]], +; CHECK-NEXT: [[D:%.*]] = ashr exact <2 x i64> [[D1]], ; 
CHECK-NEXT: call void @use_vec(<2 x i32> [[B]]) ; CHECK-NEXT: ret <2 x i64> [[D]] ; %b = trunc <2 x i64> %a to <2 x i32> - %c = shl <2 x i32> %b, - %q = ashr <2 x i32> %c, + %c = shl <2 x i32> %b, + %q = ashr <2 x i32> %c, %d = sext <2 x i32> %q to <2 x i64> call void @use_vec(<2 x i32> %b) ret <2 x i64> %d @@ -300,18 +300,17 @@ define <2 x i64> @test8_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test8_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test8_vec_undef( -; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = shl <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i128> [[E]], [[C]] -; CHECK-NEXT: [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test8_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test8_vec_poison( +; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64> +; CHECK-NEXT: [[E:%.*]] = shl nuw <2 x i64> [[D]], +; CHECK-NEXT: [[G:%.*]] = or disjoint <2 x i64> [[E]], [[C]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = zext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = shl <2 x i128> %D, + %E = shl <2 x i128> %D, %F = or <2 x i128> %E, %C %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -388,18 +387,17 @@ define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test11_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test11_vec_undef( -; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = and <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = shl <2 x i128> [[C]], [[E]] -; CHECK-NEXT: [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test11_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test11_vec_poison( +; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], +; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> +; CHECK-NEXT: [[G:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = zext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = and <2 x i128> %D, + %E = and <2 x i128> %D, %F = shl <2 x i128> %C, %E %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -453,18 +451,17 @@ define <2 x i64> @test12_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test12_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test12_vec_undef( -; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = and <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = lshr <2 x i128> [[C]], [[E]] -; CHECK-NEXT: [[G:%.*]] = trunc nuw nsw <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test12_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test12_vec_poison( +; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], +; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> +; CHECK-NEXT: [[G:%.*]] = lshr <2 x i64> [[C]], [[E]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = zext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = and <2 x i128> %D, + %E = and <2 x i128> %D, %F = lshr <2 
x i128> %C, %E %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -518,18 +515,17 @@ define <2 x i64> @test13_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ret <2 x i64> %G } -define <2 x i64> @test13_vec_undef(<2 x i32> %A, <2 x i32> %B) { -; CHECK-LABEL: @test13_vec_undef( -; CHECK-NEXT: [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i128> -; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128> -; CHECK-NEXT: [[E:%.*]] = and <2 x i128> [[D]], -; CHECK-NEXT: [[F:%.*]] = ashr <2 x i128> [[C]], [[E]] -; CHECK-NEXT: [[G:%.*]] = trunc nsw <2 x i128> [[F]] to <2 x i64> +define <2 x i64> @test13_vec_poison(<2 x i32> %A, <2 x i32> %B) { +; CHECK-LABEL: @test13_vec_poison( +; CHECK-NEXT: [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], +; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> +; CHECK-NEXT: [[G:%.*]] = ashr <2 x i64> [[C]], [[E]] ; CHECK-NEXT: ret <2 x i64> [[G]] ; %C = sext <2 x i32> %A to <2 x i128> %D = zext <2 x i32> %B to <2 x i128> - %E = and <2 x i128> %D, + %E = and <2 x i128> %D, %F = ashr <2 x i128> %C, %E %G = trunc <2 x i128> %F to <2 x i64> ret <2 x i64> %G @@ -766,13 +762,13 @@ define <2 x i32> @trunc_shl_v2i32_v2i64_uniform(<2 x i64> %val) { ret <2 x i32> %trunc } -define <2 x i32> @trunc_shl_v2i32_v2i64_undef(<2 x i64> %val) { -; CHECK-LABEL: @trunc_shl_v2i32_v2i64_undef( +define <2 x i32> @trunc_shl_v2i32_v2i64_poison(<2 x i64> %val) { +; CHECK-LABEL: @trunc_shl_v2i32_v2i64_poison( ; CHECK-NEXT: [[VAL_TR:%.*]] = trunc <2 x i64> [[VAL:%.*]] to <2 x i32> -; CHECK-NEXT: [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], +; CHECK-NEXT: [[TRUNC:%.*]] = shl <2 x i32> [[VAL_TR]], ; CHECK-NEXT: ret <2 x i32> [[TRUNC]] ; - %shl = shl <2 x i64> %val, + %shl = shl <2 x i64> %val, %trunc = trunc <2 x i64> %shl to <2 x i32> ret <2 x i32> %trunc } @@ -917,7 +913,7 @@ define <4 x i8> @wide_shuf(<4 x i32> %x) { ret <4 x i8> %trunc } -; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask +; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask define <4 x i8> @wide_splat1(<4 x i32> %x) { ; CHECK-LABEL: @wide_splat1( @@ -925,13 +921,13 @@ define <4 x i8> @wide_splat1(<4 x i32> %x) { ; CHECK-NEXT: [[TRUNC:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <4 x i32> ; CHECK-NEXT: ret <4 x i8> [[TRUNC]] ; - %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> + %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> %trunc = trunc <4 x i32> %shuf to <4 x i8> ret <4 x i8> %trunc } ; Test weird types. -; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask +; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask define <3 x i31> @wide_splat2(<3 x i33> %x) { ; CHECK-LABEL: @wide_splat2( @@ -939,14 +935,14 @@ define <3 x i31> @wide_splat2(<3 x i33> %x) { ; CHECK-NEXT: [[TRUNC:%.*]] = shufflevector <3 x i31> [[TMP1]], <3 x i31> poison, <3 x i32> ; CHECK-NEXT: ret <3 x i31> [[TRUNC]] ; - %shuf = shufflevector <3 x i33> %x, <3 x i33> undef, <3 x i32> + %shuf = shufflevector <3 x i33> %x, <3 x i33> poison, <3 x i32> %trunc = trunc <3 x i33> %shuf to <3 x i31> ret <3 x i31> %trunc } ; FIXME: -; trunc (shuffle X, undef, SplatMask) --> shuffle (trunc X), undef, SplatMask -; A mask with undef elements should still be considered a splat mask. +; trunc (shuffle X, poison, SplatMask) --> shuffle (trunc X), poison, SplatMask +; A mask with poison elements should still be considered a splat mask. 
define <3 x i31> @wide_splat3(<3 x i33> %x) { ; CHECK-LABEL: @wide_splat3( @@ -954,7 +950,7 @@ define <3 x i31> @wide_splat3(<3 x i33> %x) { ; CHECK-NEXT: [[TRUNC:%.*]] = trunc <3 x i33> [[SHUF]] to <3 x i31> ; CHECK-NEXT: ret <3 x i31> [[TRUNC]] ; - %shuf = shufflevector <3 x i33> %x, <3 x i33> undef, <3 x i32> + %shuf = shufflevector <3 x i33> %x, <3 x i33> poison, <3 x i32> %trunc = trunc <3 x i33> %shuf to <3 x i31> ret <3 x i31> %trunc } diff --git a/llvm/test/Transforms/InstCombine/unsigned-mul-lack-of-overflow-check-via-udiv-of-allones.ll b/llvm/test/Transforms/InstCombine/unsigned-mul-lack-of-overflow-check-via-udiv-of-allones.ll index 1ffcfb4424e31..241d9cbcde338 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-mul-lack-of-overflow-check-via-udiv-of-allones.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-mul-lack-of-overflow-check-via-udiv-of-allones.ll @@ -30,14 +30,14 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %r } -define <3 x i1> @t2_vec_undef(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @t2_vec_undef( +define <3 x i1> @t2_vec_poison(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @t2_vec_poison( ; CHECK-NEXT: [[MUL:%.*]] = call { <3 x i8>, <3 x i1> } @llvm.umul.with.overflow.v3i8(<3 x i8> [[X:%.*]], <3 x i8> [[Y:%.*]]) ; CHECK-NEXT: [[MUL_OV:%.*]] = extractvalue { <3 x i8>, <3 x i1> } [[MUL]], 1 ; CHECK-NEXT: [[MUL_NOT_OV:%.*]] = xor <3 x i1> [[MUL_OV]], ; CHECK-NEXT: ret <3 x i1> [[MUL_NOT_OV]] ; - %t0 = udiv <3 x i8> , %x + %t0 = udiv <3 x i8> , %x %r = icmp uge <3 x i8> %t0, %y ret <3 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/unsigned-mul-overflow-check-via-udiv-of-allones.ll b/llvm/test/Transforms/InstCombine/unsigned-mul-overflow-check-via-udiv-of-allones.ll index 710a09f6e16a1..7eb08bdd6016c 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-mul-overflow-check-via-udiv-of-allones.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-mul-overflow-check-via-udiv-of-allones.ll @@ -28,13 +28,13 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { ret <2 x i1> %r } -define <3 x i1> @t2_vec_undef(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @t2_vec_undef( +define <3 x i1> @t2_vec_poison(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @t2_vec_poison( ; CHECK-NEXT: [[MUL:%.*]] = call { <3 x i8>, <3 x i1> } @llvm.umul.with.overflow.v3i8(<3 x i8> [[X:%.*]], <3 x i8> [[Y:%.*]]) ; CHECK-NEXT: [[MUL_OV:%.*]] = extractvalue { <3 x i8>, <3 x i1> } [[MUL]], 1 ; CHECK-NEXT: ret <3 x i1> [[MUL_OV]] ; - %t0 = udiv <3 x i8> , %x + %t0 = udiv <3 x i8> , %x %r = icmp ult <3 x i8> %t0, %y ret <3 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll b/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll index adacf3ce99b2f..262942aa1219b 100644 --- a/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll +++ b/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll @@ -203,20 +203,20 @@ define <2 x i32> @t4_vec(<2 x i64> %data, <2 x i32> %nbits) { ret <2 x i32> %signextended } -define <3 x i32> @t5_vec_undef(<3 x i64> %data, <3 x i32> %nbits) { -; CHECK-LABEL: @t5_vec_undef( -; CHECK-NEXT: [[SKIP_HIGH:%.*]] = sub <3 x i32> , [[NBITS:%.*]] +define <3 x i32> @t5_vec_poison(<3 x i64> %data, <3 x i32> %nbits) { +; CHECK-LABEL: @t5_vec_poison( +; CHECK-NEXT: [[SKIP_HIGH:%.*]] = sub <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[SKIP_HIGH_WIDE:%.*]] = zext nneg <3 x i32> [[SKIP_HIGH]] to <3 x i64> ; CHECK-NEXT: 
[[TMP1:%.*]] = ashr <3 x i64> [[DATA:%.*]], [[SKIP_HIGH_WIDE]] ; CHECK-NEXT: [[SIGNEXTENDED:%.*]] = trunc <3 x i64> [[TMP1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[SIGNEXTENDED]] ; - %skip_high = sub <3 x i32> , %nbits + %skip_high = sub <3 x i32> , %nbits %skip_high_wide = zext <3 x i32> %skip_high to <3 x i64> %extracted = lshr <3 x i64> %data, %skip_high_wide %extracted_narrow = trunc <3 x i64> %extracted to <3 x i32> - %num_high_bits_to_smear_narrow0 = sub <3 x i32> , %nbits - %num_high_bits_to_smear_narrow1 = sub <3 x i32> , %nbits + %num_high_bits_to_smear_narrow0 = sub <3 x i32> , %nbits + %num_high_bits_to_smear_narrow1 = sub <3 x i32> , %nbits %signbit_positioned = shl <3 x i32> %extracted_narrow, %num_high_bits_to_smear_narrow0 %signextended = ashr <3 x i32> %signbit_positioned, %num_high_bits_to_smear_narrow1 ret <3 x i32> %signextended diff --git a/llvm/test/Transforms/InstCombine/vec_sext.ll b/llvm/test/Transforms/InstCombine/vec_sext.ll index a880d5e562725..9f5f957f49445 100644 --- a/llvm/test/Transforms/InstCombine/vec_sext.ll +++ b/llvm/test/Transforms/InstCombine/vec_sext.ll @@ -42,24 +42,24 @@ define <4 x i32> @vec_select_alternate_sign_bit_test(<4 x i32> %a, <4 x i32> %b) ret <4 x i32> %cond } -define <2 x i32> @is_negative_undef_elt(<2 x i32> %a) { -; CHECK-LABEL: @is_negative_undef_elt( +define <2 x i32> @is_negative_poison_elt(<2 x i32> %a) { +; CHECK-LABEL: @is_negative_poison_elt( ; CHECK-NEXT: [[A_LOBIT:%.*]] = ashr <2 x i32> [[A:%.*]], ; CHECK-NEXT: ret <2 x i32> [[A_LOBIT]] ; - %cmp = icmp slt <2 x i32> %a, + %cmp = icmp slt <2 x i32> %a, %sext = sext <2 x i1> %cmp to <2 x i32> ret <2 x i32> %sext } -define <2 x i32> @is_positive_undef_elt(<2 x i32> %a) { -; CHECK-LABEL: @is_positive_undef_elt( -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i32> [[A:%.*]], +define <2 x i32> @is_positive_poison_elt(<2 x i32> %a) { +; CHECK-LABEL: @is_positive_poison_elt( +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i32> [[A:%.*]], ; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[CMP]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[SEXT]] ; - %cmp = icmp sgt <2 x i32> %a, + %cmp = icmp sgt <2 x i32> %a, %sext = sext <2 x i1> %cmp to <2 x i32> ret <2 x i32> %sext } diff --git a/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll index cf1b72fbcf3e1..a87364600ba30 100644 --- a/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll @@ -26,26 +26,26 @@ define <2 x i1> @and_cmp_is_trunc(<2 x i64> %a) { ; This is trunc. -define <2 x i1> @and_cmp_is_trunc_even_with_undef_elt(<2 x i64> %a) { -; CHECK-LABEL: @and_cmp_is_trunc_even_with_undef_elt( +define <2 x i1> @and_cmp_is_trunc_even_with_poison_elt(<2 x i64> %a) { +; CHECK-LABEL: @and_cmp_is_trunc_even_with_poison_elt( ; CHECK-NEXT: [[R:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i1> ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %t = and <2 x i64> %a, + %t = and <2 x i64> %a, %r = icmp ne <2 x i64> %t, zeroinitializer ret <2 x i1> %r } -; TODO: This could be just 1 instruction (trunc), but our undef matching is incomplete. +; TODO: This could be just 1 instruction (trunc), but our poison matching is incomplete. 
-define <2 x i1> @and_cmp_is_trunc_even_with_undef_elts(<2 x i64> %a) { -; CHECK-LABEL: @and_cmp_is_trunc_even_with_undef_elts( -; CHECK-NEXT: [[T:%.*]] = and <2 x i64> [[A:%.*]], -; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i64> [[T]], +define <2 x i1> @and_cmp_is_trunc_even_with_poison_elts(<2 x i64> %a) { +; CHECK-LABEL: @and_cmp_is_trunc_even_with_poison_elts( +; CHECK-NEXT: [[T:%.*]] = and <2 x i64> [[A:%.*]], +; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i64> [[T]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %t = and <2 x i64> %a, - %r = icmp ne <2 x i64> %t, + %t = and <2 x i64> %a, + %r = icmp ne <2 x i64> %t, ret <2 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/vector-casts.ll b/llvm/test/Transforms/InstCombine/vector-casts.ll index 281fc5f6011ea..fd2a4ffdfb709 100644 --- a/llvm/test/Transforms/InstCombine/vector-casts.ll +++ b/llvm/test/Transforms/InstCombine/vector-casts.ll @@ -26,26 +26,26 @@ define <2 x i1> @and_cmp_is_trunc(<2 x i64> %a) { ; This is trunc. -define <2 x i1> @and_cmp_is_trunc_even_with_undef_elt(<2 x i64> %a) { -; CHECK-LABEL: @and_cmp_is_trunc_even_with_undef_elt( +define <2 x i1> @and_cmp_is_trunc_even_with_poison_elt(<2 x i64> %a) { +; CHECK-LABEL: @and_cmp_is_trunc_even_with_poison_elt( ; CHECK-NEXT: [[R:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i1> ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %t = and <2 x i64> %a, + %t = and <2 x i64> %a, %r = icmp ne <2 x i64> %t, zeroinitializer ret <2 x i1> %r } -; TODO: This could be just 1 instruction (trunc), but our undef matching is incomplete. +; TODO: This could be just 1 instruction (trunc), but our poison matching is incomplete. -define <2 x i1> @and_cmp_is_trunc_even_with_undef_elts(<2 x i64> %a) { -; CHECK-LABEL: @and_cmp_is_trunc_even_with_undef_elts( -; CHECK-NEXT: [[T:%.*]] = and <2 x i64> [[A:%.*]], -; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i64> [[T]], +define <2 x i1> @and_cmp_is_trunc_even_with_poison_elts(<2 x i64> %a) { +; CHECK-LABEL: @and_cmp_is_trunc_even_with_poison_elts( +; CHECK-NEXT: [[T:%.*]] = and <2 x i64> [[A:%.*]], +; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i64> [[T]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; - %t = and <2 x i64> %a, - %r = icmp ne <2 x i64> %t, + %t = and <2 x i64> %a, + %r = icmp ne <2 x i64> %t, ret <2 x i1> %r } diff --git a/llvm/test/Transforms/InstCombine/vector-urem.ll b/llvm/test/Transforms/InstCombine/vector-urem.ll index d5c77470a20f8..627789a03ef6c 100644 --- a/llvm/test/Transforms/InstCombine/vector-urem.ll +++ b/llvm/test/Transforms/InstCombine/vector-urem.ll @@ -19,11 +19,11 @@ define <4 x i32> @test_v4i32_const_pow2(<4 x i32> %a0) { ret <4 x i32> %1 } -define <4 x i32> @test_v4i32_const_pow2_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_const_pow2_undef( +define <4 x i32> @test_v4i32_const_pow2_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_const_pow2_poison( ; CHECK-NEXT: ret <4 x i32> poison ; - %1 = urem <4 x i32> %a0, + %1 = urem <4 x i32> %a0, ret <4 x i32> %1 } @@ -37,13 +37,13 @@ define <4 x i32> @test_v4i32_one(<4 x i32> %a0) { ret <4 x i32> %1 } -define <4 x i32> @test_v4i32_one_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_one_undef( +define <4 x i32> @test_v4i32_one_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_one_poison( ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i32> [[A0:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; - %1 = urem <4 x i32> , %a0 + %1 = urem <4 x i32> , %a0 ret <4 x i32> %1 } @@ -71,10 +71,10 @@ define <4 x i32> @test_v4i32_negconst(<4 x i32> %a0) { ret <4 x i32> %1 } 
-define <4 x i32> @test_v4i32_negconst_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_negconst_undef( +define <4 x i32> @test_v4i32_negconst_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_negconst_poison( ; CHECK-NEXT: ret <4 x i32> poison ; - %1 = urem <4 x i32> %a0, + %1 = urem <4 x i32> %a0, ret <4 x i32> %1 } diff --git a/llvm/test/Transforms/InstCombine/vector-xor.ll b/llvm/test/Transforms/InstCombine/vector-xor.ll index 171dd6e35b4e1..ee593b5d15e8e 100644 --- a/llvm/test/Transforms/InstCombine/vector-xor.ll +++ b/llvm/test/Transforms/InstCombine/vector-xor.ll @@ -53,14 +53,14 @@ define <4 x i32> @test_v4i32_xor_bswap_const(<4 x i32> %a0) { ret <4 x i32> %2 } -define <4 x i32> @test_v4i32_xor_bswap_const_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_xor_bswap_const_undef( +define <4 x i32> @test_v4i32_xor_bswap_const_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_xor_bswap_const_poison( ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[A0:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], +; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], ; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; %1 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a0) - %2 = xor <4 x i32> %1, + %2 = xor <4 x i32> %1, ret <4 x i32> %2 } @@ -105,14 +105,14 @@ define <4 x i32> @test_v4i32_not_ashr_not(<4 x i32> %x, <4 x i32> %y) { ret <4 x i32> %3 } -define <4 x i32> @test_v4i32_not_ashr_not_undef(<4 x i32> %x, <4 x i32> %y) { -; CHECK-LABEL: @test_v4i32_not_ashr_not_undef( +define <4 x i32> @test_v4i32_not_ashr_not_poison(<4 x i32> %x, <4 x i32> %y) { +; CHECK-LABEL: @test_v4i32_not_ashr_not_poison( ; CHECK-NEXT: [[DOTNOT:%.*]] = ashr <4 x i32> [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret <4 x i32> [[DOTNOT]] ; - %1 = xor <4 x i32> , %x + %1 = xor <4 x i32> , %x %2 = ashr <4 x i32> %1, %y - %3 = xor <4 x i32> , %2 + %3 = xor <4 x i32> , %2 ret <4 x i32> %3 } @@ -138,13 +138,13 @@ define <4 x i32> @test_v4i32_not_ashr_negative_const(<4 x i32> %a0) { ret <4 x i32> %2 } -define <4 x i32> @test_v4i32_not_ashr_negative_const_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_not_ashr_negative_const_undef( +define <4 x i32> @test_v4i32_not_ashr_negative_const_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_not_ashr_negative_const_poison( ; CHECK-NEXT: [[TMP1:%.*]] = lshr <4 x i32> , [[A0:%.*]] ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = ashr <4 x i32> , %a0 - %2 = xor <4 x i32> , %1 + %1 = ashr <4 x i32> , %a0 + %2 = xor <4 x i32> , %1 ret <4 x i32> %2 } @@ -170,13 +170,13 @@ define <4 x i32> @test_v4i32_not_lshr_nonnegative_const(<4 x i32> %a0) { ret <4 x i32> %2 } -define <4 x i32> @test_v4i32_not_lshr_nonnegative_const_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_not_lshr_nonnegative_const_undef( +define <4 x i32> @test_v4i32_not_lshr_nonnegative_const_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_not_lshr_nonnegative_const_poison( ; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> , [[A0:%.*]] ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = lshr <4 x i32> , %a0 - %2 = xor <4 x i32> , %1 + %1 = lshr <4 x i32> , %a0 + %2 = xor <4 x i32> , %1 ret <4 x i32> %2 } @@ -202,13 +202,13 @@ define <4 x i32> @test_v4i32_not_sub_const(<4 x i32> %a0) { ret <4 x i32> %2 } -define <4 x i32> @test_v4i32_not_sub_const_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_not_sub_const_undef( -; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], +define <4 x i32> @test_v4i32_not_sub_const_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_not_sub_const_poison( +; CHECK-NEXT: [[TMP1:%.*]] 
= add <4 x i32> [[A0:%.*]], ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = sub <4 x i32> , %a0 - %2 = xor <4 x i32> , %1 + %1 = sub <4 x i32> , %a0 + %2 = xor <4 x i32> , %1 ret <4 x i32> %2 } @@ -235,14 +235,14 @@ define <4 x i32> @test_v4i32_xor_signmask_sub_const(<4 x i32> %a0) { ret <4 x i32> %2 } -define <4 x i32> @test_v4i32_xor_signmask_sub_const_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_xor_signmask_sub_const_undef( -; CHECK-NEXT: [[TMP1:%.*]] = sub <4 x i32> , [[A0:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], +define <4 x i32> @test_v4i32_xor_signmask_sub_const_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_xor_signmask_sub_const_poison( +; CHECK-NEXT: [[TMP1:%.*]] = sub <4 x i32> , [[A0:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], ; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; - %1 = sub <4 x i32> , %a0 - %2 = xor <4 x i32> , %1 + %1 = sub <4 x i32> , %a0 + %2 = xor <4 x i32> , %1 ret <4 x i32> %2 } @@ -269,13 +269,13 @@ define <4 x i32> @test_v4i32_xor_signmask_add_const(<4 x i32> %a0) { ret <4 x i32> %2 } -define <4 x i32> @test_v4i32_xor_signmask_add_const_undef(<4 x i32> %a0) { -; CHECK-LABEL: @test_v4i32_xor_signmask_add_const_undef( -; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], +define <4 x i32> @test_v4i32_xor_signmask_add_const_poison(<4 x i32> %a0) { +; CHECK-LABEL: @test_v4i32_xor_signmask_add_const_poison( +; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[A0:%.*]], +; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], ; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; - %1 = add <4 x i32> , %a0 - %2 = xor <4 x i32> , %1 + %1 = add <4 x i32> , %a0 + %2 = xor <4 x i32> , %1 ret <4 x i32> %2 } diff --git a/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll b/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll index 7fed952a7ff7e..12739b5686a0a 100644 --- a/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll +++ b/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll @@ -126,13 +126,13 @@ define <2 x i64> @zext_negate_vec(<2 x i1> %A) { ret <2 x i64> %sub } -define <2 x i64> @zext_negate_vec_undef_elt(<2 x i1> %A) { -; CHECK-LABEL: @zext_negate_vec_undef_elt( +define <2 x i64> @zext_negate_vec_poison_elt(<2 x i1> %A) { +; CHECK-LABEL: @zext_negate_vec_poison_elt( ; CHECK-NEXT: [[EXT_NEG:%.*]] = sext <2 x i1> [[A:%.*]] to <2 x i64> ; CHECK-NEXT: ret <2 x i64> [[EXT_NEG]] ; %ext = zext <2 x i1> %A to <2 x i64> - %sub = sub <2 x i64> , %ext + %sub = sub <2 x i64> , %ext ret <2 x i64> %sub } @@ -169,13 +169,13 @@ define <2 x i64> @zext_sub_const_vec(<2 x i1> %A) { ret <2 x i64> %sub } -define <2 x i64> @zext_sub_const_vec_undef_elt(<2 x i1> %A) { -; CHECK-LABEL: @zext_sub_const_vec_undef_elt( -; CHECK-NEXT: [[SUB:%.*]] = select <2 x i1> [[A:%.*]], <2 x i64> , <2 x i64> +define <2 x i64> @zext_sub_const_vec_poison_elt(<2 x i1> %A) { +; CHECK-LABEL: @zext_sub_const_vec_poison_elt( +; CHECK-NEXT: [[SUB:%.*]] = select <2 x i1> [[A:%.*]], <2 x i64> , <2 x i64> ; CHECK-NEXT: ret <2 x i64> [[SUB]] ; %ext = zext <2 x i1> %A to <2 x i64> - %sub = sub <2 x i64> , %ext + %sub = sub <2 x i64> , %ext ret <2 x i64> %sub } @@ -212,13 +212,13 @@ define <2 x i64> @sext_negate_vec(<2 x i1> %A) { ret <2 x i64> %sub } -define <2 x i64> @sext_negate_vec_undef_elt(<2 x i1> %A) { -; CHECK-LABEL: @sext_negate_vec_undef_elt( +define <2 x i64> @sext_negate_vec_poison_elt(<2 x i1> %A) { +; CHECK-LABEL: @sext_negate_vec_poison_elt( ; CHECK-NEXT: [[EXT_NEG:%.*]] = zext <2 x i1> [[A:%.*]] to <2 x i64> ; CHECK-NEXT: 
ret <2 x i64> [[EXT_NEG]] ; %ext = sext <2 x i1> %A to <2 x i64> - %sub = sub <2 x i64> , %ext + %sub = sub <2 x i64> , %ext ret <2 x i64> %sub } @@ -255,13 +255,13 @@ define <2 x i64> @sext_sub_const_vec(<2 x i1> %A) { ret <2 x i64> %sub } -define <2 x i64> @sext_sub_const_vec_undef_elt(<2 x i1> %A) { -; CHECK-LABEL: @sext_sub_const_vec_undef_elt( -; CHECK-NEXT: [[SUB:%.*]] = select <2 x i1> [[A:%.*]], <2 x i64> , <2 x i64> +define <2 x i64> @sext_sub_const_vec_poison_elt(<2 x i1> %A) { +; CHECK-LABEL: @sext_sub_const_vec_poison_elt( +; CHECK-NEXT: [[SUB:%.*]] = select <2 x i1> [[A:%.*]], <2 x i64> , <2 x i64> ; CHECK-NEXT: ret <2 x i64> [[SUB]] ; %ext = sext <2 x i1> %A to <2 x i64> - %sub = sub <2 x i64> , %ext + %sub = sub <2 x i64> , %ext ret <2 x i64> %sub } diff --git a/llvm/test/Transforms/InstSimplify/AndOrXor.ll b/llvm/test/Transforms/InstSimplify/AndOrXor.ll index 494b6bcd2b66d..2e3a605224203 100644 --- a/llvm/test/Transforms/InstSimplify/AndOrXor.ll +++ b/llvm/test/Transforms/InstSimplify/AndOrXor.ll @@ -12,11 +12,11 @@ define i8 @and0(i8 %x) { ret i8 %r } -define <2 x i8> @and0_vec_undef_elt(<2 x i8> %x) { -; CHECK-LABEL: @and0_vec_undef_elt( +define <2 x i8> @and0_vec_poison_elt(<2 x i8> %x) { +; CHECK-LABEL: @and0_vec_poison_elt( ; CHECK-NEXT: ret <2 x i8> zeroinitializer ; - %r = and <2 x i8> %x, + %r = and <2 x i8> %x, ret <2 x i8> %r } @@ -31,14 +31,14 @@ define <2 x i32> @add_nsw_signbit(<2 x i32> %x) { ret <2 x i32> %z } -; Undef elements in either constant vector are ok. +; Poison elements in either constant vector are ok. -define <2 x i32> @add_nsw_signbit_undef(<2 x i32> %x) { -; CHECK-LABEL: @add_nsw_signbit_undef( +define <2 x i32> @add_nsw_signbit_poison(<2 x i32> %x) { +; CHECK-LABEL: @add_nsw_signbit_poison( ; CHECK-NEXT: ret <2 x i32> [[X:%.*]] ; - %y = xor <2 x i32> %x, - %z = add nsw <2 x i32> %y, + %y = xor <2 x i32> %x, + %z = add nsw <2 x i32> %y, ret <2 x i32> %z } @@ -53,14 +53,14 @@ define <2 x i5> @add_nuw_signbit(<2 x i5> %x) { ret <2 x i5> %z } -; Undef elements in either constant vector are ok. +; Poison elements in either constant vector are ok. -define <2 x i5> @add_nuw_signbit_undef(<2 x i5> %x) { -; CHECK-LABEL: @add_nuw_signbit_undef( +define <2 x i5> @add_nuw_signbit_poison(<2 x i5> %x) { +; CHECK-LABEL: @add_nuw_signbit_poison( ; CHECK-NEXT: ret <2 x i5> [[X:%.*]] ; - %y = xor <2 x i5> %x, - %z = add nuw <2 x i5> %y, + %y = xor <2 x i5> %x, + %z = add nuw <2 x i5> %y, ret <2 x i5> %z } @@ -584,7 +584,7 @@ define <2 x i32> @or_xor_andn_commute2(<2 x i32> %a, <2 x i32> %b) { ; CHECK-NEXT: ret <2 x i32> [[XOR]] ; %xor = xor <2 x i32> %a, %b - %neg = xor <2 x i32> %b, + %neg = xor <2 x i32> %b, %and = and <2 x i32> %a, %neg %or = or <2 x i32> %xor, %and ret <2 x i32> %or @@ -708,15 +708,13 @@ define <2 x i32> @or_xorn_and_commute2_undef(<2 x i32> %a, <2 x i32> %b) { ret <2 x i32> %or } -; TODO: Unlike the above test, this is safe to fold. +; Unlike the above test, this is safe to fold. 
define <2 x i32> @or_xorn_and_commute2_poison(<2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: @or_xorn_and_commute2_poison( ; CHECK-NEXT: [[NEGA:%.*]] = xor <2 x i32> [[A:%.*]], -; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[B:%.*]], [[A]] -; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[B]], [[NEGA]] -; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[XOR]], [[AND]] -; CHECK-NEXT: ret <2 x i32> [[OR]] +; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i32> [[B:%.*]], [[NEGA]] +; CHECK-NEXT: ret <2 x i32> [[XOR]] ; %nega = xor <2 x i32> %a, %and = and <2 x i32> %b, %a diff --git a/llvm/test/Transforms/InstSimplify/call.ll b/llvm/test/Transforms/InstSimplify/call.ll index 52c207a276046..c6f6b65f89dc2 100644 --- a/llvm/test/Transforms/InstSimplify/call.ll +++ b/llvm/test/Transforms/InstSimplify/call.ll @@ -976,7 +976,7 @@ define <2 x i8> @fshr_zero_vec(<2 x i8> %shamt) { ; CHECK-LABEL: @fshr_zero_vec( ; CHECK-NEXT: ret <2 x i8> zeroinitializer ; - %r = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> zeroinitializer, <2 x i8> , <2 x i8> %shamt) + %r = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> zeroinitializer, <2 x i8> , <2 x i8> %shamt) ret <2 x i8> %r } @@ -984,7 +984,7 @@ define <2 x i7> @fshl_ones_vec(<2 x i7> %shamt) { ; CHECK-LABEL: @fshl_ones_vec( ; CHECK-NEXT: ret <2 x i7> ; - %r = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> , <2 x i7> , <2 x i7> %shamt) + %r = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> , <2 x i7> , <2 x i7> %shamt) ret <2 x i7> %r } @@ -1466,7 +1466,7 @@ define <3 x i33> @cttz_shl1_vec(<3 x i33> %x) { ; CHECK-LABEL: @cttz_shl1_vec( ; CHECK-NEXT: ret <3 x i33> [[X:%.*]] ; - %s = shl <3 x i33> , %x + %s = shl <3 x i33> , %x %r = call <3 x i33> @llvm.cttz.v3i33(<3 x i33> %s, i1 false) ret <3 x i33> %r } @@ -1509,7 +1509,7 @@ define <3 x i33> @ctlz_lshr_sign_bit_vec(<3 x i33> %x) { ; CHECK-LABEL: @ctlz_lshr_sign_bit_vec( ; CHECK-NEXT: ret <3 x i33> [[X:%.*]] ; - %s = lshr <3 x i33> , %x + %s = lshr <3 x i33> , %x %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 false) ret <3 x i33> %r } @@ -1549,7 +1549,7 @@ define <3 x i33> @ctlz_ashr_sign_bit_vec(<3 x i33> %x) { ; CHECK-LABEL: @ctlz_ashr_sign_bit_vec( ; CHECK-NEXT: ret <3 x i33> zeroinitializer ; - %s = ashr <3 x i33> , %x + %s = ashr <3 x i33> , %x %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 true) ret <3 x i33> %r } diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll index 1e90f0edbd800..724912d90bd86 100644 --- a/llvm/test/Transforms/InstSimplify/compare.ll +++ b/llvm/test/Transforms/InstSimplify/compare.ll @@ -1659,21 +1659,21 @@ define <2 x i1> @icmp_shl_1_ugt_signmask(<2 x i8> %V) { ret <2 x i1> %cmp } -define <2 x i1> @icmp_shl_1_ugt_signmask_undef(<2 x i8> %V) { -; CHECK-LABEL: @icmp_shl_1_ugt_signmask_undef( +define <2 x i1> @icmp_shl_1_ugt_signmask_poison(<2 x i8> %V) { +; CHECK-LABEL: @icmp_shl_1_ugt_signmask_poison( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %shl = shl <2 x i8> , %V - %cmp = icmp ugt <2 x i8> %shl, + %cmp = icmp ugt <2 x i8> %shl, ret <2 x i1> %cmp } -define <2 x i1> @icmp_shl_1_ugt_signmask_undef2(<2 x i8> %V) { -; CHECK-LABEL: @icmp_shl_1_ugt_signmask_undef2( +define <2 x i1> @icmp_shl_1_ugt_signmask_poison2(<2 x i8> %V) { +; CHECK-LABEL: @icmp_shl_1_ugt_signmask_poison2( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %shl = shl <2 x i8> , %V - %cmp = icmp ugt <2 x i8> %shl, + %shl = shl <2 x i8> , %V + %cmp = icmp ugt <2 x i8> %shl, ret <2 x i1> %cmp } @@ -1695,21 +1695,21 @@ define <2 x i1> @icmp_shl_1_ule_signmask(<2 x i8> %V) { ret <2 x i1> %cmp } -define <2 x i1> 
@icmp_shl_1_ule_signmask_undef(<2 x i8> %V) { -; CHECK-LABEL: @icmp_shl_1_ule_signmask_undef( +define <2 x i1> @icmp_shl_1_ule_signmask_poison(<2 x i8> %V) { +; CHECK-LABEL: @icmp_shl_1_ule_signmask_poison( ; CHECK-NEXT: ret <2 x i1> ; %shl = shl <2 x i8> , %V - %cmp = icmp ule <2 x i8> %shl, + %cmp = icmp ule <2 x i8> %shl, ret <2 x i1> %cmp } -define <2 x i1> @icmp_shl_1_ule_signmask_undef2(<2 x i8> %V) { -; CHECK-LABEL: @icmp_shl_1_ule_signmask_undef2( +define <2 x i1> @icmp_shl_1_ule_signmask_poison2(<2 x i8> %V) { +; CHECK-LABEL: @icmp_shl_1_ule_signmask_poison2( ; CHECK-NEXT: ret <2 x i1> ; - %shl = shl <2 x i8> , %V - %cmp = icmp ule <2 x i8> %shl, + %shl = shl <2 x i8> , %V + %cmp = icmp ule <2 x i8> %shl, ret <2 x i1> %cmp } @@ -1731,12 +1731,12 @@ define <2 x i1> @shl_1_cmp_eq_nonpow2_splat(<2 x i32> %x) { ret <2 x i1> %c } -define <2 x i1> @shl_1_cmp_eq_nonpow2_splat_undef(<2 x i32> %x) { -; CHECK-LABEL: @shl_1_cmp_eq_nonpow2_splat_undef( +define <2 x i1> @shl_1_cmp_eq_nonpow2_splat_poison(<2 x i32> %x) { +; CHECK-LABEL: @shl_1_cmp_eq_nonpow2_splat_poison( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %s = shl <2 x i32> , %x - %c = icmp eq <2 x i32> %s, + %c = icmp eq <2 x i32> %s, ret <2 x i1> %c } @@ -1758,12 +1758,12 @@ define <2 x i1> @shl_1_cmp_ne_nonpow2_splat(<2 x i32> %x) { ret <2 x i1> %c } -define <2 x i1> @shl_1_cmp_ne_nonpow2_splat_undef(<2 x i32> %x) { -; CHECK-LABEL: @shl_1_cmp_ne_nonpow2_splat_undef( +define <2 x i1> @shl_1_cmp_ne_nonpow2_splat_poison(<2 x i32> %x) { +; CHECK-LABEL: @shl_1_cmp_ne_nonpow2_splat_poison( ; CHECK-NEXT: ret <2 x i1> ; - %s = shl <2 x i32> , %x - %c = icmp ne <2 x i32> %s, + %s = shl <2 x i32> , %x + %c = icmp ne <2 x i32> %s, ret <2 x i1> %c } @@ -1776,12 +1776,12 @@ define i1 @shl_pow2_cmp_eq_nonpow2(i32 %x) { ret i1 %c } -define <2 x i1> @shl_pow21_cmp_ne_nonpow2_splat_undef(<2 x i32> %x) { -; CHECK-LABEL: @shl_pow21_cmp_ne_nonpow2_splat_undef( +define <2 x i1> @shl_pow21_cmp_ne_nonpow2_splat_poison(<2 x i32> %x) { +; CHECK-LABEL: @shl_pow21_cmp_ne_nonpow2_splat_poison( ; CHECK-NEXT: ret <2 x i1> ; - %s = shl <2 x i32> , %x - %c = icmp ne <2 x i32> %s, + %s = shl <2 x i32> , %x + %c = icmp ne <2 x i32> %s, ret <2 x i1> %c } @@ -1820,12 +1820,12 @@ define i1 @shl_pow2_cmp_eq_zero_nuw(i32 %x) { ret i1 %c } -define <2 x i1> @shl_pow2_cmp_ne_zero_nuw_splat_undef(<2 x i32> %x) { -; CHECK-LABEL: @shl_pow2_cmp_ne_zero_nuw_splat_undef( +define <2 x i1> @shl_pow2_cmp_ne_zero_nuw_splat_poison(<2 x i32> %x) { +; CHECK-LABEL: @shl_pow2_cmp_ne_zero_nuw_splat_poison( ; CHECK-NEXT: ret <2 x i1> ; - %s = shl nuw <2 x i32> , %x - %c = icmp ne <2 x i32> %s, + %s = shl nuw <2 x i32> , %x + %c = icmp ne <2 x i32> %s, ret <2 x i1> %c } @@ -1838,12 +1838,12 @@ define i1 @shl_pow2_cmp_ne_zero_nsw(i32 %x) { ret i1 %c } -define <2 x i1> @shl_pow2_cmp_eq_zero_nsw_splat_undef(<2 x i32> %x) { -; CHECK-LABEL: @shl_pow2_cmp_eq_zero_nsw_splat_undef( +define <2 x i1> @shl_pow2_cmp_eq_zero_nsw_splat_poison(<2 x i32> %x) { +; CHECK-LABEL: @shl_pow2_cmp_eq_zero_nsw_splat_poison( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %s = shl nsw <2 x i32> , %x - %c = icmp eq <2 x i32> %s, + %s = shl nsw <2 x i32> , %x + %c = icmp eq <2 x i32> %s, ret <2 x i1> %c } diff --git a/llvm/test/Transforms/InstSimplify/constantfold-add-nuw-allones-to-allones.ll b/llvm/test/Transforms/InstSimplify/constantfold-add-nuw-allones-to-allones.ll index 7c9d9a9e2c7ce..92d6cc30d6248 100644 --- a/llvm/test/Transforms/InstSimplify/constantfold-add-nuw-allones-to-allones.ll +++ 
b/llvm/test/Transforms/InstSimplify/constantfold-add-nuw-allones-to-allones.ll
@@ -63,11 +63,11 @@ define <2 x i8> @add_vec(<2 x i8> %x) {
   ret <2 x i8> %ret
 }

-define <3 x i8> @add_vec_undef(<3 x i8> %x) {
-; CHECK-LABEL: @add_vec_undef(
-; CHECK-NEXT:    ret <3 x i8>
+define <3 x i8> @add_vec_poison(<3 x i8> %x) {
+; CHECK-LABEL: @add_vec_poison(
+; CHECK-NEXT:    ret <3 x i8>
 ;
-  %ret = add nuw <3 x i8> %x,
+  %ret = add nuw <3 x i8> %x,
   ret <3 x i8> %ret
 }

diff --git a/llvm/test/Transforms/InstSimplify/constantfold-shl-nuw-C-to-C.ll b/llvm/test/Transforms/InstSimplify/constantfold-shl-nuw-C-to-C.ll
index b5b5773fee538..3f4a08807a4b4 100644
--- a/llvm/test/Transforms/InstSimplify/constantfold-shl-nuw-C-to-C.ll
+++ b/llvm/test/Transforms/InstSimplify/constantfold-shl-nuw-C-to-C.ll
@@ -78,11 +78,11 @@ define <2 x i8> @shl_vec(<2 x i8> %x) {
   ret <2 x i8> %ret
 }

-define <3 x i8> @shl_vec_undef(<3 x i8> %x) {
-; CHECK-LABEL: @shl_vec_undef(
-; CHECK-NEXT:    ret <3 x i8>
+define <3 x i8> @shl_vec_poison(<3 x i8> %x) {
+; CHECK-LABEL: @shl_vec_poison(
+; CHECK-NEXT:    ret <3 x i8>
 ;
-  %ret = shl nuw <3 x i8> , %x
+  %ret = shl nuw <3 x i8> , %x
   ret <3 x i8> %ret
 }

diff --git a/llvm/test/Transforms/InstSimplify/div.ll b/llvm/test/Transforms/InstSimplify/div.ll
index e13b6f139bcf5..5ca2e8837b924 100644
--- a/llvm/test/Transforms/InstSimplify/div.ll
+++ b/llvm/test/Transforms/InstSimplify/div.ll
@@ -17,11 +17,11 @@ define <2 x i32> @zero_dividend_vector(<2 x i32> %A) {
   ret <2 x i32> %B
 }

-define <2 x i32> @zero_dividend_vector_undef_elt(<2 x i32> %A) {
-; CHECK-LABEL: @zero_dividend_vector_undef_elt(
+define <2 x i32> @zero_dividend_vector_poison_elt(<2 x i32> %A) {
+; CHECK-LABEL: @zero_dividend_vector_poison_elt(
 ; CHECK-NEXT:    ret <2 x i32> zeroinitializer
 ;
-  %B = sdiv <2 x i32> , %A
+  %B = sdiv <2 x i32> , %A
   ret <2 x i32> %B
 }

@@ -59,23 +59,23 @@ define <2 x i8> @udiv_zero_elt_vec(<2 x i8> %x) {
   ret <2 x i8> %div
 }

-define <2 x i8> @sdiv_undef_elt_vec(<2 x i8> %x) {
-; CHECK-LABEL: @sdiv_undef_elt_vec(
+define <2 x i8> @sdiv_poison_elt_vec(<2 x i8> %x) {
+; CHECK-LABEL: @sdiv_poison_elt_vec(
 ; CHECK-NEXT:    ret <2 x i8> poison
 ;
-  %div = sdiv <2 x i8> %x,
+  %div = sdiv <2 x i8> %x,
   ret <2 x i8> %div
 }

-define <2 x i8> @udiv_undef_elt_vec(<2 x i8> %x) {
-; CHECK-LABEL: @udiv_undef_elt_vec(
+define <2 x i8> @udiv_poison_elt_vec(<2 x i8> %x) {
+; CHECK-LABEL: @udiv_poison_elt_vec(
 ; CHECK-NEXT:    ret <2 x i8> poison
 ;
-  %div = udiv <2 x i8> %x,
+  %div = udiv <2 x i8> %x,
   ret <2 x i8> %div
 }

-; Division-by-zero is undef. UB in any vector lane means the whole op is undef.
+; Division-by-zero is poison. UB in any vector lane means the whole op is poison.
 ; Thus, we can simplify this: if any element of 'y' is 0, we can do anything.
 ; Therefore, assume that all elements of 'y' must be 1.
diff --git a/llvm/test/Transforms/InstSimplify/fast-math-strictfp.ll b/llvm/test/Transforms/InstSimplify/fast-math-strictfp.ll index 4938987baccc2..b1d772890aff8 100644 --- a/llvm/test/Transforms/InstSimplify/fast-math-strictfp.ll +++ b/llvm/test/Transforms/InstSimplify/fast-math-strictfp.ll @@ -18,11 +18,11 @@ define float @mul_zero_2(float %a) #0 { ret float %b } -define <2 x float> @mul_zero_nsz_nnan_vec_undef(<2 x float> %a) #0 { -; CHECK-LABEL: @mul_zero_nsz_nnan_vec_undef( +define <2 x float> @mul_zero_nsz_nnan_vec_poison(<2 x float> %a) #0 { +; CHECK-LABEL: @mul_zero_nsz_nnan_vec_poison( ; CHECK-NEXT: ret <2 x float> zeroinitializer ; - %b = call nsz nnan <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %a, <2 x float>, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %b = call nsz nnan <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %a, <2 x float>, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %b } @@ -98,13 +98,13 @@ define <2 x float> @fadd_unary_fnegx_commute_vec(<2 x float> %x) #0 { ret <2 x float> %r } -define <2 x float> @fadd_fnegx_commute_vec_undef(<2 x float> %x) #0 { -; CHECK-LABEL: @fadd_fnegx_commute_vec_undef( -; CHECK-NEXT: [[NEGX:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fadd_fnegx_commute_vec_poison(<2 x float> %x) #0 { +; CHECK-LABEL: @fadd_fnegx_commute_vec_poison( +; CHECK-NEXT: [[NEGX:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: [[R:%.*]] = call nnan <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[X]], <2 x float> [[NEGX]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[R]] ; - %negx = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %negx = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") %r = call nnan <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %negx, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %r } @@ -240,34 +240,34 @@ define float @fneg_x(float %a) #0 { ret float %ret } -define <2 x float> @fsub_0_0_x_vec_undef1(<2 x float> %a) #0 { -; CHECK-LABEL: @fsub_0_0_x_vec_undef1( -; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fsub_0_0_x_vec_poison1(<2 x float> %a) #0 { +; CHECK-LABEL: @fsub_0_0_x_vec_poison1( +; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: [[RET:%.*]] = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> zeroinitializer, <2 x float> [[T1]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[RET]] ; - %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x 
float> , <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") %ret = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> zeroinitializer, <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } -define <2 x float> @fneg_x_vec_undef1(<2 x float> %a) #0 { -; CHECK-LABEL: @fneg_x_vec_undef1( +define <2 x float> @fneg_x_vec_poison1(<2 x float> %a) #0 { +; CHECK-LABEL: @fneg_x_vec_poison1( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; %t1 = fneg <2 x float> %a - %ret = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %ret = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } -define <2 x float> @fsub_0_0_x_vec_undef2(<2 x float> %a) #0 { -; CHECK-LABEL: @fsub_0_0_x_vec_undef2( +define <2 x float> @fsub_0_0_x_vec_poison2(<2 x float> %a) #0 { +; CHECK-LABEL: @fsub_0_0_x_vec_poison2( ; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> zeroinitializer, <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") -; CHECK-NEXT: [[RET:%.*]] = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[T1]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +; CHECK-NEXT: [[RET:%.*]] = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[T1]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[RET]] ; %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> zeroinitializer, <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") - %ret = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %ret = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } @@ -281,11 +281,11 @@ define <2 x float> @fadd_zero_nsz_vec(<2 x float> %x) #0 { ret <2 x float> %r } -define <2 x float> @fadd_zero_nsz_vec_undef(<2 x float> %x) #0 { -; CHECK-LABEL: @fadd_zero_nsz_vec_undef( +define <2 x float> @fadd_zero_nsz_vec_poison(<2 x float> %x) #0 { +; CHECK-LABEL: @fadd_zero_nsz_vec_poison( ; CHECK-NEXT: ret <2 x float> [[X:%.*]] ; - %r = call nsz <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") + %r = call nsz <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %r } @@ -375,11 +375,11 @@ define double @fdiv_zero_by_x(double %x) #0 { ret double %r } -define <2 x double> @fdiv_zero_by_x_vec_undef(<2 x double> %x) #0 { -; CHECK-LABEL: @fdiv_zero_by_x_vec_undef( +define <2 x double> @fdiv_zero_by_x_vec_poison(<2 x double> %x) #0 { +; CHECK-LABEL: @fdiv_zero_by_x_vec_poison( ; CHECK-NEXT: ret <2 x double> zeroinitializer ; - %r = call nnan nsz <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> , <2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %r = call nnan nsz <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> , <2 x double> %x, metadata 
!"round.tonearest", metadata !"fpexcept.ignore") ret <2 x double> %r } @@ -394,11 +394,11 @@ define double @frem_zero_by_x(double %x) #0 { ret double %r } -define <2 x double> @frem_poszero_by_x_vec_undef(<2 x double> %x) #0 { -; CHECK-LABEL: @frem_poszero_by_x_vec_undef( +define <2 x double> @frem_poszero_by_x_vec_poison(<2 x double> %x) #0 { +; CHECK-LABEL: @frem_poszero_by_x_vec_poison( ; CHECK-NEXT: ret <2 x double> zeroinitializer ; - %r = call nnan <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double> , <2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %r = call nnan <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double> , <2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x double> %r } @@ -413,11 +413,11 @@ define double @frem_negzero_by_x(double %x) #0 { ret double %r } -define <2 x double> @frem_negzero_by_x_vec_undef(<2 x double> %x) #0 { -; CHECK-LABEL: @frem_negzero_by_x_vec_undef( +define <2 x double> @frem_negzero_by_x_vec_poison(<2 x double> %x) #0 { +; CHECK-LABEL: @frem_negzero_by_x_vec_poison( ; CHECK-NEXT: ret <2 x double> ; - %r = call nnan <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double> , <2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %r = call nnan <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double> , <2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x double> %r } @@ -493,13 +493,13 @@ define float @fdiv_neg_swapped2(float %f) #0 { ret float %div } -define <2 x float> @fdiv_neg_vec_undef_elt(<2 x float> %f) #0 { -; CHECK-LABEL: @fdiv_neg_vec_undef_elt( -; CHECK-NEXT: [[NEG:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[F:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fdiv_neg_vec_poison_elt(<2 x float> %f) #0 { +; CHECK-LABEL: @fdiv_neg_vec_poison_elt( +; CHECK-NEXT: [[NEG:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[F:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: [[DIV:%.*]] = call nnan <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> [[F]], <2 x float> [[NEG]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[DIV]] ; - %neg = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %f, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %neg = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> %f, metadata !"round.tonearest", metadata !"fpexcept.ignore") %div = call nnan <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %f, <2 x float> %neg, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %div } diff --git a/llvm/test/Transforms/InstSimplify/fast-math.ll b/llvm/test/Transforms/InstSimplify/fast-math.ll index d1818e6346d7a..287f30b162f80 100644 --- a/llvm/test/Transforms/InstSimplify/fast-math.ll +++ b/llvm/test/Transforms/InstSimplify/fast-math.ll @@ -18,11 +18,11 @@ define float @mul_zero_2(float %a) { ret float %b } -define <2 x float> @mul_zero_nsz_nnan_vec_undef(<2 x float> %a) { -; CHECK-LABEL: @mul_zero_nsz_nnan_vec_undef( +define <2 x float> @mul_zero_nsz_nnan_vec_poison(<2 x float> %a) { +; CHECK-LABEL: @mul_zero_nsz_nnan_vec_poison( ; CHECK-NEXT: ret <2 x float> zeroinitializer ; - %b = fmul nsz nnan <2 x float> %a, + %b = fmul nsz nnan <2 x 
float> %a, ret <2 x float> %b } @@ -94,11 +94,11 @@ define <2 x float> @fadd_unary_fnegx_commute_vec(<2 x float> %x) { ret <2 x float> %r } -define <2 x float> @fadd_fnegx_commute_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @fadd_fnegx_commute_vec_undef( +define <2 x float> @fadd_fnegx_commute_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @fadd_fnegx_commute_vec_poison( ; CHECK-NEXT: ret <2 x float> zeroinitializer ; - %negx = fsub <2 x float> , %x + %negx = fsub <2 x float> , %x %r = fadd nnan <2 x float> %x, %negx ret <2 x float> %r } @@ -226,30 +226,30 @@ define float @fneg_x(float %a) { ret float %ret } -define <2 x float> @fsub_0_0_x_vec_undef1(<2 x float> %a) { -; CHECK-LABEL: @fsub_0_0_x_vec_undef1( +define <2 x float> @fsub_0_0_x_vec_poison1(<2 x float> %a) { +; CHECK-LABEL: @fsub_0_0_x_vec_poison1( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; - %t1 = fsub <2 x float> , %a + %t1 = fsub <2 x float> , %a %ret = fsub nsz <2 x float> zeroinitializer, %t1 ret <2 x float> %ret } -define <2 x float> @fneg_x_vec_undef1(<2 x float> %a) { -; CHECK-LABEL: @fneg_x_vec_undef1( +define <2 x float> @fneg_x_vec_poison1(<2 x float> %a) { +; CHECK-LABEL: @fneg_x_vec_poison1( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; %t1 = fneg <2 x float> %a - %ret = fsub nsz <2 x float> , %t1 + %ret = fsub nsz <2 x float> , %t1 ret <2 x float> %ret } -define <2 x float> @fsub_0_0_x_vec_undef2(<2 x float> %a) { -; CHECK-LABEL: @fsub_0_0_x_vec_undef2( +define <2 x float> @fsub_0_0_x_vec_poison2(<2 x float> %a) { +; CHECK-LABEL: @fsub_0_0_x_vec_poison2( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; %t1 = fsub <2 x float> zeroinitializer, %a - %ret = fsub nsz <2 x float> , %t1 + %ret = fsub nsz <2 x float> , %t1 ret <2 x float> %ret } @@ -263,11 +263,11 @@ define <2 x float> @fadd_zero_nsz_vec(<2 x float> %x) { ret <2 x float> %r } -define <2 x float> @fadd_zero_nsz_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @fadd_zero_nsz_vec_undef( +define <2 x float> @fadd_zero_nsz_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @fadd_zero_nsz_vec_poison( ; CHECK-NEXT: ret <2 x float> [[X:%.*]] ; - %r = fadd nsz <2 x float> %x, + %r = fadd nsz <2 x float> %x, ret <2 x float> %r } @@ -357,11 +357,11 @@ define double @fdiv_zero_by_x(double %x) { ret double %r } -define <2 x double> @fdiv_zero_by_x_vec_undef(<2 x double> %x) { -; CHECK-LABEL: @fdiv_zero_by_x_vec_undef( +define <2 x double> @fdiv_zero_by_x_vec_poison(<2 x double> %x) { +; CHECK-LABEL: @fdiv_zero_by_x_vec_poison( ; CHECK-NEXT: ret <2 x double> zeroinitializer ; - %r = fdiv nnan nsz <2 x double> , %x + %r = fdiv nnan nsz <2 x double> , %x ret <2 x double> %r } @@ -376,11 +376,11 @@ define double @frem_zero_by_x(double %x) { ret double %r } -define <2 x double> @frem_poszero_by_x_vec_undef(<2 x double> %x) { -; CHECK-LABEL: @frem_poszero_by_x_vec_undef( +define <2 x double> @frem_poszero_by_x_vec_poison(<2 x double> %x) { +; CHECK-LABEL: @frem_poszero_by_x_vec_poison( ; CHECK-NEXT: ret <2 x double> zeroinitializer ; - %r = frem nnan <2 x double> , %x + %r = frem nnan <2 x double> , %x ret <2 x double> %r } @@ -395,11 +395,11 @@ define double @frem_negzero_by_x(double %x) { ret double %r } -define <2 x double> @frem_negzero_by_x_vec_undef(<2 x double> %x) { -; CHECK-LABEL: @frem_negzero_by_x_vec_undef( +define <2 x double> @frem_negzero_by_x_vec_poison(<2 x double> %x) { +; CHECK-LABEL: @frem_negzero_by_x_vec_poison( ; CHECK-NEXT: ret <2 x double> ; - %r = frem nnan <2 x double> , %x + %r = frem nnan <2 x double> , %x ret <2 x double> %r } @@ -467,11 +467,11 @@ define float 
@fdiv_neg_swapped2(float %f) { ret float %div } -define <2 x float> @fdiv_neg_vec_undef_elt(<2 x float> %f) { -; CHECK-LABEL: @fdiv_neg_vec_undef_elt( +define <2 x float> @fdiv_neg_vec_poison_elt(<2 x float> %f) { +; CHECK-LABEL: @fdiv_neg_vec_poison_elt( ; CHECK-NEXT: ret <2 x float> ; - %neg = fsub <2 x float> , %f + %neg = fsub <2 x float> , %f %div = fdiv nnan <2 x float> %f, %neg ret <2 x float> %div } diff --git a/llvm/test/Transforms/InstSimplify/fdiv.ll b/llvm/test/Transforms/InstSimplify/fdiv.ll index 38e31257e185a..fb59011b91d5b 100644 --- a/llvm/test/Transforms/InstSimplify/fdiv.ll +++ b/llvm/test/Transforms/InstSimplify/fdiv.ll @@ -110,11 +110,11 @@ define <2 x float> @fdiv_nnan_ninf_by_undef_v2f32(<2 x float> %x) { ret <2 x float> %fdiv } -define <2 x float> @fdiv_nnan_ninf_by_zero_undef_v2f32(<2 x float> %x) { -; CHECK-LABEL: @fdiv_nnan_ninf_by_zero_undef_v2f32( +define <2 x float> @fdiv_nnan_ninf_by_zero_poison_v2f32(<2 x float> %x) { +; CHECK-LABEL: @fdiv_nnan_ninf_by_zero_poison_v2f32( ; CHECK-NEXT: ret <2 x float> poison ; - %fdiv = fdiv nnan ninf <2 x float> %x, + %fdiv = fdiv nnan ninf <2 x float> %x, ret <2 x float> %fdiv } diff --git a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic-strictfp.ll b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic-strictfp.ll index e4748a2402923..32ea4cb7cd198 100644 --- a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic-strictfp.ll +++ b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic-strictfp.ll @@ -24,23 +24,23 @@ define <2 x float> @fsub_-0_x_vec(<2 x float> %a) #0 { ret <2 x float> %ret } -define <2 x float> @fsub_-0_x_vec_undef_elts(<2 x float> %a) #0 { -; CHECK-LABEL: @fsub_-0_x_vec_undef_elts( -; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fsub_-0_x_vec_poison_elts(<2 x float> %a) #0 { +; CHECK-LABEL: @fsub_-0_x_vec_poison_elts( +; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: [[RET:%.*]] = fneg <2 x float> [[T1]] ; CHECK-NEXT: ret <2 x float> [[RET]] ; - %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") %ret = fneg <2 x float> %t1 ret <2 x float> %ret } -define <2 x float> @fsub_negzero_vec_undef_elts(<2 x float> %x) #0 { -; CHECK-LABEL: @fsub_negzero_vec_undef_elts( -; CHECK-NEXT: [[R:%.*]] = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fsub_negzero_vec_poison_elts(<2 x float> %x) #0 { +; CHECK-LABEL: @fsub_negzero_vec_poison_elts( +; CHECK-NEXT: [[R:%.*]] = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[R]] ; - %r = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %r = call nsz <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %x, 
metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %r } @@ -86,23 +86,23 @@ define <2 x float> @fneg_x_vec(<2 x float> %a) #0 { ret <2 x float> %ret } -define <2 x float> @fsub_-0_-0_x_vec_undef_elts(<2 x float> %a) #0 { -; CHECK-LABEL: @fsub_-0_-0_x_vec_undef_elts( -; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") -; CHECK-NEXT: [[RET:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[T1]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fsub_-0_-0_x_vec_poison_elts(<2 x float> %a) #0 { +; CHECK-LABEL: @fsub_-0_-0_x_vec_poison_elts( +; CHECK-NEXT: [[T1:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") +; CHECK-NEXT: [[RET:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> , <2 x float> [[T1]], metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[RET]] ; - %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") - %ret = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %t1 = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %ret = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } -define <2 x float> @fneg_x_vec_undef_elts(<2 x float> %a) #0 { -; CHECK-LABEL: @fneg_x_vec_undef_elts( +define <2 x float> @fneg_x_vec_poison_elts(<2 x float> %a) #0 { +; CHECK-LABEL: @fneg_x_vec_poison_elts( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; %t1 = fneg <2 x float> %a - %ret = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %ret = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float> %t1, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } @@ -139,11 +139,11 @@ define float @fsub_x_0(float %x) #0 { ret float %r } -define <2 x float> @fsub_x_0_vec_undef(<2 x float> %x) #0 { -; CHECK-LABEL: @fsub_x_0_vec_undef( +define <2 x float> @fsub_x_0_vec_poison(<2 x float> %x) #0 { +; CHECK-LABEL: @fsub_x_0_vec_poison( ; CHECK-NEXT: ret <2 x float> [[X:%.*]] ; - %r = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %x, <2 x float>, metadata !"round.tonearest", metadata !"fpexcept.ignore") + %r = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %x, <2 x float>, metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %r } @@ -156,11 +156,11 @@ define float @fadd_x_n0(float %a) #0 { ret float %ret } -define <2 x float> @fadd_x_n0_vec_undef_elt(<2 x float> %a) #0 { -; CHECK-LABEL: @fadd_x_n0_vec_undef_elt( +define <2 x float> @fadd_x_n0_vec_poison_elt(<2 x float> %a) #0 { +; CHECK-LABEL: @fadd_x_n0_vec_poison_elt( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; - %ret = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %a, <2 x float> , metadata !"round.tonearest", metadata 
!"fpexcept.ignore") + %ret = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %a, <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } @@ -174,12 +174,12 @@ define float @fadd_x_p0(float %a) #0 { ret float %ret } -define <2 x float> @fadd_x_p0_vec_undef_elt(<2 x float> %a) #0 { -; CHECK-LABEL: @fadd_x_p0_vec_undef_elt( -; CHECK-NEXT: [[RET:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[A:%.*]], <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") +define <2 x float> @fadd_x_p0_vec_poison_elt(<2 x float> %a) #0 { +; CHECK-LABEL: @fadd_x_p0_vec_poison_elt( +; CHECK-NEXT: [[RET:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> [[A:%.*]], <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") ; CHECK-NEXT: ret <2 x float> [[RET]] ; - %ret = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %a, <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") + %ret = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %a, <2 x float> , metadata !"round.tonearest", metadata !"fpexcept.ignore") ret <2 x float> %ret } diff --git a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll index 5d17504c09df6..7a35f09f03b99 100644 --- a/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll +++ b/llvm/test/Transforms/InstSimplify/floating-point-arithmetic.ll @@ -30,20 +30,20 @@ define <2 x float> @fsub_-0_x_vec(<2 x float> %a) { ret <2 x float> %ret } -define <2 x float> @fsub_-0_x_vec_undef_elts(<2 x float> %a) { -; CHECK-LABEL: @fsub_-0_x_vec_undef_elts( +define <2 x float> @fsub_-0_x_vec_poison_elts(<2 x float> %a) { +; CHECK-LABEL: @fsub_-0_x_vec_poison_elts( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; - %t1 = fsub <2 x float> , %a + %t1 = fsub <2 x float> , %a %ret = fneg <2 x float> %t1 ret <2 x float> %ret } -define <2 x float> @fsub_negzero_vec_undef_elts(<2 x float> %x) { -; CHECK-LABEL: @fsub_negzero_vec_undef_elts( +define <2 x float> @fsub_negzero_vec_poison_elts(<2 x float> %x) { +; CHECK-LABEL: @fsub_negzero_vec_poison_elts( ; CHECK-NEXT: ret <2 x float> [[X:%.*]] ; - %r = fsub nsz <2 x float> %x, + %r = fsub nsz <2 x float> %x, ret <2 x float> %r } @@ -85,21 +85,21 @@ define <2 x float> @fneg_x_vec(<2 x float> %a) { ret <2 x float> %ret } -define <2 x float> @fsub_-0_-0_x_vec_undef_elts(<2 x float> %a) { -; CHECK-LABEL: @fsub_-0_-0_x_vec_undef_elts( +define <2 x float> @fsub_-0_-0_x_vec_poison_elts(<2 x float> %a) { +; CHECK-LABEL: @fsub_-0_-0_x_vec_poison_elts( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; - %t1 = fsub <2 x float> , %a - %ret = fsub <2 x float> , %t1 + %t1 = fsub <2 x float> , %a + %ret = fsub <2 x float> , %t1 ret <2 x float> %ret } -define <2 x float> @fneg_x_vec_undef_elts(<2 x float> %a) { -; CHECK-LABEL: @fneg_x_vec_undef_elts( +define <2 x float> @fneg_x_vec_poison_elts(<2 x float> %a) { +; CHECK-LABEL: @fneg_x_vec_poison_elts( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; %t1 = fneg <2 x float> %a - %ret = fsub <2 x float> , %t1 + %ret = fsub <2 x float> , %t1 ret <2 x float> %ret } @@ -136,11 +136,11 @@ define float @fsub_x_0(float %x) { ret float %r } -define <2 x float> @fsub_x_0_vec_undef(<2 x float> %x) { -; CHECK-LABEL: @fsub_x_0_vec_undef( +define <2 x float> @fsub_x_0_vec_poison(<2 x float> %x) { +; CHECK-LABEL: @fsub_x_0_vec_poison( ; CHECK-NEXT: ret <2 x float> [[X:%.*]] ; - %r 
= fsub <2 x float> %x, + %r = fsub <2 x float> %x, ret <2 x float> %r } @@ -153,11 +153,11 @@ define float @fadd_x_n0(float %a) { ret float %ret } -define <2 x float> @fadd_x_n0_vec_undef_elt(<2 x float> %a) { -; CHECK-LABEL: @fadd_x_n0_vec_undef_elt( +define <2 x float> @fadd_x_n0_vec_poison_elt(<2 x float> %a) { +; CHECK-LABEL: @fadd_x_n0_vec_poison_elt( ; CHECK-NEXT: ret <2 x float> [[A:%.*]] ; - %ret = fadd <2 x float> %a, + %ret = fadd <2 x float> %a, ret <2 x float> %ret } diff --git a/llvm/test/Transforms/InstSimplify/floating-point-compare.ll b/llvm/test/Transforms/InstSimplify/floating-point-compare.ll index 3c1794c81284d..70f0321039ea9 100644 --- a/llvm/test/Transforms/InstSimplify/floating-point-compare.ll +++ b/llvm/test/Transforms/InstSimplify/floating-point-compare.ll @@ -547,30 +547,30 @@ define <2 x i1> @fabs_is_not_negative_anyzero(<2 x float> %V) { ret <2 x i1> %cmp } -define <3 x i1> @fabs_is_not_negative_negzero_undef(<3 x float> %V) { -; CHECK-LABEL: @fabs_is_not_negative_negzero_undef( +define <3 x i1> @fabs_is_not_negative_negzero_poison(<3 x float> %V) { +; CHECK-LABEL: @fabs_is_not_negative_negzero_poison( ; CHECK-NEXT: ret <3 x i1> zeroinitializer ; %abs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %V) - %cmp = fcmp olt <3 x float> %abs, + %cmp = fcmp olt <3 x float> %abs, ret <3 x i1> %cmp } -define <3 x i1> @fabs_is_not_negative_poszero_undef(<3 x float> %V) { -; CHECK-LABEL: @fabs_is_not_negative_poszero_undef( +define <3 x i1> @fabs_is_not_negative_poszero_poison(<3 x float> %V) { +; CHECK-LABEL: @fabs_is_not_negative_poszero_poison( ; CHECK-NEXT: ret <3 x i1> zeroinitializer ; %abs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %V) - %cmp = fcmp olt <3 x float> %abs, + %cmp = fcmp olt <3 x float> %abs, ret <3 x i1> %cmp } -define <3 x i1> @fabs_is_not_negative_anyzero_undef(<3 x float> %V) { -; CHECK-LABEL: @fabs_is_not_negative_anyzero_undef( +define <3 x i1> @fabs_is_not_negative_anyzero_poison(<3 x float> %V) { +; CHECK-LABEL: @fabs_is_not_negative_anyzero_poison( ; CHECK-NEXT: ret <3 x i1> zeroinitializer ; %abs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %V) - %cmp = fcmp olt <3 x float> %abs, + %cmp = fcmp olt <3 x float> %abs, ret <3 x i1> %cmp } @@ -1335,19 +1335,19 @@ define <2 x i1> @orderedCompareWithNaNVector(<2 x double> %A) { ret <2 x i1> %cmp } -define <2 x i1> @orderedCompareWithNaNVector_undef_elt(<2 x double> %A) { -; CHECK-LABEL: @orderedCompareWithNaNVector_undef_elt( +define <2 x i1> @orderedCompareWithNaNVector_poison_elt(<2 x double> %A) { +; CHECK-LABEL: @orderedCompareWithNaNVector_poison_elt( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %cmp = fcmp olt <2 x double> %A, + %cmp = fcmp olt <2 x double> %A, ret <2 x i1> %cmp } -define <2 x i1> @unorderedCompareWithNaNVector_undef_elt(<2 x double> %A) { -; CHECK-LABEL: @unorderedCompareWithNaNVector_undef_elt( +define <2 x i1> @unorderedCompareWithNaNVector_poison_elt(<2 x double> %A) { +; CHECK-LABEL: @unorderedCompareWithNaNVector_poison_elt( ; CHECK-NEXT: ret <2 x i1> ; - %cmp = fcmp ult <2 x double> %A, + %cmp = fcmp ult <2 x double> %A, ret <2 x i1> %cmp } diff --git a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll index a8a9e96a652fa..668a93ddf5a42 100644 --- a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll +++ b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll @@ -493,7 +493,7 @@ define <2 x double> @maxnum_nan_op0_vec(<2 x double> %x) { ; CHECK-LABEL: @maxnum_nan_op0_vec( ; CHECK-NEXT: ret <2 x double> 
[[X:%.*]] ; - %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> , <2 x double> %x) + %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> , <2 x double> %x) ret <2 x double> %r } @@ -509,7 +509,7 @@ define <2 x double> @minnum_nan_op0_vec(<2 x double> %x) { ; CHECK-LABEL: @minnum_nan_op0_vec( ; CHECK-NEXT: ret <2 x double> [[X:%.*]] ; - %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> , <2 x double> %x) + %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> , <2 x double> %x) ret <2 x double> %r } @@ -873,19 +873,19 @@ define double @minimum_nan_op1(double %x) { ret double %r } -define <2 x double> @maximum_nan_op0_vec_partial_undef(<2 x double> %x) { -; CHECK-LABEL: @maximum_nan_op0_vec_partial_undef( -; CHECK-NEXT: ret <2 x double> +define <2 x double> @maximum_nan_op0_vec_partial_poison(<2 x double> %x) { +; CHECK-LABEL: @maximum_nan_op0_vec_partial_poison( +; CHECK-NEXT: ret <2 x double> ; - %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> , <2 x double> %x) + %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> , <2 x double> %x) ret <2 x double> %r } -define <2 x double> @maximum_nan_op1_vec_partial_undef(<2 x double> %x) { -; CHECK-LABEL: @maximum_nan_op1_vec_partial_undef( -; CHECK-NEXT: ret <2 x double> +define <2 x double> @maximum_nan_op1_vec_partial_poison(<2 x double> %x) { +; CHECK-LABEL: @maximum_nan_op1_vec_partial_poison( +; CHECK-NEXT: ret <2 x double> ; - %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double> ) + %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double> ) ret <2 x double> %r } @@ -897,19 +897,19 @@ define <2 x double> @maximum_nan_op1_vec(<2 x double> %x) { ret <2 x double> %r } -define <2 x double> @minimum_nan_op0_vec_partial_undef(<2 x double> %x) { -; CHECK-LABEL: @minimum_nan_op0_vec_partial_undef( -; CHECK-NEXT: ret <2 x double> +define <2 x double> @minimum_nan_op0_vec_partial_poison(<2 x double> %x) { +; CHECK-LABEL: @minimum_nan_op0_vec_partial_poison( +; CHECK-NEXT: ret <2 x double> ; - %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> , <2 x double> %x) + %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> , <2 x double> %x) ret <2 x double> %r } -define <2 x double> @minimum_nan_op1_vec_partial_undef(<2 x double> %x) { -; CHECK-LABEL: @minimum_nan_op1_vec_partial_undef( -; CHECK-NEXT: ret <2 x double> +define <2 x double> @minimum_nan_op1_vec_partial_poison(<2 x double> %x) { +; CHECK-LABEL: @minimum_nan_op1_vec_partial_poison( +; CHECK-NEXT: ret <2 x double> ; - %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> %x, <2 x double> ) + %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> %x, <2 x double> ) ret <2 x double> %r } diff --git a/llvm/test/Transforms/InstSimplify/fp-nan.ll b/llvm/test/Transforms/InstSimplify/fp-nan.ll index cb0bed3790782..bb557500822c1 100644 --- a/llvm/test/Transforms/InstSimplify/fp-nan.ll +++ b/llvm/test/Transforms/InstSimplify/fp-nan.ll @@ -163,13 +163,13 @@ define <2 x double> @fsub_nan_poison_op1(<2 x double> %x) { ret <2 x double> %r } -; Vector with undef element +; Vector with poison element -define <2 x double> @frem_nan_undef_op0(<2 x double> %x) { -; CHECK-LABEL: @frem_nan_undef_op0( -; CHECK-NEXT: ret <2 x double> +define <2 x double> @frem_nan_poison_op0(<2 x double> %x) { +; CHECK-LABEL: @frem_nan_poison_op0( +; CHECK-NEXT: ret <2 x double> ; - %r = frem <2 x double> , %x + %r = frem <2 x double> , %x ret <2 x double> %r } @@ -177,7 +177,8 @@ define <2 x double> @frem_nan_undef_op0(<2 x double> %x) { define <3 x double> 
@fadd_nan_poison_undef_op1(<3 x double> %x) { ; CHECK-LABEL: @fadd_nan_poison_undef_op1( -; CHECK-NEXT: ret <3 x double> +; CHECK-NEXT: [[R:%.*]] = fadd <3 x double> [[X:%.*]], +; CHECK-NEXT: ret <3 x double> [[R]] ; %r = fadd <3 x double> %x, ret <3 x double> %r diff --git a/llvm/test/Transforms/InstSimplify/icmp-bool-constant.ll b/llvm/test/Transforms/InstSimplify/icmp-bool-constant.ll index 6205225098a7a..a501f995b6c97 100644 --- a/llvm/test/Transforms/InstSimplify/icmp-bool-constant.ll +++ b/llvm/test/Transforms/InstSimplify/icmp-bool-constant.ll @@ -12,11 +12,11 @@ define <2 x i1> @eq_t(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @eq_t_undef_elt(<2 x i1> %a) { -; CHECK-LABEL: @eq_t_undef_elt( +define <2 x i1> @eq_t_poison_elt(<2 x i1> %a) { +; CHECK-LABEL: @eq_t_poison_elt( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %r = icmp eq <2 x i1> %a, + %r = icmp eq <2 x i1> %a, ret <2 x i1> %r } @@ -54,11 +54,11 @@ define <2 x i1> @ugt_t(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @ugt_t_undef_elt(<2 x i1> %a) { -; CHECK-LABEL: @ugt_t_undef_elt( +define <2 x i1> @ugt_t_poison_elt(<2 x i1> %a) { +; CHECK-LABEL: @ugt_t_poison_elt( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %r = icmp ugt <2 x i1> %a, + %r = icmp ugt <2 x i1> %a, ret <2 x i1> %r } @@ -161,11 +161,11 @@ define <2 x i1> @sge_t(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @sge_t_undef_elt(<2 x i1> %a) { -; CHECK-LABEL: @sge_t_undef_elt( +define <2 x i1> @sge_t_poison_elt(<2 x i1> %a) { +; CHECK-LABEL: @sge_t_poison_elt( ; CHECK-NEXT: ret <2 x i1> ; - %r = icmp sge <2 x i1> %a, + %r = icmp sge <2 x i1> %a, ret <2 x i1> %r } diff --git a/llvm/test/Transforms/InstSimplify/icmp-not-bool-constant.ll b/llvm/test/Transforms/InstSimplify/icmp-not-bool-constant.ll index f4a0b6ddf6621..045d773bf3284 100644 --- a/llvm/test/Transforms/InstSimplify/icmp-not-bool-constant.ll +++ b/llvm/test/Transforms/InstSimplify/icmp-not-bool-constant.ll @@ -33,11 +33,11 @@ define <2 x i1> @eq_f_not_swap(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @eq_f_not_undef(<2 x i1> %a) { -; CHECK-LABEL: @eq_f_not_undef( +define <2 x i1> @eq_f_not_poison(<2 x i1> %a) { +; CHECK-LABEL: @eq_f_not_poison( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %not = xor <2 x i1> %a, + %not = xor <2 x i1> %a, %r = icmp eq <2 x i1> %not, ret <2 x i1> %r } @@ -60,11 +60,11 @@ define <2 x i1> @ne_t_not_swap(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @ne_t_not_undef(<2 x i1> %a) { -; CHECK-LABEL: @ne_t_not_undef( +define <2 x i1> @ne_t_not_poison(<2 x i1> %a) { +; CHECK-LABEL: @ne_t_not_poison( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %not = xor <2 x i1> %a, + %not = xor <2 x i1> %a, %r = icmp ne <2 x i1> %not, ret <2 x i1> %r } @@ -116,11 +116,11 @@ define <2 x i1> @ult_t_not_swap(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @ult_t_not_undef(<2 x i1> %a) { -; CHECK-LABEL: @ult_t_not_undef( +define <2 x i1> @ult_t_not_poison(<2 x i1> %a) { +; CHECK-LABEL: @ult_t_not_poison( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %not = xor <2 x i1> %a, + %not = xor <2 x i1> %a, %r = icmp ult <2 x i1> %not, ret <2 x i1> %r } @@ -152,11 +152,11 @@ define <2 x i1> @sgt_t_not_swap(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @sgt_t_not_undef(<2 x i1> %a) { -; CHECK-LABEL: @sgt_t_not_undef( +define <2 x i1> @sgt_t_not_poison(<2 x i1> %a) { +; CHECK-LABEL: @sgt_t_not_poison( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %not = xor <2 x i1> %a, + %not = xor <2 x i1> %a, %r = icmp sgt <2 x i1> %not, ret <2 x i1> %r } @@ -235,11 +235,11 @@ define <2 x i1> @ule_f_not_swap(<2 x i1> 
%a) { ret <2 x i1> %r } -define <2 x i1> @ule_f_not_undef(<2 x i1> %a) { -; CHECK-LABEL: @ule_f_not_undef( +define <2 x i1> @ule_f_not_poison(<2 x i1> %a) { +; CHECK-LABEL: @ule_f_not_poison( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %not = xor <2 x i1> %a, + %not = xor <2 x i1> %a, %r = icmp ule <2 x i1> %not, ret <2 x i1> %r } @@ -271,11 +271,11 @@ define <2 x i1> @sge_f_not_swap(<2 x i1> %a) { ret <2 x i1> %r } -define <2 x i1> @sge_f_not_undef(<2 x i1> %a) { -; CHECK-LABEL: @sge_f_not_undef( +define <2 x i1> @sge_f_not_poison(<2 x i1> %a) { +; CHECK-LABEL: @sge_f_not_poison( ; CHECK-NEXT: ret <2 x i1> [[A:%.*]] ; - %not = xor <2 x i1> %a, + %not = xor <2 x i1> %a, %r = icmp sge <2 x i1> %not, ret <2 x i1> %r } diff --git a/llvm/test/Transforms/InstSimplify/ldexp.ll b/llvm/test/Transforms/InstSimplify/ldexp.ll index c6bb0141199f2..d39f6a1e49673 100644 --- a/llvm/test/Transforms/InstSimplify/ldexp.ll +++ b/llvm/test/Transforms/InstSimplify/ldexp.ll @@ -57,11 +57,12 @@ define void @ldexp_f32_exp0(float %x) { define void @ldexp_v2f32_exp0(<2 x float> %x) { ; CHECK-LABEL: @ldexp_v2f32_exp0( ; CHECK-NEXT: store volatile <2 x float> [[X:%.*]], ptr addrspace(1) undef, align 8 -; CHECK-NEXT: store volatile <2 x float> [[X]], ptr addrspace(1) undef, align 8 +; CHECK-NEXT: [[PART_UNDEF1:%.*]] = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> [[X]], <2 x i32> ) +; CHECK-NEXT: store volatile <2 x float> [[PART_UNDEF1]], ptr addrspace(1) undef, align 8 ; CHECK-NEXT: store volatile <2 x float> [[X]], ptr addrspace(1) undef, align 8 ; CHECK-NEXT: ret void ; - %part.undef0 = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> ) + %part.undef0 = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> ) store volatile <2 x float> %part.undef0, ptr addrspace(1) undef %part.undef1 = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> ) diff --git a/llvm/test/Transforms/InstSimplify/mul.ll b/llvm/test/Transforms/InstSimplify/mul.ll index 8ae7f1eaac92b..a1b03a30fe4f4 100644 --- a/llvm/test/Transforms/InstSimplify/mul.ll +++ b/llvm/test/Transforms/InstSimplify/mul.ll @@ -34,11 +34,11 @@ define <16 x i8> @mul_by_0_vec(<16 x i8> %a) { ret <16 x i8> %b } -define <2 x i8> @mul_by_0_vec_undef_elt(<2 x i8> %a) { -; CHECK-LABEL: @mul_by_0_vec_undef_elt( +define <2 x i8> @mul_by_0_vec_poison_elt(<2 x i8> %a) { +; CHECK-LABEL: @mul_by_0_vec_poison_elt( ; CHECK-NEXT: ret <2 x i8> zeroinitializer ; - %b = mul <2 x i8> %a, + %b = mul <2 x i8> %a, ret <2 x i8> %b } diff --git a/llvm/test/Transforms/InstSimplify/negate.ll b/llvm/test/Transforms/InstSimplify/negate.ll index d72a0db6d445c..d07029becd1fe 100644 --- a/llvm/test/Transforms/InstSimplify/negate.ll +++ b/llvm/test/Transforms/InstSimplify/negate.ll @@ -17,11 +17,11 @@ define <2 x i32> @negate_nuw_vec(<2 x i32> %x) { ret <2 x i32> %neg } -define <2 x i32> @negate_nuw_vec_undef_elt(<2 x i32> %x) { -; CHECK-LABEL: @negate_nuw_vec_undef_elt( +define <2 x i32> @negate_nuw_vec_poison_elt(<2 x i32> %x) { +; CHECK-LABEL: @negate_nuw_vec_poison_elt( ; CHECK-NEXT: ret <2 x i32> zeroinitializer ; - %neg = sub nuw <2 x i32> , %x + %neg = sub nuw <2 x i32> , %x ret <2 x i32> %neg } @@ -43,12 +43,12 @@ define <2 x i8> @negate_zero_or_minsigned_nsw_vec(<2 x i8> %x) { ret <2 x i8> %neg } -define <2 x i8> @negate_zero_or_minsigned_nsw_vec_undef_elt(<2 x i8> %x) { -; CHECK-LABEL: @negate_zero_or_minsigned_nsw_vec_undef_elt( +define <2 x i8> @negate_zero_or_minsigned_nsw_vec_poison_elt(<2 x i8> %x) { +; CHECK-LABEL: 
@negate_zero_or_minsigned_nsw_vec_poison_elt( ; CHECK-NEXT: ret <2 x i8> zeroinitializer ; %signbit = shl <2 x i8> %x, - %neg = sub nsw <2 x i8> , %signbit + %neg = sub nsw <2 x i8> , %signbit ret <2 x i8> %neg } diff --git a/llvm/test/Transforms/InstSimplify/or.ll b/llvm/test/Transforms/InstSimplify/or.ll index 913b760dd331c..f241c6987b9e7 100644 --- a/llvm/test/Transforms/InstSimplify/or.ll +++ b/llvm/test/Transforms/InstSimplify/or.ll @@ -17,11 +17,11 @@ define i32 @all_ones(i32 %A) { ret i32 %B } -define <3 x i8> @all_ones_vec_with_undef_elt(<3 x i8> %A) { -; CHECK-LABEL: @all_ones_vec_with_undef_elt( +define <3 x i8> @all_ones_vec_with_poison_elt(<3 x i8> %A) { +; CHECK-LABEL: @all_ones_vec_with_poison_elt( ; CHECK-NEXT: ret <3 x i8> ; - %B = or <3 x i8> %A, + %B = or <3 x i8> %A, ret <3 x i8> %B } @@ -68,11 +68,11 @@ define i32 @or_not(i32 %A) { ret i32 %B } -define <2 x i4> @or_not_commute_vec_undef(<2 x i4> %A) { -; CHECK-LABEL: @or_not_commute_vec_undef( +define <2 x i4> @or_not_commute_vec_poison(<2 x i4> %A) { +; CHECK-LABEL: @or_not_commute_vec_poison( ; CHECK-NEXT: ret <2 x i4> ; - %NotA = xor <2 x i4> %A, + %NotA = xor <2 x i4> %A, %B = or <2 x i4> %NotA, %A ret <2 x i4> %B } @@ -335,7 +335,7 @@ define <2 x i1> @or_with_not_op_commute4(<2 x i1> %a, <2 x i1> %b) { ; CHECK-NEXT: ret <2 x i1> ; %ab = and <2 x i1> %b, %a - %not = xor <2 x i1> %ab, + %not = xor <2 x i1> %ab, %r = or <2 x i1> %not, %a ret <2 x i1> %r } @@ -515,6 +515,21 @@ define <2 x i4> @and_or_not_or_commute7_undef_elt(<2 x i4> %A, <2 x i4> %B) { ret <2 x i4> %r } +; doing the same with poison is safe. + +define <2 x i4> @and_or_not_or_commute7_poison_elt(<2 x i4> %A, <2 x i4> %B) { +; CHECK-LABEL: @and_or_not_or_commute7_poison_elt( +; CHECK-NEXT: [[NOTA:%.*]] = xor <2 x i4> [[A:%.*]], +; CHECK-NEXT: ret <2 x i4> [[NOTA]] +; + %nota = xor <2 x i4> %A, + %and = and <2 x i4> %B, %nota + %or = or <2 x i4> %B, %A + %notab = xor <2 x i4> %or, + %r = or <2 x i4> %notab, %and + ret <2 x i4> %r +} + ; (A | B) | (A ^ B) --> A | B define i69 @or_or_xor(i69 %A, i69 %B) { @@ -769,6 +784,21 @@ define <2 x i4> @or_nxor_and_undef_elt(<2 x i4> %a, <2 x i4> %b) { ret <2 x i4> %r } +; Same with poison is safe. 
+ +define <2 x i4> @or_nxor_and_poison_elt(<2 x i4> %a, <2 x i4> %b) { +; CHECK-LABEL: @or_nxor_and_poison_elt( +; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i4> [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[NOT:%.*]] = xor <2 x i4> [[XOR]], +; CHECK-NEXT: ret <2 x i4> [[NOT]] +; + %and = and <2 x i4> %b, %a + %xor = xor <2 x i4> %a, %b + %not = xor <2 x i4> %xor, + %r = or <2 x i4> %not, %and + ret <2 x i4> %r +} + ; ~(A ^ B) | (A | B) --> -1 define i4 @or_nxor_or_commute0(i4 %a, i4 %b) { @@ -849,15 +879,15 @@ define i4 @or_nxor_or_wrong_val2(i4 %a, i4 %b, i4 %c) { ret i4 %r } -; negative test - undef in 'not' is allowed +; negative test - poison in 'not' is allowed -define <2 x i4> @or_nxor_or_undef_elt(<2 x i4> %a, <2 x i4> %b) { -; CHECK-LABEL: @or_nxor_or_undef_elt( +define <2 x i4> @or_nxor_or_poison_elt(<2 x i4> %a, <2 x i4> %b) { +; CHECK-LABEL: @or_nxor_or_poison_elt( ; CHECK-NEXT: ret <2 x i4> ; %or = or <2 x i4> %b, %a %xor = xor <2 x i4> %a, %b - %not = xor <2 x i4> %xor, + %not = xor <2 x i4> %xor, %r = or <2 x i4> %or, %not ret <2 x i4> %r } @@ -966,12 +996,12 @@ define i32 @or_xor_not_op_or_commute7(i32 %a, i32 %b){ ret i32 %r } -define <2 x i4> @or_xor_not_op_or_undef_elt(<2 x i4> %a, <2 x i4> %b) { -; CHECK-LABEL: @or_xor_not_op_or_undef_elt( +define <2 x i4> @or_xor_not_op_or_poison_elt(<2 x i4> %a, <2 x i4> %b) { +; CHECK-LABEL: @or_xor_not_op_or_poison_elt( ; CHECK-NEXT: ret <2 x i4> ; %xor = xor <2 x i4> %a, %b - %nota = xor <2 x i4> %a, + %nota = xor <2 x i4> %a, %or = or <2 x i4> %nota, %b %r = or <2 x i4> %xor, %or ret <2 x i4> %r @@ -1082,6 +1112,21 @@ define <2 x i4> @or_nand_xor_undef_elt(<2 x i4> %x, <2 x i4> %y) { ret <2 x i4> %or } +; Same with poison is safe. + +define <2 x i4> @or_nand_xor_poison_elt(<2 x i4> %x, <2 x i4> %y) { +; CHECK-LABEL: @or_nand_xor_poison_elt( +; CHECK-NEXT: [[AND:%.*]] = and <2 x i4> [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[NAND:%.*]] = xor <2 x i4> [[AND]], +; CHECK-NEXT: ret <2 x i4> [[NAND]] +; + %and = and <2 x i4> %y, %x + %xor = xor <2 x i4> %x, %y + %nand = xor <2 x i4> %and, + %or = or <2 x i4> %xor, %nand + ret <2 x i4> %or +} + declare i32 @llvm.fshl.i32 (i32, i32, i32) declare i32 @llvm.fshr.i32 (i32, i32, i32) diff --git a/llvm/test/Transforms/InstSimplify/ptrmask.ll b/llvm/test/Transforms/InstSimplify/ptrmask.ll index dd83abfdeee46..d2c4a5dd7f035 100644 --- a/llvm/test/Transforms/InstSimplify/ptrmask.ll +++ b/llvm/test/Transforms/InstSimplify/ptrmask.ll @@ -40,7 +40,8 @@ define <2 x ptr addrspace(1) > @ptrmask_simplify_poison_and_zero_i32_vec_fail(<2 define <2 x ptr> @ptrmask_simplify_undef_and_ones_vec(<2 x ptr> %p) { ; CHECK-LABEL: define <2 x ptr> @ptrmask_simplify_undef_and_ones_vec ; CHECK-SAME: (<2 x ptr> [[P:%.*]]) { -; CHECK-NEXT: ret <2 x ptr> [[P]] +; CHECK-NEXT: [[R:%.*]] = call <2 x ptr> @llvm.ptrmask.v2p0.v2i64(<2 x ptr> [[P]], <2 x i64> ) +; CHECK-NEXT: ret <2 x ptr> [[R]] ; %r = call <2 x ptr> @llvm.ptrmask.v2p1.v2i64(<2 x ptr> %p, <2 x i64> ) ret <2 x ptr> %r diff --git a/llvm/test/Transforms/InstSimplify/rem.ll b/llvm/test/Transforms/InstSimplify/rem.ll index 5af3b5f7c5e0b..a46db0342042f 100644 --- a/llvm/test/Transforms/InstSimplify/rem.ll +++ b/llvm/test/Transforms/InstSimplify/rem.ll @@ -17,11 +17,11 @@ define <2 x i32> @zero_dividend_vector(<2 x i32> %A) { ret <2 x i32> %B } -define <2 x i32> @zero_dividend_vector_undef_elt(<2 x i32> %A) { -; CHECK-LABEL: @zero_dividend_vector_undef_elt( +define <2 x i32> @zero_dividend_vector_poison_elt(<2 x i32> %A) { +; CHECK-LABEL: @zero_dividend_vector_poison_elt( ; CHECK-NEXT: 
ret <2 x i32> zeroinitializer ; - %B = urem <2 x i32> , %A + %B = urem <2 x i32> , %A ret <2 x i32> %B } diff --git a/llvm/test/Transforms/InstSimplify/saturating-add-sub.ll b/llvm/test/Transforms/InstSimplify/saturating-add-sub.ll index 6fb12612f2f72..40b22c619f768 100644 --- a/llvm/test/Transforms/InstSimplify/saturating-add-sub.ll +++ b/llvm/test/Transforms/InstSimplify/saturating-add-sub.ll @@ -44,7 +44,7 @@ define <2 x i8> @uadd_vector_0_commute(<2 x i8> %a) { ; CHECK-LABEL: @uadd_vector_0_commute( ; CHECK-NEXT: ret <2 x i8> [[A:%.*]] ; - %x2v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> , <2 x i8> %a) + %x2v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> , <2 x i8> %a) ret <2 x i8> %x2v } @@ -156,7 +156,7 @@ define <2 x i8> @sadd_vector_0(<2 x i8> %a) { ; CHECK-LABEL: @sadd_vector_0( ; CHECK-NEXT: ret <2 x i8> [[A:%.*]] ; - %y1v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> ) + %y1v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> ) ret <2 x i8> %y1v } @@ -205,10 +205,10 @@ define i8 @sadd_scalar_maxval_commute(i8 %a) { define <2 x i8> @sadd_vector_maxval_commute(<2 x i8> %a) { ; CHECK-LABEL: @sadd_vector_maxval_commute( -; CHECK-NEXT: [[Y4V:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> , <2 x i8> [[A:%.*]]) +; CHECK-NEXT: [[Y4V:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> , <2 x i8> [[A:%.*]]) ; CHECK-NEXT: ret <2 x i8> [[Y4V]] ; - %y4v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> , <2 x i8> %a) + %y4v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> , <2 x i8> %a) ret <2 x i8> %y4v } diff --git a/llvm/test/Transforms/InstSimplify/sdiv.ll b/llvm/test/Transforms/InstSimplify/sdiv.ll index 2514d90b01235..99092802cab02 100644 --- a/llvm/test/Transforms/InstSimplify/sdiv.ll +++ b/llvm/test/Transforms/InstSimplify/sdiv.ll @@ -158,11 +158,11 @@ define <2 x i32> @knownnegation_commute_vec_bad3(<2 x i32> %x, <2 x i32> %y) { ret <2 x i32> %div } -define <3 x i32> @negated_operand_vec_undef(<3 x i32> %x) { -; CHECK-LABEL: @negated_operand_vec_undef( +define <3 x i32> @negated_operand_vec_poison(<3 x i32> %x) { +; CHECK-LABEL: @negated_operand_vec_poison( ; CHECK-NEXT: ret <3 x i32> ; - %negx = sub nsw <3 x i32> , %x + %negx = sub nsw <3 x i32> , %x %div = sdiv <3 x i32> %negx, %x ret <3 x i32> %div } diff --git a/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll b/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll index 2a4ce85ed11f8..fcf8c31b25eed 100644 --- a/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll +++ b/llvm/test/Transforms/InstSimplify/select-inseltpoison.ll @@ -17,11 +17,11 @@ define <2 x i1> @bool_true_or_false_vec(<2 x i1> %cond) { ret <2 x i1> %s } -define <2 x i1> @bool_true_or_false_vec_undef(<2 x i1> %cond) { -; CHECK-LABEL: @bool_true_or_false_vec_undef( +define <2 x i1> @bool_true_or_false_vec_poison(<2 x i1> %cond) { +; CHECK-LABEL: @bool_true_or_false_vec_poison( ; CHECK-NEXT: ret <2 x i1> [[COND:%.*]] ; - %s = select <2 x i1> %cond, <2 x i1> , <2 x i1> + %s = select <2 x i1> %cond, <2 x i1> , <2 x i1> ret <2 x i1> %s } @@ -57,27 +57,27 @@ define <2 x i32> @equal_arms_vec(<2 x i1> %cond, <2 x i32> %x) { ret <2 x i32> %V } -define <2 x i32> @equal_arms_vec_undef(<2 x i1> %cond) { -; CHECK-LABEL: @equal_arms_vec_undef( +define <2 x i32> @equal_arms_vec_poison(<2 x i1> %cond) { +; CHECK-LABEL: @equal_arms_vec_poison( ; CHECK-NEXT: ret <2 x i32> ; - %V = select <2 x i1> %cond, <2 x i32> , <2 x i32> + %V = select <2 x i1> %cond, <2 x i32> , <2 x i32> ret <2 x i32> %V } -define <3 x float> @equal_arms_vec_less_undef(<3 x i1> 
%cond) { -; CHECK-LABEL: @equal_arms_vec_less_undef( +define <3 x float> @equal_arms_vec_less_poison(<3 x i1> %cond) { +; CHECK-LABEL: @equal_arms_vec_less_poison( ; CHECK-NEXT: ret <3 x float> ; - %V = select <3 x i1> %cond, <3 x float> , <3 x float> + %V = select <3 x i1> %cond, <3 x float> , <3 x float> ret <3 x float> %V } -define <3 x float> @equal_arms_vec_more_undef(<3 x i1> %cond) { -; CHECK-LABEL: @equal_arms_vec_more_undef( -; CHECK-NEXT: ret <3 x float> +define <3 x float> @equal_arms_vec_more_poison(<3 x i1> %cond) { +; CHECK-LABEL: @equal_arms_vec_more_poison( +; CHECK-NEXT: ret <3 x float> ; - %V = select <3 x i1> %cond, <3 x float> , <3 x float> + %V = select <3 x i1> %cond, <3 x float> , <3 x float> ret <3 x float> %V } @@ -105,19 +105,19 @@ define <2 x i8> @vsel_mixedvec() { ret <2 x i8> %s } -define <3 x i8> @vsel_undef_true_op(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @vsel_undef_true_op( +define <3 x i8> @vsel_poison_true_op(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @vsel_poison_true_op( ; CHECK-NEXT: ret <3 x i8> [[X:%.*]] ; - %s = select <3 x i1>, <3 x i8> %x, <3 x i8> %y + %s = select <3 x i1>, <3 x i8> %x, <3 x i8> %y ret <3 x i8> %s } -define <3 x i4> @vsel_undef_false_op(<3 x i4> %x, <3 x i4> %y) { -; CHECK-LABEL: @vsel_undef_false_op( +define <3 x i4> @vsel_poison_false_op(<3 x i4> %x, <3 x i4> %y) { +; CHECK-LABEL: @vsel_poison_false_op( ; CHECK-NEXT: ret <3 x i4> [[Y:%.*]] ; - %s = select <3 x i1>, <3 x i4> %x, <3 x i4> %y + %s = select <3 x i1>, <3 x i4> %x, <3 x i4> %y ret <3 x i4> %s } diff --git a/llvm/test/Transforms/InstSimplify/select.ll b/llvm/test/Transforms/InstSimplify/select.ll index fe93a0c3f2125..40c1460e3ebc3 100644 --- a/llvm/test/Transforms/InstSimplify/select.ll +++ b/llvm/test/Transforms/InstSimplify/select.ll @@ -25,11 +25,11 @@ define <2 x i1> @bool_true_or_false_vec(<2 x i1> %cond) { ret <2 x i1> %s } -define <2 x i1> @bool_true_or_false_vec_undef(<2 x i1> %cond) { -; CHECK-LABEL: @bool_true_or_false_vec_undef( +define <2 x i1> @bool_true_or_false_vec_poison(<2 x i1> %cond) { +; CHECK-LABEL: @bool_true_or_false_vec_poison( ; CHECK-NEXT: ret <2 x i1> [[COND:%.*]] ; - %s = select <2 x i1> %cond, <2 x i1> , <2 x i1> + %s = select <2 x i1> %cond, <2 x i1> , <2 x i1> ret <2 x i1> %s } @@ -65,27 +65,27 @@ define <2 x i32> @equal_arms_vec(<2 x i1> %cond, <2 x i32> %x) { ret <2 x i32> %V } -define <2 x i32> @equal_arms_vec_undef(<2 x i1> %cond) { -; CHECK-LABEL: @equal_arms_vec_undef( +define <2 x i32> @equal_arms_vec_poison(<2 x i1> %cond) { +; CHECK-LABEL: @equal_arms_vec_poison( ; CHECK-NEXT: ret <2 x i32> ; - %V = select <2 x i1> %cond, <2 x i32> , <2 x i32> + %V = select <2 x i1> %cond, <2 x i32> , <2 x i32> ret <2 x i32> %V } -define <3 x float> @equal_arms_vec_less_undef(<3 x i1> %cond) { -; CHECK-LABEL: @equal_arms_vec_less_undef( +define <3 x float> @equal_arms_vec_less_poison(<3 x i1> %cond) { +; CHECK-LABEL: @equal_arms_vec_less_poison( ; CHECK-NEXT: ret <3 x float> ; - %V = select <3 x i1> %cond, <3 x float> , <3 x float> + %V = select <3 x i1> %cond, <3 x float> , <3 x float> ret <3 x float> %V } -define <3 x float> @equal_arms_vec_more_undef(<3 x i1> %cond) { -; CHECK-LABEL: @equal_arms_vec_more_undef( -; CHECK-NEXT: ret <3 x float> +define <3 x float> @equal_arms_vec_more_poison(<3 x i1> %cond) { +; CHECK-LABEL: @equal_arms_vec_more_poison( +; CHECK-NEXT: ret <3 x float> ; - %V = select <3 x i1> %cond, <3 x float> , <3 x float> + %V = select <3 x i1> %cond, <3 x float> , <3 x float> ret <3 x float> %V } @@ -113,19 +113,19 @@ 
define <2 x i8> @vsel_mixedvec() { ret <2 x i8> %s } -define <3 x i8> @vsel_undef_true_op(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @vsel_undef_true_op( +define <3 x i8> @vsel_poison_true_op(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @vsel_poison_true_op( ; CHECK-NEXT: ret <3 x i8> [[X:%.*]] ; - %s = select <3 x i1>, <3 x i8> %x, <3 x i8> %y + %s = select <3 x i1>, <3 x i8> %x, <3 x i8> %y ret <3 x i8> %s } -define <3 x i4> @vsel_undef_false_op(<3 x i4> %x, <3 x i4> %y) { -; CHECK-LABEL: @vsel_undef_false_op( +define <3 x i4> @vsel_poison_false_op(<3 x i4> %x, <3 x i4> %y) { +; CHECK-LABEL: @vsel_poison_false_op( ; CHECK-NEXT: ret <3 x i4> [[Y:%.*]] ; - %s = select <3 x i1>, <3 x i4> %x, <3 x i4> %y + %s = select <3 x i1>, <3 x i4> %x, <3 x i4> %y ret <3 x i4> %s } diff --git a/llvm/test/Transforms/InstSimplify/shift.ll b/llvm/test/Transforms/InstSimplify/shift.ll index b562c3c164d52..a816fcbdeeee0 100644 --- a/llvm/test/Transforms/InstSimplify/shift.ll +++ b/llvm/test/Transforms/InstSimplify/shift.ll @@ -17,11 +17,11 @@ define i41 @shl_0(i41 %X) { ret i41 %B } -define <2 x i41> @shl_0_vec_undef_elt(<2 x i41> %X) { -; CHECK-LABEL: @shl_0_vec_undef_elt( +define <2 x i41> @shl_0_vec_poison_elt(<2 x i41> %X) { +; CHECK-LABEL: @shl_0_vec_poison_elt( ; CHECK-NEXT: ret <2 x i41> zeroinitializer ; - %B = shl <2 x i41> , %X + %B = shl <2 x i41> , %X ret <2 x i41> %B } @@ -41,11 +41,11 @@ define i39 @ashr_0(i39 %X) { ret i39 %B } -define <2 x i141> @ashr_0_vec_undef_elt(<2 x i141> %X) { -; CHECK-LABEL: @ashr_0_vec_undef_elt( +define <2 x i141> @ashr_0_vec_poison_elt(<2 x i141> %X) { +; CHECK-LABEL: @ashr_0_vec_poison_elt( ; CHECK-NEXT: ret <2 x i141> zeroinitializer ; - %B = shl <2 x i141> , %X + %B = shl <2 x i141> , %X ret <2 x i141> %B } @@ -113,11 +113,11 @@ define i32 @ashr_all_ones(i32 %A) { ret i32 %B } -define <3 x i8> @ashr_all_ones_vec_with_undef_elts(<3 x i8> %x, <3 x i8> %y) { -; CHECK-LABEL: @ashr_all_ones_vec_with_undef_elts( +define <3 x i8> @ashr_all_ones_vec_with_poison_elts(<3 x i8> %x, <3 x i8> %y) { +; CHECK-LABEL: @ashr_all_ones_vec_with_poison_elts( ; CHECK-NEXT: ret <3 x i8> ; - %sh = ashr <3 x i8> , %y + %sh = ashr <3 x i8> , %y ret <3 x i8> %sh } @@ -306,11 +306,22 @@ define <2 x i7> @all_ones_left_right_splat(<2 x i7> %x) { ; Poison could propagate, but undef must not. 
-define <3 x i7> @all_ones_left_right_splat_poison_undef_elt(<3 x i7> %x) { -; CHECK-LABEL: @all_ones_left_right_splat_poison_undef_elt( +define <3 x i7> @all_ones_left_right_splat_undef_elt(<3 x i7> %x) { +; CHECK-LABEL: @all_ones_left_right_splat_undef_elt( +; CHECK-NEXT: [[LEFT:%.*]] = shl <3 x i7> , [[X:%.*]] +; CHECK-NEXT: [[RIGHT:%.*]] = ashr <3 x i7> [[LEFT]], [[X]] +; CHECK-NEXT: ret <3 x i7> [[RIGHT]] +; + %left = shl <3 x i7> , %x + %right = ashr <3 x i7> %left, %x + ret <3 x i7> %right +} + +define <3 x i7> @all_ones_left_right_splat_poison_elt(<3 x i7> %x) { +; CHECK-LABEL: @all_ones_left_right_splat_poison_elt( ; CHECK-NEXT: ret <3 x i7> ; - %left = shl <3 x i7> , %x + %left = shl <3 x i7> , %x %right = ashr <3 x i7> %left, %x ret <3 x i7> %right } diff --git a/llvm/test/Transforms/InstSimplify/srem.ll b/llvm/test/Transforms/InstSimplify/srem.ll index b1cbdf35b3c7c..ab726832e517b 100644 --- a/llvm/test/Transforms/InstSimplify/srem.ll +++ b/llvm/test/Transforms/InstSimplify/srem.ll @@ -39,11 +39,11 @@ define <2 x i32> @knownnegation_commute_vec(<2 x i32> %x, <2 x i32> %y) { ret <2 x i32> %rem } -define <3 x i32> @negated_operand_vec_undef(<3 x i32> %x) { -; CHECK-LABEL: @negated_operand_vec_undef( +define <3 x i32> @negated_operand_vec_poison(<3 x i32> %x) { +; CHECK-LABEL: @negated_operand_vec_poison( ; CHECK-NEXT: ret <3 x i32> zeroinitializer ; - %negx = sub <3 x i32> , %x + %negx = sub <3 x i32> , %x %rem = srem <3 x i32> %negx, %x ret <3 x i32> %rem } diff --git a/llvm/test/Transforms/InstSimplify/sub.ll b/llvm/test/Transforms/InstSimplify/sub.ll index deb0ee33cd920..fd88fc15716c8 100644 --- a/llvm/test/Transforms/InstSimplify/sub.ll +++ b/llvm/test/Transforms/InstSimplify/sub.ll @@ -29,7 +29,7 @@ define <2 x i32> @sub_zero_vec(<2 x i32> %A) { ; CHECK-LABEL: @sub_zero_vec( ; CHECK-NEXT: ret <2 x i32> [[A:%.*]] ; - %B = sub <2 x i32> %A, + %B = sub <2 x i32> %A, ret <2 x i32> %B } @@ -46,8 +46,8 @@ define <2 x i32> @neg_neg_vec(<2 x i32> %A) { ; CHECK-LABEL: @neg_neg_vec( ; CHECK-NEXT: ret <2 x i32> [[A:%.*]] ; - %B = sub <2 x i32> , %A - %C = sub <2 x i32> , %B + %B = sub <2 x i32> , %A + %C = sub <2 x i32> , %B ret <2 x i32> %C } diff --git a/llvm/test/Transforms/InstSimplify/xor.ll b/llvm/test/Transforms/InstSimplify/xor.ll index 0e23cc66c1652..229e943a3836f 100644 --- a/llvm/test/Transforms/InstSimplify/xor.ll +++ b/llvm/test/Transforms/InstSimplify/xor.ll @@ -156,6 +156,20 @@ define <2 x i4> @xor_and_or_not_undef_elt(<2 x i4> %a, <2 x i4> %b) { ret <2 x i4> %r } +; but correct to propagate poison element + +define <2 x i4> @xor_and_or_not_poison_elt(<2 x i4> %a, <2 x i4> %b) { +; CHECK-LABEL: @xor_and_or_not_poison_elt( +; CHECK-NEXT: [[NOT:%.*]] = xor <2 x i4> [[A:%.*]], +; CHECK-NEXT: ret <2 x i4> [[NOT]] +; + %and = and <2 x i4> %b, %a + %not = xor <2 x i4> %a, + %or = or <2 x i4> %not, %b + %r = xor <2 x i4> %or, %and + ret <2 x i4> %r +} + define i4 @xor_or_and_not_commute0(i4 %a, i4 %b) { ; CHECK-LABEL: @xor_or_and_not_commute0( ; CHECK-NEXT: ret i4 [[A:%.*]] @@ -277,11 +291,11 @@ define i4 @xor_or_and_not_wrong_val2(i4 %a, i4 %b, i4 %c) { ret i4 %r } -define <2 x i4> @xor_or_and_not_undef_elt(<2 x i4> %a, <2 x i4> %b) { -; CHECK-LABEL: @xor_or_and_not_undef_elt( +define <2 x i4> @xor_or_and_not_poison_elt(<2 x i4> %a, <2 x i4> %b) { -; CHECK-LABEL: @xor_or_and_not_undef_elt( +; CHECK-LABEL: @xor_or_and_not_poison_elt( ; CHECK-NEXT: ret <2 x i4> [[A:%.*]] ; - %not = xor <2 x i4> %a, + %not = xor <2 x i4> %a, %and = and <2 x i4> %b, %not %or = or <2 x i4> %a, %b %r = xor <2 x i4> %or, %and diff --git 
a/llvm/test/Transforms/Reassociate/inverses.ll b/llvm/test/Transforms/Reassociate/inverses.ll index b6962c6872a9a..a9d0c4fb03222 100644 --- a/llvm/test/Transforms/Reassociate/inverses.ll +++ b/llvm/test/Transforms/Reassociate/inverses.ll @@ -12,12 +12,12 @@ define i32 @test1(i32 %a, i32 %b) { ret i32 %t5 } -define <2 x i32> @not_op_vec_undef(<2 x i32> %a, <2 x i32> %b) { -; CHECK-LABEL: @not_op_vec_undef( +define <2 x i32> @not_op_vec_poison(<2 x i32> %a, <2 x i32> %b) { +; CHECK-LABEL: @not_op_vec_poison( ; CHECK-NEXT: ret <2 x i32> zeroinitializer ; %t2 = and <2 x i32> %b, %a - %t4 = xor <2 x i32> %a, + %t4 = xor <2 x i32> %a, %t5 = and <2 x i32> %t2, %t4 ret <2 x i32> %t5 } diff --git a/llvm/test/Transforms/Reassociate/negation.ll b/llvm/test/Transforms/Reassociate/negation.ll index 4718d9d87ae1b..14ae86fb94aab 100644 --- a/llvm/test/Transforms/Reassociate/negation.ll +++ b/llvm/test/Transforms/Reassociate/negation.ll @@ -31,16 +31,16 @@ define i32 @test2(i32 %a, i32 %b, i32 %z) { ret i32 %f } -define <2 x i32> @negate_vec_undefs(<2 x i32> %a, <2 x i32> %b, <2 x i32> %z) { -; CHECK-LABEL: @negate_vec_undefs( +define <2 x i32> @negate_vec_poisons(<2 x i32> %a, <2 x i32> %b, <2 x i32> %z) { +; CHECK-LABEL: @negate_vec_poisons( ; CHECK-NEXT: [[E:%.*]] = mul <2 x i32> [[A:%.*]], ; CHECK-NEXT: [[F:%.*]] = mul <2 x i32> [[E]], [[Z:%.*]] ; CHECK-NEXT: ret <2 x i32> [[F]] ; %d = mul <2 x i32> %z, - %c = sub <2 x i32> , %d + %c = sub <2 x i32> , %d %e = mul <2 x i32> %a, %c - %f = sub <2 x i32> , %e + %f = sub <2 x i32> , %e ret <2 x i32> %f } diff --git a/llvm/unittests/IR/ConstantsTest.cpp b/llvm/unittests/IR/ConstantsTest.cpp index 1d6a92c498b06..8f0a507c0fd18 100644 --- a/llvm/unittests/IR/ConstantsTest.cpp +++ b/llvm/unittests/IR/ConstantsTest.cpp @@ -581,7 +581,7 @@ TEST(ConstantsTest, containsUndefElemTest) { } } -// Check that undefined elements in vector constants are matched +// Check that poison elements in vector constants are matched // correctly for both integer and floating-point types. Just don't // crash on vectors of pointers (could be handled?). 
@@ -590,6 +590,7 @@ TEST(ConstantsTest, isElementWiseEqual) { Type *Int32Ty = Type::getInt32Ty(Context); Constant *CU = UndefValue::get(Int32Ty); + Constant *CP = PoisonValue::get(Int32Ty); Constant *C1 = ConstantInt::get(Int32Ty, 1); Constant *C2 = ConstantInt::get(Int32Ty, 2); @@ -597,15 +598,25 @@ TEST(ConstantsTest, isElementWiseEqual) { Constant *C12U1 = ConstantVector::get({C1, C2, CU, C1}); Constant *C12U2 = ConstantVector::get({C1, C2, CU, C2}); Constant *C12U21 = ConstantVector::get({C1, C2, CU, C2, C1}); + Constant *C12P1 = ConstantVector::get({C1, C2, CP, C1}); + Constant *C12P2 = ConstantVector::get({C1, C2, CP, C2}); + Constant *C12P21 = ConstantVector::get({C1, C2, CP, C2, C1}); - EXPECT_TRUE(C1211->isElementWiseEqual(C12U1)); - EXPECT_TRUE(C12U1->isElementWiseEqual(C1211)); + EXPECT_FALSE(C1211->isElementWiseEqual(C12U1)); + EXPECT_FALSE(C12U1->isElementWiseEqual(C1211)); EXPECT_FALSE(C12U2->isElementWiseEqual(C12U1)); EXPECT_FALSE(C12U1->isElementWiseEqual(C12U2)); EXPECT_FALSE(C12U21->isElementWiseEqual(C12U2)); + EXPECT_TRUE(C1211->isElementWiseEqual(C12P1)); + EXPECT_TRUE(C12P1->isElementWiseEqual(C1211)); + EXPECT_FALSE(C12P2->isElementWiseEqual(C12P1)); + EXPECT_FALSE(C12P1->isElementWiseEqual(C12P2)); + EXPECT_FALSE(C12P21->isElementWiseEqual(C12P2)); + Type *FltTy = Type::getFloatTy(Context); Constant *CFU = UndefValue::get(FltTy); + Constant *CFP = PoisonValue::get(FltTy); Constant *CF1 = ConstantFP::get(FltTy, 1.0); Constant *CF2 = ConstantFP::get(FltTy, 2.0); @@ -613,25 +624,41 @@ TEST(ConstantsTest, isElementWiseEqual) { Constant *CF12U1 = ConstantVector::get({CF1, CF2, CFU, CF1}); Constant *CF12U2 = ConstantVector::get({CF1, CF2, CFU, CF2}); Constant *CFUU1U = ConstantVector::get({CFU, CFU, CF1, CFU}); + Constant *CF12P1 = ConstantVector::get({CF1, CF2, CFP, CF1}); + Constant *CF12P2 = ConstantVector::get({CF1, CF2, CFP, CF2}); + Constant *CFPP1P = ConstantVector::get({CFP, CFP, CF1, CFP}); - EXPECT_TRUE(CF1211->isElementWiseEqual(CF12U1)); - EXPECT_TRUE(CF12U1->isElementWiseEqual(CF1211)); - EXPECT_TRUE(CFUU1U->isElementWiseEqual(CF12U1)); + EXPECT_FALSE(CF1211->isElementWiseEqual(CF12U1)); + EXPECT_FALSE(CF12U1->isElementWiseEqual(CF1211)); + EXPECT_FALSE(CFUU1U->isElementWiseEqual(CF12U1)); EXPECT_FALSE(CF12U2->isElementWiseEqual(CF12U1)); EXPECT_FALSE(CF12U1->isElementWiseEqual(CF12U2)); + EXPECT_TRUE(CF1211->isElementWiseEqual(CF12P1)); + EXPECT_TRUE(CF12P1->isElementWiseEqual(CF1211)); + EXPECT_TRUE(CFPP1P->isElementWiseEqual(CF12P1)); + EXPECT_FALSE(CF12P2->isElementWiseEqual(CF12P1)); + EXPECT_FALSE(CF12P1->isElementWiseEqual(CF12P2)); + PointerType *PtrTy = PointerType::get(Context, 0); Constant *CPU = UndefValue::get(PtrTy); + Constant *CPP = PoisonValue::get(PtrTy); Constant *CP0 = ConstantPointerNull::get(PtrTy); Constant *CP0000 = ConstantVector::get({CP0, CP0, CP0, CP0}); Constant *CP00U0 = ConstantVector::get({CP0, CP0, CPU, CP0}); Constant *CP00U = ConstantVector::get({CP0, CP0, CPU}); + Constant *CP00P0 = ConstantVector::get({CP0, CP0, CPP, CP0}); + Constant *CP00P = ConstantVector::get({CP0, CP0, CPP}); EXPECT_FALSE(CP0000->isElementWiseEqual(CP00U0)); EXPECT_FALSE(CP00U0->isElementWiseEqual(CP0000)); EXPECT_FALSE(CP0000->isElementWiseEqual(CP00U)); EXPECT_FALSE(CP00U->isElementWiseEqual(CP00U0)); + EXPECT_FALSE(CP0000->isElementWiseEqual(CP00P0)); + EXPECT_FALSE(CP00P0->isElementWiseEqual(CP0000)); + EXPECT_FALSE(CP0000->isElementWiseEqual(CP00P)); + EXPECT_FALSE(CP00P->isElementWiseEqual(CP00P0)); } // Check that vector/aggregate constants 
correctly store undef and poison diff --git a/llvm/unittests/IR/PatternMatch.cpp b/llvm/unittests/IR/PatternMatch.cpp index 4d0c2e4220fec..133012684d16d 100644 --- a/llvm/unittests/IR/PatternMatch.cpp +++ b/llvm/unittests/IR/PatternMatch.cpp @@ -1184,6 +1184,8 @@ TEST_F(PatternMatchTest, VectorUndefInt) { Type *VectorTy = FixedVectorType::get(ScalarTy, 4); Constant *ScalarUndef = UndefValue::get(ScalarTy); Constant *VectorUndef = UndefValue::get(VectorTy); + Constant *ScalarPoison = PoisonValue::get(ScalarTy); + Constant *VectorPoison = PoisonValue::get(VectorTy); Constant *ScalarZero = Constant::getNullValue(ScalarTy); Constant *VectorZero = Constant::getNullValue(VectorTy); @@ -1194,17 +1196,30 @@ TEST_F(PatternMatchTest, VectorUndefInt) { Elems.push_back(ScalarZero); Constant *VectorZeroUndef = ConstantVector::get(Elems); + SmallVector Elems2; + Elems2.push_back(ScalarPoison); + Elems2.push_back(ScalarZero); + Elems2.push_back(ScalarPoison); + Elems2.push_back(ScalarZero); + Constant *VectorZeroPoison = ConstantVector::get(Elems2); + EXPECT_TRUE(match(ScalarUndef, m_Undef())); + EXPECT_TRUE(match(ScalarPoison, m_Undef())); EXPECT_TRUE(match(VectorUndef, m_Undef())); + EXPECT_TRUE(match(VectorPoison, m_Undef())); EXPECT_FALSE(match(ScalarZero, m_Undef())); EXPECT_FALSE(match(VectorZero, m_Undef())); EXPECT_FALSE(match(VectorZeroUndef, m_Undef())); + EXPECT_FALSE(match(VectorZeroPoison, m_Undef())); EXPECT_FALSE(match(ScalarUndef, m_Zero())); + EXPECT_FALSE(match(ScalarPoison, m_Zero())); EXPECT_FALSE(match(VectorUndef, m_Zero())); + EXPECT_FALSE(match(VectorPoison, m_Zero())); + EXPECT_FALSE(match(VectorZeroUndef, m_Zero())); EXPECT_TRUE(match(ScalarZero, m_Zero())); EXPECT_TRUE(match(VectorZero, m_Zero())); - EXPECT_TRUE(match(VectorZeroUndef, m_Zero())); + EXPECT_TRUE(match(VectorZeroPoison, m_Zero())); const APInt *C; // Regardless of whether undefs are allowed, @@ -1249,6 +1264,8 @@ TEST_F(PatternMatchTest, VectorUndefFloat) { Type *VectorTy = FixedVectorType::get(ScalarTy, 4); Constant *ScalarUndef = UndefValue::get(ScalarTy); Constant *VectorUndef = UndefValue::get(VectorTy); + Constant *ScalarPoison = PoisonValue::get(ScalarTy); + Constant *VectorPoison = PoisonValue::get(VectorTy); Constant *ScalarZero = Constant::getNullValue(ScalarTy); Constant *VectorZero = Constant::getNullValue(VectorTy); Constant *ScalarPosInf = ConstantFP::getInfinity(ScalarTy, false); @@ -1258,72 +1275,116 @@ TEST_F(PatternMatchTest, VectorUndefFloat) { Constant *VectorZeroUndef = ConstantVector::get({ScalarUndef, ScalarZero, ScalarUndef, ScalarZero}); + Constant *VectorZeroPoison = + ConstantVector::get({ScalarPoison, ScalarZero, ScalarPoison, ScalarZero}); + Constant *VectorInfUndef = ConstantVector::get( {ScalarPosInf, ScalarNegInf, ScalarUndef, ScalarPosInf}); + Constant *VectorInfPoison = ConstantVector::get( + {ScalarPosInf, ScalarNegInf, ScalarPoison, ScalarPosInf}); + Constant *VectorNaNUndef = ConstantVector::get({ScalarUndef, ScalarNaN, ScalarNaN, ScalarNaN}); + Constant *VectorNaNPoison = + ConstantVector::get({ScalarPoison, ScalarNaN, ScalarNaN, ScalarNaN}); + EXPECT_TRUE(match(ScalarUndef, m_Undef())); EXPECT_TRUE(match(VectorUndef, m_Undef())); + EXPECT_TRUE(match(ScalarPoison, m_Undef())); + EXPECT_TRUE(match(VectorPoison, m_Undef())); EXPECT_FALSE(match(ScalarZero, m_Undef())); EXPECT_FALSE(match(VectorZero, m_Undef())); EXPECT_FALSE(match(VectorZeroUndef, m_Undef())); EXPECT_FALSE(match(VectorInfUndef, m_Undef())); EXPECT_FALSE(match(VectorNaNUndef, m_Undef())); + 
EXPECT_FALSE(match(VectorZeroPoison, m_Undef())); + EXPECT_FALSE(match(VectorInfPoison, m_Undef())); + EXPECT_FALSE(match(VectorNaNPoison, m_Undef())); EXPECT_FALSE(match(ScalarUndef, m_AnyZeroFP())); EXPECT_FALSE(match(VectorUndef, m_AnyZeroFP())); + EXPECT_FALSE(match(ScalarPoison, m_AnyZeroFP())); + EXPECT_FALSE(match(VectorPoison, m_AnyZeroFP())); EXPECT_TRUE(match(ScalarZero, m_AnyZeroFP())); EXPECT_TRUE(match(VectorZero, m_AnyZeroFP())); - EXPECT_TRUE(match(VectorZeroUndef, m_AnyZeroFP())); + EXPECT_FALSE(match(VectorZeroUndef, m_AnyZeroFP())); EXPECT_FALSE(match(VectorInfUndef, m_AnyZeroFP())); EXPECT_FALSE(match(VectorNaNUndef, m_AnyZeroFP())); + EXPECT_TRUE(match(VectorZeroPoison, m_AnyZeroFP())); + EXPECT_FALSE(match(VectorInfPoison, m_AnyZeroFP())); + EXPECT_FALSE(match(VectorNaNPoison, m_AnyZeroFP())); EXPECT_FALSE(match(ScalarUndef, m_NaN())); EXPECT_FALSE(match(VectorUndef, m_NaN())); EXPECT_FALSE(match(VectorZeroUndef, m_NaN())); + EXPECT_FALSE(match(ScalarPoison, m_NaN())); + EXPECT_FALSE(match(VectorPoison, m_NaN())); + EXPECT_FALSE(match(VectorZeroPoison, m_NaN())); EXPECT_FALSE(match(ScalarPosInf, m_NaN())); EXPECT_FALSE(match(ScalarNegInf, m_NaN())); EXPECT_TRUE(match(ScalarNaN, m_NaN())); EXPECT_FALSE(match(VectorInfUndef, m_NaN())); - EXPECT_TRUE(match(VectorNaNUndef, m_NaN())); + EXPECT_FALSE(match(VectorNaNUndef, m_NaN())); + EXPECT_FALSE(match(VectorInfPoison, m_NaN())); + EXPECT_TRUE(match(VectorNaNPoison, m_NaN())); EXPECT_FALSE(match(ScalarUndef, m_NonNaN())); EXPECT_FALSE(match(VectorUndef, m_NonNaN())); - EXPECT_TRUE(match(VectorZeroUndef, m_NonNaN())); + EXPECT_FALSE(match(VectorZeroUndef, m_NonNaN())); + EXPECT_FALSE(match(ScalarPoison, m_NonNaN())); + EXPECT_FALSE(match(VectorPoison, m_NonNaN())); + EXPECT_TRUE(match(VectorZeroPoison, m_NonNaN())); EXPECT_TRUE(match(ScalarPosInf, m_NonNaN())); EXPECT_TRUE(match(ScalarNegInf, m_NonNaN())); EXPECT_FALSE(match(ScalarNaN, m_NonNaN())); - EXPECT_TRUE(match(VectorInfUndef, m_NonNaN())); + EXPECT_FALSE(match(VectorInfUndef, m_NonNaN())); EXPECT_FALSE(match(VectorNaNUndef, m_NonNaN())); + EXPECT_TRUE(match(VectorInfPoison, m_NonNaN())); + EXPECT_FALSE(match(VectorNaNPoison, m_NonNaN())); EXPECT_FALSE(match(ScalarUndef, m_Inf())); EXPECT_FALSE(match(VectorUndef, m_Inf())); EXPECT_FALSE(match(VectorZeroUndef, m_Inf())); + EXPECT_FALSE(match(ScalarPoison, m_Inf())); + EXPECT_FALSE(match(VectorPoison, m_Inf())); + EXPECT_FALSE(match(VectorZeroPoison, m_Inf())); EXPECT_TRUE(match(ScalarPosInf, m_Inf())); EXPECT_TRUE(match(ScalarNegInf, m_Inf())); EXPECT_FALSE(match(ScalarNaN, m_Inf())); - EXPECT_TRUE(match(VectorInfUndef, m_Inf())); + EXPECT_FALSE(match(VectorInfUndef, m_Inf())); EXPECT_FALSE(match(VectorNaNUndef, m_Inf())); + EXPECT_TRUE(match(VectorInfPoison, m_Inf())); + EXPECT_FALSE(match(VectorNaNPoison, m_Inf())); EXPECT_FALSE(match(ScalarUndef, m_NonInf())); EXPECT_FALSE(match(VectorUndef, m_NonInf())); - EXPECT_TRUE(match(VectorZeroUndef, m_NonInf())); + EXPECT_FALSE(match(VectorZeroUndef, m_NonInf())); + EXPECT_FALSE(match(ScalarPoison, m_NonInf())); + EXPECT_FALSE(match(VectorPoison, m_NonInf())); + EXPECT_TRUE(match(VectorZeroPoison, m_NonInf())); EXPECT_FALSE(match(ScalarPosInf, m_NonInf())); EXPECT_FALSE(match(ScalarNegInf, m_NonInf())); EXPECT_TRUE(match(ScalarNaN, m_NonInf())); EXPECT_FALSE(match(VectorInfUndef, m_NonInf())); - EXPECT_TRUE(match(VectorNaNUndef, m_NonInf())); + EXPECT_FALSE(match(VectorNaNUndef, m_NonInf())); + EXPECT_FALSE(match(VectorInfPoison, m_NonInf())); + 
EXPECT_TRUE(match(VectorNaNPoison, m_NonInf())); EXPECT_FALSE(match(ScalarUndef, m_Finite())); EXPECT_FALSE(match(VectorUndef, m_Finite())); - EXPECT_TRUE(match(VectorZeroUndef, m_Finite())); + EXPECT_FALSE(match(VectorZeroUndef, m_Finite())); + EXPECT_FALSE(match(ScalarPoison, m_Finite())); + EXPECT_FALSE(match(VectorPoison, m_Finite())); + EXPECT_TRUE(match(VectorZeroPoison, m_Finite())); EXPECT_FALSE(match(ScalarPosInf, m_Finite())); EXPECT_FALSE(match(ScalarNegInf, m_Finite())); EXPECT_FALSE(match(ScalarNaN, m_Finite())); EXPECT_FALSE(match(VectorInfUndef, m_Finite())); EXPECT_FALSE(match(VectorNaNUndef, m_Finite())); + EXPECT_FALSE(match(VectorInfPoison, m_Finite())); + EXPECT_FALSE(match(VectorNaNPoison, m_Finite())); const APFloat *C; // Regardless of whether undefs are allowed, @@ -1707,38 +1768,57 @@ TEST_F(PatternMatchTest, ConstantPredicateType) { Constant *CMixedU32 = ConstantVector::get({CU32Max, CU32Zero, CU32DeadBeef}); Constant *CU32Undef = UndefValue::get(U32Ty); + Constant *CU32Poison = PoisonValue::get(U32Ty); Constant *CU32MaxWithUndef = ConstantVector::get({CU32Undef, CU32Max, CU32Undef}); + Constant *CU32MaxWithPoison = + ConstantVector::get({CU32Poison, CU32Max, CU32Poison}); EXPECT_FALSE(match(CMixedU32, cst_pred_ty())); EXPECT_FALSE(match(CMixedU32, cst_pred_ty())); EXPECT_TRUE(match(CMixedU32, cst_pred_ty>())); EXPECT_FALSE(match(CMixedU32, cst_pred_ty>())); - EXPECT_TRUE(match(CU32MaxWithUndef, cst_pred_ty())); + EXPECT_FALSE(match(CU32MaxWithUndef, cst_pred_ty())); EXPECT_FALSE(match(CU32MaxWithUndef, cst_pred_ty())); - EXPECT_TRUE(match(CU32MaxWithUndef, cst_pred_ty>())); + EXPECT_FALSE(match(CU32MaxWithUndef, cst_pred_ty>())); EXPECT_FALSE( match(CU32MaxWithUndef, cst_pred_ty>())); + EXPECT_TRUE(match(CU32MaxWithPoison, cst_pred_ty())); + EXPECT_FALSE(match(CU32MaxWithPoison, cst_pred_ty())); + EXPECT_TRUE(match(CU32MaxWithPoison, cst_pred_ty>())); + EXPECT_FALSE( + match(CU32MaxWithPoison, cst_pred_ty>())); + // Float arbitrary vector Constant *CMixedF32 = ConstantVector::get({CF32NaN, CF32Zero, CF32Pi}); Constant *CF32Undef = UndefValue::get(F32Ty); + Constant *CF32Poison = PoisonValue::get(F32Ty); Constant *CF32NaNWithUndef = ConstantVector::get({CF32Undef, CF32NaN, CF32Undef}); + Constant *CF32NaNWithPoison = + ConstantVector::get({CF32Poison, CF32NaN, CF32Poison}); EXPECT_FALSE(match(CMixedF32, cstfp_pred_ty())); EXPECT_FALSE(match(CMixedF32, cstfp_pred_ty())); EXPECT_TRUE(match(CMixedF32, cstfp_pred_ty>())); EXPECT_FALSE(match(CMixedF32, cstfp_pred_ty>())); - EXPECT_TRUE(match(CF32NaNWithUndef, cstfp_pred_ty())); + EXPECT_FALSE(match(CF32NaNWithUndef, cstfp_pred_ty())); EXPECT_FALSE(match(CF32NaNWithUndef, cstfp_pred_ty())); - EXPECT_TRUE( + EXPECT_FALSE( match(CF32NaNWithUndef, cstfp_pred_ty>())); EXPECT_FALSE( match(CF32NaNWithUndef, cstfp_pred_ty>())); + + EXPECT_TRUE(match(CF32NaNWithPoison, cstfp_pred_ty())); + EXPECT_FALSE(match(CF32NaNWithPoison, cstfp_pred_ty())); + EXPECT_TRUE( + match(CF32NaNWithPoison, cstfp_pred_ty>())); + EXPECT_FALSE( + match(CF32NaNWithPoison, cstfp_pred_ty>())); } TEST_F(PatternMatchTest, InsertValue) { @@ -1888,35 +1968,44 @@ TEST_F(PatternMatchTest, NotForbidUndef) { Type *ScalarTy = IRB.getInt8Ty(); Type *VectorTy = FixedVectorType::get(ScalarTy, 3); Constant *ScalarUndef = UndefValue::get(ScalarTy); + Constant *ScalarPoison = PoisonValue::get(ScalarTy); Constant *ScalarOnes = Constant::getAllOnesValue(ScalarTy); Constant *VectorZero = Constant::getNullValue(VectorTy); Constant *VectorOnes = 
Constant::getAllOnesValue(VectorTy); - SmallVector MixedElems; - MixedElems.push_back(ScalarOnes); - MixedElems.push_back(ScalarOnes); - MixedElems.push_back(ScalarUndef); - Constant *VectorMixed = ConstantVector::get(MixedElems); + SmallVector MixedElemsUndef; + MixedElemsUndef.push_back(ScalarOnes); + MixedElemsUndef.push_back(ScalarOnes); + MixedElemsUndef.push_back(ScalarUndef); + Constant *VectorMixedUndef = ConstantVector::get(MixedElemsUndef); + + SmallVector MixedElemsPoison; + MixedElemsPoison.push_back(ScalarOnes); + MixedElemsPoison.push_back(ScalarOnes); + MixedElemsPoison.push_back(ScalarPoison); + Constant *VectorMixedPoison = ConstantVector::get(MixedElemsPoison); Value *Not = IRB.CreateXor(VectorZero, VectorOnes); Value *X; - EXPECT_TRUE(match(Not, m_Not(m_Value()))); - EXPECT_TRUE(match(Not, m_NotForbidUndef(m_Value(X)))); + EXPECT_TRUE(match(Not, m_Not(m_Value(X)))); EXPECT_TRUE(match(X, m_Zero())); Value *NotCommute = IRB.CreateXor(VectorOnes, VectorZero); Value *Y; - EXPECT_TRUE(match(NotCommute, m_Not(m_Value()))); - EXPECT_TRUE(match(NotCommute, m_NotForbidUndef(m_Value(Y)))); + EXPECT_TRUE(match(NotCommute, m_Not(m_Value(Y)))); EXPECT_TRUE(match(Y, m_Zero())); - Value *NotWithUndefs = IRB.CreateXor(VectorZero, VectorMixed); - EXPECT_TRUE(match(NotWithUndefs, m_Not(m_Value()))); - EXPECT_FALSE(match(NotWithUndefs, m_NotForbidUndef(m_Value()))); + Value *NotWithUndefs = IRB.CreateXor(VectorZero, VectorMixedUndef); + EXPECT_FALSE(match(NotWithUndefs, m_Not(m_Value()))); + + Value *NotWithPoisons = IRB.CreateXor(VectorZero, VectorMixedPoison); + EXPECT_TRUE(match(NotWithPoisons, m_Not(m_Value()))); + + Value *NotWithUndefsCommute = IRB.CreateXor(VectorMixedUndef, VectorZero); + EXPECT_FALSE(match(NotWithUndefsCommute, m_Not(m_Value()))); - Value *NotWithUndefsCommute = IRB.CreateXor(VectorMixed, VectorZero); - EXPECT_TRUE(match(NotWithUndefsCommute, m_Not(m_Value()))); - EXPECT_FALSE(match(NotWithUndefsCommute, m_NotForbidUndef(m_Value(X)))); + Value *NotWithPoisonsCommute = IRB.CreateXor(VectorMixedPoison, VectorZero); + EXPECT_TRUE(match(NotWithPoisonsCommute, m_Not(m_Value()))); } template struct MutableConstTest : PatternMatchTest { };