diff --git a/llvm/include/llvm/Analysis/ConstantFolding.h b/llvm/include/llvm/Analysis/ConstantFolding.h
index 1b194b07e8678..9b054672b735a 100644
--- a/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -22,6 +22,11 @@
 #include <stdint.h>
 
 namespace llvm {
+
+namespace Intrinsic {
+using ID = unsigned;
+}
+
 class APInt;
 template <typename T> class ArrayRef;
 class CallBase;
@@ -186,6 +191,10 @@ Constant *ConstantFoldCall(const CallBase *Call, Function *F,
                            ArrayRef<Constant *> Operands,
                            const TargetLibraryInfo *TLI = nullptr);
 
+Constant *ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
+                                      Constant *RHS, Type *Ty,
+                                      Instruction *FMFSource);
+
 /// ConstantFoldLoadThroughBitcast - try to cast constant to destination type
 /// returning null if unsuccessful. Can cast pointer to pointer or pointer to
 /// integer and vice versa if their sizes are equal.
diff --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
index 23e2ea80e8cbe..8a3269d6add0e 100644
--- a/llvm/include/llvm/Analysis/InstSimplifyFolder.h
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -117,6 +117,12 @@ class InstSimplifyFolder final : public IRBuilderFolder {
     return simplifyCastInst(Op, V, DestTy, SQ);
   }
 
+  Value *FoldBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Type *Ty,
+                             Instruction *FMFSource) const override {
+    return simplifyBinaryIntrinsic(ID, Ty, LHS, RHS, SQ,
+                                   dyn_cast_if_present<CallBase>(FMFSource));
+  }
+
   //===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   //===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/Analysis/InstructionSimplify.h b/llvm/include/llvm/Analysis/InstructionSimplify.h
index a29955a06cf4e..03d7ad12c12d8 100644
--- a/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -186,6 +186,11 @@ Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
 Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
                         const SimplifyQuery &Q);
 
+/// Given operands for a BinaryIntrinsic, fold the result or return null.
+Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0,
+                               Value *Op1, const SimplifyQuery &Q,
+                               const CallBase *Call);
+
 /// Given operands for a ShuffleVectorInst, fold the result or return null.
 /// See class ShuffleVectorInst for a description of the mask representation.
 Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
diff --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index 978e1002515fc..b4105ad76c02e 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -191,6 +191,15 @@ class TargetFolder final : public IRBuilderFolder {
     return nullptr;
   }
 
+  Value *FoldBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Type *Ty,
+                             Instruction *FMFSource) const override {
+    auto *C1 = dyn_cast<Constant>(LHS);
+    auto *C2 = dyn_cast<Constant>(RHS);
+    if (C1 && C2)
+      return ConstantFoldBinaryIntrinsic(ID, C1, C2, Ty, FMFSource);
+    return nullptr;
+  }
+
   //===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   //===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/ConstantFolder.h b/llvm/include/llvm/IR/ConstantFolder.h
index c2b30a65e32e2..3e74a563a5842 100644
--- a/llvm/include/llvm/IR/ConstantFolder.h
+++ b/llvm/include/llvm/IR/ConstantFolder.h
@@ -18,8 +18,8 @@
 
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/Constants.h"
 #include "llvm/IR/ConstantFold.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/IRBuilderFolder.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Operator.h"
@@ -89,7 +89,7 @@ class ConstantFolder final : public IRBuilderFolder {
   }
 
   Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
-                      FastMathFlags FMF) const override {
+                     FastMathFlags FMF) const override {
     if (Constant *C = dyn_cast<Constant>(V))
       return ConstantFoldUnaryInstruction(Opc, C);
     return nullptr;
@@ -183,6 +183,12 @@ class ConstantFolder final : public IRBuilderFolder {
     return nullptr;
   }
 
+  Value *FoldBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Type *Ty,
+                             Instruction *FMFSource) const override {
+    // Use TargetFolder or InstSimplifyFolder instead.
+    return nullptr;
+  }
+
   //===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   //===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index f2922311097e9..c07ffea711511 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -962,9 +962,9 @@ class IRBuilderBase {
 
   /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
   /// first type.
-  CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
-                                  Instruction *FMFSource = nullptr,
-                                  const Twine &Name = "");
+  Value *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
+                               Instruction *FMFSource = nullptr,
+                               const Twine &Name = "");
 
   /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types. If
   /// \p FMFSource is provided, copy fast-math-flags from that instruction to
@@ -983,7 +983,7 @@
                             const Twine &Name = "");
 
   /// Create call to the minnum intrinsic.
-  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
+  Value *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
     if (IsFPConstrained) {
       return CreateConstrainedFPUnroundedBinOp(
           Intrinsic::experimental_constrained_minnum, LHS, RHS, nullptr, Name);
@@ -993,7 +993,7 @@
   }
 
   /// Create call to the maxnum intrinsic.
-  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
+  Value *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
     if (IsFPConstrained) {
       return CreateConstrainedFPUnroundedBinOp(
           Intrinsic::experimental_constrained_maxnum, LHS, RHS, nullptr, Name);
@@ -1003,19 +1003,19 @@
   }
 
   /// Create call to the minimum intrinsic.
-  CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
+  Value *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
     return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
   }
 
   /// Create call to the maximum intrinsic.
-  CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
+  Value *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
     return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
   }
 
   /// Create call to the copysign intrinsic.
-  CallInst *CreateCopySign(Value *LHS, Value *RHS,
-                           Instruction *FMFSource = nullptr,
-                           const Twine &Name = "") {
+  Value *CreateCopySign(Value *LHS, Value *RHS,
+                        Instruction *FMFSource = nullptr,
+                        const Twine &Name = "") {
     return CreateBinaryIntrinsic(Intrinsic::copysign, LHS, RHS, FMFSource,
                                  Name);
   }
diff --git a/llvm/include/llvm/IR/IRBuilderFolder.h b/llvm/include/llvm/IR/IRBuilderFolder.h
index bd2324dfc5f1b..3020f2684ee45 100644
--- a/llvm/include/llvm/IR/IRBuilderFolder.h
+++ b/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -73,6 +73,10 @@ class IRBuilderFolder {
   virtual Value *FoldCast(Instruction::CastOps Op, Value *V,
                           Type *DestTy) const = 0;
 
+  virtual Value *
+  FoldBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Type *Ty,
+                      Instruction *FMFSource = nullptr) const = 0;
+
   //===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   //===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/NoFolder.h b/llvm/include/llvm/IR/NoFolder.h
index a612f98465aea..7bb5d5e696e9e 100644
--- a/llvm/include/llvm/IR/NoFolder.h
+++ b/llvm/include/llvm/IR/NoFolder.h
@@ -112,6 +112,11 @@ class NoFolder final : public IRBuilderFolder {
     return nullptr;
   }
 
+  Value *FoldBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Type *Ty,
+                             Instruction *FMFSource) const override {
+    return nullptr;
+  }
+
   //===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   //===--------------------------------------------------------------------===//
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 90da3390eab32..0551a988a9789 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -2529,12 +2529,73 @@ static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
   return nullptr;
 }
 
-static Constant *ConstantFoldScalarCall2(StringRef Name,
-                                         Intrinsic::ID IntrinsicID,
-                                         Type *Ty,
-                                         ArrayRef<Constant *> Operands,
-                                         const TargetLibraryInfo *TLI,
-                                         const CallBase *Call) {
+static Constant *ConstantFoldLibCall2(StringRef Name, Type *Ty,
+                                      ArrayRef<Constant *> Operands,
+                                      const TargetLibraryInfo *TLI) {
+  if (!TLI)
+    return nullptr;
+
+  LibFunc Func = NotLibFunc;
+  if (!TLI->getLibFunc(Name, Func))
+    return nullptr;
+
+  const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
+  if (!Op1)
+    return nullptr;
+
+  const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
+  if (!Op2)
+    return nullptr;
+
+  const APFloat &Op1V = Op1->getValueAPF();
+  const APFloat &Op2V = Op2->getValueAPF();
+
+  switch (Func) {
+  default:
+    break;
+  case LibFunc_pow:
+  case LibFunc_powf:
+  case LibFunc_pow_finite:
+  case LibFunc_powf_finite:
+    if (TLI->has(Func))
+      return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
+    break;
+  case LibFunc_fmod:
+  case LibFunc_fmodf:
+    if (TLI->has(Func)) {
+      APFloat V = Op1->getValueAPF();
+      if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
+        return ConstantFP::get(Ty->getContext(), V);
+    }
+    break;
+  case LibFunc_remainder:
+  case LibFunc_remainderf:
+    if (TLI->has(Func)) {
+      APFloat V = Op1->getValueAPF();
+      if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
+        return ConstantFP::get(Ty->getContext(), V);
+    }
+    break;
+  case LibFunc_atan2:
+  case LibFunc_atan2f:
+    // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
+    // (Solaris), so we do not assume a known result for that.
+    if (Op1V.isZero() && Op2V.isZero())
+      return nullptr;
+    [[fallthrough]];
+  case LibFunc_atan2_finite:
+  case LibFunc_atan2f_finite:
+    if (TLI->has(Func))
+      return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
+    break;
+  }
+
+  return nullptr;
+}
+
+static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty,
+                                            ArrayRef<Constant *> Operands,
+                                            const CallBase *Call) {
   assert(Operands.size() == 2 && "Wrong number of operands.");
 
   if (Ty->isFloatingPointTy()) {
@@ -2564,7 +2625,8 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
        return nullptr;
      const APFloat &Op2V = Op2->getValueAPF();
 
-      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
+      if (const auto *ConstrIntr =
+              dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
        APFloat Res = Op1V;
        APFloat::opStatus St;
@@ -2627,52 +2689,6 @@ static Constant *ConstantFoldScalarCall2(StringRef Name,
        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
      }
 
-      if (!TLI)
-        return nullptr;
-
-      LibFunc Func = NotLibFunc;
-      if (!TLI->getLibFunc(Name, Func))
-        return nullptr;
-
-      switch (Func) {
-      default:
-        break;
-      case LibFunc_pow:
-      case LibFunc_powf:
-      case LibFunc_pow_finite:
-      case LibFunc_powf_finite:
-        if (TLI->has(Func))
-          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
-        break;
-      case LibFunc_fmod:
-      case LibFunc_fmodf:
-        if (TLI->has(Func)) {
-          APFloat V = Op1->getValueAPF();
-          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
-            return ConstantFP::get(Ty->getContext(), V);
-        }
-        break;
-      case LibFunc_remainder:
-      case LibFunc_remainderf:
-        if (TLI->has(Func)) {
-          APFloat V = Op1->getValueAPF();
-          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
-            return ConstantFP::get(Ty->getContext(), V);
-        }
-        break;
-      case LibFunc_atan2:
-      case LibFunc_atan2f:
-        // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
-        // (Solaris), so we do not assume a known result for that.
-        if (Op1V.isZero() && Op2V.isZero())
-          return nullptr;
-        [[fallthrough]];
-      case LibFunc_atan2_finite:
-      case LibFunc_atan2f_finite:
-        if (TLI->has(Func))
-          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
-        break;
-      }
   } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
     switch (IntrinsicID) {
     case Intrinsic::ldexp: {
@@ -3163,8 +3179,13 @@ static Constant *ConstantFoldScalarCall(StringRef Name,
   if (Operands.size() == 1)
     return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
 
-  if (Operands.size() == 2)
-    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
+  if (Operands.size() == 2) {
+    if (Constant *FoldedLibCall =
+            ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
+      return FoldedLibCall;
+    }
+    return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
+  }
 
   if (Operands.size() == 3)
     return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
@@ -3371,6 +3392,13 @@ ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
 
 } // end anonymous namespace
 
+Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
+                                            Constant *RHS, Type *Ty,
+                                            Instruction *FMFSource) {
+  return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
+                                    dyn_cast_if_present<CallBase>(FMFSource));
+}
+
 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                  ArrayRef<Constant *> Operands,
                                  const TargetLibraryInfo *TLI) {
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 51e258d69e9e2..51f390db424cd 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6374,11 +6374,10 @@ static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
   return nullptr;
 }
 
-static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
-                                      const SimplifyQuery &Q,
-                                      const CallBase *Call) {
-  Intrinsic::ID IID = F->getIntrinsicID();
-  Type *ReturnType = F->getReturnType();
+Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
+                                     Value *Op0, Value *Op1,
+                                     const SimplifyQuery &Q,
+                                     const CallBase *Call) {
   unsigned BitWidth = ReturnType->getScalarSizeInBits();
   switch (IID) {
   case Intrinsic::abs:
@@ -6636,19 +6635,21 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
     // float, if the ninf flag is set.
     const APFloat *C;
     if (match(Op1, m_APFloat(C)) &&
-        (C->isInfinity() || (Call->hasNoInfs() && C->isLargest()))) {
+        (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) {
       // minnum(X, -inf) -> -inf
       // maxnum(X, +inf) -> +inf
       // minimum(X, -inf) -> -inf if nnan
       // maximum(X, +inf) -> +inf if nnan
-      if (C->isNegative() == IsMin && (!PropagateNaN || Call->hasNoNaNs()))
+      if (C->isNegative() == IsMin &&
+          (!PropagateNaN || (Call && Call->hasNoNaNs())))
         return ConstantFP::get(ReturnType, *C);
 
       // minnum(X, +inf) -> X if nnan
       // maxnum(X, -inf) -> X if nnan
       // minimum(X, +inf) -> X
       // maximum(X, -inf) -> X
-      if (C->isNegative() != IsMin && (PropagateNaN || Call->hasNoNaNs()))
+      if (C->isNegative() != IsMin &&
+          (PropagateNaN || (Call && Call->hasNoNaNs())))
         return Op0;
     }
 
@@ -6662,8 +6663,6 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
     break;
   }
   case Intrinsic::vector_extract: {
-    Type *ReturnType = F->getReturnType();
-
     // (extract_vector (insert_vector _, X, 0), 0) -> X
     unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
     Value *X = nullptr;
@@ -6710,7 +6709,8 @@ static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
     return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
 
   if (NumOperands == 2)
-    return simplifyBinaryIntrinsic(F, Args[0], Args[1], Q, Call);
+    return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
+                                   Call);
 
   // Handle intrinsics with 3 or more arguments.
   switch (IID) {
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index b09b80f95871a..d6746d1d43824 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -918,12 +918,14 @@ CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
   return createCallHelper(Fn, {V}, Name, FMFSource);
 }
 
-CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
-                                               Value *RHS,
-                                               Instruction *FMFSource,
-                                               const Twine &Name) {
+Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
+                                            Value *RHS, Instruction *FMFSource,
+                                            const Twine &Name) {
   Module *M = BB->getModule();
   Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
+  if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
+                                            FMFSource))
+    return V;
   return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
 }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index fb829fab0a2c1..5b7fa13f2e835 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -767,19 +767,21 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
     // Checking for NaN before canonicalization provides better fidelity when
     // mapping other operations onto fmed3 since the order of operands is
     // unchanged.
-    CallInst *NewCall = nullptr;
+    Value *V = nullptr;
     if (match(Src0, PatternMatch::m_NaN()) || isa<UndefValue>(Src0)) {
-      NewCall = IC.Builder.CreateMinNum(Src1, Src2);
+      V = IC.Builder.CreateMinNum(Src1, Src2);
     } else if (match(Src1, PatternMatch::m_NaN()) || isa<UndefValue>(Src1)) {
-      NewCall = IC.Builder.CreateMinNum(Src0, Src2);
+      V = IC.Builder.CreateMinNum(Src0, Src2);
     } else if (match(Src2, PatternMatch::m_NaN()) || isa<UndefValue>(Src2)) {
-      NewCall = IC.Builder.CreateMaxNum(Src0, Src1);
+      V = IC.Builder.CreateMaxNum(Src0, Src1);
     }
 
-    if (NewCall) {
-      NewCall->copyFastMathFlags(&II);
-      NewCall->takeName(&II);
-      return IC.replaceInstUsesWith(II, NewCall);
+    if (V) {
+      if (auto *CI = dyn_cast<CallInst>(V)) {
+        CI->copyFastMathFlags(&II);
+        CI->takeName(&II);
+      }
+      return IC.replaceInstUsesWith(II, V);
     }
 
     bool Swap = false;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index ed5d44757fbeb..de19872fdd792 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2257,13 +2257,14 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       default:
         llvm_unreachable("unexpected intrinsic ID");
       }
-      Instruction *NewCall = Builder.CreateBinaryIntrinsic(
+      Value *V = Builder.CreateBinaryIntrinsic(
           IID, X, ConstantFP::get(Arg0->getType(), Res), II);
       // TODO: Conservatively intersecting FMF. If Res == C2, the transform
       //       was a simplification (so Arg0 and its original flags could
       //       propagate?)
-      NewCall->andIRFlags(M);
-      return replaceInstUsesWith(*II, NewCall);
+      if (auto *CI = dyn_cast<CallInst>(V))
+        CI->andIRFlags(M);
+      return replaceInstUsesWith(*II, V);
     }
   }
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 527037881edb1..915ed865ab5d3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1191,7 +1191,7 @@ static Value *canonicalizeSPF(ICmpInst &Cmp, Value *TrueVal, Value *FalseVal,
                      match(RHS, m_NSWNeg(m_Specific(LHS)));
     Constant *IntMinIsPoisonC =
         ConstantInt::get(Type::getInt1Ty(Cmp.getContext()), IntMinIsPoison);
-    Instruction *Abs =
+    Value *Abs =
         IC.Builder.CreateBinaryIntrinsic(Intrinsic::abs, LHS, IntMinIsPoisonC);
 
     if (SPF == SelectPatternFlavor::SPF_NABS)
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index c0b7298f78005..bad857dec5deb 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -14131,13 +14131,13 @@ class HorizontalReduction {
       if (IsConstant)
         return ConstantFP::get(LHS->getType(),
                                maximum(cast<ConstantFP>(LHS)->getValueAPF(),
-                                           cast<ConstantFP>(RHS)->getValueAPF()));
+                                       cast<ConstantFP>(RHS)->getValueAPF()));
       return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS);
     case RecurKind::FMinimum:
       if (IsConstant)
         return ConstantFP::get(LHS->getType(),
                                minimum(cast<ConstantFP>(LHS)->getValueAPF(),
-                                           cast<ConstantFP>(RHS)->getValueAPF()));
+                                       cast<ConstantFP>(RHS)->getValueAPF()));
       return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS);
     case RecurKind::SMax:
       if (IsConstant || UseSelect) {
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index d15ff9dd51a4c..74ef72f42d905 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -57,7 +57,7 @@ TEST_F(IRBuilderTest, Intrinsics) {
   IRBuilder<> Builder(BB);
   Value *V;
   Instruction *I;
-  CallInst *Call;
+  Value *Result;
   IntrinsicInst *II;
 
   V = Builder.CreateLoad(GV->getValueType(), GV);
@@ -65,78 +65,80 @@
   I->setHasNoInfs(true);
   I->setHasNoNaNs(false);
 
-  Call = Builder.CreateMinNum(V, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateMinNum(V, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::minnum);
 
-  Call = Builder.CreateMaxNum(V, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateMaxNum(V, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::maxnum);
 
-  Call = Builder.CreateMinimum(V, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateMinimum(V, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::minimum);
 
-  Call = Builder.CreateMaximum(V, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateMaximum(V, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::maximum);
 
-  Call = Builder.CreateIntrinsic(Intrinsic::readcyclecounter, {}, {});
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateIntrinsic(Intrinsic::readcyclecounter, {}, {});
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::readcyclecounter);
 
-  Call = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::fabs);
   EXPECT_FALSE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, V, I);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, V, I);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::fabs);
   EXPECT_TRUE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateBinaryIntrinsic(Intrinsic::pow, V, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateBinaryIntrinsic(Intrinsic::pow, V, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::pow);
   EXPECT_FALSE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateBinaryIntrinsic(Intrinsic::pow, V, V, I);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateBinaryIntrinsic(Intrinsic::pow, V, V, I);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::pow);
   EXPECT_TRUE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateIntrinsic(Intrinsic::fma, {V->getType()}, {V, V, V});
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateIntrinsic(Intrinsic::fma, {V->getType()}, {V, V, V});
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::fma);
   EXPECT_FALSE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateIntrinsic(Intrinsic::fma, {V->getType()}, {V, V, V}, I);
-  II = cast<IntrinsicInst>(Call);
+  Result =
+      Builder.CreateIntrinsic(Intrinsic::fma, {V->getType()}, {V, V, V}, I);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::fma);
   EXPECT_TRUE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateIntrinsic(Intrinsic::fma, {V->getType()}, {V, V, V}, I);
-  II = cast<IntrinsicInst>(Call);
+  Result =
+      Builder.CreateIntrinsic(Intrinsic::fma, {V->getType()}, {V, V, V}, I);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::fma);
   EXPECT_TRUE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateUnaryIntrinsic(Intrinsic::roundeven, V);
-  II = cast<IntrinsicInst>(Call);
+  Result = Builder.CreateUnaryIntrinsic(Intrinsic::roundeven, V);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::roundeven);
   EXPECT_FALSE(II->hasNoInfs());
   EXPECT_FALSE(II->hasNoNaNs());
 
-  Call = Builder.CreateIntrinsic(
+  Result = Builder.CreateIntrinsic(
       Intrinsic::set_rounding, {},
       {Builder.getInt32(static_cast<int>(RoundingMode::TowardZero))});
-  II = cast<IntrinsicInst>(Call);
+  II = cast<IntrinsicInst>(Result);
   EXPECT_EQ(II->getIntrinsicID(), Intrinsic::set_rounding);
 }
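
Not part of the patch above, just an illustrative usage sketch of what the new folder hook changes for callers. It assumes a caller-provided basic block and operands (the helper name `buildSMax` is hypothetical): with a folding builder such as `TargetFolder` (or `InstSimplifyFolder`), `CreateBinaryIntrinsic` can now hand back a folded `Constant` or an existing `Value` instead of a `CallInst`, so instruction-only operations (fast-math flags, `takeName`, etc.) need a `dyn_cast` guard, mirroring what the AMDGPU and InstCombine hunks do. The default `ConstantFolder` and `NoFolder` return nullptr from `FoldBinaryIntrinsic` and keep emitting the call as before.

```cpp
// Sketch only; not taken from the diff. Assumes the usual LLVM headers and a
// caller that supplies BB, A and B.
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

Value *buildSMax(BasicBlock *BB, Value *A, Value *B) {
  Module *M = BB->getModule();
  // TargetFolder and InstSimplifyFolder implement FoldBinaryIntrinsic(); the
  // default ConstantFolder still returns nullptr and always creates a call.
  IRBuilder<TargetFolder> Builder(BB->getContext(),
                                  TargetFolder(M->getDataLayout()));
  Builder.SetInsertPoint(BB);

  // With two constant operands this folds to a ConstantInt instead of
  // materializing a call to llvm.smax.
  Value *Res = Builder.CreateBinaryIntrinsic(Intrinsic::smax, A, B);

  // The result is only an instruction when no folding happened, so guard any
  // instruction-only API behind a dyn_cast.
  if (auto *CI = dyn_cast<CallInst>(Res))
    CI->setName("smax");
  return Res;
}
```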