From 33de6aa8b234febb21e3611950446ff551cadde7 Mon Sep 17 00:00:00 2001
From: Younan Zhang
Date: Wed, 12 Nov 2025 20:10:35 +0800
Subject: [PATCH 01/34] [Clang][NFC] Fix a typo in FunctionDecl (#167677)

Found it while looking at other stuff.
---
 clang/include/clang/AST/Decl.h                 | 2 +-
 clang/lib/AST/Decl.cpp                         | 2 +-
 clang/lib/Sema/SemaDeclCXX.cpp                 | 2 +-
 clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 2 +-
 clang/lib/Serialization/ASTWriterDecl.cpp      | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 406d79ebd6641..ee2321dd158d4 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -2335,7 +2335,7 @@ class FunctionDecl : public DeclaratorDecl,
   }

   void setDefaultedOrDeletedInfo(DefaultedOrDeletedFunctionInfo *Info);
-  DefaultedOrDeletedFunctionInfo *getDefalutedOrDeletedInfo() const;
+  DefaultedOrDeletedFunctionInfo *getDefaultedOrDeletedInfo() const;

   /// Whether this function is variadic.
   bool isVariadic() const;
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 8579e51e45697..eff2b81d61a50 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3180,7 +3180,7 @@ void FunctionDecl::DefaultedOrDeletedFunctionInfo::setDeletedMessage(
 }

 FunctionDecl::DefaultedOrDeletedFunctionInfo *
-FunctionDecl::getDefalutedOrDeletedInfo() const {
+FunctionDecl::getDefaultedOrDeletedInfo() const {
   return FunctionDeclBits.HasDefaultedOrDeletedInfo ? DefaultedOrDeletedInfo
                                                     : nullptr;
 }
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index d41ab126c426f..8030aac3d8771 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -8035,7 +8035,7 @@ class DefaultedComparisonVisitor {
   DefaultedComparisonVisitor(Sema &S, CXXRecordDecl *RD, FunctionDecl *FD,
                              DefaultedComparisonKind DCK)
       : S(S), RD(RD), FD(FD), DCK(DCK) {
-    if (auto *Info = FD->getDefalutedOrDeletedInfo()) {
+    if (auto *Info = FD->getDefaultedOrDeletedInfo()) {
       // FIXME: Change CreateOverloadedBinOp to take an ArrayRef instead of an
       // UnresolvedSet to avoid this copy.
       Fns.assign(Info->getUnqualifiedLookups().begin(),
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index a56017cd7b7e7..1b6b559c1227b 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -5465,7 +5465,7 @@ TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
 bool TemplateDeclInstantiator::SubstDefaultedFunction(FunctionDecl *New,
                                                       FunctionDecl *Tmpl) {
   // Transfer across any unqualified lookups.
-  if (auto *DFI = Tmpl->getDefalutedOrDeletedInfo()) {
+  if (auto *DFI = Tmpl->getDefaultedOrDeletedInfo()) {
     SmallVector Lookups;
     Lookups.reserve(DFI->getUnqualifiedLookups().size());
     bool AnyChanged = false;
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index a8c487005f6ec..c9f8797ab973f 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -903,7 +903,7 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
     Record.push_back(D->getODRHash());

   if (D->isDefaulted() || D->isDeletedAsWritten()) {
-    if (auto *FDI = D->getDefalutedOrDeletedInfo()) {
+    if (auto *FDI = D->getDefaultedOrDeletedInfo()) {
       // Store both that there is a DefaultedOrDeletedInfo and whether it
       // contains a DeletedMessage.
      StringLiteral *DeletedMessage = FDI->getDeletedMessage();

From 07cd105416561e1b1406173d73314c60c8dc961f Mon Sep 17 00:00:00 2001
From: Sander de Smalen
Date: Wed, 12 Nov 2025 12:06:51 +0000
Subject: [PATCH 02/34] [AArch64] Add 'REQUIRES: asserts' to
 regalloc-hint-movprfx.mir

This should fix the buildbot failure reported here:
https://lab.llvm.org/buildbot/#/builders/11/builds/27869
---
 llvm/test/CodeGen/AArch64/regalloc-hint-movprfx.mir | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/test/CodeGen/AArch64/regalloc-hint-movprfx.mir b/llvm/test/CodeGen/AArch64/regalloc-hint-movprfx.mir
index c2d8f8e73772d..05f583e2e692f 100644
--- a/llvm/test/CodeGen/AArch64/regalloc-hint-movprfx.mir
+++ b/llvm/test/CodeGen/AArch64/regalloc-hint-movprfx.mir
@@ -1,4 +1,5 @@
 # RUN: llc -mtriple=aarch64 -mattr=+sve -start-before=greedy -stop-after=virtregrewriter -debug-only=regalloc %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=DBG
+# REQUIRES: asserts
 # Check that the register allocator gets hints to reuse registers of one of its operands.
 ---

From 7838dbee3a307cd8bd129ee8dbb998209133bffe Mon Sep 17 00:00:00 2001
From: Jack Styles
Date: Wed, 12 Nov 2025 13:15:34 +0000
Subject: [PATCH 03/34] [Flang][OpenMP] Add Lowering support for Collapse with
 Taskloop (#166791)

Support for lowering collapse already exists within `genLoopNestClauses`,
which is called when lowering taskloop. However, the TODO message still
included the Collapse clause, so that lowering path was never reached.
Removing the clause from the TODO enables lowering of the Collapse clause
in taskloop.
---
 flang/lib/Lower/OpenMP/OpenMP.cpp             |  2 --
 .../Lower/OpenMP/Todo/taskloop-collapse.f90   | 15 --------
 flang/test/Lower/OpenMP/taskloop-collapse.f90 | 34 +++++++++++++++++++
 flang/test/Semantics/OpenMP/taskloop04.f90    | 15 ++++++++
 4 files changed, 49 insertions(+), 17 deletions(-)
 delete mode 100644 flang/test/Lower/OpenMP/Todo/taskloop-collapse.f90
 create mode 100644 flang/test/Lower/OpenMP/taskloop-collapse.f90
 create mode 100644 flang/test/Semantics/OpenMP/taskloop04.f90

diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 6e9f0716a02fe..4048aeea37b92 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -1781,8 +1781,6 @@ static void genTaskloopClauses(
   cp.processPriority(stmtCtx, clauseOps);
   cp.processReduction(loc, clauseOps, reductionSyms);
   cp.processUntied(clauseOps);
-
-  cp.processTODO<clause::Collapse>(loc, llvm::omp::Directive::OMPD_taskloop);
 }

 static void genTaskwaitClauses(lower::AbstractConverter &converter,
diff --git a/flang/test/Lower/OpenMP/Todo/taskloop-collapse.f90 b/flang/test/Lower/OpenMP/Todo/taskloop-collapse.f90
deleted file mode 100644
index cd54f5eeba6c4..0000000000000
--- a/flang/test/Lower/OpenMP/Todo/taskloop-collapse.f90
+++ /dev/null
@@ -1,15 +0,0 @@
-! RUN: %not_todo_cmd bbc -emit-fir -fopenmp -o - %s 2>&1 | FileCheck %s
-! RUN: %not_todo_cmd %flang_fc1 -emit-fir -fopenmp -o - %s 2>&1 | FileCheck %s
-
-! CHECK: not yet implemented: Unhandled clause COLLAPSE in TASKLOOP construct
-subroutine omp_taskloop_collapse()
-  integer x
-  x = 0
-  !$omp taskloop collapse(2)
-  do i = 1, 100
-    do j = 1, 100
-      x = x + 1
-    end do
-  end do
-  !$omp end taskloop
-end subroutine omp_taskloop_collapse
diff --git a/flang/test/Lower/OpenMP/taskloop-collapse.f90 b/flang/test/Lower/OpenMP/taskloop-collapse.f90
new file mode 100644
index 0000000000000..48243640d07b9
--- /dev/null
+++ b/flang/test/Lower/OpenMP/taskloop-collapse.f90
@@ -0,0 +1,34 @@
+! Test the collapse clause when being used with the taskloop construct
+! RUN: bbc -emit-hlfir -fopenmp -fopenmp-version=45 %s -o - 2>&1 | FileCheck %s
+! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=45 %s -o - 2>&1 | FileCheck %s
+
+! CHECK-LABEL: omp.private
+! CHECK-SAME: {type = private} @[[J_PRIVATE:.*]] : i32
+! CHECK-LABEL: omp.private
+! CHECK-SAME: {type = private} @[[I_PRIVATE:.*]] : i32
+! CHECK-LABEL: omp.private
+! CHECK-SAME: {type = firstprivate} @[[SUM_FIRSTPRIVATE:.*]] : i32 copy
+
+! CHECK-LABEL: func.func @_QPtest()
+! CHECK: %[[ALLOCA_I:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFtestEi"}
+! CHECK: %[[DECLARE_I:.*]]:2 = hlfir.declare %1 {uniq_name = "_QFtestEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[ALLOCA_J:.*]] = fir.alloca i32 {bindc_name = "j", uniq_name = "_QFtestEj"}
+! CHECK: %[[DECLARE_J:.*]]:2 = hlfir.declare %3 {uniq_name = "_QFtestEj"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[ALLOCA_SUM:.*]] = fir.alloca i32 {bindc_name = "sum", uniq_name = "_QFtestEsum"}
+! CHECK: %[[DECLARE_SUM:.*]]:2 = hlfir.declare %5 {uniq_name = "_QFtestEsum"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+
+subroutine test()
+  integer :: i, j, sum
+
+  !$omp taskloop collapse(2)
+  ! CHECK-LABEL: omp.taskloop
+  ! CHECK-SAME: private(@_QFtestEsum_firstprivate_i32 %[[DECLARE_SUM]]#0 -> %arg0, @_QFtestEi_private_i32 %[[DECLARE_I]]#0 -> %arg1, @_QFtestEj_private_i32 %[[DECLARE_J]]#0 -> %arg2 : !fir.ref<i32>, !fir.ref<i32>, !fir.ref<i32>)
+  ! CHECK-LABEL: omp.loop_nest
+  ! CHECK-SAME: (%arg3, %arg4) : i32 = (%c1_i32, %c1_i32_1) to (%c10_i32, %c5_i32) inclusive step (%c1_i32_0, %c1_i32_2) collapse(2)
+  do i = 1, 10
+    do j = 1, 5
+      sum = sum + i + j
+    end do
+  end do
+  !$omp end taskloop
+end subroutine
diff --git a/flang/test/Semantics/OpenMP/taskloop04.f90 b/flang/test/Semantics/OpenMP/taskloop04.f90
new file mode 100644
index 0000000000000..4ffcf84f708e9
--- /dev/null
+++ b/flang/test/Semantics/OpenMP/taskloop04.f90
@@ -0,0 +1,15 @@
+! When lowering Taskloop, it is possible for the TileSizes clause to be lowered, but this is not a supported clause.
+! We should make sure that any use of TileSizes with Taskloop is correctly rejected by the Semantics.
+! RUN: %python %S/../test_errors.py %s %flang -fopenmp
+
+subroutine test
+  integer :: i, sum
+
+  !ERROR: TILE cannot follow TASKLOOP
+  !ERROR: SIZES clause is not allowed on the TASKLOOP directive
+  !$omp taskloop tile sizes(2)
+  do i=1,10
+    sum = sum + i
+  end do
+  !$omp end taskloop
+end subroutine

From c5eb7eb3dcd51415adf804ca45eeb719cbab4351 Mon Sep 17 00:00:00 2001
From: Krzysztof Parzyszek
Date: Wed, 12 Nov 2025 08:03:32 -0600
Subject: [PATCH 04/34] [OpenMP] Add more comments to
 `ConstructDecompositionT.h`, NFC (#167564)

---
 .../Frontend/OpenMP/ConstructDecompositionT.h | 24 +++++++++++--------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h
index d702273cec9ec..3918cecfc1e65 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h
@@ -68,17 +68,16 @@ find_unique(Container &&container, Predicate &&pred) {

 namespace tomp {

-// ClauseType - Either instance of ClauseT, or a type derived from ClauseT.
-//
-// This is the clause representation in the code using this infrastructure.
-
-// HelperType - A class that implements two member functions:
-
+// ClauseType: Either an instance of ClauseT, or a type derived from ClauseT.
+// This is the clause representation in the code using this infrastructure.
+//
+// HelperType: A class that implements two member functions:
 //   // Return the base object of the given object, if any.
 //   std::optional<Object> getBaseObject(const Object &object) const
 //   // Return the iteration variable of the outermost loop associated
 //   // with the construct being worked on, if any.
 //   std::optional<Object> getLoopIterVar() const
+
 template <typename ClauseType, typename HelperType>
 struct ConstructDecompositionT {
   using ClauseTy = ClauseType;
@@ -181,27 +180,32 @@ struct ConstructDecompositionT {
   std::enable_if_t<llvm::remove_cvref_t<U>::UnionTrait::value, void>
   addClauseSymsToMap(U &&item, const ClauseTy *);

-  // Apply a clause to the only directive that allows it. If there are no
+  // Apply the clause to the only directive that allows it. If there are no
   // directives that allow it, or if there is more than one, do not apply
   // anything and return false, otherwise return true.
   bool applyToUnique(const ClauseTy *node);

-  // Apply a clause to the first directive in given range that allows it.
+  // Apply the clause to the first directive in the given range that allows it.
   // If such a directive does not exist, return false, otherwise return true.
   template <typename Iterator>
   bool applyToFirst(const ClauseTy *node, llvm::iterator_range<Iterator> range);

-  // Apply a clause to the innermost directive that allows it. If such a
+  // Apply the clause to the innermost directive that allows it. If such a
   // directive does not exist, return false, otherwise return true.
   bool applyToInnermost(const ClauseTy *node);

-  // Apply a clause to the outermost directive that allows it. If such a
+  // Apply the clause to the outermost directive that allows it. If such a
   // directive does not exist, return false, otherwise return true.
   bool applyToOutermost(const ClauseTy *node);

+  // Apply the clause to all directives that allow it, and which satisfy
+  // the predicate: bool shouldApply(LeafReprInternal). If no such
+  // directives exist, return false, otherwise return true.
   template <typename Predicate>
   bool applyIf(const ClauseTy *node, Predicate shouldApply);

+  // Apply the clause to all directives that allow it. If no such directives
+  // exist, return false, otherwise return true.
   bool applyToAll(const ClauseTy *node);

   template
@@ -983,7 +987,7 @@ bool ConstructDecompositionT::applyClause(
     return dir == llvm::omp::Directive::OMPD_simd ||
            llvm::is_contained(getWorksharingLoop(), dir);
   case ReductionModifier::Task:
-    if (alreadyApplied)
+    if (alreadyApplied) // Not an error
       return false;
     // According to [5.2:135:16-18], "task" only applies to "parallel" and
     // worksharing constructs.

From 0f4dc93608059469df7606b4bd0b2834f9035d54 Mon Sep 17 00:00:00 2001
From: Michael Buch
Date: Wed, 12 Nov 2025 14:05:59 +0000
Subject: [PATCH 05/34] [lldb][Language] Pass SymbolNameFitsToLanguage
 parameter by const-ref (#167684)

We've been seeing (rare) crashes from both
`CPlusPlusLanguage::SymbolNameFitsToLanguage` and
`ObjCLanguage::SymbolNameFitsToLanguage` when we try to read the contents
of the `ConstString`s of the `Mangled` parameter. I'm not entirely sure
how that can happen (the current theory is a corrupted stack that somehow
overwrites `ConstString::m_string` with an invalid pointer), but I'm not
able to confirm that. One thing these crashes have in common is that they
all operate on the `Mangled` object that was copied into
`SymbolNameFitsToLanguage` by value.
While I can't see off the top of my head why that would cause it to
contain uninitialized/corrupt `ConstString`s, the class is large enough
that it should probably be passed by `const &` anyway. This is what this
patch does.

rdar://164519648
---
 lldb/include/lldb/Target/Language.h                          | 4 +++-
 lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp | 2 +-
 lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h   | 2 +-
 lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp           | 2 +-
 lldb/source/Plugins/Language/ObjC/ObjCLanguage.h             | 2 +-
 5 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/lldb/include/lldb/Target/Language.h b/lldb/include/lldb/Target/Language.h
index 9958b6ea2f815..9292f790333a1 100644
--- a/lldb/include/lldb/Target/Language.h
+++ b/lldb/include/lldb/Target/Language.h
@@ -318,7 +318,9 @@ class Language : public PluginInterface {
   ///
   /// This function should only return true if there is a high confidence
   /// that the name actually belongs to this language.
-  virtual bool SymbolNameFitsToLanguage(Mangled name) const { return false; }
+  virtual bool SymbolNameFitsToLanguage(const Mangled &name) const {
+    return false;
+  }

   /// An individual data formatter may apply to several types and cross language
   /// boundaries. Each of those languages may want to customize the display of
diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
index a2199cb65cd35..e935ea8fab813 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp
@@ -103,7 +103,7 @@ CPlusPlusLanguage::GetFunctionNameInfo(ConstString name) const {
   return {func_name_type, ConstString(basename)};
 }

-bool CPlusPlusLanguage::SymbolNameFitsToLanguage(Mangled mangled) const {
+bool CPlusPlusLanguage::SymbolNameFitsToLanguage(const Mangled &mangled) const {
   auto mangling_scheme =
       Mangled::GetManglingScheme(mangled.GetMangledName().GetStringRef());
   return mangling_scheme == Mangled::eManglingSchemeItanium ||
diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h
index 9a528ca7b03f9..13d436a68c691 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h
+++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.h
@@ -92,7 +92,7 @@ class CPlusPlusLanguage : public Language {

   static llvm::StringRef GetPluginNameStatic() { return "cplusplus"; }

-  bool SymbolNameFitsToLanguage(Mangled mangled) const override;
+  bool SymbolNameFitsToLanguage(const Mangled &mangled) const override;

   bool DemangledNameContainsPath(llvm::StringRef path,
                                  ConstString demangled) const override;
diff --git a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp
index 3b8e21cbb9269..5e31faccb315c 100644
--- a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp
+++ b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp
@@ -235,7 +235,7 @@ ObjCLanguage::GetFunctionNameInfo(ConstString name) const {
   return {func_name_type, std::nullopt};
 }

-bool ObjCLanguage::SymbolNameFitsToLanguage(Mangled mangled) const {
+bool ObjCLanguage::SymbolNameFitsToLanguage(const Mangled &mangled) const {
   ConstString demangled_name = mangled.GetDemangledName();
   if (!demangled_name)
     return false;
diff --git a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h
index a68ea41c723de..729230dc2dccb 100644
--- a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h
+++ b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h
@@ -145,7 +145,7 @@ class ObjCLanguage : public Language {
   std::pair<lldb::FunctionNameType, std::optional<ConstString>>
   GetFunctionNameInfo(ConstString name) const override;

-  bool SymbolNameFitsToLanguage(Mangled mangled) const override;
+  bool SymbolNameFitsToLanguage(const Mangled &mangled) const override;

   lldb::TypeCategoryImplSP GetFormatters() override;


From f6cf44ac2d3ebc4b2c785c202ac4f33b7533d45f Mon Sep 17 00:00:00 2001
From: Michael Buch
Date: Wed, 12 Nov 2025 14:22:15 +0000
Subject: [PATCH 06/34] [lldb][ObjC][NFC] Rewrite IsPossibleObjCMethodName in
 terms of llvm::StringRef (#167660)

We've seen some crashes around this area (particularly around
checking/handling raw C-strings). Dealing with `StringRef`s makes it a
bit easier to reason about. This doesn't fix anything per se, but is an
improvement in readability.

rdar://164519648
---
 .../Plugins/Language/ObjC/ObjCLanguage.cpp    |  7 +++
 .../Plugins/Language/ObjC/ObjCLanguage.h      |  8 +---
 .../Language/ObjC/ObjCLanguageTest.cpp        | 43 +++++++++++++++++++
 3 files changed, 51 insertions(+), 7 deletions(-)

diff --git a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp
index 5e31faccb315c..c0dcb958ad85f 100644
--- a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp
+++ b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.cpp
@@ -1065,3 +1065,10 @@ ObjCLanguage::GetBooleanFromString(llvm::StringRef str) const {
       .Case("NO", {false})
       .Default({});
 }
+
+bool ObjCLanguage::IsPossibleObjCMethodName(llvm::StringRef name) {
+  if (!name.starts_with("-[") && !name.starts_with("+["))
+    return false;
+
+  return name.ends_with("]");
+}
diff --git a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h
index 729230dc2dccb..ced6bd3290a86 100644
--- a/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h
+++ b/lldb/source/Plugins/Language/ObjC/ObjCLanguage.h
@@ -175,13 +175,7 @@ class ObjCLanguage : public Language {

   static llvm::StringRef GetPluginNameStatic() { return "objc"; }

-  static bool IsPossibleObjCMethodName(const char *name) {
-    if (!name)
-      return false;
-    bool starts_right = (name[0] == '+' || name[0] == '-') && name[1] == '[';
-    bool ends_right = (name[strlen(name) - 1] == ']');
-    return (starts_right && ends_right);
-  }
+  static bool IsPossibleObjCMethodName(llvm::StringRef name);

   static bool IsPossibleObjCSelector(const char *name) {
     if (!name)
diff --git a/lldb/unittests/Language/ObjC/ObjCLanguageTest.cpp b/lldb/unittests/Language/ObjC/ObjCLanguageTest.cpp
index 70baa7e6bc135..4b018a29f3587 100644
--- a/lldb/unittests/Language/ObjC/ObjCLanguageTest.cpp
+++ b/lldb/unittests/Language/ObjC/ObjCLanguageTest.cpp
@@ -112,3 +112,46 @@ TEST(ObjCLanguage, InvalidMethodNameParsing) {
     EXPECT_FALSE(lax_method.has_value());
   }
 }
+
+struct ObjCMethodTestCase {
+  llvm::StringRef name;
+  bool is_valid;
+};
+
+struct ObjCMethodNameTextFiture
+    : public testing::TestWithParam<ObjCMethodTestCase> {};
+
+static ObjCMethodTestCase g_objc_method_name_test_cases[] = {
+    {"", false},
+    {"+[Uh oh!", false},
+    {"-[Definitely not...", false},
+    {"[Nice try ] :)", false},
+    {"+MaybeIfYouSquintYourEyes]", false},
+    {"?[Tricky]", false},
+    {"[]", false},
+    {"-[a", false},
+    {"+[a", false},
+    {"-]a]", false},
+    {"+]a]", false},
+
+    // FIXME: should these count as valid?
+ {"+[]", true}, + {"-[]", true}, + {"-[[]", true}, + {"+[[]", true}, + {"+[a ]", true}, + {"-[a ]", true}, + + // Valid names + {"+[a a]", true}, + {"-[a a]", true}, +}; + +TEST_P(ObjCMethodNameTextFiture, TestIsPossibleObjCMethodName) { + // Tests ObjCLanguage::IsPossibleObjCMethodName + auto [name, expect_valid] = GetParam(); + EXPECT_EQ(ObjCLanguage::IsPossibleObjCMethodName(name), expect_valid); +} + +INSTANTIATE_TEST_SUITE_P(ObjCMethodNameTests, ObjCMethodNameTextFiture, + testing::ValuesIn(g_objc_method_name_test_cases)); From 448146d6479cdfd6b7c80cb33dbaed882dead2f1 Mon Sep 17 00:00:00 2001 From: Maxime Arthaud Date: Wed, 12 Nov 2025 15:36:56 +0100 Subject: [PATCH 07/34] [llvm-c] Add bindings for DbgRecord (#166383) In the LLVM-C library, there is currently no way to get information about a DbgRecord - which is the new way to attach debug information to llvm instructions. We can only iterate on debug records with LLVMGetFirstDbgRecord/ LLVMGetLastDbgRecord/LLVMGetNextDbgRecord, but there is no way to read information. This PR adds utility functions to read DbgRecord information. --- llvm/include/llvm-c/Core.h | 38 ++++++++++++++++++++++++++++++ llvm/lib/IR/Core.cpp | 31 ++++++++++++++++++++++++ llvm/tools/llvm-c-test/debuginfo.c | 37 +++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+) diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h index 4e380d9bd5969..83dd1eba876e6 100644 --- a/llvm/include/llvm-c/Core.h +++ b/llvm/include/llvm-c/Core.h @@ -531,6 +531,13 @@ enum { */ typedef unsigned LLVMGEPNoWrapFlags; +typedef enum { + LLVMDbgRecordLabel, + LLVMDbgRecordDeclare, + LLVMDbgRecordValue, + LLVMDbgRecordAssign, +} LLVMDbgRecordKind; + /** * @} */ @@ -3896,6 +3903,37 @@ LLVM_C_ABI LLVMDbgRecordRef LLVMGetNextDbgRecord(LLVMDbgRecordRef DbgRecord); LLVM_C_ABI LLVMDbgRecordRef LLVMGetPreviousDbgRecord(LLVMDbgRecordRef DbgRecord); +/** + * Get the debug location attached to the debug record. + * + * @see llvm::DbgRecord::getDebugLoc() + */ +LLVMMetadataRef LLVMDbgRecordGetDebugLoc(LLVMDbgRecordRef Rec); + +LLVMDbgRecordKind LLVMDbgRecordGetKind(LLVMDbgRecordRef Rec); + +/** + * Get the value of the DbgVariableRecord. + * + * @see llvm::DbgVariableRecord::getValue() + */ +LLVMValueRef LLVMDbgVariableRecordGetValue(LLVMDbgRecordRef Rec, + unsigned OpIdx); + +/** + * Get the debug info variable of the DbgVariableRecord. + * + * @see llvm::DbgVariableRecord::getVariable() + */ +LLVMMetadataRef LLVMDbgVariableRecordGetVariable(LLVMDbgRecordRef Rec); + +/** + * Get the debug info expression of the DbgVariableRecord. 
+ *
+ * @see llvm::DbgVariableRecord::getExpression()
+ */
+LLVMMetadataRef LLVMDbgVariableRecordGetExpression(LLVMDbgRecordRef Rec);
+
 /**
  * @defgroup LLVMCCoreValueInstructionCall Call Sites and Invocations
  *
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 27d8294b01264..604730e0d3004 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3036,6 +3036,37 @@ LLVMDbgRecordRef LLVMGetPreviousDbgRecord(LLVMDbgRecordRef Rec) {
   return wrap(&*--I);
 }

+LLVMMetadataRef LLVMDbgRecordGetDebugLoc(LLVMDbgRecordRef Rec) {
+  return wrap(unwrap(Rec)->getDebugLoc().getAsMDNode());
+}
+
+LLVMDbgRecordKind LLVMDbgRecordGetKind(LLVMDbgRecordRef Rec) {
+  DbgRecord *Record = unwrap(Rec);
+  if (isa<DbgLabelRecord>(Record))
+    return LLVMDbgRecordLabel;
+  DbgVariableRecord *VariableRecord = dyn_cast<DbgVariableRecord>(Record);
+  assert(VariableRecord && "unexpected record");
+  if (VariableRecord->isDbgDeclare())
+    return LLVMDbgRecordDeclare;
+  if (VariableRecord->isDbgValue())
+    return LLVMDbgRecordValue;
+  assert(VariableRecord->isDbgAssign() && "unexpected record");
+  return LLVMDbgRecordAssign;
+}
+
+LLVMValueRef LLVMDbgVariableRecordGetValue(LLVMDbgRecordRef Rec,
+                                           unsigned OpIdx) {
+  return wrap(unwrap<DbgVariableRecord>(Rec)->getValue(OpIdx));
+}
+
+LLVMMetadataRef LLVMDbgVariableRecordGetVariable(LLVMDbgRecordRef Rec) {
+  return wrap(unwrap<DbgVariableRecord>(Rec)->getRawVariable());
+}
+
+LLVMMetadataRef LLVMDbgVariableRecordGetExpression(LLVMDbgRecordRef Rec) {
+  return wrap(unwrap<DbgVariableRecord>(Rec)->getRawExpression());
+}
+
 unsigned LLVMGetNumArgOperands(LLVMValueRef Instr) {
   if (FuncletPadInst *FPI = dyn_cast<FuncletPadInst>(unwrap(Instr))) {
     return FPI->arg_size();
diff --git a/llvm/tools/llvm-c-test/debuginfo.c b/llvm/tools/llvm-c-test/debuginfo.c
index 9db7aa0929aab..677722fea1a98 100644
--- a/llvm/tools/llvm-c-test/debuginfo.c
+++ b/llvm/tools/llvm-c-test/debuginfo.c
@@ -364,6 +364,43 @@ int llvm_test_dibuilder(void) {
   assert(AddDbgRecordUnderTheRange == NULL);
   (void)AddDbgRecordUnderTheRange;

+  // Test that we can read the first debug record.
+  LLVMMetadataRef AddDbgRecordFirstDebugLoc =
+      LLVMDbgRecordGetDebugLoc(AddDbgRecordFirst);
+  (void)AddDbgRecordFirstDebugLoc;
+  assert(LLVMDILocationGetLine(AddDbgRecordFirstDebugLoc) == 43);
+  assert(LLVMDbgRecordGetKind(AddDbgRecordFirst) == LLVMDbgRecordValue);
+  LLVMValueRef AddDbgRecordFirstValue =
+      LLVMDbgVariableRecordGetValue(AddDbgRecordFirst, 0);
+  (void)AddDbgRecordFirstValue;
+  assert(LLVMGetValueKind(AddDbgRecordFirstValue) == LLVMConstantIntValueKind);
+  assert(LLVMConstIntGetZExtValue(AddDbgRecordFirstValue) == 0);
+  LLVMMetadataRef AddDbgRecordFirstVariable =
+      LLVMDbgVariableRecordGetVariable(AddDbgRecordFirst);
+  (void)AddDbgRecordFirstVariable;
+  assert(LLVMGetMetadataKind(AddDbgRecordFirstVariable) ==
+         LLVMDILocalVariableMetadataKind);
+  // TODO: For now, there is no way to get the name.
+ LLVMMetadataRef AddDbgRecordFirstVariableScope = + LLVMDIVariableGetScope(AddDbgRecordFirstVariable); + (void)AddDbgRecordFirstVariableScope; + assert(LLVMGetMetadataKind(AddDbgRecordFirstVariableScope) == + LLVMDILexicalBlockMetadataKind); + LLVMMetadataRef AddDbgRecordFirstVariableFile = + LLVMDIScopeGetFile(AddDbgRecordFirstVariableScope); + (void)AddDbgRecordFirstVariableFile; + assert(LLVMGetMetadataKind(AddDbgRecordFirstVariableFile) == + LLVMDIFileMetadataKind); + unsigned FileLen = 0; + assert(strcmp(LLVMDIFileGetFilename(AddDbgRecordFirstVariableFile, &FileLen), + "debuginfo.c") == 0); + (void)FileLen; + LLVMMetadataRef AddDbgRecordFirstExpr = + LLVMDbgVariableRecordGetExpression(AddDbgRecordFirst); + assert(LLVMGetMetadataKind(AddDbgRecordFirstExpr) == + LLVMDIExpressionMetadataKind); + (void)AddDbgRecordFirstExpr; + char *MStr = LLVMPrintModuleToString(M); puts(MStr); LLVMDisposeMessage(MStr); From 1f58cbe60a01d4651cec3ada480ab8b63586b680 Mon Sep 17 00:00:00 2001 From: Chaitanya Koparkar Date: Wed, 12 Nov 2025 09:38:21 -0500 Subject: [PATCH 08/34] [DAG] Fold (umin (sub a b) a) -> (usubo a b); (select usubo.1 a usubo.0) (#161651) Fixes #161036. --- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 19 +++ .../umin-sub-to-usubo-select-combine.ll | 151 +++++++++++++++++ .../X86/umin-sub-to-usubo-select-combine.ll | 156 ++++++++++++++++++ 3 files changed, 326 insertions(+) create mode 100644 llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll create mode 100644 llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index df353c4d91b1a..d9d3a3ec01757 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -6219,6 +6219,25 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) { SDLoc(N), VT, N0, N1)) return SD; + if (TLI.isOperationLegalOrCustom(ISD::USUBO, VT) && + !TLI.isOperationLegalOrCustom(ISD::UMIN, VT)) { + SDValue B; + + // (umin (sub a, b), a) -> (usubo a, b); (select usubo.1, a, usubo.0) + if (sd_match(N0, m_Sub(m_Specific(N1), m_Value(B)))) { + SDVTList VTs = DAG.getVTList(VT, getSetCCResultType(VT)); + SDValue USO = DAG.getNode(ISD::USUBO, DL, VTs, N1, B); + return DAG.getSelect(DL, VT, USO.getValue(1), N1, USO.getValue(0)); + } + + // (umin a, (sub a, b)) -> (usubo a, b); (select usubo.1, a, usubo.0) + if (sd_match(N1, m_Sub(m_Specific(N0), m_Value(B)))) { + SDVTList VTs = DAG.getVTList(VT, getSetCCResultType(VT)); + SDValue USO = DAG.getNode(ISD::USUBO, DL, VTs, N0, B); + return DAG.getSelect(DL, VT, USO.getValue(1), N0, USO.getValue(0)); + } + } + // Simplify the operands using demanded-bits information. 
if (SimplifyDemandedBits(SDValue(N, 0))) return SDValue(N, 0); diff --git a/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll new file mode 100644 index 0000000000000..fe3eee06db65e --- /dev/null +++ b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll @@ -0,0 +1,151 @@ +; RUN: llc < %s -mtriple=aarch64 | FileCheck %s + +; GitHub issue #161036 + +; Positive test : umin(sub(a,b),a) with scalar types should be folded +define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64 +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: csel x0, x0, x8, lo +; CHECK-NEXT: ret + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : umin(a,sub(a,b)) with scalar types should be folded +define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64_commute +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: csel x0, x0, x8, lo +; CHECK-NEXT: ret + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub) + ret i64 %cond +} + +; Positive test : multi-use is OK since the sub instruction still runs once +define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i64_multi_use +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: csel x0, x0, x8, lo +; CHECK-NEXT: str x8, [x2] +; CHECK-NEXT: ret + %sub = sub i64 %a, %b + store i64 %sub, ptr addrspace(1) %ptr + %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32 +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: csel w0, w0, w8, lo +; CHECK-NEXT: ret + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32_commute +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: csel w0, w0, w8, lo +; CHECK-NEXT: ret + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i32_multi_use +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: csel w0, w0, w8, lo +; CHECK-NEXT: str w8, [x2] +; CHECK-NEXT: ret + %sub = sub i32 %a, %b + store i32 %sub, ptr addrspace(1) %ptr + %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Negative test : i16 +define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16 +; CHECK-LABEL: %bb.0: +; CHECK-LABEL: sub w8, w0, w1 +; CHECK-LABEL: and w9, w0, #0xffff +; CHECK-LABEL: and w8, w8, #0xffff +; CHECK-LABEL: cmp w8, w9 +; CHECK-LABEL: csel w0, w8, w9, lo +; CHECK-LABEL: ret + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + +; Negative test : i16 +define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16_commute +; CHECK-LABEL: %bb.0: +; CHECK-LABEL: sub w8, w0, w1 +; CHECK-LABEL: and w9, w0, #0xffff +; CHECK-LABEL: and w8, w8, #0xffff +; CHECK-LABEL: cmp 
w9, w8 +; CHECK-LABEL: csel w0, w9, w8, lo +; CHECK-LABEL: ret + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub) + ret i16 %cond +} + +; Negative test : i16 +define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i16_multi_use +; CHECK-LABEL: %bb.0: +; CHECK-LABEL: sub w8, w0, w1 +; CHECK-LABEL: and w9, w0, #0xffff +; CHECK-LABEL: and w10, w8, #0xffff +; CHECK-LABEL: strh w8, [x2] +; CHECK-LABEL: cmp w10, w9 +; CHECK-LABEL: csel w0, w10, w9, lo +; CHECK-LABEL: ret + %sub = sub i16 %a, %b + store i16 %sub, ptr addrspace(1) %ptr + %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + +; Negative test, vector types : umin(sub(a,b),a) but with vectors +define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: underflow_compare_dontfold_vectors +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: sub v1.16b, v0.16b, v1.16b +; CHECK-NEXT: umin v0.16b, v1.16b, v0.16b +; CHECK-NEXT: ret + %sub = sub <16 x i8> %a, %b + %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a) + ret <16 x i8> %cond +} + +; Negative test, pattern mismatch : umin(add(a,b),a) +define i64 @umin_add(i64 %a, i64 %b) { +; CHECK-LABEL: umin_add +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: add x8, x0, x1 +; CHECK-NEXT: cmp x8, x0 +; CHECK-NEXT: csel x0, x8, x0, lo +; CHECK-NEXT: ret + %add = add i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a) + ret i64 %cond +} diff --git a/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll new file mode 100644 index 0000000000000..e9756b411eb2c --- /dev/null +++ b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll @@ -0,0 +1,156 @@ +; RUN: llc < %s -mtriple=x86_64 | FileCheck %s + +; GitHub issue #161036 + +; Positive test : umin(sub(a,b),a) with scalar types should be folded +define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64 +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: subq %rsi, %rax +; CHECK-NEXT: cmovbq %rdi, %rax +; CHECK-NEXT: retq + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : umin(a,sub(a,b)) with scalar types should be folded +define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64_commute +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: subq %rsi, %rax +; CHECK-NEXT: cmovbq %rdi, %rax +; CHECK-NEXT: retq + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub) + ret i64 %cond +} + +; Positive test : multi-use is OK since the sub instruction still runs once +define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i64_multi_use +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: subq %rsi, %rax +; CHECK-NEXT: movq %rax, (%rdx) +; CHECK-NEXT: cmovbq %rdi, %rax +; CHECK-NEXT: retq + %sub = sub i64 %a, %b + store i64 %sub, ptr addrspace(1) %ptr + %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32 +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: retq + %sub = sub i32 %a, %b + %cond = tail call 
i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32_commute +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: retq + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i32_multi_use +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: movl %eax, (%rdx) +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: retq + %sub = sub i32 %a, %b + store i32 %sub, ptr addrspace(1) %ptr + %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Positive test : i16 +define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16 +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subw %si, %ax +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax +; CHECK-NEXT: retq + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + +; Positive test : i16 +define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16_commute +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subw %si, %ax +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax +; CHECK-NEXT: retq + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub) + ret i16 %cond +} + +; Positive test : i16 +define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i16_multi_use +; CHECK-LABEL: %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subw %si, %ax +; CHECK-NEXT: movw %ax, (%rdx) +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax +; CHECK-NEXT: retq + %sub = sub i16 %a, %b + store i16 %sub, ptr addrspace(1) %ptr + %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + + +; Negative test, vector types : umin(sub(a,b),a) but with vectors +define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: underflow_compare_dontfold_vectors +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: movdqa %xmm0, %xmm2 +; CHECK-NEXT: psubb %xmm1, %xmm2 +; CHECK-NEXT: pminub %xmm2, %xmm0 +; CHECK-NEXT: retq + %sub = sub <16 x i8> %a, %b + %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a) + ret <16 x i8> %cond +} + +; Negative test, pattern mismatch : umin(add(a,b),a) +define i64 @umin_add(i64 %a, i64 %b) { +; CHECK-LABEL: umin_add +; CHECK-LABEL: %bb.0 +; CHECK-NEXT: leaq (%rsi,%rdi), %rax +; CHECK-NEXT: cmpq %rdi, %rax +; CHECK-NEXT: cmovaeq %rdi, %rax +; CHECK-NEXT: retq + %add = add i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a) + ret i64 %cond +} From aca28f10da54526a7955f62f090273b588888b6d Mon Sep 17 00:00:00 2001 From: Nikolas Klauser Date: Wed, 12 Nov 2025 15:53:37 +0100 Subject: [PATCH 09/34] [libc++] Optimize __tree copy/move constructor/assignment with allocator (#163558) This patch applies the same optimization as implemented in #151304 to the overloads taking an allocator as the second argument. 
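For illustration, these are the user-facing constructors this affects (a
minimal usage sketch, not part of the patch; the allocator-extended
copy/move constructors below now hit the structural copy/move path
instead of inserting element by element):

```cpp
#include <map>
#include <memory_resource>
#include <string>
#include <utility>

int main() {
  std::pmr::monotonic_buffer_resource pool;
  std::pmr::map<int, std::string> src{{1, "one"}, {2, "two"}};

  // Copy construction with a (possibly different) allocator.
  std::pmr::map<int, std::string> copy(src, &pool);

  // Move construction with a non-equal allocator: the nodes cannot be
  // stolen wholesale, so the tree is rebuilt -- now node-for-node rather
  // than via repeated ordered insertion.
  std::pmr::map<int, std::string> moved(std::move(src), &pool);
}
```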
Apple M4: ``` Benchmark old new Difference % Difference ----------------------------------------------------------- -------------- -------------- ------------ -------------- std::map::ctor(&&,_different_allocs)/0 14.59 12.78 -1.81 -12.41% std::map::ctor(&&,_different_allocs)/1024 16407.05 6265.11 -10141.94 -61.81% std::map::ctor(&&,_different_allocs)/32 395.99 199.76 -196.23 -49.56% std::map::ctor(&&,_different_allocs)/8192 141478.67 53767.84 -87710.83 -62.00% std::map::ctor(const&,_alloc)/0 12.83 12.71 -0.12 -0.94% std::map::ctor(const&,_alloc)/1024 9979.71 7849.11 -2130.59 -21.35% std::map::ctor(const&,_alloc)/32 283.82 266.05 -17.77 -6.26% std::map::ctor(const&,_alloc)/8192 81418.63 63190.41 -18228.21 -22.39% std::map::ctor(&&,_different_allocs)/0 14.58 12.68 -1.90 -13.00% std::map::ctor(&&,_different_allocs)/1024 19513.56 7806.04 -11707.52 -60.00% std::map::ctor(&&,_different_allocs)/32 477.80 247.28 -230.52 -48.25% std::map::ctor(&&,_different_allocs)/8192 504558.78 69592.21 -434966.56 -86.21% std::map::ctor(const&,_alloc)/0 12.64 12.60 -0.04 -0.33% std::map::ctor(const&,_alloc)/1024 43198.53 37220.54 -5977.99 -13.84% std::map::ctor(const&,_alloc)/32 928.39 867.03 -61.36 -6.61% std::map::ctor(const&,_alloc)/8192 461313.81 389200.82 -72112.99 -15.63% ``` --- libcxx/include/__tree | 156 +++++++++++------- libcxx/include/map | 39 +---- libcxx/include/set | 40 +---- .../associative_container_benchmarks.h | 59 +++++++ .../containers/associative/flat_map.bench.cpp | 8 + .../associative/flat_multimap.bench.cpp | 8 + .../containers/associative/map.bench.cpp | 3 + .../containers/associative/multimap.bench.cpp | 3 + .../containers/associative/multiset.bench.cpp | 3 + .../containers/associative/set.bench.cpp | 3 + .../associative/unordered_map.bench.cpp | 3 + .../associative/unordered_multimap.bench.cpp | 3 + .../associative/unordered_multiset.bench.cpp | 3 + .../associative/unordered_set.bench.cpp | 3 + 14 files changed, 202 insertions(+), 132 deletions(-) diff --git a/libcxx/include/__tree b/libcxx/include/__tree index 694796922c914..ceae22bb48702 100644 --- a/libcxx/include/__tree +++ b/libcxx/include/__tree @@ -887,6 +887,18 @@ public: } _LIBCPP_HIDE_FROM_ABI __tree(const __tree& __t); + + _LIBCPP_HIDE_FROM_ABI __tree(const __tree& __other, const allocator_type& __alloc) + : __begin_node_(__end_node()), __node_alloc_(__alloc), __size_(0), __value_comp_(__other.value_comp()) { + if (__other.size() == 0) + return; + + *__root_ptr() = static_cast<__node_base_pointer>(__copy_construct_tree(__other.__root())); + __root()->__parent_ = __end_node(); + __begin_node_ = static_cast<__end_node_pointer>(std::__tree_min(__end_node()->__left_)); + __size_ = __other.size(); + } + _LIBCPP_HIDE_FROM_ABI __tree& operator=(const __tree& __t); template _LIBCPP_HIDE_FROM_ABI void __assign_unique(_ForwardIterator __first, _ForwardIterator __last); @@ -995,27 +1007,6 @@ public: std::forward<_Args>(__args)...); } - template , int> = 0> - _LIBCPP_HIDE_FROM_ABI void - __insert_unique_from_orphaned_node(const_iterator __p, __get_node_value_type_t<_Tp>&& __value) { - __emplace_hint_unique(__p, const_cast(__value.first), std::move(__value.second)); - } - - template , int> = 0> - _LIBCPP_HIDE_FROM_ABI void __insert_unique_from_orphaned_node(const_iterator __p, _Tp&& __value) { - __emplace_hint_unique(__p, std::move(__value)); - } - - template , int> = 0> - _LIBCPP_HIDE_FROM_ABI void __insert_multi_from_orphaned_node(const_iterator __p, value_type&& __value) { - __emplace_hint_multi(__p, const_cast(__value.first), 
std::move(__value.second)); - } - - template , int> = 0> - _LIBCPP_HIDE_FROM_ABI void __insert_multi_from_orphaned_node(const_iterator __p, _Tp&& __value) { - __emplace_hint_multi(__p, std::move(__value)); - } - template _LIBCPP_HIDE_FROM_ABI void __insert_range_multi(_InIter __first, _Sent __last) { if (__first == __last) @@ -1388,19 +1379,19 @@ private: // copy the exact structure 1:1. Since this is for copy construction _only_ we know that we get a correct tree. If we // didn't get a correct tree, the invariants of __tree are broken and we have a much bigger problem than an improperly // balanced tree. + template #ifdef _LIBCPP_COMPILER_CLANG_BASED // FIXME: GCC complains about not being able to always_inline a recursive function _LIBCPP_HIDE_FROM_ABI #endif - __node_pointer - __copy_construct_tree(__node_pointer __src) { + __node_pointer __construct_from_tree(__node_pointer __src, _NodeConstructor __construct) { if (!__src) return nullptr; - __node_holder __new_node = __construct_node(__src->__get_value()); + __node_holder __new_node = __construct(__src->__get_value()); unique_ptr<__node, __tree_deleter> __left( - __copy_construct_tree(static_cast<__node_pointer>(__src->__left_)), __node_alloc_); - __node_pointer __right = __copy_construct_tree(static_cast<__node_pointer>(__src->__right_)); + __construct_from_tree(static_cast<__node_pointer>(__src->__left_), __construct), __node_alloc_); + __node_pointer __right = __construct_from_tree(static_cast<__node_pointer>(__src->__right_), __construct); __node_pointer __new_node_ptr = __new_node.release(); @@ -1414,46 +1405,85 @@ private: return __new_node_ptr; } + _LIBCPP_HIDE_FROM_ABI __node_pointer __copy_construct_tree(__node_pointer __src) { + return __construct_from_tree(__src, [this](const value_type& __val) { return __construct_node(__val); }); + } + + template , int> = 0> + _LIBCPP_HIDE_FROM_ABI __node_pointer __move_construct_tree(__node_pointer __src) { + return __construct_from_tree(__src, [this](value_type& __val) { + return __construct_node(const_cast(__val.first), std::move(__val.second)); + }); + } + + template , int> = 0> + _LIBCPP_HIDE_FROM_ABI __node_pointer __move_construct_tree(__node_pointer __src) { + return __construct_from_tree(__src, [this](value_type& __val) { return __construct_node(std::move(__val)); }); + } + + template // This copy assignment will always produce a correct red-black-tree assuming the incoming tree is correct, since our // own tree is a red-black-tree and the incoming tree is a red-black-tree. The invariants of a red-black-tree are // temporarily not met until all of the incoming red-black tree is copied. 
#ifdef _LIBCPP_COMPILER_CLANG_BASED // FIXME: GCC complains about not being able to always_inline a recursive function _LIBCPP_HIDE_FROM_ABI #endif - __node_pointer - __copy_assign_tree(__node_pointer __dest, __node_pointer __src) { + __node_pointer __assign_from_tree( + __node_pointer __dest, __node_pointer __src, _Assignment __assign, _ConstructionAlg __construct_subtree) { if (!__src) { destroy(__dest); return nullptr; } - __assign_value(__dest->__get_value(), __src->__get_value()); + __assign(__dest->__get_value(), __src->__get_value()); __dest->__is_black_ = __src->__is_black_; // If we already have a left node in the destination tree, reuse it and copy-assign recursively if (__dest->__left_) { - __dest->__left_ = static_cast<__node_base_pointer>(__copy_assign_tree( - static_cast<__node_pointer>(__dest->__left_), static_cast<__node_pointer>(__src->__left_))); + __dest->__left_ = static_cast<__node_base_pointer>(__assign_from_tree( + static_cast<__node_pointer>(__dest->__left_), + static_cast<__node_pointer>(__src->__left_), + __assign, + __construct_subtree)); // Otherwise, we must create new nodes; copy-construct from here on } else if (__src->__left_) { - auto __new_left = __copy_construct_tree(static_cast<__node_pointer>(__src->__left_)); + auto __new_left = __construct_subtree(static_cast<__node_pointer>(__src->__left_)); __dest->__left_ = static_cast<__node_base_pointer>(__new_left); __new_left->__parent_ = static_cast<__end_node_pointer>(__dest); } // Identical to the left case above, just for the right nodes if (__dest->__right_) { - __dest->__right_ = static_cast<__node_base_pointer>(__copy_assign_tree( - static_cast<__node_pointer>(__dest->__right_), static_cast<__node_pointer>(__src->__right_))); + __dest->__right_ = static_cast<__node_base_pointer>(__assign_from_tree( + static_cast<__node_pointer>(__dest->__right_), + static_cast<__node_pointer>(__src->__right_), + __assign, + __construct_subtree)); } else if (__src->__right_) { - auto __new_right = __copy_construct_tree(static_cast<__node_pointer>(__src->__right_)); + auto __new_right = __construct_subtree(static_cast<__node_pointer>(__src->__right_)); __dest->__right_ = static_cast<__node_base_pointer>(__new_right); __new_right->__parent_ = static_cast<__end_node_pointer>(__dest); } return __dest; } + + _LIBCPP_HIDE_FROM_ABI __node_pointer __copy_assign_tree(__node_pointer __dest, __node_pointer __src) { + return __assign_from_tree( + __dest, + __src, + [](value_type& __lhs, const value_type& __rhs) { __assign_value(__lhs, __rhs); }, + [this](__node_pointer __nd) { return __copy_construct_tree(__nd); }); + } + + _LIBCPP_HIDE_FROM_ABI __node_pointer __move_assign_tree(__node_pointer __dest, __node_pointer __src) { + return __assign_from_tree( + __dest, + __src, + [](value_type& __lhs, value_type& __rhs) { __assign_value(__lhs, std::move(__rhs)); }, + [this](__node_pointer __nd) { return __move_construct_tree(__nd); }); + } }; // Precondition: __size_ != 0 @@ -1594,21 +1624,26 @@ __tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t) _NOEXCEPT_( template __tree<_Tp, _Compare, _Allocator>::__tree(__tree&& __t, const allocator_type& __a) - : __node_alloc_(__node_allocator(__a)), __size_(0), __value_comp_(std::move(__t.value_comp())) { + : __begin_node_(__end_node()), + __node_alloc_(__node_allocator(__a)), + __size_(0), + __value_comp_(std::move(__t.value_comp())) { + if (__t.size() == 0) + return; if (__a == __t.__alloc()) { - if (__t.__size_ == 0) - __begin_node_ = __end_node(); - else { - __begin_node_ = 
__t.__begin_node_; - __end_node()->__left_ = __t.__end_node()->__left_; - __end_node()->__left_->__parent_ = static_cast<__end_node_pointer>(__end_node()); - __size_ = __t.__size_; - __t.__begin_node_ = __t.__end_node(); - __t.__end_node()->__left_ = nullptr; - __t.__size_ = 0; - } + __begin_node_ = __t.__begin_node_; + __end_node()->__left_ = __t.__end_node()->__left_; + __end_node()->__left_->__parent_ = static_cast<__end_node_pointer>(__end_node()); + __size_ = __t.__size_; + __t.__begin_node_ = __t.__end_node(); + __t.__end_node()->__left_ = nullptr; + __t.__size_ = 0; } else { - __begin_node_ = __end_node(); + *__root_ptr() = static_cast<__node_base_pointer>(__move_construct_tree(__t.__root())); + __root()->__parent_ = __end_node(); + __begin_node_ = static_cast<__end_node_pointer>(std::__tree_min(__end_node()->__left_)); + __size_ = __t.size(); + __t.clear(); // Ensure that __t is in a valid state after moving out the keys } } @@ -1633,22 +1668,21 @@ void __tree<_Tp, _Compare, _Allocator>::__move_assign(__tree& __t, true_type) template void __tree<_Tp, _Compare, _Allocator>::__move_assign(__tree& __t, false_type) { - if (__node_alloc() == __t.__node_alloc()) + if (__node_alloc() == __t.__node_alloc()) { __move_assign(__t, true_type()); - else { - value_comp() = std::move(__t.value_comp()); - const_iterator __e = end(); + } else { + value_comp() = std::move(__t.value_comp()); if (__size_ != 0) { - _DetachedTreeCache __cache(this); - while (__cache.__get() != nullptr && __t.__size_ != 0) { - __assign_value(__cache.__get()->__get_value(), std::move(__t.remove(__t.begin())->__get_value())); - __node_insert_multi(__cache.__get()); - __cache.__advance(); - } - } - while (__t.__size_ != 0) { - __insert_multi_from_orphaned_node(__e, std::move(__t.remove(__t.begin())->__get_value())); + *__root_ptr() = static_cast<__node_base_pointer>(__move_assign_tree(__root(), __t.__root())); + } else { + *__root_ptr() = static_cast<__node_base_pointer>(__move_construct_tree(__t.__root())); + if (__root()) + __root()->__parent_ = __end_node(); } + __begin_node_ = + __end_node()->__left_ ? 
static_cast<__end_node_pointer>(std::__tree_min(__end_node()->__left_)) : __end_node(); + __size_ = __t.size(); + __t.clear(); // Ensure that __t is in a valid state after moving out the keys } } diff --git a/libcxx/include/map b/libcxx/include/map index cc8b8769189d1..0dca11cabd12e 100644 --- a/libcxx/include/map +++ b/libcxx/include/map @@ -995,7 +995,7 @@ public: _LIBCPP_HIDE_FROM_ABI map(map&& __m) = default; - _LIBCPP_HIDE_FROM_ABI map(map&& __m, const allocator_type& __a); + _LIBCPP_HIDE_FROM_ABI map(map&& __m, const allocator_type& __a) : __tree_(std::move(__m.__tree_), __a) {} _LIBCPP_HIDE_FROM_ABI map& operator=(map&& __m) = default; @@ -1023,10 +1023,7 @@ public: _LIBCPP_HIDE_FROM_ABI explicit map(const allocator_type& __a) : __tree_(typename __base::allocator_type(__a)) {} - _LIBCPP_HIDE_FROM_ABI map(const map& __m, const allocator_type& __a) - : __tree_(__m.__tree_.value_comp(), typename __base::allocator_type(__a)) { - insert(__m.begin(), __m.end()); - } + _LIBCPP_HIDE_FROM_ABI map(const map& __m, const allocator_type& __alloc) : __tree_(__m.__tree_, __alloc) {} _LIBCPP_HIDE_FROM_ABI ~map() { static_assert(sizeof(std::__diagnose_non_const_comparator<_Key, _Compare>()), ""); } @@ -1426,18 +1423,6 @@ map(initializer_list>, _Allocator) # endif # ifndef _LIBCPP_CXX03_LANG -template -map<_Key, _Tp, _Compare, _Allocator>::map(map&& __m, const allocator_type& __a) - : __tree_(std::move(__m.__tree_), typename __base::allocator_type(__a)) { - if (__a != __m.get_allocator()) { - const_iterator __e = cend(); - while (!__m.empty()) { - __tree_.__insert_unique_from_orphaned_node( - __e.__i_, std::move(__m.__tree_.remove(__m.begin().__i_)->__get_value())); - } - } -} - template _Tp& map<_Key, _Tp, _Compare, _Allocator>::operator[](const key_type& __k) { return __tree_.__emplace_unique(std::piecewise_construct, std::forward_as_tuple(__k), std::forward_as_tuple()) @@ -1683,7 +1668,7 @@ public: _LIBCPP_HIDE_FROM_ABI multimap(multimap&& __m) = default; - _LIBCPP_HIDE_FROM_ABI multimap(multimap&& __m, const allocator_type& __a); + _LIBCPP_HIDE_FROM_ABI multimap(multimap&& __m, const allocator_type& __a) : __tree_(std::move(__m.__tree_), __a) {} _LIBCPP_HIDE_FROM_ABI multimap& operator=(multimap&& __m) = default; @@ -1712,10 +1697,7 @@ public: _LIBCPP_HIDE_FROM_ABI explicit multimap(const allocator_type& __a) : __tree_(typename __base::allocator_type(__a)) {} - _LIBCPP_HIDE_FROM_ABI multimap(const multimap& __m, const allocator_type& __a) - : __tree_(__m.__tree_.value_comp(), typename __base::allocator_type(__a)) { - insert(__m.begin(), __m.end()); - } + _LIBCPP_HIDE_FROM_ABI multimap(const multimap& __m, const allocator_type& __a) : __tree_(__m.__tree_, __a) {} _LIBCPP_HIDE_FROM_ABI ~multimap() { static_assert(sizeof(std::__diagnose_non_const_comparator<_Key, _Compare>()), ""); @@ -1990,19 +1972,6 @@ multimap(initializer_list>, _Allocator) -> multimap, _Tp, less>, _Allocator>; # endif -# ifndef _LIBCPP_CXX03_LANG -template -multimap<_Key, _Tp, _Compare, _Allocator>::multimap(multimap&& __m, const allocator_type& __a) - : __tree_(std::move(__m.__tree_), typename __base::allocator_type(__a)) { - if (__a != __m.get_allocator()) { - const_iterator __e = cend(); - while (!__m.empty()) - __tree_.__insert_multi_from_orphaned_node( - __e.__i_, std::move(__m.__tree_.remove(__m.begin().__i_)->__get_value())); - } -} -# endif - template inline _LIBCPP_HIDE_FROM_ABI bool operator==(const multimap<_Key, _Tp, _Compare, _Allocator>& __x, const multimap<_Key, _Tp, _Compare, _Allocator>& __y) { diff --git 
a/libcxx/include/set b/libcxx/include/set index d58b6e96b061d..3d6f571a42a1a 100644 --- a/libcxx/include/set +++ b/libcxx/include/set @@ -671,12 +671,10 @@ public: _LIBCPP_HIDE_FROM_ABI explicit set(const allocator_type& __a) : __tree_(__a) {} - _LIBCPP_HIDE_FROM_ABI set(const set& __s, const allocator_type& __a) : __tree_(__s.__tree_.value_comp(), __a) { - insert(__s.begin(), __s.end()); - } + _LIBCPP_HIDE_FROM_ABI set(const set& __s, const allocator_type& __alloc) : __tree_(__s.__tree_, __alloc) {} # ifndef _LIBCPP_CXX03_LANG - _LIBCPP_HIDE_FROM_ABI set(set&& __s, const allocator_type& __a); + _LIBCPP_HIDE_FROM_ABI set(set&& __s, const allocator_type& __alloc) : __tree_(std::move(__s.__tree_), __alloc) {} _LIBCPP_HIDE_FROM_ABI set(initializer_list __il, const value_compare& __comp = value_compare()) : __tree_(__comp) { @@ -946,19 +944,6 @@ template , _Allocator) -> set<_Key, less<_Key>, _Allocator>; # endif -# ifndef _LIBCPP_CXX03_LANG - -template -set<_Key, _Compare, _Allocator>::set(set&& __s, const allocator_type& __a) : __tree_(std::move(__s.__tree_), __a) { - if (__a != __s.get_allocator()) { - const_iterator __e = cend(); - while (!__s.empty()) - insert(__e, std::move(__s.__tree_.remove(__s.begin())->__get_value())); - } -} - -# endif // _LIBCPP_CXX03_LANG - template inline _LIBCPP_HIDE_FROM_ABI bool operator==(const set<_Key, _Compare, _Allocator>& __x, const set<_Key, _Compare, _Allocator>& __y) { @@ -1128,13 +1113,10 @@ public: # ifndef _LIBCPP_CXX03_LANG _LIBCPP_HIDE_FROM_ABI multiset(multiset&& __s) = default; - _LIBCPP_HIDE_FROM_ABI multiset(multiset&& __s, const allocator_type& __a); + _LIBCPP_HIDE_FROM_ABI multiset(multiset&& __s, const allocator_type& __a) : __tree_(std::move(__s.__tree_), __a) {} # endif // _LIBCPP_CXX03_LANG _LIBCPP_HIDE_FROM_ABI explicit multiset(const allocator_type& __a) : __tree_(__a) {} - _LIBCPP_HIDE_FROM_ABI multiset(const multiset& __s, const allocator_type& __a) - : __tree_(__s.__tree_.value_comp(), __a) { - insert(__s.begin(), __s.end()); - } + _LIBCPP_HIDE_FROM_ABI multiset(const multiset& __s, const allocator_type& __a) : __tree_(__s.__tree_, __a) {} # ifndef _LIBCPP_CXX03_LANG _LIBCPP_HIDE_FROM_ABI multiset(initializer_list __il, const value_compare& __comp = value_compare()) @@ -1407,20 +1389,6 @@ template , _Allocator) -> multiset<_Key, less<_Key>, _Allocator>; # endif -# ifndef _LIBCPP_CXX03_LANG - -template -multiset<_Key, _Compare, _Allocator>::multiset(multiset&& __s, const allocator_type& __a) - : __tree_(std::move(__s.__tree_), __a) { - if (__a != __s.get_allocator()) { - const_iterator __e = cend(); - while (!__s.empty()) - insert(__e, std::move(__s.__tree_.remove(__s.begin())->__get_value())); - } -} - -# endif // _LIBCPP_CXX03_LANG - template inline _LIBCPP_HIDE_FROM_ABI bool operator==(const multiset<_Key, _Compare, _Allocator>& __x, const multiset<_Key, _Compare, _Allocator>& __y) { diff --git a/libcxx/test/benchmarks/containers/associative/associative_container_benchmarks.h b/libcxx/test/benchmarks/containers/associative/associative_container_benchmarks.h index 22a6d0d753b0c..5dd55f244d885 100644 --- a/libcxx/test/benchmarks/containers/associative/associative_container_benchmarks.h +++ b/libcxx/test/benchmarks/containers/associative/associative_container_benchmarks.h @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -33,6 +34,9 @@ struct adapt_operations { // using InsertionResult = ...; // static Container::iterator get_iterator(InsertionResult const&); + + // template + // using rebind_alloc = ...; 
}; template @@ -103,6 +107,61 @@ void associative_container_benchmarks(std::string container) { } }); + bench("ctor(const&, alloc)", [=](auto& st) { + const std::size_t size = st.range(0); + std::vector in = make_value_types(generate_unique_keys(size)); + Container src(in.begin(), in.end()); + ScratchSpace c[BatchSize]; + + while (st.KeepRunningBatch(BatchSize)) { + for (std::size_t i = 0; i != BatchSize; ++i) { + new (c + i) Container(src, std::allocator()); + benchmark::DoNotOptimize(c + i); + benchmark::ClobberMemory(); + } + + st.PauseTiming(); + for (std::size_t i = 0; i != BatchSize; ++i) { + reinterpret_cast(c + i)->~Container(); + } + st.ResumeTiming(); + } + }); + + bench("ctor(&&, different allocs)", [=](auto& st) { + using PMRContainer = adapt_operations::template rebind_alloc< + std::pmr::polymorphic_allocator>; + + const std::size_t size = st.range(0); + std::vector in = make_value_types(generate_unique_keys(size)); + std::pmr::monotonic_buffer_resource rs(size * 64 * BatchSize); // 64 bytes should be enough per node + std::vector srcs; + srcs.reserve(BatchSize); + for (size_t i = 0; i != BatchSize; ++i) + srcs.emplace_back(&rs).insert(in.begin(), in.end()); + alignas(PMRContainer) char c[BatchSize * sizeof(PMRContainer)]; + + std::pmr::monotonic_buffer_resource rs2(size * 64 * BatchSize); // 64 bytes should be enough per node + while (st.KeepRunningBatch(BatchSize)) { + for (std::size_t i = 0; i != BatchSize; ++i) { + new (c + i * sizeof(PMRContainer)) PMRContainer(std::move(srcs[i]), &rs2); + benchmark::DoNotOptimize(c + i); + benchmark::ClobberMemory(); + } + + st.PauseTiming(); + for (std::size_t i = 0; i != BatchSize; ++i) { + reinterpret_cast(c + i * sizeof(PMRContainer))->~PMRContainer(); + } + rs2.release(); + srcs.clear(); + for (size_t i = 0; i != BatchSize; ++i) + srcs.emplace_back(&rs).insert(in.begin(), in.end()); + + st.ResumeTiming(); + } + }); + bench("ctor(iterator, iterator) (unsorted sequence)", [=](auto& st) { const std::size_t size = st.range(0); std::mt19937 randomness; diff --git a/libcxx/test/benchmarks/containers/associative/flat_map.bench.cpp b/libcxx/test/benchmarks/containers/associative/flat_map.bench.cpp index f3b86554802ca..407afb14e1e13 100644 --- a/libcxx/test/benchmarks/containers/associative/flat_map.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/flat_map.bench.cpp @@ -24,6 +24,14 @@ struct support::adapt_operations> { using InsertionResult = std::pair::iterator, bool>; static auto get_iterator(InsertionResult const& result) { return result.first; } + + template + using rebind_alloc = + std::flat_map, + std::vector::template rebind_alloc>, + std::vector::template rebind_alloc>>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/flat_multimap.bench.cpp b/libcxx/test/benchmarks/containers/associative/flat_multimap.bench.cpp index 80eaa549042c6..4f70d26116b0b 100644 --- a/libcxx/test/benchmarks/containers/associative/flat_multimap.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/flat_multimap.bench.cpp @@ -23,6 +23,14 @@ struct support::adapt_operations> { using InsertionResult = typename std::flat_multimap::iterator; static auto get_iterator(InsertionResult const& result) { return result; } + + template + using rebind_alloc = + std::flat_multimap, + std::vector::template rebind_alloc>, + std::vector::template rebind_alloc>>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/map.bench.cpp 
b/libcxx/test/benchmarks/containers/associative/map.bench.cpp index 142229ae64cad..cc9ffd857caf2 100644 --- a/libcxx/test/benchmarks/containers/associative/map.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/map.bench.cpp @@ -38,6 +38,9 @@ struct support::adapt_operations> { using InsertionResult = std::pair::iterator, bool>; static auto get_iterator(InsertionResult const& result) { return result.first; } + + template + using rebind_alloc = std::map, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/multimap.bench.cpp b/libcxx/test/benchmarks/containers/associative/multimap.bench.cpp index 15a0b573081bb..8e3abf0b7cf8b 100644 --- a/libcxx/test/benchmarks/containers/associative/multimap.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/multimap.bench.cpp @@ -24,6 +24,9 @@ struct support::adapt_operations> { using InsertionResult = typename std::multimap::iterator; static auto get_iterator(InsertionResult const& result) { return result; } + + template + using rebind_alloc = std::multimap, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/multiset.bench.cpp b/libcxx/test/benchmarks/containers/associative/multiset.bench.cpp index c205e0a4f793f..7bafd0ab52dce 100644 --- a/libcxx/test/benchmarks/containers/associative/multiset.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/multiset.bench.cpp @@ -22,6 +22,9 @@ struct support::adapt_operations> { using InsertionResult = typename std::multiset::iterator; static auto get_iterator(InsertionResult const& result) { return result; } + + template + using rebind_alloc = std::multiset, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/set.bench.cpp b/libcxx/test/benchmarks/containers/associative/set.bench.cpp index 50ee142b6e8b3..e5a6cc58913d2 100644 --- a/libcxx/test/benchmarks/containers/associative/set.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/set.bench.cpp @@ -23,6 +23,9 @@ struct support::adapt_operations> { using InsertionResult = std::pair::iterator, bool>; static auto get_iterator(InsertionResult const& result) { return result.first; } + + template + using rebind_alloc = std::set, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp b/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp index d670c531910ea..ddfc90c306010 100644 --- a/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp @@ -37,6 +37,9 @@ struct support::adapt_operations> { using InsertionResult = std::pair::iterator, bool>; static auto get_iterator(InsertionResult const& result) { return result.first; } + + template + using rebind_alloc = std::unordered_map, std::equal_to, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/unordered_multimap.bench.cpp b/libcxx/test/benchmarks/containers/associative/unordered_multimap.bench.cpp index 8738ca4bf9f0c..5d92bd8b2deaf 100644 --- a/libcxx/test/benchmarks/containers/associative/unordered_multimap.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/unordered_multimap.bench.cpp @@ -23,6 +23,9 @@ struct support::adapt_operations> { using InsertionResult = typename std::unordered_multimap::iterator; static auto get_iterator(InsertionResult const& result) { return 
result; } + + template + using rebind_alloc = std::unordered_multimap, std::equal_to, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/unordered_multiset.bench.cpp b/libcxx/test/benchmarks/containers/associative/unordered_multiset.bench.cpp index 4888b01bfeba0..09412fc4aeae7 100644 --- a/libcxx/test/benchmarks/containers/associative/unordered_multiset.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/unordered_multiset.bench.cpp @@ -22,6 +22,9 @@ struct support::adapt_operations> { using InsertionResult = typename std::unordered_multiset::iterator; static auto get_iterator(InsertionResult const& result) { return result; } + + template + using rebind_alloc = std::unordered_multiset, std::equal_to, Allocator>; }; int main(int argc, char** argv) { diff --git a/libcxx/test/benchmarks/containers/associative/unordered_set.bench.cpp b/libcxx/test/benchmarks/containers/associative/unordered_set.bench.cpp index 89443a597e85a..1b6663321b43c 100644 --- a/libcxx/test/benchmarks/containers/associative/unordered_set.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/unordered_set.bench.cpp @@ -24,6 +24,9 @@ struct support::adapt_operations> { using InsertionResult = std::pair::iterator, bool>; static auto get_iterator(InsertionResult const& result) { return result.first; } + + template + using rebind_alloc = std::unordered_set, std::equal_to, Allocator>; }; int main(int argc, char** argv) { From 09fd430de7a6002d7a22669348d6589bd32bd5ad Mon Sep 17 00:00:00 2001 From: Ebuka Ezike Date: Wed, 12 Nov 2025 14:54:13 +0000 Subject: [PATCH 10/34] [lldb-dap] Refactor event thread (#166948) Handle each event type in a different function --- lldb/tools/lldb-dap/DAP.cpp | 330 +++++++++++++++++++----------------- lldb/tools/lldb-dap/DAP.h | 4 + 2 files changed, 178 insertions(+), 156 deletions(-) diff --git a/lldb/tools/lldb-dap/DAP.cpp b/lldb/tools/lldb-dap/DAP.cpp index 11aed33886edb..d4203a2f00983 100644 --- a/lldb/tools/lldb-dap/DAP.cpp +++ b/lldb/tools/lldb-dap/DAP.cpp @@ -1319,7 +1319,7 @@ void DAP::ProgressEventThread() { lldb::SBEvent event; bool done = false; while (!done) { - if (listener.WaitForEvent(1, event)) { + if (listener.WaitForEvent(UINT32_MAX, event)) { const auto event_mask = event.GetType(); if (event.BroadcasterMatchesRef(broadcaster)) { if (event_mask & eBroadcastBitStopProgressThread) { @@ -1377,7 +1377,6 @@ void DAP::ProgressEventThread() { // is required. void DAP::EventThread() { llvm::set_thread_name("lldb.DAP.client." 
+ m_client_name + ".event_handler"); - lldb::SBEvent event; lldb::SBListener listener = debugger.GetListener(); broadcaster.AddListener(listener, eBroadcastBitStopEventThread); debugger.GetBroadcaster().AddListener( @@ -1388,169 +1387,176 @@ void DAP::EventThread() { debugger, lldb::SBThread::GetBroadcasterClassName(), lldb::SBThread::eBroadcastBitStackChanged); + lldb::SBEvent event; bool done = false; while (!done) { - if (listener.WaitForEvent(1, event)) { - const auto event_mask = event.GetType(); - if (lldb::SBProcess::EventIsProcessEvent(event)) { - lldb::SBProcess process = lldb::SBProcess::GetProcessFromEvent(event); - if (event_mask & lldb::SBProcess::eBroadcastBitStateChanged) { - auto state = lldb::SBProcess::GetStateFromEvent(event); - switch (state) { - case lldb::eStateConnected: - case lldb::eStateDetached: - case lldb::eStateInvalid: - case lldb::eStateUnloaded: - break; - case lldb::eStateAttaching: - case lldb::eStateCrashed: - case lldb::eStateLaunching: - case lldb::eStateStopped: - case lldb::eStateSuspended: - // Only report a stopped event if the process was not - // automatically restarted. - if (!lldb::SBProcess::GetRestartedFromEvent(event)) { - SendStdOutStdErr(*this, process); - if (llvm::Error err = SendThreadStoppedEvent(*this)) - DAP_LOG_ERROR(log, std::move(err), - "({1}) reporting thread stopped: {0}", - m_client_name); - } - break; - case lldb::eStateRunning: - case lldb::eStateStepping: - WillContinue(); - SendContinuedEvent(*this); - break; - case lldb::eStateExited: - lldb::SBStream stream; - process.GetStatus(stream); - SendOutput(OutputType::Console, stream.GetData()); - - // When restarting, we can get an "exited" event for the process we - // just killed with the old PID, or even with no PID. In that case - // we don't have to terminate the session. - if (process.GetProcessID() == LLDB_INVALID_PROCESS_ID || - process.GetProcessID() == restarting_process_id) { - restarting_process_id = LLDB_INVALID_PROCESS_ID; - } else { - // Run any exit LLDB commands the user specified in the - // launch.json - RunExitCommands(); - SendProcessExitedEvent(*this, process); - SendTerminatedEvent(); - done = true; - } - break; - } - } else if ((event_mask & lldb::SBProcess::eBroadcastBitSTDOUT) || - (event_mask & lldb::SBProcess::eBroadcastBitSTDERR)) { - SendStdOutStdErr(*this, process); - } - } else if (lldb::SBTarget::EventIsTargetEvent(event)) { - if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || - event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || - event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded || - event_mask & lldb::SBTarget::eBroadcastBitSymbolsChanged) { - const uint32_t num_modules = - lldb::SBTarget::GetNumModulesFromEvent(event); - const bool remove_module = - event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded; - - // NOTE: Both mutexes must be acquired to prevent deadlock when - // handling `modules_request`, which also requires both locks. 
- lldb::SBMutex api_mutex = GetAPIMutex(); - const std::scoped_lock guard( - api_mutex, modules_mutex); - for (uint32_t i = 0; i < num_modules; ++i) { - lldb::SBModule module = - lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); - - std::optional p_module = - CreateModule(target, module, remove_module); - if (!p_module) - continue; - - llvm::StringRef module_id = p_module->id; - - const bool module_exists = modules.contains(module_id); - if (remove_module && module_exists) { - modules.erase(module_id); - Send(protocol::Event{ - "module", ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonRemoved}}); - } else if (module_exists) { - Send(protocol::Event{ - "module", ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonChanged}}); - } else if (!remove_module) { - modules.insert(module_id); - Send(protocol::Event{ - "module", ModuleEventBody{std::move(p_module).value(), - ModuleEventBody::eReasonNew}}); - } - } - } - } else if (lldb::SBBreakpoint::EventIsBreakpointEvent(event)) { - if (event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged) { - auto event_type = - lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event); - auto bp = Breakpoint( - *this, lldb::SBBreakpoint::GetBreakpointFromEvent(event)); - // If the breakpoint was set through DAP, it will have the - // BreakpointBase::kDAPBreakpointLabel. Regardless of whether - // locations were added, removed, or resolved, the breakpoint isn't - // going away and the reason is always "changed". - if ((event_type & lldb::eBreakpointEventTypeLocationsAdded || - event_type & lldb::eBreakpointEventTypeLocationsRemoved || - event_type & lldb::eBreakpointEventTypeLocationsResolved) && - bp.MatchesName(BreakpointBase::kDAPBreakpointLabel)) { - // As the DAP client already knows the path of this breakpoint, we - // don't need to send it back as part of the "changed" event. This - // avoids sending paths that should be source mapped. Note that - // CreateBreakpoint doesn't apply source mapping and certain - // implementation ignore the source part of this event anyway. - protocol::Breakpoint protocol_bp = bp.ToProtocolBreakpoint(); - - // "source" is not needed here, unless we add adapter data to be - // saved by the client. 
- if (protocol_bp.source && !protocol_bp.source->adapterData) - protocol_bp.source = std::nullopt; - - llvm::json::Object body; - body.try_emplace("breakpoint", protocol_bp); - body.try_emplace("reason", "changed"); - - llvm::json::Object bp_event = CreateEventObject("breakpoint"); - bp_event.try_emplace("body", std::move(body)); - - SendJSON(llvm::json::Value(std::move(bp_event))); - } - } + if (!listener.WaitForEvent(UINT32_MAX, event)) + continue; - } else if (lldb::SBThread::EventIsThreadEvent(event)) { - HandleThreadEvent(event); - } else if (event_mask & lldb::eBroadcastBitError || - event_mask & lldb::eBroadcastBitWarning) { - lldb::SBStructuredData data = - lldb::SBDebugger::GetDiagnosticFromEvent(event); - if (!data.IsValid()) - continue; - std::string type = GetStringValue(data.GetValueForKey("type")); - std::string message = GetStringValue(data.GetValueForKey("message")); - SendOutput(OutputType::Important, - llvm::formatv("{0}: {1}", type, message).str()); - } else if (event.BroadcasterMatchesRef(broadcaster)) { - if (event_mask & eBroadcastBitStopEventThread) { - done = true; - } + const uint32_t event_mask = event.GetType(); + if (lldb::SBProcess::EventIsProcessEvent(event)) { + HandleProcessEvent(event, /*&process_exited=*/done); + } else if (lldb::SBTarget::EventIsTargetEvent(event)) { + HandleTargetEvent(event); + } else if (lldb::SBBreakpoint::EventIsBreakpointEvent(event)) { + HandleBreakpointEvent(event); + } else if (lldb::SBThread::EventIsThreadEvent(event)) { + HandleThreadEvent(event); + } else if (event_mask & lldb::eBroadcastBitError || + event_mask & lldb::eBroadcastBitWarning) { + HandleDiagnosticEvent(event); + } else if (event.BroadcasterMatchesRef(broadcaster)) { + if (event_mask & eBroadcastBitStopEventThread) { + done = true; + } + } + } +} + +void DAP::HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited) { + lldb::SBProcess process = lldb::SBProcess::GetProcessFromEvent(event); + const uint32_t event_mask = event.GetType(); + if (event_mask & lldb::SBProcess::eBroadcastBitStateChanged) { + auto state = lldb::SBProcess::GetStateFromEvent(event); + switch (state) { + case lldb::eStateConnected: + case lldb::eStateDetached: + case lldb::eStateInvalid: + case lldb::eStateUnloaded: + break; + case lldb::eStateAttaching: + case lldb::eStateCrashed: + case lldb::eStateLaunching: + case lldb::eStateStopped: + case lldb::eStateSuspended: + // Only report a stopped event if the process was not + // automatically restarted. + if (!lldb::SBProcess::GetRestartedFromEvent(event)) { + SendStdOutStdErr(*this, process); + if (llvm::Error err = SendThreadStoppedEvent(*this)) + DAP_LOG_ERROR(log, std::move(err), + "({1}) reporting thread stopped: {0}", m_client_name); + } + break; + case lldb::eStateRunning: + case lldb::eStateStepping: + WillContinue(); + SendContinuedEvent(*this); + break; + case lldb::eStateExited: + lldb::SBStream stream; + process.GetStatus(stream); + SendOutput(OutputType::Console, stream.GetData()); + + // When restarting, we can get an "exited" event for the process we + // just killed with the old PID, or even with no PID. In that case + // we don't have to terminate the session. 
+ if (process.GetProcessID() == LLDB_INVALID_PROCESS_ID || + process.GetProcessID() == restarting_process_id) { + restarting_process_id = LLDB_INVALID_PROCESS_ID; + } else { + // Run any exit LLDB commands the user specified in the + // launch.json + RunExitCommands(); + SendProcessExitedEvent(*this, process); + SendTerminatedEvent(); + process_exited = true; + } + break; + } + } else if ((event_mask & lldb::SBProcess::eBroadcastBitSTDOUT) || + (event_mask & lldb::SBProcess::eBroadcastBitSTDERR)) { + SendStdOutStdErr(*this, process); + } +} + +void DAP::HandleTargetEvent(const lldb::SBEvent &event) { + const uint32_t event_mask = event.GetType(); + if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || + event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || + event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded || + event_mask & lldb::SBTarget::eBroadcastBitSymbolsChanged) { + const uint32_t num_modules = lldb::SBTarget::GetNumModulesFromEvent(event); + const bool remove_module = + event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded; + + // NOTE: Both mutexes must be acquired to prevent deadlock when + // handling `modules_request`, which also requires both locks. + lldb::SBMutex api_mutex = GetAPIMutex(); + const std::scoped_lock guard(api_mutex, + modules_mutex); + for (uint32_t i = 0; i < num_modules; ++i) { + lldb::SBModule module = + lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); + + std::optional p_module = + CreateModule(target, module, remove_module); + if (!p_module) + continue; + + const llvm::StringRef module_id = p_module->id; + + const bool module_exists = modules.contains(module_id); + if (remove_module && module_exists) { + modules.erase(module_id); + Send(protocol::Event{"module", + ModuleEventBody{std::move(p_module).value(), + ModuleEventBody::eReasonRemoved}}); + } else if (module_exists) { + Send(protocol::Event{"module", + ModuleEventBody{std::move(p_module).value(), + ModuleEventBody::eReasonChanged}}); + } else if (!remove_module) { + modules.insert(module_id); + Send(protocol::Event{"module", + ModuleEventBody{std::move(p_module).value(), + ModuleEventBody::eReasonNew}}); } } } } +void DAP::HandleBreakpointEvent(const lldb::SBEvent &event) { + const uint32_t event_mask = event.GetType(); + if (!(event_mask & lldb::SBTarget::eBroadcastBitBreakpointChanged)) + return; + + auto event_type = lldb::SBBreakpoint::GetBreakpointEventTypeFromEvent(event); + auto bp = + Breakpoint(*this, lldb::SBBreakpoint::GetBreakpointFromEvent(event)); + // If the breakpoint was set through DAP, it will have the + // BreakpointBase::kDAPBreakpointLabel. Regardless of whether + // locations were added, removed, or resolved, the breakpoint isn't + // going away and the reason is always "changed". + if ((event_type & lldb::eBreakpointEventTypeLocationsAdded || + event_type & lldb::eBreakpointEventTypeLocationsRemoved || + event_type & lldb::eBreakpointEventTypeLocationsResolved) && + bp.MatchesName(BreakpointBase::kDAPBreakpointLabel)) { + // As the DAP client already knows the path of this breakpoint, we + // don't need to send it back as part of the "changed" event. This + // avoids sending paths that should be source mapped. Note that + // CreateBreakpoint doesn't apply source mapping and certain + // implementation ignore the source part of this event anyway. + protocol::Breakpoint protocol_bp = bp.ToProtocolBreakpoint(); + + // "source" is not needed here, unless we add adapter data to be + // saved by the client. 
+ if (protocol_bp.source && !protocol_bp.source->adapterData) + protocol_bp.source = std::nullopt; + + llvm::json::Object body; + body.try_emplace("breakpoint", protocol_bp); + body.try_emplace("reason", "changed"); + + llvm::json::Object bp_event = CreateEventObject("breakpoint"); + bp_event.try_emplace("body", std::move(body)); + + SendJSON(llvm::json::Value(std::move(bp_event))); + } +} + void DAP::HandleThreadEvent(const lldb::SBEvent &event) { - uint32_t event_type = event.GetType(); + const uint32_t event_type = event.GetType(); if (event_type & lldb::SBThread::eBroadcastBitStackChanged) { const lldb::SBThread evt_thread = lldb::SBThread::GetThreadFromEvent(event); @@ -1559,6 +1565,18 @@ void DAP::HandleThreadEvent(const lldb::SBEvent &event) { } } +void DAP::HandleDiagnosticEvent(const lldb::SBEvent &event) { + const lldb::SBStructuredData data = + lldb::SBDebugger::GetDiagnosticFromEvent(event); + if (!data.IsValid()) + return; + + std::string type = GetStringValue(data.GetValueForKey("type")); + std::string message = GetStringValue(data.GetValueForKey("message")); + SendOutput(OutputType::Important, + llvm::formatv("{0}: {1}", type, message).str()); +} + std::vector DAP::SetSourceBreakpoints( const protocol::Source &source, const std::optional> &breakpoints) { diff --git a/lldb/tools/lldb-dap/DAP.h b/lldb/tools/lldb-dap/DAP.h index b4f111e4e720c..5d40341329f34 100644 --- a/lldb/tools/lldb-dap/DAP.h +++ b/lldb/tools/lldb-dap/DAP.h @@ -454,7 +454,11 @@ struct DAP final : public DAPTransport::MessageHandler { /// Event threads. /// @{ void EventThread(); + void HandleProcessEvent(const lldb::SBEvent &event, bool &process_exited); + void HandleTargetEvent(const lldb::SBEvent &event); + void HandleBreakpointEvent(const lldb::SBEvent &event); void HandleThreadEvent(const lldb::SBEvent &event); + void HandleDiagnosticEvent(const lldb::SBEvent &event); void ProgressEventThread(); std::thread event_thread; From 1e4b9e4059e12924b1ad38bd127cdbdb57e3c488 Mon Sep 17 00:00:00 2001 From: Tarun Prabhu Date: Wed, 12 Nov 2025 08:10:36 -0700 Subject: [PATCH 11/34] [flang][NFC] Strip trailing whitespace from tests (3 of N) Only the fortran source files in flang/test have been modified. The other files in the directory will be cleaned up in subsequent commits --- flang/test/Lower/HLFIR/charconvert.f90 | 2 +- .../HLFIR/procedure-pointer-component-default-init.f90 | 2 +- flang/test/Lower/HLFIR/procedure-pointer.f90 | 10 +++++----- flang/test/Lower/HLFIR/reshape.f90 | 2 +- flang/test/Lower/MIF/co_broadcast.f90 | 4 ++-- flang/test/Lower/MIF/co_max.f90 | 6 +++--- flang/test/Lower/MIF/co_min.f90 | 6 +++--- flang/test/Lower/MIF/co_sum.f90 | 2 +- flang/test/Lower/MIF/coarray-init.f90 | 2 +- flang/test/Lower/MIF/num_images.f90 | 2 +- flang/test/Lower/MIF/sync_all.f90 | 6 +++--- flang/test/Lower/MIF/sync_images.f90 | 8 ++++---- flang/test/Lower/MIF/sync_memory.f90 | 8 ++++---- flang/test/Lower/MIF/this_image.f90 | 2 +- 14 files changed, 31 insertions(+), 31 deletions(-) diff --git a/flang/test/Lower/HLFIR/charconvert.f90 b/flang/test/Lower/HLFIR/charconvert.f90 index 1044986a3db01..f4cd3b17fee41 100644 --- a/flang/test/Lower/HLFIR/charconvert.f90 +++ b/flang/test/Lower/HLFIR/charconvert.f90 @@ -56,7 +56,7 @@ subroutine charconvert3(c, c4) end subroutine ! CHECK-LABEL: func.func @_QPcharconvert3 -! CHECK-SAME: %[[ARG0:.*]]: !fir.boxchar<1> {{.*}}, %[[ARG1:.*]]: !fir.boxchar<4> +! CHECK-SAME: %[[ARG0:.*]]: !fir.boxchar<1> {{.*}}, %[[ARG1:.*]]: !fir.boxchar<4> ! 
CHECK: %[[VAL_0:.*]]:2 = fir.unboxchar %[[ARG0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) ! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]]#0 typeparams %[[VAL_0]]#1 dummy_scope %{{[0-9]+}} arg {{[0-9]+}} {uniq_name = "_QFcharconvert3Ec"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) ! CHECK: %[[VAL_2:.*]]:2 = fir.unboxchar %[[ARG1]] : (!fir.boxchar<4>) -> (!fir.ref>, index) diff --git a/flang/test/Lower/HLFIR/procedure-pointer-component-default-init.f90 b/flang/test/Lower/HLFIR/procedure-pointer-component-default-init.f90 index 85931262b5892..61cc743ec9c59 100644 --- a/flang/test/Lower/HLFIR/procedure-pointer-component-default-init.f90 +++ b/flang/test/Lower/HLFIR/procedure-pointer-component-default-init.f90 @@ -1,5 +1,5 @@ ! Test procedure pointer component default initialization when the size -! of the derived type is 32 bytes and larger. +! of the derived type is 32 bytes and larger. ! RUN: bbc -emit-hlfir -o - %s | FileCheck %s interface diff --git a/flang/test/Lower/HLFIR/procedure-pointer.f90 b/flang/test/Lower/HLFIR/procedure-pointer.f90 index 9680497a1e7ac..75e81c165d25b 100644 --- a/flang/test/Lower/HLFIR/procedure-pointer.f90 +++ b/flang/test/Lower/HLFIR/procedure-pointer.f90 @@ -11,7 +11,7 @@ real function real_func(x) real :: x end function character(:) function char_func(x) - pointer :: char_func + pointer :: char_func integer :: x end function subroutine sub(x) @@ -148,7 +148,7 @@ subroutine sub5() use m procedure(real), pointer :: p3 - p3 => real_func + p3 => real_func ! CHECK: %[[VAL_0:.*]] = fir.alloca !fir.boxproc<() -> f32> {bindc_name = "p3", uniq_name = "_QFsub5Ep3"} ! CHECK: %[[VAL_1:.*]] = fir.zero_bits () -> f32 ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_1]] : (() -> f32) -> !fir.boxproc<() -> f32> @@ -165,7 +165,7 @@ subroutine sub6() procedure(), pointer :: p4 real :: r - p4 => sub + p4 => sub ! CHECK: %[[VAL_0:.*]] = fir.alloca !fir.boxproc<() -> ()> {bindc_name = "p4", uniq_name = "_QFsub6Ep4"} ! CHECK: %[[VAL_1:.*]] = fir.zero_bits () -> () ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_1]] : (() -> ()) -> !fir.boxproc<() -> ()> @@ -197,7 +197,7 @@ subroutine sub7(p1, p2) call foo2(p2) ! CHECK: fir.call @_QPfoo2(%[[VAL_1]]#0) fastmath : (!fir.ref ()>>) -> () -end +end subroutine sub8() use m @@ -338,7 +338,7 @@ subroutine sub12() ! CHECK: %[[VAL_16:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = ".tmp.intrinsic_result"} : (!fir.ref) -> !fir.box>>>>) -> (!fir.ref) -> !fir.box>>>>, !fir.ref) -> !fir.box>>>>) ! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]]#0 : (!fir.ref) -> !fir.box>>>>) -> !fir.ref ()>> ! CHECK: fir.call @_QPfoo2(%[[VAL_17]]) fastmath : (!fir.ref ()>>) -> () -end +end subroutine test_opt_pointer() interface diff --git a/flang/test/Lower/HLFIR/reshape.f90 b/flang/test/Lower/HLFIR/reshape.f90 index 8bf3cfd08c6ac..83072d33d6052 100644 --- a/flang/test/Lower/HLFIR/reshape.f90 +++ b/flang/test/Lower/HLFIR/reshape.f90 @@ -49,7 +49,7 @@ end subroutine reshape_test_nopad ! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare {{.*}}{uniq_name = "_QFreshape_test_nopadEsh"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare {{.*}}{uniq_name = "_QFreshape_test_nopadEsource"} : (!fir.box>, !fir.dscope) -> (!fir.box>, !fir.box>) ! 
CHECK: %[[VAL_13:.*]] = hlfir.reshape %[[VAL_11]]#0 %[[VAL_10]]#0 order %[[VAL_7]]#0 : (!fir.box>, !fir.ref>, !fir.ref>) -> !hlfir.expr - + subroutine test_reshape_optional1(pad, order, source, shape) real, pointer :: pad(:, :) integer, pointer :: order(:) diff --git a/flang/test/Lower/MIF/co_broadcast.f90 b/flang/test/Lower/MIF/co_broadcast.f90 index 25e4330ade704..fadee5f6bcdf8 100644 --- a/flang/test/Lower/MIF/co_broadcast.f90 +++ b/flang/test/Lower/MIF/co_broadcast.f90 @@ -34,13 +34,13 @@ program test_co_broadcast ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_I:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: mif.co_broadcast %[[V1]] source %[[C1_i32:.*]] : (!fir.box>, i32) call co_broadcast(array_i, source_image=1) - + ! CHECK: %[[C1_i32:.*]] = arith.constant 1 : i32 ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_C:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> ! CHECK: mif.co_broadcast %[[V1]] source %[[C1_i32:.*]] : (!fir.box>>, i32) call co_broadcast(array_c, source_image=1) - + ! CHECK: %[[C1_i32:.*]] = arith.constant 1 : i32 ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_D:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> diff --git a/flang/test/Lower/MIF/co_max.f90 b/flang/test/Lower/MIF/co_max.f90 index 19e65626b50f2..0a179c832dce8 100644 --- a/flang/test/Lower/MIF/co_max.f90 +++ b/flang/test/Lower/MIF/co_max.f90 @@ -40,12 +40,12 @@ program test_co_max ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_I:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: mif.co_max %[[V1]] : (!fir.box>) call co_max(array_i) - + ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_C:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> ! CHECK: mif.co_max %[[V1]] result %[[C1_i32:.*]] : (!fir.box>>, i32) - call co_max(array_c, result_image=1) - + call co_max(array_c, result_image=1) + ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_D:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: mif.co_max %[[V1]] result %[[C1_i32:.*]] stat %[[STATUS:.*]]#0 : (!fir.box>, i32, !fir.ref) diff --git a/flang/test/Lower/MIF/co_min.f90 b/flang/test/Lower/MIF/co_min.f90 index a7adc6b540147..bedee0e61619c 100644 --- a/flang/test/Lower/MIF/co_min.f90 +++ b/flang/test/Lower/MIF/co_min.f90 @@ -40,12 +40,12 @@ program test_co_min ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_I:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: mif.co_min %[[V1]] : (!fir.box>) call co_min(array_i) - + ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_C:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> ! CHECK: mif.co_min %[[V1]] result %[[C1_i32:.*]] : (!fir.box>>, i32) - call co_min(array_c, result_image=1) - + call co_min(array_c, result_image=1) + ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_D:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! 
CHECK: mif.co_min %[[V1]] result %[[C1_i32:.*]] stat %[[STATUS:.*]]#0 : (!fir.box>, i32, !fir.ref) diff --git a/flang/test/Lower/MIF/co_sum.f90 b/flang/test/Lower/MIF/co_sum.f90 index 0d8a25850ad5f..9710fd6d521ff 100644 --- a/flang/test/Lower/MIF/co_sum.f90 +++ b/flang/test/Lower/MIF/co_sum.f90 @@ -36,7 +36,7 @@ program test_co_sum ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_I:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: mif.co_sum %[[V1]] : (!fir.box>) call co_sum(array_i) - + ! CHECK: %[[SHAPE_2:.*]] = fir.shape %[[C2_2:.*]] : (index) -> !fir.shape<1> ! CHECK: %[[V1:.*]] = fir.embox %[[ARRAY_D:.*]]#0(%[[SHAPE_2]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! CHECK: mif.co_sum %[[V1]] result %[[C1_i32:.*]] stat %[[STATUS:.*]]#0 : (!fir.box>, i32, !fir.ref) diff --git a/flang/test/Lower/MIF/coarray-init.f90 b/flang/test/Lower/MIF/coarray-init.f90 index e3544736df284..e3526f6e09993 100644 --- a/flang/test/Lower/MIF/coarray-init.f90 +++ b/flang/test/Lower/MIF/coarray-init.f90 @@ -3,7 +3,7 @@ program test_init -end +end ! ALL-LABEL: func.func @main ! ALL: fir.call @_FortranAProgramStart diff --git a/flang/test/Lower/MIF/num_images.f90 b/flang/test/Lower/MIF/num_images.f90 index a673b6e8120f8..8f31ab4bc0090 100644 --- a/flang/test/Lower/MIF/num_images.f90 +++ b/flang/test/Lower/MIF/num_images.f90 @@ -3,7 +3,7 @@ program test use iso_fortran_env integer :: i - integer :: team_number + integer :: team_number type(team_type) :: team ! CHECK: mif.num_images : () -> i32 diff --git a/flang/test/Lower/MIF/sync_all.f90 b/flang/test/Lower/MIF/sync_all.f90 index 2b1997c8cc0b8..4d685df31abbb 100644 --- a/flang/test/Lower/MIF/sync_all.f90 +++ b/flang/test/Lower/MIF/sync_all.f90 @@ -4,7 +4,7 @@ program test_sync_all implicit none ! NOCOARRAY: Not yet implemented: Multi-image features are experimental and are disabled by default, use '-fcoarray' to enable. - + ! COARRAY: %[[ERRMSG:.*]]:2 = hlfir.declare %[[VAL_1:.*]] typeparams %[[C_128:.*]] {uniq_name = "_QFEerror_message"} : (!fir.ref>, index) -> (!fir.ref>, !fir.ref>) ! COARRAY: %[[STAT:.*]]:2 = hlfir.declare %[[VAL_2:.*]] {uniq_name = "_QFEsync_status"} : (!fir.ref) -> (!fir.ref, !fir.ref) integer sync_status @@ -15,11 +15,11 @@ program test_sync_all ! COARRAY: mif.sync_all stat %[[STAT]]#0 : (!fir.ref) sync all(stat=sync_status) - + ! COARRAY: %[[VAL_1:.*]] = fir.embox %[[ERRMSG]]#0 : (!fir.ref>) -> !fir.box> ! COARRAY: mif.sync_all errmsg %[[VAL_1]] : (!fir.box>) sync all( errmsg=error_message) - + ! COARRAY: %[[VAL_2:.*]] = fir.embox %[[ERRMSG]]#0 : (!fir.ref>) -> !fir.box> ! COARRAY: mif.sync_all stat %[[STAT]]#0 errmsg %[[VAL_2]] : (!fir.ref, !fir.box>) sync all(stat=sync_status, errmsg=error_message) diff --git a/flang/test/Lower/MIF/sync_images.f90 b/flang/test/Lower/MIF/sync_images.f90 index 7ee5936131750..1ef577ed4f158 100644 --- a/flang/test/Lower/MIF/sync_images.f90 +++ b/flang/test/Lower/MIF/sync_images.f90 @@ -4,7 +4,7 @@ program test_sync_images implicit none ! NOCOARRAY: Not yet implemented: Multi-image features are experimental and are disabled by default, use '-fcoarray' to enable. - + ! COARRAY: %[[ERRMSG:.*]]:2 = hlfir.declare %[[VAL_1:.*]] typeparams %[[C_128:.*]] {uniq_name = "_QFEerror_message"} : (!fir.ref>, index) -> (!fir.ref>, !fir.ref>) ! COARRAY: %[[ME:.*]]:2 = hlfir.declare %[[VAL_3:.*]] {uniq_name = "_QFEme"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! 
COARRAY: %[[STAT:.*]]:2 = hlfir.declare %[[VAL_2:.*]] {uniq_name = "_QFEsync_status"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -24,14 +24,14 @@ program test_sync_images ! COARRAY: %[[VAL_5:.*]] = fir.embox %[[IMG_SET:.*]]#0(%[[SHAPE_1:.*]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! COARRAY: mif.sync_images image_set %[[VAL_5]] stat %[[STAT]]#0 errmsg %[[VAL_4]] : (!fir.box>, !fir.ref, !fir.box>) sync images([1], stat=sync_status, errmsg=error_message) - + ! COARRAY: mif.sync_images : () sync images(*) - + ! COARRAY: %[[VAL_6:.*]] = fir.embox %[[ME]]#0 : (!fir.ref) -> !fir.box ! COARRAY: mif.sync_images image_set %[[VAL_6]] : (!fir.box) sync images(me) - + ! COARRAY: %[[VAL_7:.*]] = fir.embox %[[IMG_SET:.*]]#0(%[[SHAPE_3:.*]]) : (!fir.ref>, !fir.shape<1>) -> !fir.box> ! COARRAY: mif.sync_images image_set %[[VAL_7]] : (!fir.box>) sync images([1])
diff --git a/flang/test/Lower/MIF/sync_memory.f90 b/flang/test/Lower/MIF/sync_memory.f90 index e6e0fa1e7fdf3..a36fc2d1919a5 100644 --- a/flang/test/Lower/MIF/sync_memory.f90 +++ b/flang/test/Lower/MIF/sync_memory.f90 @@ -4,22 +4,22 @@ program test_sync_memory implicit none ! NOCOARRAY: Not yet implemented: Multi-image features are experimental and are disabled by default, use '-fcoarray' to enable. - + ! COARRAY: %[[ERRMSG:.*]]:2 = hlfir.declare %[[VAL_1:.*]] typeparams %[[C_128:.*]] {uniq_name = "_QFEerror_message"} : (!fir.ref>, index) -> (!fir.ref>, !fir.ref>) ! COARRAY: %[[STAT:.*]]:2 = hlfir.declare %[[VAL_2:.*]] {uniq_name = "_QFEsync_status"} : (!fir.ref) -> (!fir.ref, !fir.ref) integer sync_status character(len=128) :: error_message - ! COARRAY: mif.sync_memory : () + ! COARRAY: mif.sync_memory : () sync memory ! COARRAY: mif.sync_memory stat %[[STAT]]#0 : (!fir.ref) sync memory(stat=sync_status) - + ! COARRAY: %[[VAL_1:.*]] = fir.embox %[[ERRMSG]]#0 : (!fir.ref>) -> !fir.box> ! COARRAY: mif.sync_memory errmsg %[[VAL_1]] : (!fir.box>) sync memory( errmsg=error_message) - + ! COARRAY: %[[VAL_2:.*]] = fir.embox %[[ERRMSG]]#0 : (!fir.ref>) -> !fir.box> ! COARRAY: mif.sync_memory stat %[[STAT]]#0 errmsg %[[VAL_2]] : (!fir.ref, !fir.box>) sync memory(stat=sync_status, errmsg=error_message)
diff --git a/flang/test/Lower/MIF/this_image.f90 b/flang/test/Lower/MIF/this_image.f90 index ce729b349e6cf..c6674c309f3f4 100644 --- a/flang/test/Lower/MIF/this_image.f90 +++ b/flang/test/Lower/MIF/this_image.f90 @@ -5,7 +5,7 @@ program test integer :: i type(team_type) :: team - ! CHECK: mif.this_image : () -> i32 + ! CHECK: mif.this_image : () -> i32 i = this_image() ! CHECK: mif.this_image team %[[TEAM:.*]] : ({{.*}}) -> i32
From 62d1a080e69e3c5e98840e000135afa7c688a77b Mon Sep 17 00:00:00 2001 From: Florian Hahn Date: Wed, 12 Nov 2025 15:11:00 +0000 Subject: [PATCH 12/34] [LV] Use ExtractLane(LastActiveLane, V) live outs when tail-folding. (#149042) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Building on top of https://github.com/llvm/llvm-project/pull/148817, introduce a new abstract LastActiveLane opcode that gets lowered to Not(Mask) → FirstActiveLane(NotMask) → Sub(result, 1). When folding the tail, update all extracts for uses outside the loop to extract the value of the last active iteration.
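For illustration, a minimal sketch (not part of the committed change) of the IR this expansion is expected to produce for a fixed-width <4 x i1> prefix mask, assuming FirstActiveLane is emitted via the llvm.experimental.cttz.elts intrinsic as in the updated RISC-V tests below; the function name and the concrete mask width are hypothetical:

  ; For %mask = <1,1,1,0>, lanes 0..2 are active and the result is 2.
  declare i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1>, i1)

  define i64 @last_active_lane_sketch(<4 x i1> %mask) {
    ; Not(Mask): <1,1,1,0> becomes <0,0,0,1>.
    %not.mask = xor <4 x i1> %mask, <i1 true, i1 true, i1 true, i1 true>
    ; FirstActiveLane(NotMask): index of the first inactive lane (3 here).
    ; With the zero-is-poison flag set, this assumes at least one lane of
    ; %mask is inactive, i.e. %not.mask is not all-zero.
    %first.inactive = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> %not.mask, i1 true)
    ; Sub(result, 1): the last active lane precedes the first inactive one.
    %last.active = sub i64 %first.inactive, 1
    ret i64 %last.active
  }

This relies on the operand being a prefix mask (all active lanes before all inactive ones), which is exactly what the new VPlanVerifier check enforces.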
See also https://github.com/llvm/llvm-project/issues/148603 PR: https://github.com/llvm/llvm-project/pull/149042 --- .../Vectorize/LoopVectorizationLegality.cpp | 18 - .../Transforms/Vectorize/LoopVectorize.cpp | 3 +- llvm/lib/Transforms/Vectorize/VPlan.h | 7 + .../Transforms/Vectorize/VPlanAnalysis.cpp | 1 + .../Transforms/Vectorize/VPlanPatternMatch.h | 22 + .../Transforms/Vectorize/VPlanPredicator.cpp | 39 +- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 29 + .../Transforms/Vectorize/VPlanTransforms.cpp | 61 +- llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp | 13 +- .../Transforms/Vectorize/VPlanVerifier.cpp | 46 + .../LoopVectorize/RISCV/dead-ops-cost.ll | 68 +- .../Transforms/LoopVectorize/RISCV/divrem.ll | 101 +- .../first-order-recurrence-scalable-vf1.ll | 86 +- .../LoopVectorize/RISCV/scalable-tailfold.ll | 56 +- .../tail-folding-fixed-order-recurrence.ll | 72 +- .../LoopVectorize/RISCV/uniform-load-store.ll | 126 ++- .../LoopVectorize/X86/small-size.ll | 78 +- .../first-order-recurrence-tail-folding.ll | 868 +++++++++++++++--- llvm/test/Transforms/LoopVectorize/optsize.ll | 207 +++-- .../pr43166-fold-tail-by-masking.ll | 86 +- .../use-scalar-epilogue-if-tp-fails.ll | 223 ++++- 21 files changed, 1626 insertions(+), 584 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index 03112c67dda7b..e522d2f617d8a 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -2097,24 +2097,6 @@ bool LoopVectorizationLegality::canFoldTailByMasking() const { for (const auto &Reduction : getReductionVars()) ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr()); - // TODO: handle non-reduction outside users when tail is folded by masking. - for (auto *AE : AllowedExit) { - // Check that all users of allowed exit values are inside the loop or - // are the live-out of a reduction. - if (ReductionLiveOuts.count(AE)) - continue; - for (User *U : AE->users()) { - Instruction *UI = cast(U); - if (TheLoop->contains(UI)) - continue; - LLVM_DEBUG( - dbgs() - << "LV: Cannot fold tail by masking, loop has an outside user for " - << *UI << "\n"); - return false; - } - } - for (const auto &Entry : getInductionVars()) { PHINode *OrigPhi = Entry.first; for (User *U : OrigPhi->users()) { diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 5cc01a9f974b4..b9d4ff41c0755 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -8895,7 +8895,8 @@ void LoopVectorizationPlanner::adjustRecipesForReductions( if (FinalReductionResult == U || Parent->getParent()) continue; U->replaceUsesOfWith(OrigExitingVPV, FinalReductionResult); - if (match(U, m_ExtractLastElement(m_VPValue()))) + if (match(U, m_CombineOr(m_ExtractLastElement(m_VPValue()), + m_ExtractLane(m_VPValue(), m_VPValue())))) cast(U)->replaceAllUsesWith(FinalReductionResult); } diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 72858e1265d86..08f77b75400bd 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -1047,6 +1047,13 @@ class LLVM_ABI_FOR_TEST VPInstruction : public VPRecipeWithIRFlags, // It produces the lane index across all unrolled iterations. Unrolling will // add all copies of its original operand as additional operands. 
FirstActiveLane, + // Calculates the last active lane index of the vector predicate operands. + // The predicates must be prefix-masks (all 1s before all 0s). Used when + // tail-folding to extract the correct live-out value from the last active + // iteration. It produces the lane index across all unrolled iterations. + // Unrolling will add all copies of its original operand as additional + // operands. + LastActiveLane, // The opcodes below are used for VPInstructionWithType. // diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 80a2e4bc3f754..fb0b029de3d41 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -115,6 +115,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { case VPInstruction::ExtractLane: return inferScalarType(R->getOperand(1)); case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: return Type::getIntNTy(Ctx, 64); case VPInstruction::ExtractLastElement: case VPInstruction::ExtractLastLanePerPart: diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h index b57c44872c1b6..aa2785252d376 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h +++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h @@ -395,12 +395,24 @@ m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) { return m_VPInstruction(Op0, Op1); } +template +inline VPInstruction_match +m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) { + return m_VPInstruction(Op0, Op1); +} + template inline VPInstruction_match m_ExtractLastLanePerPart(const Op0_t &Op0) { return m_VPInstruction(Op0); } +template +inline VPInstruction_match +m_ExtractPenultimateElement(const Op0_t &Op0) { + return m_VPInstruction(Op0); +} + template inline VPInstruction_match m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) { @@ -429,6 +441,16 @@ m_FirstActiveLane(const Op0_t &Op0) { return m_VPInstruction(Op0); } +template +inline VPInstruction_match +m_LastActiveLane(const Op0_t &Op0) { + return m_VPInstruction(Op0); +} + +inline VPInstruction_match m_StepVector() { + return m_VPInstruction(); +} + template inline AllRecipe_match m_Unary(const Op0_t &Op0) { return AllRecipe_match(Op0); diff --git a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp index fb17d5dd62b9d..3579af21d8b07 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp @@ -44,11 +44,6 @@ class VPPredicator { /// possibly inserting new recipes at \p Dst (using Builder's insertion point) VPValue *createEdgeMask(VPBasicBlock *Src, VPBasicBlock *Dst); - /// Returns the *entry* mask for \p VPBB. - VPValue *getBlockInMask(VPBasicBlock *VPBB) const { - return BlockMaskCache.lookup(VPBB); - } - /// Record \p Mask as the *entry* mask of \p VPBB, which is expected to not /// already have a mask. void setBlockInMask(VPBasicBlock *VPBB, VPValue *Mask) { @@ -68,6 +63,11 @@ class VPPredicator { } public: + /// Returns the *entry* mask for \p VPBB. + VPValue *getBlockInMask(VPBasicBlock *VPBB) const { + return BlockMaskCache.lookup(VPBB); + } + /// Returns the precomputed predicate of the edge from \p Src to \p Dst. 
VPValue *getEdgeMask(const VPBasicBlock *Src, const VPBasicBlock *Dst) const { return EdgeMaskCache.lookup({Src, Dst}); @@ -301,5 +301,34 @@ VPlanTransforms::introduceMasksAndLinearize(VPlan &Plan, bool FoldTail) { PrevVPBB = VPBB; } + + // If we folded the tail and introduced a header mask, any extract of the + // last element must be updated to extract from the last active lane of the + // header mask instead (i.e., the lane corresponding to the last active + // iteration). + if (FoldTail) { + assert(Plan.getExitBlocks().size() == 1 && + "only a single-exit block is supported currently"); + VPBasicBlock *EB = Plan.getExitBlocks().front(); + assert(EB->getSinglePredecessor() == Plan.getMiddleBlock() && + "the exit block must have middle block as single predecessor"); + + VPBuilder B(Plan.getMiddleBlock()->getTerminator()); + for (auto &P : EB->phis()) { + auto *ExitIRI = cast(&P); + VPValue *Inc = ExitIRI->getIncomingValue(0); + VPValue *Op; + if (!match(Inc, m_ExtractLastElement(m_VPValue(Op)))) + continue; + + // Compute the index of the last active lane. + VPValue *HeaderMask = Predicator.getBlockInMask(Header); + VPValue *LastActiveLane = + B.createNaryOp(VPInstruction::LastActiveLane, HeaderMask); + auto *Ext = + B.createNaryOp(VPInstruction::ExtractLane, {LastActiveLane, Op}); + Inc->replaceAllUsesWith(Ext); + } + } return Predicator.getBlockMaskCache(); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index beae8051e75dc..5e46659227262 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -547,6 +547,7 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) { case VPInstruction::ExtractLastLanePerPart: case VPInstruction::ExtractPenultimateElement: case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: case VPInstruction::Not: case VPInstruction::Unpack: return 1; @@ -1156,6 +1157,29 @@ InstructionCost VPInstruction::computeCost(ElementCount VF, {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)}); return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind); } + case VPInstruction::LastActiveLane: { + Type *ScalarTy = Ctx.Types.inferScalarType(getOperand(0)); + if (VF.isScalar()) + return Ctx.TTI.getCmpSelInstrCost(Instruction::ICmp, ScalarTy, + CmpInst::makeCmpResultType(ScalarTy), + CmpInst::ICMP_EQ, Ctx.CostKind); + // Calculate the cost of determining the lane index: NOT + cttz_elts + SUB. + auto *PredTy = toVectorTy(ScalarTy, VF); + IntrinsicCostAttributes Attrs(Intrinsic::experimental_cttz_elts, + Type::getInt64Ty(Ctx.LLVMCtx), + {PredTy, Type::getInt1Ty(Ctx.LLVMCtx)}); + InstructionCost Cost = Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind); + // Add cost of NOT operation on the predicate. + Cost += Ctx.TTI.getArithmeticInstrCost( + Instruction::Xor, PredTy, Ctx.CostKind, + {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None}, + {TargetTransformInfo::OK_UniformConstantValue, + TargetTransformInfo::OP_None}); + // Add cost of SUB operation on the index. 
+ Cost += Ctx.TTI.getArithmeticInstrCost( + Instruction::Sub, Type::getInt64Ty(Ctx.LLVMCtx), Ctx.CostKind); + return Cost; + } case VPInstruction::FirstOrderRecurrenceSplice: { assert(VF.isVector() && "Scalar FirstOrderRecurrenceSplice?"); SmallVector Mask(VF.getKnownMinValue()); @@ -1210,6 +1234,7 @@ bool VPInstruction::isVectorToScalar() const { getOpcode() == Instruction::ExtractElement || getOpcode() == VPInstruction::ExtractLane || getOpcode() == VPInstruction::FirstActiveLane || + getOpcode() == VPInstruction::LastActiveLane || getOpcode() == VPInstruction::ComputeAnyOfResult || getOpcode() == VPInstruction::ComputeFindIVResult || getOpcode() == VPInstruction::ComputeReductionResult || @@ -1275,6 +1300,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const { case VPInstruction::ExtractPenultimateElement: case VPInstruction::ActiveLaneMask: case VPInstruction::FirstActiveLane: + case VPInstruction::LastActiveLane: case VPInstruction::FirstOrderRecurrenceSplice: case VPInstruction::LogicalAnd: case VPInstruction::Not: @@ -1451,6 +1477,9 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent, case VPInstruction::FirstActiveLane: O << "first-active-lane"; break; + case VPInstruction::LastActiveLane: + O << "last-active-lane"; + break; case VPInstruction::ReductionStartVector: O << "reduction-start-vector"; break; diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 7cef98f465715..bb517aad31f19 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -805,8 +805,8 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan, VPValue *Op, ScalarEvolution &SE) { VPValue *Incoming, *Mask; - if (!match(Op, m_VPInstruction( - m_FirstActiveLane(m_VPValue(Mask)), m_VPValue(Incoming)))) + if (!match(Op, m_ExtractLane(m_FirstActiveLane(m_VPValue(Mask)), + m_VPValue(Incoming)))) return nullptr; auto *WideIV = getOptimizableIVOf(Incoming, SE); @@ -1274,8 +1274,7 @@ static void simplifyRecipe(VPSingleDefRecipe *Def, VPTypeAnalysis &TypeInfo) { } // Look through ExtractPenultimateElement (BuildVector ....). - if (match(Def, m_VPInstruction( - m_BuildVector()))) { + if (match(Def, m_ExtractPenultimateElement(m_BuildVector()))) { auto *BuildVector = cast(Def->getOperand(0)); Def->replaceAllUsesWith( BuildVector->getOperand(BuildVector->getNumOperands() - 2)); @@ -2056,6 +2055,32 @@ bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan, // Set the first operand of RecurSplice to FOR again, after replacing // all users. RecurSplice->setOperand(0, FOR); + + // Check for users extracting at the penultimate active lane of the FOR. + // If only a single lane is active in the current iteration, we need to + // select the last element from the previous iteration (from the FOR phi + // directly). 
+ for (VPUser *U : RecurSplice->users()) { + if (!match(U, m_ExtractLane(m_LastActiveLane(m_VPValue()), + m_Specific(RecurSplice)))) + continue; + + VPBuilder B(cast(U)); + VPValue *LastActiveLane = cast(U)->getOperand(0); + Type *I64Ty = Type::getInt64Ty(Plan.getContext()); + VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(I64Ty, 0)); + VPValue *One = Plan.getOrAddLiveIn(ConstantInt::get(I64Ty, 1)); + VPValue *PenultimateIndex = + B.createNaryOp(Instruction::Sub, {LastActiveLane, One}); + VPValue *PenultimateLastIter = + B.createNaryOp(VPInstruction::ExtractLane, + {PenultimateIndex, FOR->getBackedgeValue()}); + VPValue *LastPrevIter = + B.createNaryOp(VPInstruction::ExtractLastElement, FOR); + VPValue *Cmp = B.createICmp(CmpInst::ICMP_EQ, LastActiveLane, Zero); + VPValue *Sel = B.createSelect(Cmp, LastPrevIter, PenultimateLastIter); + cast(U)->replaceAllUsesWith(Sel); + } } return true; } @@ -3445,6 +3470,34 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) { ToRemove.push_back(Expr); } + // Expand LastActiveLane into Not + FirstActiveLane + Sub. + auto *LastActiveL = dyn_cast(&R); + if (LastActiveL && + LastActiveL->getOpcode() == VPInstruction::LastActiveLane) { + // Create Not(Mask) for all operands. + SmallVector NotMasks; + for (VPValue *Op : LastActiveL->operands()) { + VPValue *NotMask = Builder.createNot(Op, LastActiveL->getDebugLoc()); + NotMasks.push_back(NotMask); + } + + // Create FirstActiveLane on the inverted masks. + VPValue *FirstInactiveLane = Builder.createNaryOp( + VPInstruction::FirstActiveLane, NotMasks, + LastActiveL->getDebugLoc(), "first.inactive.lane"); + + // Subtract 1 to get the last active lane. + VPValue *One = Plan.getOrAddLiveIn( + ConstantInt::get(Type::getInt64Ty(Plan.getContext()), 1)); + VPValue *LastLane = Builder.createNaryOp( + Instruction::Sub, {FirstInactiveLane, One}, + LastActiveL->getDebugLoc(), "last.active.lane"); + + LastActiveL->replaceAllUsesWith(LastLane); + ToRemove.push_back(LastActiveL); + continue; + } + VPValue *VectorStep; VPValue *ScalarStep; if (!match(&R, m_VPInstruction( diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp index d4b8b72beb942..221ca4ab05370 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp @@ -352,6 +352,7 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { VPValue *Op1; if (match(&R, m_VPInstruction(m_VPValue(Op1))) || match(&R, m_FirstActiveLane(m_VPValue(Op1))) || + match(&R, m_LastActiveLane(m_VPValue(Op1))) || match(&R, m_VPInstruction( m_VPValue(), m_VPValue(), m_VPValue(Op1))) || match(&R, m_VPInstruction( @@ -364,17 +365,21 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) { continue; } VPValue *Op0; - if (match(&R, m_VPInstruction( - m_VPValue(Op0), m_VPValue(Op1)))) { + if (match(&R, m_ExtractLane(m_VPValue(Op0), m_VPValue(Op1)))) { addUniformForAllParts(cast(&R)); for (unsigned Part = 1; Part != UF; ++Part) R.addOperand(getValueForPart(Op1, Part)); continue; } if (match(&R, m_ExtractLastElement(m_VPValue(Op0))) || - match(&R, m_VPInstruction( - m_VPValue(Op0)))) { + match(&R, m_ExtractPenultimateElement(m_VPValue(Op0)))) { addUniformForAllParts(cast(&R)); + if (isa(Op0)) { + assert(match(&R, m_ExtractLastElement(m_VPValue())) && + "can only extract last element of FOR"); + continue; + } + if (Plan.hasScalarVFOnly()) { auto *I = cast(&R); // Extracting from end with VF = 1 implies retrieving the last or diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp 
b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 34754a1ea3992..2d63d2a787f88 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -18,6 +18,7 @@ #include "VPlanDominatorTree.h" #include "VPlanHelpers.h" #include "VPlanPatternMatch.h" +#include "VPlanUtils.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/TypeSwitch.h" @@ -44,6 +45,9 @@ class VPlanVerifier { /// incoming value into EVL's recipe. bool verifyEVLRecipe(const VPInstruction &EVL) const; + /// Verify that \p LastActiveLane's operand is guaranteed to be a prefix-mask. + bool verifyLastActiveLaneRecipe(const VPInstruction &LastActiveLane) const; + bool verifyVPBasicBlock(const VPBasicBlock *VPBB); bool verifyBlock(const VPBlockBase *VPB); @@ -221,6 +225,44 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const { }); } +bool VPlanVerifier::verifyLastActiveLaneRecipe( + const VPInstruction &LastActiveLane) const { + assert(LastActiveLane.getOpcode() == VPInstruction::LastActiveLane && + "must be called with VPInstruction::LastActiveLane"); + + if (LastActiveLane.getNumOperands() < 1) { + errs() << "LastActiveLane must have at least one operand\n"; + return false; + } + + const VPlan &Plan = *LastActiveLane.getParent()->getPlan(); + // All operands must be prefix-mask. Currently we check for header masks or + // EVL-derived masks, as those are currently the only operands in practice, + // but this may need updating in the future. + for (VPValue *Op : LastActiveLane.operands()) { + if (vputils::isHeaderMask(Op, Plan)) + continue; + + // Masks derived from EVL are also fine. + auto BroadcastOrEVL = + m_CombineOr(m_Broadcast(m_EVL(m_VPValue())), m_EVL(m_VPValue())); + if (match(Op, m_CombineOr(m_ICmp(m_StepVector(), BroadcastOrEVL), + m_ICmp(BroadcastOrEVL, m_StepVector())))) + continue; + + errs() << "LastActiveLane operand "; +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + VPSlotTracker Tracker(&Plan); + Op->printAsOperand(errs(), Tracker); +#endif + errs() << " must be prefix mask (a header mask or an " + "EVL-derived mask currently)\n"; + return false; + } + + return true; +} + bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { if (!verifyPhiRecipes(VPBB)) return false; @@ -313,6 +355,10 @@ bool VPlanVerifier::verifyVPBasicBlock(const VPBasicBlock *VPBB) { return false; } break; + case VPInstruction::LastActiveLane: + if (!verifyLastActiveLaneRecipe(*VPI)) + return false; + break; default: break; } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll index b81637f50989d..a22f72fe929d1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll @@ -69,51 +69,51 @@ exit: define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) { ; CHECK-LABEL: define i8 @dead_live_out_due_to_scalar_epilogue_required( ; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP1]], i32 6) -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 252, [[TMP2]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: 
[[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 1005 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 1005 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] -; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 252, [[TMP4]] -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 [[N_MOD_VF]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 252, [[TMP6]] -; CHECK-NEXT: [[IND_END:%.*]] = mul i32 [[N_VEC]], 4 -; CHECK-NEXT: [[TMP9:%.*]] = call @llvm.stepvector.nxv4i32() -; CHECK-NEXT: [[TMP11:%.*]] = mul [[TMP9]], splat (i32 4) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP11]] -; CHECK-NEXT: [[TMP14:%.*]] = mul i32 4, [[TMP4]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i32 [[TMP14]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv16i32() +; CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i32 4) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP15:%.*]] = sext [[VEC_IND]] to -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], [[TMP15]] -; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0( zeroinitializer, align 1 [[TMP16]], splat (i1 true)), !alias.scope [[META3:![0-9]+]], !noalias [[META6:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 252, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i32 [[TMP2]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = mul i32 4, [[TMP2]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = icmp uge [[TMP0]], [[BROADCAST_SPLAT3]] +; CHECK-NEXT: [[TMP9:%.*]] = sext [[VEC_IND]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], [[TMP9]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv16i8.nxv16p0( align 1 [[TMP6]], splat (i1 true), i32 [[TMP2]]), !alias.scope 
[[META3:![0-9]+]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], [[TMP9]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0( zeroinitializer, align 1 [[TMP7]], splat (i1 true), i32 [[TMP2]]), !alias.scope [[META6:![0-9]+]], !noalias [[META3]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP2]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[SCALAR_PH]] +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1( [[TMP5]], i1 true) +; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 16 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractelement [[WIDE_MASKED_GATHER]], i64 [[TMP11]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[IV]] to i64 ; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IDXPROM]] ; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1 @@ -121,9 +121,9 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) { ; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 4 ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV]], 1001 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ] +; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ], [ [[TMP15]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i8 [[R]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll index 8e71718061c9b..ebd80b2c2af4d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll @@ -639,73 +639,50 @@ for.end: define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-LABEL: @udiv_sdiv_with_invariant_divisors( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 12, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 12, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 12, [[N_MOD_VF]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[Y:%.*]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] 
= insertelement poison, i8 [[X:%.*]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer -; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16 -; CHECK-NEXT: [[TMP4:%.*]] = add i16 -12, [[DOTCAST]] -; CHECK-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: [[TMP5:%.*]] = add i8 -12, [[DOTCAST5]] -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], splat (i8 1), [[BROADCAST_SPLAT2]] -; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[C]], splat (i16 1), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.stepvector.nxv2i8() -; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], splat (i8 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i8 -12), [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP3]] to i8 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement poison, i8 [[TMP10]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector [[BROADCAST_SPLATINSERT6]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i1 [[C:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = xor [[BROADCAST_SPLAT]], splat (i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i8 [[X:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i16 [[Y:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv8i8() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i8 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i8 -12), [[TMP2]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP11:%.*]] = udiv [[VEC_IND]], [[TMP6]] -; CHECK-NEXT: [[TMP12:%.*]] = zext [[TMP11]] to -; CHECK-NEXT: [[TMP13:%.*]] = sdiv [[TMP12]], [[TMP7]] -; CHECK-NEXT: [[TMP14:%.*]] = sext [[TMP13]] to -; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[C]], zeroinitializer, [[TMP14]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT7]] -; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 12, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i8 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.stepvector.nxv8i32() +; 
CHECK-NEXT: [[TMP15:%.*]] = icmp uge [[TMP5]], [[BROADCAST_SPLAT8]] +; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vp.merge.nxv8i8( [[TMP0]], [[BROADCAST_SPLAT2]], splat (i8 1), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP9:%.*]] = udiv [[VEC_IND]], [[TMP8]] +; CHECK-NEXT: [[TMP10:%.*]] = zext [[TMP9]] to +; CHECK-NEXT: [[TMP11:%.*]] = call @llvm.vp.merge.nxv8i16( [[TMP0]], [[BROADCAST_SPLAT4]], splat (i16 1), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP12:%.*]] = sdiv [[TMP10]], [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = sext [[TMP12]] to +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[C]], zeroinitializer, [[TMP13]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP3]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i32 [[TMP16]], 2 -; CHECK-NEXT: [[TMP18:%.*]] = sub i32 [[TMP17]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = extractelement [[PREDPHI]], i32 [[TMP18]] -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 12, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ -12, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i8 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ -12, [[ENTRY]] ] -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[NARROW_IV:%.*]] = phi i8 [ [[BC_RESUME_VAL8]], [[SCALAR_PH]] ], [ [[IV_NEXT_TRUNC:%.*]], [[LOOP_LATCH]] ] -; CHECK-NEXT: br i1 [[C]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; CHECK: then: -; CHECK-NEXT: [[UD:%.*]] = udiv i8 [[NARROW_IV]], [[X]] -; CHECK-NEXT: [[UD_EXT:%.*]] = zext i8 [[UD]] to i16 -; CHECK-NEXT: [[SD:%.*]] = sdiv i16 [[UD_EXT]], [[Y]] -; CHECK-NEXT: [[SD_EXT:%.*]] = sext i16 [[SD]] to i32 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 0, [[LOOP_HEADER]] ], [ [[SD_EXT]], [[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; CHECK-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv8i1( [[TMP15]], i1 true) +; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 8 +; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 0 +; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = extractelement [[PREDPHI]], i64 [[TMP17]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MERGE_LCSSA]] ; ; FIXED-LABEL: @udiv_sdiv_with_invariant_divisors( diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll index 7eb3d7fc5a36d..0083da77dfea3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/first-order-recurrence-scalable-vf1.ll @@ -7,55 +7,54 @@ target triple = "riscv64-unknown-linux-gnu" define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 { ; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for( ; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 23, [[TMP0]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 -; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i64 0, i32 [[TMP4]] +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 2 +; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP4]], 1 +; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i64 0, i32 [[TMP5]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD]] = load , ptr [[TMP5]], align 8 -; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.vector.splice.nxv1i64( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 23, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP6]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP8:%.*]] = icmp uge [[TMP7]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: [[VP_OP_LOAD]] = call @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP9]], splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[TMP10:%.*]] = call @llvm.experimental.vp.splice.nxv2i64( [[VECTOR_RECUR]], [[VP_OP_LOAD]], i32 -1, splat (i1 true), i32 
[[PREV_EVL]], i32 [[TMP6]]) +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP10]], ptr align 8 [[TMP11]], splat (i1 true), i32 [[TMP6]]) +; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP6]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1 -; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1 -; CHECK-NEXT: [[TMP16:%.*]] = extractelement [[TMP7]], i32 [[TMP15]] -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 23, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L]] = load i64, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP8]], i1 true) +; CHECK-NEXT: [[TMP15:%.*]] = sub i64 [[TMP14]], 1 +; CHECK-NEXT: [[TMP16:%.*]] = sub i64 [[TMP15]], 1 +; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2 +; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 0 +; CHECK-NEXT: [[TMP20:%.*]] = extractelement [[VP_OP_LOAD]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP22:%.*]] = mul nuw i32 [[TMP21]], 2 +; CHECK-NEXT: [[TMP23:%.*]] = sub i32 [[TMP22]], 1 +; CHECK-NEXT: [[TMP24:%.*]] = extractelement [[VECTOR_RECUR]], i32 [[TMP23]] +; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP26:%.*]] = select i1 [[TMP25]], i64 [[TMP24]], i64 [[TMP20]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP26]] ; entry: br label %loop @@ -81,5 +80,4 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl128b,+zvl256b" } ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index 3c90908b0a08f..e09284f26f6db 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -71,7 +71,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -115,7 +115,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP11]]) ; CHECK-NEXT: br label [[FOR_BODY:%.*]] @@ -159,7 +159,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -199,7 +199,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: @@ -224,43 +224,37 @@ for.end: define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; CHECK-LABEL: @uniform_load( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK-NEXT: br label [[ENTRY:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: 
[[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP0]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP2:%.*]] = icmp uge [[TMP1]], [[BROADCAST_SPLAT1]] ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[B:%.*]], align 8 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]] -; CHECK-NEXT: store [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[ARRAYIDX]], splat (i1 true), i32 [[TMP0]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[TMP5]], [[IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP2]], i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 0 +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[TMP8]] ; CHECK-NEXT: br label [[FOR_BODY1:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; CHECK-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: for.end: -; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], [[FOR_BODY1]] ], [ [[V]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[V_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP12]] ; entry: br label %for.body @@ -299,7 +293,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.end: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index c7ba826295de8..a89435f4b24e3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -400,61 +400,54 @@ for.end: define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-LABEL: define i32 @FOR_reduction( ; IF-EVL-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[TC:%.*]]) #[[ATTR0]] { -; IF-EVL-NEXT: [[ENTRY:.*]]: -; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; IF-EVL-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP9]], 2 -; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TC]], [[TMP1]] -; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; IF-EVL-NEXT: [[ENTRY:.*:]] +; IF-EVL-NEXT: br label %[[VECTOR_PH:.*]] ; IF-EVL: [[VECTOR_PH]]: ; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 ; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 ; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 ; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 33, i32 [[TMP8]] ; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] ; IF-EVL: [[VECTOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP4]], %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP9]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP9]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: [[TMP22:%.*]] = call @llvm.stepvector.nxv4i32() +; IF-EVL-NEXT: [[TMP23:%.*]] = icmp uge [[TMP22]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[WIDE_LOAD]] = load , ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[TMP10:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1) +; IF-EVL-NEXT: [[WIDE_LOAD]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[ARRAYIDX]], splat (i1 true), i32 [[TMP9]]) +; IF-EVL-NEXT: [[TMP10:%.*]] = call @llvm.experimental.vp.splice.nxv4i32( [[VECTOR_RECUR]], [[WIDE_LOAD]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP9]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw [[TMP10]], [[WIDE_LOAD]] ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr 
[[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store [[TMP11]], ptr [[TMP12]], align 4 -; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]] -; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP11]], ptr align 4 [[TMP12]], splat (i1 true), i32 [[TMP9]]) +; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64 +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[INDVARS]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; IF-EVL-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: +; IF-EVL-NEXT: [[TMP27:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv4i1( [[TMP23]], i1 true) +; IF-EVL-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 [[TMP28]], 1 +; IF-EVL-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; IF-EVL-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 0 +; IF-EVL-NEXT: [[TMP21:%.*]] = extractelement [[WIDE_LOAD]], i64 [[TMP17]] ; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 ; IF-EVL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP16]] -; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() -; IF-EVL-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP17]], 4 -; IF-EVL-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 2 -; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement [[WIDE_LOAD]], i32 [[TMP19]] -; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TC]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; IF-EVL: [[SCALAR_PH]]: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 33, %[[ENTRY]] ] -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP0]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: [[TMP25:%.*]] = extractelement [[VECTOR_RECUR]], i32 [[TMP16]] +; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[TMP28]], 0 +; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = select i1 [[TMP26]], i32 [[TMP25]], i32 [[TMP21]] +; IF-EVL-NEXT: br label %[[FOR_END:.*]] ; IF-EVL: [[FOR_END]]: -; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 
[[FOR1_LCSSA]] ; ; NO-VP-LABEL: define i32 @FOR_reduction( @@ -570,7 +563,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] ; IF-EVL: [[FOR_END]]: @@ -662,8 +655,7 @@ for.end: ; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} -; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} ;. ; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 1e21c753840e9..3f404daef6965 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -109,44 +109,38 @@ for.end: define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; SCALABLE-NEXT: [[ENTRY:.*]]: -; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; SCALABLE-NEXT: [[ENTRY:.*:]] +; SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 -; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; SCALABLE: [[VECTOR_BODY]]: -; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; SCALABLE-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP0]], i64 0 +; SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; SCALABLE-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; SCALABLE-NEXT: [[TMP2:%.*]] = icmp uge [[TMP1]], [[BROADCAST_SPLAT1]] ; SCALABLE-NEXT: [[TMP6:%.*]] = load i64, ptr [[B]], align 8 ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TMP6]], i64 0 ; SCALABLE-NEXT: 
[[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] -; SCALABLE-NEXT: store [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8 -; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] -; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], splat (i1 true), i32 [[TMP0]]) +; SCALABLE-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: -; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; SCALABLE: [[SCALAR_PH]]: -; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; SCALABLE-NEXT: [[FIRST_INACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP2]], i1 true) +; SCALABLE-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[FIRST_INACTIVE_LANE]], 1 +; SCALABLE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP7]], 2 +; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 [[TMP11]], 0 +; SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[LAST_ACTIVE_LANE]] +; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: -; SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; SCALABLE-NEXT: ret i64 [[V_LCSSA]] +; SCALABLE-NEXT: ret i64 [[TMP12]] ; ; FIXEDLEN-LABEL: define i64 @uniform_load_outside_use( ; FIXEDLEN-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { @@ -184,44 +178,38 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; ; TF-SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; TF-SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; TF-SCALABLE-NEXT: [[ENTRY:.*]]: -; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1 -; TF-SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] -; TF-SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; TF-SCALABLE-NEXT: [[ENTRY:.*:]] +; TF-SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 
2 -; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] -; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] ; TF-SCALABLE: [[VECTOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TMP0]], i64 0 +; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; TF-SCALABLE-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; TF-SCALABLE-NEXT: [[TMP2:%.*]] = icmp uge [[TMP1]], [[BROADCAST_SPLAT1]] ; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP3]] -; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[ARRAYIDX]], splat (i1 true), i32 [[TMP0]]) +; TF-SCALABLE-NEXT: [[TMP5:%.*]] = zext i32 [[TMP0]] to i64 +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[IV]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] +; TF-SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; TF-SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: -; TF-SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] -; TF-SCALABLE: [[SCALAR_PH]]: -; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; TF-SCALABLE-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; TF-SCALABLE-NEXT: [[FIRST_INACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1( [[TMP2]], i1 true) +; TF-SCALABLE-NEXT: [[LAST_ACTIVE_LANE:%.*]] = sub i64 [[FIRST_INACTIVE_LANE]], 1 +; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; TF-SCALABLE-NEXT: [[TMP9:%.*]] = mul 
i64 [[TMP8]], 0 +; TF-SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[BROADCAST_SPLAT]], i64 [[LAST_ACTIVE_LANE]] +; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: -; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], %[[FOR_BODY]] ], [ [[V]], %[[MIDDLE_BLOCK]] ] -; TF-SCALABLE-NEXT: ret i64 [[V_LCSSA]] +; TF-SCALABLE-NEXT: ret i64 [[TMP12]] ; entry: br label %for.body @@ -269,7 +257,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -350,7 +338,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -399,7 +387,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; SCALABLE-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -457,7 +445,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -499,7 +487,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ 
-557,7 +545,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -608,7 +596,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -679,7 +667,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -731,7 +719,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -812,7 +800,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: @@ -860,7 +848,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] ; SCALABLE: [[FOR_END]]: @@ -918,7 +906,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[FOR_END]]: diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll index e99ffda9e4043..c10dc5ddba2a9 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll @@ -524,22 +524,78 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; induction is used outside the loop. define i64 @example23d(ptr noalias nocapture %src, ptr noalias nocapture %dst) optsize { ; CHECK-LABEL: @example23d( +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: ; CHECK-NEXT: br label [[TMP1:%.*]] -; CHECK: 1: -; CHECK-NEXT: [[DOT04:%.*]] = phi ptr [ [[SRC:%.*]], [[TMP0:%.*]] ], [ [[TMP2:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[DOT013:%.*]] = phi ptr [ [[DST:%.*]], [[TMP0]] ], [ [[TMP6:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[I_02:%.*]] = phi i64 [ 0, [[TMP0]] ], [ [[TMP7:%.*]], [[TMP1]] ] -; CHECK-NEXT: [[TMP2]] = getelementptr inbounds nuw i8, ptr [[DOT04]], i64 2 +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE14:%.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[TMP9]], i64 2 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[TMP2]], i64 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[TMP10]], i64 6 +; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[TMP11]], i64 4 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[TMP32]], i64 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] +; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[TMP6]], i64 12 +; CHECK-NEXT: [[TMP33:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 257) +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP33]], i64 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; CHECK: pred.store.if: +; CHECK-NEXT: [[DOT013:%.*]] = getelementptr i8, ptr [[DST]], i64 
[[OFFSET_IDX4]] +; CHECK-NEXT: [[DOT04:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[DOT04]], align 2 ; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 7 -; CHECK-NEXT: [[TMP6]] = getelementptr inbounds nuw i8, ptr [[DOT013]], i64 4 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[DOT013]], align 4 -; CHECK-NEXT: [[TMP7]] = add nuw nsw i64 [[I_02]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP7]], 257 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[TMP8:%.*]], label [[TMP1]] -; CHECK: 8: -; CHECK-NEXT: ret i64 [[TMP7]] +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]] +; CHECK: pred.store.continue: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP33]], i64 1 +; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]] +; CHECK: pred.store.if9: +; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[NEXT_GEP1]], align 2 +; CHECK-NEXT: [[TMP14:%.*]] = zext i16 [[TMP13]] to i32 +; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i32 [[TMP14]], 7 +; CHECK-NEXT: store i32 [[TMP15]], ptr [[NEXT_GEP6]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]] +; CHECK: pred.store.continue10: +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP33]], i64 2 +; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]] +; CHECK: pred.store.if11: +; CHECK-NEXT: [[TMP17:%.*]] = load i16, ptr [[NEXT_GEP2]], align 2 +; CHECK-NEXT: [[TMP18:%.*]] = zext i16 [[TMP17]] to i32 +; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i32 [[TMP18]], 7 +; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP7]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]] +; CHECK: pred.store.continue12: +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP33]], i64 3 +; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14]] +; CHECK: pred.store.if13: +; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP3]], align 2 +; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32 +; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i32 [[TMP22]], 7 +; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP8]], align 4 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]] +; CHECK: pred.store.continue14: +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) +; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 +; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[TMP1]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: br label [[TMP30:%.*]] +; CHECK: 25: +; CHECK-NEXT: [[TMP25:%.*]] = xor <4 x i1> [[TMP33]], splat (i1 true) +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP25]], i1 true) +; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], -1 +; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[TMP27]] +; CHECK-NEXT: [[TMP29:%.*]] = add nsw i64 [[TMP28]], 1 +; CHECK-NEXT: ret i64 [[TMP29]] ; br label %1 diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll index e97d6e66d9d7a..58217069058f8 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll @@ -6,59 +6,276 @@ define i32 @FOR_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; 
VF2IC1-LABEL: define i32 @FOR_used_outside(
; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
-; VF2IC1-NEXT: [[ENTRY:.*]]:
-; VF2IC1-NEXT: br label %[[LOOP:.*]]
-; VF2IC1: [[LOOP]]:
-; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[ENTRY:.*:]]
+; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC1: [[VECTOR_PH]]:
+; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1
+; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
+; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
+; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC1: [[VECTOR_BODY]]:
+; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ]
+; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ]
+; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 33>, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ]
+; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
+; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1
+; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; VF2IC1: [[PRED_LOAD_IF]]:
; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
-; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4
-; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]]
-; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
+; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0
+; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; VF2IC1: [[PRED_LOAD_CONTINUE]]:
+; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ]
+; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]]
+; VF2IC1: [[PRED_LOAD_IF1]]:
+; VF2IC1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]]
+; VF2IC1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4
+; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP34]], i32 1
+; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; VF2IC1: [[PRED_LOAD_CONTINUE2]]:
+; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ]
+; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> <i32 1, i32 2>
+; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF2IC1: [[PRED_STORE_IF]]:
+; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0
+; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0
+; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
+; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4
+; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF2IC1: [[PRED_STORE_CONTINUE]]:
+; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]]
+; VF2IC1: [[PRED_STORE_IF3]]:
+; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]]
+; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1
+; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1
+; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4
-; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1
-; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; VF2IC1: [[PRED_STORE_CONTINUE4]]:
+; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VF2IC1: [[MIDDLE_BLOCK]]:
+; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
+; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 true)
+; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1
+; VF2IC1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1
+; VF2IC1-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP28]]
+; VF2IC1-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1
+; VF2IC1-NEXT: [[TMP31:%.*]] = icmp eq i64 [[TMP27]], 0
+; VF2IC1-NEXT: [[TMP32:%.*]] = select i1 [[TMP31]], i32 [[TMP30]], i32 [[TMP29]]
+; VF2IC1-NEXT: br label %[[FOR_END:.*]]
; VF2IC1: [[FOR_END]]:
-; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
; VF2IC1-NEXT: ret i32 [[TMP32]]
;
; VF2IC2-LABEL: define i32 @FOR_used_outside(
; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
-; VF2IC2-NEXT: [[ENTRY:.*]]:
-; VF2IC2-NEXT: br label %[[LOOP:.*]]
-; VF2IC2: [[LOOP]]:
-; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3
+; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
+; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ]
+; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ]
+; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 33>, %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ]
+; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3
+; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
+; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; VF2IC2: [[PRED_LOAD_IF]]:
; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]]
-; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4
-; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]]
-; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
+; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; VF2IC2: [[PRED_LOAD_CONTINUE]]:
+; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]]
+; VF2IC2: [[PRED_LOAD_IF1]]:
+; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
+; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
+; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; VF2IC2: [[PRED_LOAD_CONTINUE2]]:
+; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ]
+; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]]
+; VF2IC2: [[PRED_LOAD_IF3]]:
+; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]]
+; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
+; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]]
+; VF2IC2: [[PRED_LOAD_CONTINUE4]]:
+; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ]
+; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]]
+; VF2IC2: [[PRED_LOAD_IF5]]:
+; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]]
+; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4
+; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]]
+; VF2IC2: [[PRED_LOAD_CONTINUE6]]:
+; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ]
+; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> <i32 1, i32 2>
+; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> <i32 1, i32 2>
+; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+;
VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0 +; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0 +; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] +; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF2IC2: [[PRED_STORE_IF7]]: +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1 +; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]] ; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 -; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 -; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP68:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP68]], ptr [[TMP67]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 true) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 true) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = 
icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = sub i64 [[TMP57]], 1 +; VF2IC2-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP58]] +; VF2IC2-NEXT: [[TMP60:%.*]] = sub i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP60]] +; VF2IC2-NEXT: [[TMP62:%.*]] = icmp uge i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP63:%.*]] = select i1 [[TMP62]], i32 [[TMP61]], i32 [[TMP59]] +; VF2IC2-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC2-NEXT: [[TMP65:%.*]] = icmp eq i64 [[TMP57]], 0 +; VF2IC2-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i32 [[TMP64]], i32 [[TMP63]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF2IC2-NEXT: ret i32 [[TMP66]] ; ; VF1IC2-LABEL: define i32 @FOR_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP32]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: 
[[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = icmp uge i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: [[TMP29:%.*]] = icmp eq i64 [[TMP24]], 0 +; VF1IC2-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[VECTOR_RECUR]], i32 [[TMP28]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; VF1IC2-NEXT: ret i32 [[TMP30]] ; entry: @@ -83,59 +300,265 @@ for.end: define i32 @FOR_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { ; VF2IC1-LABEL: define i32 @FOR_next_used_outside( ; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF2IC1-NEXT: [[ENTRY:.*]]: -; VF2IC1-NEXT: br label %[[LOOP:.*]] -; VF2IC1: [[LOOP]]: -; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[ENTRY:.*:]] +; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC1: [[VECTOR_PH]]: +; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC1: 
[[VECTOR_BODY]]:
+; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ]
+; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ]
+; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 33>, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ]
+; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
+; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1
+; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; VF2IC1: [[PRED_LOAD_IF]]:
; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
-; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4
-; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]]
-; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
+; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0
+; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; VF2IC1: [[PRED_LOAD_CONTINUE]]:
+; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ]
+; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]]
+; VF2IC1: [[PRED_LOAD_IF1]]:
+; VF2IC1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]]
+; VF2IC1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
+; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP30]], i32 1
+; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; VF2IC1: [[PRED_LOAD_CONTINUE2]]:
+; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ]
+; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> <i32 1, i32 2>
+; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF2IC1: [[PRED_STORE_IF]]:
+; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0
+; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0
+; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
+; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4
+; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF2IC1: [[PRED_STORE_CONTINUE]]:
+; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]]
+; VF2IC1: [[PRED_STORE_IF3]]:
+; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]]
+; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1
+; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1
+; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4
-; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1
-; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; VF2IC1: [[PRED_STORE_CONTINUE4]]:
+; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VF2IC1: [[MIDDLE_BLOCK]]:
+; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
+; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 true)
+; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1
+; VF2IC1-NEXT: [[TMP28:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP27]]
+; VF2IC1-NEXT: br label %[[FOR_END:.*]]
; VF2IC1: [[FOR_END]]:
-; VF2IC1-NEXT: [[TMP28:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ]
; VF2IC1-NEXT: ret i32 [[TMP28]]
;
; VF2IC2-LABEL: define i32 @FOR_next_used_outside(
; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
-; VF2IC2-NEXT: [[ENTRY:.*]]:
-; VF2IC2-NEXT: br label %[[LOOP:.*]]
-; VF2IC2: [[LOOP]]:
-; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3
+; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
+; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ]
+; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ]
+; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 33>, %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ]
+; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3
+; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
+; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; VF2IC2: [[PRED_LOAD_IF]]:
; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]]
-; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4
-; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]]
-; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
+; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; VF2IC2: [[PRED_LOAD_CONTINUE]]:
+; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]]
+; VF2IC2: [[PRED_LOAD_IF1]]:
+; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
+; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
+; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; VF2IC2: [[PRED_LOAD_CONTINUE2]]:
+; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ]
+; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]]
+; VF2IC2: [[PRED_LOAD_IF3]]:
+; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]]
+; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
+; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]]
+; VF2IC2: [[PRED_LOAD_CONTINUE4]]:
+; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ]
+; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]]
+; VF2IC2: [[PRED_LOAD_IF5]]:
+; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]]
+; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4
+; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]]
+; VF2IC2: [[PRED_LOAD_CONTINUE6]]:
+; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ]
+; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> <i32 1, i32 2>
+; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> <i32 1, i32 2>
+; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF2IC2: [[PRED_STORE_IF]]:
+; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0
+; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0
+; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
+; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4
+; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF2IC2: [[PRED_STORE_CONTINUE]]:
+; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
+; VF2IC2: [[PRED_STORE_IF7]]:
+; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1
+; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1
+; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4
-; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1
-; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF2IC2: [[PRED_STORE_CONTINUE8]]: +; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF2IC2: [[PRED_STORE_IF9]]: +; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]] +; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0 +; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0 +; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF2IC2: [[PRED_STORE_CONTINUE10]]: +; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_IF11]]: +; VF2IC2-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1 +; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1 +; VF2IC2-NEXT: [[TMP64:%.*]] = add nsw i32 [[TMP45]], [[TMP46]] +; VF2IC2-NEXT: store i32 [[TMP64]], ptr [[TMP63]], align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 true) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 true) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP57]] +; VF2IC2-NEXT: [[TMP59:%.*]] = sub i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP60:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP59]] +; VF2IC2-NEXT: [[TMP61:%.*]] = icmp uge i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP62:%.*]] = select i1 [[TMP61]], i32 [[TMP60]], i32 [[TMP58]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP62:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] ; VF2IC2-NEXT: ret i32 [[TMP62]] ; ; VF1IC2-LABEL: define i32 @FOR_next_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub 
i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP29]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; 
VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]]
+; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1
+; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]]
+; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1
+; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
+; VF1IC2-NEXT: [[TMP26:%.*]] = icmp uge i64 [[TMP24]], 1
+; VF1IC2-NEXT: [[TMP27:%.*]] = select i1 [[TMP26]], i32 [[TMP8]], i32 [[TMP5]]
+; VF1IC2-NEXT: br label %[[FOR_END:.*]]
; VF1IC2: [[FOR_END]]:
-; VF1IC2-NEXT: [[TMP27:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ]
; VF1IC2-NEXT: ret i32 [[TMP27]]
;
entry:
@@ -160,64 +583,287 @@ for.end:
define i32 @FOR_and_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) {
; VF2IC1-LABEL: define i32 @FOR_and_next_used_outside(
; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
-; VF2IC1-NEXT: [[ENTRY:.*]]:
-; VF2IC1-NEXT: br label %[[LOOP:.*]]
-; VF2IC1: [[LOOP]]:
-; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ]
+; VF2IC1-NEXT: [[ENTRY:.*:]]
+; VF2IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC1: [[VECTOR_PH]]:
+; VF2IC1-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1
+; VF2IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2
+; VF2IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF2IC1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
+; VF2IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF2IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; VF2IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC1: [[VECTOR_BODY]]:
+; VF2IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE4:.*]] ]
+; VF2IC1-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE4]] ]
+; VF2IC1-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 33>, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[PRED_STORE_CONTINUE4]] ]
+; VF2IC1-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
+; VF2IC1-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 1
+; VF2IC1-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; VF2IC1-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; VF2IC1-NEXT: br i1 [[TMP3]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; VF2IC1: [[PRED_LOAD_IF]]:
; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
-; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4
-; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]]
-; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
+; VF2IC1-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP10]], i32 0
+; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; VF2IC1: [[PRED_LOAD_CONTINUE]]:
+; VF2IC1-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ]
+; VF2IC1-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; VF2IC1-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]]
+; VF2IC1: [[PRED_LOAD_IF1]]:
+; VF2IC1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP4]]
+; VF2IC1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4
+; VF2IC1-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP35]], i32 1
+; VF2IC1-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; VF2IC1: [[PRED_LOAD_CONTINUE2]]:
+; VF2IC1-NEXT: [[TMP12]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP11]], %[[PRED_LOAD_IF1]] ]
+; VF2IC1-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP12]], <2 x i32> <i32 1, i32 2>
+; VF2IC1-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
+; VF2IC1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF2IC1: [[PRED_STORE_IF]]:
+; VF2IC1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC1-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP13]], i32 0
+; VF2IC1-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP12]], i32 0
+; VF2IC1-NEXT: [[TMP18:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
+; VF2IC1-NEXT: store i32 [[TMP18]], ptr [[TMP15]], align 4
+; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF2IC1: [[PRED_STORE_CONTINUE]]:
+; VF2IC1-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
+; VF2IC1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4]]
+; VF2IC1: [[PRED_STORE_IF3]]:
+; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP4]]
+; VF2IC1-NEXT: [[TMP21:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1
+; VF2IC1-NEXT: [[TMP22:%.*]] = extractelement <2 x i32> [[TMP12]], i32 1
+; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4
-; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1
-; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC1-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; VF2IC1: [[PRED_STORE_CONTINUE4]]:
+; VF2IC1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; VF2IC1-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; VF2IC1-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VF2IC1-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF2IC1: [[MIDDLE_BLOCK]]:
+; VF2IC1-NEXT: [[TMP25:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
+; VF2IC1-NEXT: [[TMP26:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP25]], i1 true)
+; VF2IC1-NEXT: [[TMP27:%.*]] = sub i64 [[TMP26]], 1
+; VF2IC1-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1
+; VF2IC1-NEXT: [[TMP29:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP28]]
+; VF2IC1-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1
+; VF2IC1-NEXT: [[TMP31:%.*]] = icmp eq i64 [[TMP27]], 0
+; VF2IC1-NEXT: [[TMP32:%.*]] = select i1 [[TMP31]], i32 [[TMP30]], i32 [[TMP29]]
+; VF2IC1-NEXT: [[TMP33:%.*]] = extractelement <2 x i32> [[TMP12]], i64 [[TMP27]]
+; VF2IC1-NEXT: br label %[[FOR_END:.*]]
; VF2IC1: [[FOR_END]]:
-; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ]
-; VF2IC1-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ]
; VF2IC1-NEXT: [[RES:%.*]] = add i32 [[TMP32]], [[TMP33]]
; VF2IC1-NEXT: ret i32 [[RES]]
;
; VF2IC2-LABEL: define i32 @FOR_and_next_used_outside(
; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) {
-; VF2IC2-NEXT: [[ENTRY:.*]]:
-; VF2IC2-NEXT: br label %[[LOOP:.*]]
-; VF2IC2: [[LOOP]]:
-; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ]
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 3
+; VF2IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
+; VF2IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF2IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ]
+; VF2IC2-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ]
+; VF2IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 33>, %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_STORE_CONTINUE12]] ]
+; VF2IC2-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; VF2IC2-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; VF2IC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; VF2IC2-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 3
+; VF2IC2-NEXT: [[TMP4:%.*]] = icmp ule <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; VF2IC2-NEXT: [[TMP5:%.*]] = icmp ule <2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
+; VF2IC2-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; VF2IC2: [[PRED_LOAD_IF]]:
; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]]
-; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4
-; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]]
-; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
+; VF2IC2-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP23]], i32 0
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; VF2IC2: [[PRED_LOAD_CONTINUE]]:
+; VF2IC2-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; VF2IC2-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]]
+; VF2IC2: [[PRED_LOAD_IF1]]:
+; VF2IC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]]
+; VF2IC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
+; VF2IC2-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP13]], i32 1
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; VF2IC2: [[PRED_LOAD_CONTINUE2]]:
+; VF2IC2-NEXT: [[TMP15:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP14]], %[[PRED_LOAD_IF1]] ]
+; VF2IC2-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP16]], label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]]
+; VF2IC2: [[PRED_LOAD_IF3]]:
+; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP2]]
+; VF2IC2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
+; VF2IC2-NEXT: [[TMP19:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE4]]
+; VF2IC2: [[PRED_LOAD_CONTINUE4]]:
+; VF2IC2-NEXT: [[TMP20:%.*]] = phi <2 x i32> [ poison, %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP19]], %[[PRED_LOAD_IF3]] ]
+; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP21]], label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]]
+; VF2IC2: [[PRED_LOAD_IF5]]:
+; VF2IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP7]]
+; VF2IC2-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP34]], align 4
+; VF2IC2-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP37]], i32 1
+; VF2IC2-NEXT: br label %[[PRED_LOAD_CONTINUE6]]
+; VF2IC2: [[PRED_LOAD_CONTINUE6]]:
+; VF2IC2-NEXT: [[TMP25]] = phi <2 x i32> [ [[TMP20]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP24]], %[[PRED_LOAD_IF5]] ]
+; VF2IC2-NEXT: [[TMP26:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[TMP15]], <2 x i32> <i32 1, i32 2>
+; VF2IC2-NEXT: [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> [[TMP25]], <2 x i32> <i32 1, i32 2>
+; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF2IC2: [[PRED_STORE_IF]]:
+; VF2IC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]]
+; VF2IC2-NEXT: [[TMP30:%.*]] = extractelement <2 x i32> [[TMP26]], i32 0
+; VF2IC2-NEXT: [[TMP31:%.*]] = extractelement <2 x i32> [[TMP15]], i32 0
+; VF2IC2-NEXT: [[TMP32:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
+; VF2IC2-NEXT: store i32 [[TMP32]], ptr [[TMP29]], align 4
+; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF2IC2: [[PRED_STORE_CONTINUE]]:
+; VF2IC2-NEXT: [[TMP33:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP33]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
+; VF2IC2: [[PRED_STORE_IF7]]:
+; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]]
+; VF2IC2-NEXT: [[TMP35:%.*]] = extractelement <2 x i32> [[TMP26]], i32 1
+; VF2IC2-NEXT: [[TMP36:%.*]] = extractelement <2 x i32> [[TMP15]], i32 1
+; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4
-; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1
-; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]]
+; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE8]]
+; VF2IC2: [[PRED_STORE_CONTINUE8]]:
+; VF2IC2-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
+; VF2IC2-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
+; VF2IC2: [[PRED_STORE_IF9]]:
+; VF2IC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP2]]
+; VF2IC2-NEXT: [[TMP40:%.*]] = extractelement <2 x i32> [[TMP27]], i32 0
+; VF2IC2-NEXT: [[TMP41:%.*]] = extractelement <2 x i32> [[TMP25]], i32 0
+; VF2IC2-NEXT: [[TMP42:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
+; VF2IC2-NEXT: store i32 [[TMP42]], ptr [[TMP39]], align 4
+; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE10]]
+; VF2IC2: [[PRED_STORE_CONTINUE10]]:
+; VF2IC2-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
+; VF2IC2-NEXT: br i1 [[TMP43]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]]
+; VF2IC2: [[PRED_STORE_IF11]]:
+; VF2IC2-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP7]]
+; VF2IC2-NEXT: [[TMP45:%.*]] = extractelement <2 x i32> [[TMP27]], i32 1
+; VF2IC2-NEXT: [[TMP46:%.*]] = extractelement <2 x i32> [[TMP25]], i32 1
+; VF2IC2-NEXT: [[TMP73:%.*]] = add nsw i32 [[TMP45]], [[TMP46]]
+; VF2IC2-NEXT: store i32 [[TMP73]], ptr [[TMP72]],
align 4 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF2IC2: [[PRED_STORE_CONTINUE12]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) +; VF2IC2-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF2IC2-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; VF2IC2-NEXT: [[TMP49:%.*]] = xor <2 x i1> [[TMP4]], splat (i1 true) +; VF2IC2-NEXT: [[TMP50:%.*]] = xor <2 x i1> [[TMP5]], splat (i1 true) +; VF2IC2-NEXT: [[TMP51:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP50]], i1 true) +; VF2IC2-NEXT: [[TMP52:%.*]] = add i64 2, [[TMP51]] +; VF2IC2-NEXT: [[TMP53:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> [[TMP49]], i1 true) +; VF2IC2-NEXT: [[TMP54:%.*]] = add i64 0, [[TMP53]] +; VF2IC2-NEXT: [[TMP55:%.*]] = icmp ne i64 [[TMP53]], 2 +; VF2IC2-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i64 [[TMP54]], i64 [[TMP52]] +; VF2IC2-NEXT: [[TMP57:%.*]] = sub i64 [[TMP56]], 1 +; VF2IC2-NEXT: [[TMP58:%.*]] = sub i64 [[TMP57]], 1 +; VF2IC2-NEXT: [[TMP59:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP58]] +; VF2IC2-NEXT: [[TMP60:%.*]] = sub i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP61:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP60]] +; VF2IC2-NEXT: [[TMP62:%.*]] = icmp uge i64 [[TMP58]], 2 +; VF2IC2-NEXT: [[TMP63:%.*]] = select i1 [[TMP62]], i32 [[TMP61]], i32 [[TMP59]] +; VF2IC2-NEXT: [[TMP64:%.*]] = extractelement <2 x i32> [[VECTOR_RECUR]], i32 1 +; VF2IC2-NEXT: [[TMP65:%.*]] = icmp eq i64 [[TMP57]], 0 +; VF2IC2-NEXT: [[TMP66:%.*]] = select i1 [[TMP65]], i32 [[TMP64]], i32 [[TMP63]] +; VF2IC2-NEXT: [[TMP67:%.*]] = extractelement <2 x i32> [[TMP15]], i64 [[TMP57]] +; VF2IC2-NEXT: [[TMP68:%.*]] = sub i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP69:%.*]] = extractelement <2 x i32> [[TMP25]], i64 [[TMP68]] +; VF2IC2-NEXT: [[TMP70:%.*]] = icmp uge i64 [[TMP57]], 2 +; VF2IC2-NEXT: [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP69]], i32 [[TMP67]] +; VF2IC2-NEXT: br label %[[FOR_END:.*]] ; VF2IC2: [[FOR_END]]: -; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF2IC2-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] ; VF2IC2-NEXT: [[RES:%.*]] = add i32 [[TMP66]], [[TMP71]] ; VF2IC2-NEXT: ret i32 [[RES]] ; ; VF1IC2-LABEL: define i32 @FOR_and_next_used_outside( ; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { -; VF1IC2-NEXT: [[ENTRY:.*]]: -; VF1IC2-NEXT: br label %[[LOOP:.*]] -; VF1IC2: [[LOOP]]: -; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[ENTRY:.*:]] +; VF1IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF1IC2: [[VECTOR_PH]]: +; VF1IC2-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], 1 +; VF1IC2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; VF1IC2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; VF1IC2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; VF1IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF1IC2: [[VECTOR_BODY]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE5:.*]] ] +; VF1IC2-NEXT: [[VECTOR_RECUR:%.*]] = phi i32 [ 33, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[PRED_STORE_CONTINUE5]] ] +; VF1IC2-NEXT: [[TMP3:%.*]] = add i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[VEC_IV:%.*]] = add i64 [[TMP0]], 0 +; VF1IC2-NEXT: [[VEC_IV1:%.*]] = add i64 
[[TMP0]], 1 +; VF1IC2-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; VF1IC2: [[PRED_LOAD_IF]]: ; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] -; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 -; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] -; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; VF1IC2: [[PRED_LOAD_CONTINUE]]: +; VF1IC2-NEXT: [[TMP5:%.*]] = phi i32 [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_LOAD_IF]] ] +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF2:.*]], label %[[PRED_LOAD_CONTINUE3:.*]] +; VF1IC2: [[PRED_LOAD_IF2]]: +; VF1IC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +; VF1IC2-NEXT: br label %[[PRED_LOAD_CONTINUE3]] +; VF1IC2: [[PRED_LOAD_CONTINUE3]]: +; VF1IC2-NEXT: [[TMP8]] = phi i32 [ poison, %[[PRED_LOAD_CONTINUE]] ], [ [[TMP35]], %[[PRED_LOAD_IF2]] ] +; VF1IC2-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF1IC2: [[PRED_STORE_IF]]: +; VF1IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP10:%.*]] = add nsw i32 [[VECTOR_RECUR]], [[TMP5]] +; VF1IC2-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4 +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF1IC2: [[PRED_STORE_CONTINUE]]: +; VF1IC2-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_IF4]]: +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[TMP5]], [[TMP8]] ; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 -; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 -; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF1IC2: [[PRED_STORE_CONTINUE5]]: +; VF1IC2-NEXT: [[INDEX_NEXT]] = add i64 [[TMP0]], 2 +; VF1IC2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF1IC2-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF1IC2: [[MIDDLE_BLOCK]]: +; VF1IC2-NEXT: [[TMP14:%.*]] = xor i1 [[TMP1]], true +; VF1IC2-NEXT: [[TMP15:%.*]] = xor i1 [[TMP2]], true +; VF1IC2-NEXT: [[TMP16:%.*]] = icmp eq i1 [[TMP15]], false +; VF1IC2-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i64 +; VF1IC2-NEXT: [[TMP18:%.*]] = add i64 1, [[TMP17]] +; VF1IC2-NEXT: [[TMP19:%.*]] = icmp eq i1 [[TMP14]], false +; VF1IC2-NEXT: [[TMP20:%.*]] = zext i1 [[TMP19]] to i64 +; VF1IC2-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]] +; VF1IC2-NEXT: [[TMP22:%.*]] = icmp ne i64 [[TMP20]], 1 +; VF1IC2-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i64 [[TMP21]], i64 [[TMP18]] +; VF1IC2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP23]], 1 +; VF1IC2-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP27:%.*]] = icmp uge i64 [[TMP25]], 1 +; VF1IC2-NEXT: [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: [[TMP29:%.*]] = icmp eq i64 [[TMP24]], 0 +; 
VF1IC2-NEXT: [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[VECTOR_RECUR]], i32 [[TMP28]] +; VF1IC2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP32:%.*]] = icmp uge i64 [[TMP24]], 1 +; VF1IC2-NEXT: [[TMP33:%.*]] = select i1 [[TMP32]], i32 [[TMP8]], i32 [[TMP5]] +; VF1IC2-NEXT: br label %[[FOR_END:.*]] ; VF1IC2: [[FOR_END]]: -; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; VF1IC2-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] ; VF1IC2-NEXT: [[RES:%.*]] = add i32 [[TMP30]], [[TMP33]] ; VF1IC2-NEXT: ret i32 [[RES]] ; diff --git a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll index f9f7feb7bdfbc..9931137566c1a 100644 --- a/llvm/test/Transforms/LoopVectorize/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/optsize.ll @@ -476,45 +476,87 @@ define i32 @pr45526() optsize { ; ; CHECK-LABEL: define i32 @pr45526( ; CHECK-SAME: ) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PIV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[PIVPLUS1:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 5, %[[ENTRY]] ], [ [[PIVPLUS1]], %[[LOOP]] ] -; CHECK-NEXT: [[PIVPLUS1]] = add nuw nsw i32 [[PIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[PIV]], 510 -; CHECK-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[VEC_IND]], splat (i32 510) +; CHECK-NEXT: [[TMP1]] = add nuw nsw <4 x i32> [[VEC_IND]], splat (i32 1) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 512 +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP0]], i1 true) +; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 +; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP1]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3 +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP5]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[TMP7]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; CHECK-NEXT: ret i32 [[FOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP10]] ; ; PGSO-LABEL: define i32 @pr45526( ; PGSO-SAME: ) #[[ATTR0]] { -; PGSO-NEXT: [[ENTRY:.*]]: -; PGSO-NEXT: br label %[[LOOP:.*]] -; PGSO: [[LOOP]]: -; PGSO-NEXT: [[PIV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[PIVPLUS1:%.*]], %[[LOOP]] ] -; PGSO-NEXT: [[FOR:%.*]] = phi i32 [ 5, %[[ENTRY]] ], [ [[PIVPLUS1]], %[[LOOP]] ] -; PGSO-NEXT: [[PIVPLUS1]] = add nuw nsw i32 [[PIV]], 1 -; PGSO-NEXT: [[COND:%.*]] = icmp ult i32 [[PIV]], 510 -; PGSO-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]] +; PGSO-NEXT: [[ENTRY:.*:]] +; 
PGSO-NEXT: br label %[[VECTOR_PH:.*]] +; PGSO: [[VECTOR_PH]]: +; PGSO-NEXT: br label %[[VECTOR_BODY:.*]] +; PGSO: [[VECTOR_BODY]]: +; PGSO-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PGSO-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PGSO-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ] +; PGSO-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[VEC_IND]], splat (i32 510) +; PGSO-NEXT: [[TMP1]] = add nuw nsw <4 x i32> [[VEC_IND]], splat (i32 1) +; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; PGSO-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; PGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 512 +; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; PGSO: [[MIDDLE_BLOCK]]: +; PGSO-NEXT: [[TMP4:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP0]], i1 true) +; PGSO-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1 +; PGSO-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 +; PGSO-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP1]], i64 [[TMP6]] +; PGSO-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3 +; PGSO-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP5]], 0 +; PGSO-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[TMP7]] +; PGSO-NEXT: br label %[[EXIT:.*]] ; PGSO: [[EXIT]]: -; PGSO-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; PGSO-NEXT: ret i32 [[FOR_LCSSA]] +; PGSO-NEXT: ret i32 [[TMP10]] ; ; NPGSO-LABEL: define i32 @pr45526( ; NPGSO-SAME: ) #[[ATTR0]] { -; NPGSO-NEXT: [[ENTRY:.*]]: -; NPGSO-NEXT: br label %[[LOOP:.*]] -; NPGSO: [[LOOP]]: -; NPGSO-NEXT: [[PIV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[PIVPLUS1:%.*]], %[[LOOP]] ] -; NPGSO-NEXT: [[FOR:%.*]] = phi i32 [ 5, %[[ENTRY]] ], [ [[PIVPLUS1]], %[[LOOP]] ] -; NPGSO-NEXT: [[PIVPLUS1]] = add nuw nsw i32 [[PIV]], 1 -; NPGSO-NEXT: [[COND:%.*]] = icmp ult i32 [[PIV]], 510 -; NPGSO-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]] +; NPGSO-NEXT: [[ENTRY:.*:]] +; NPGSO-NEXT: br label %[[VECTOR_PH:.*]] +; NPGSO: [[VECTOR_PH]]: +; NPGSO-NEXT: br label %[[VECTOR_BODY:.*]] +; NPGSO: [[VECTOR_BODY]]: +; NPGSO-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NPGSO-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NPGSO-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ] +; NPGSO-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[VEC_IND]], splat (i32 510) +; NPGSO-NEXT: [[TMP1]] = add nuw nsw <4 x i32> [[VEC_IND]], splat (i32 1) +; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; NPGSO-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; NPGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 512 +; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; NPGSO: [[MIDDLE_BLOCK]]: +; NPGSO-NEXT: [[TMP4:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP0]], i1 true) +; NPGSO-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1 +; NPGSO-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 +; NPGSO-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP1]], i64 [[TMP6]] +; NPGSO-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3 +; NPGSO-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP5]], 0 +; NPGSO-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[TMP7]] 
+; NPGSO-NEXT: br label %[[EXIT:.*]] ; NPGSO: [[EXIT]]: -; NPGSO-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; NPGSO-NEXT: ret i32 [[FOR_LCSSA]] +; NPGSO-NEXT: ret i32 [[TMP10]] ; entry: br label %loop @@ -534,31 +576,59 @@ define i32 @pr45526_pgso() !prof !14 { ; ; CHECK-LABEL: define i32 @pr45526_pgso( ; CHECK-SAME: ) !prof [[PROF14]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PIV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[PIVPLUS1:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 5, %[[ENTRY]] ], [ [[PIVPLUS1]], %[[LOOP]] ] -; CHECK-NEXT: [[PIVPLUS1]] = add nuw nsw i32 [[PIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[PIV]], 510 -; CHECK-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[VEC_IND]], splat (i32 510) +; CHECK-NEXT: [[TMP1]] = add nuw nsw <4 x i32> [[VEC_IND]], splat (i32 1) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 512 +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP0]], i1 true) +; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 +; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP1]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3 +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP5]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[TMP7]] +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; CHECK-NEXT: ret i32 [[FOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP10]] ; ; PGSO-LABEL: define i32 @pr45526_pgso( ; PGSO-SAME: ) !prof [[PROF14]] { -; PGSO-NEXT: [[ENTRY:.*]]: -; PGSO-NEXT: br label %[[LOOP:.*]] -; PGSO: [[LOOP]]: -; PGSO-NEXT: [[PIV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[PIVPLUS1:%.*]], %[[LOOP]] ] -; PGSO-NEXT: [[FOR:%.*]] = phi i32 [ 5, %[[ENTRY]] ], [ [[PIVPLUS1]], %[[LOOP]] ] -; PGSO-NEXT: [[PIVPLUS1]] = add nuw nsw i32 [[PIV]], 1 -; PGSO-NEXT: [[COND:%.*]] = icmp ult i32 [[PIV]], 510 -; PGSO-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]] +; PGSO-NEXT: [[ENTRY:.*:]] +; PGSO-NEXT: br label %[[VECTOR_PH:.*]] +; PGSO: [[VECTOR_PH]]: +; PGSO-NEXT: br label %[[VECTOR_BODY:.*]] +; PGSO: [[VECTOR_BODY]]: +; PGSO-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PGSO-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; PGSO-NEXT: [[TMP2:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[TMP1:%.*]], %[[VECTOR_BODY]] ] +; PGSO-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[VEC_IND]], splat (i32 510) +; PGSO-NEXT: [[TMP1]] = add nuw nsw <4 x i32> 
[[VEC_IND]], splat (i32 1) +; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; PGSO-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; PGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 512 +; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; PGSO: [[MIDDLE_BLOCK]]: +; PGSO-NEXT: [[TMP4:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP0]], i1 true) +; PGSO-NEXT: [[TMP5:%.*]] = sub i64 [[TMP4]], 1 +; PGSO-NEXT: [[TMP6:%.*]] = sub i64 [[TMP5]], 1 +; PGSO-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP1]], i64 [[TMP6]] +; PGSO-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3 +; PGSO-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP5]], 0 +; PGSO-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 [[TMP7]] +; PGSO-NEXT: br label %[[EXIT:.*]] ; PGSO: [[EXIT]]: -; PGSO-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] -; PGSO-NEXT: ret i32 [[FOR_LCSSA]] +; PGSO-NEXT: ret i32 [[TMP10]] ; ; NPGSO-LABEL: define i32 @pr45526_pgso( ; NPGSO-SAME: ) !prof [[PROF14]] { @@ -573,7 +643,7 @@ define i32 @pr45526_pgso() !prof !14 { ; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; NPGSO-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) ; NPGSO-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 508 -; NPGSO-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; NPGSO-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3 ; NPGSO-NEXT: br label %[[SCALAR_PH:.*]] @@ -584,7 +654,7 @@ define i32 @pr45526_pgso() !prof !14 { ; NPGSO-NEXT: [[FOR:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[PIVPLUS1]], %[[LOOP]] ] ; NPGSO-NEXT: [[PIVPLUS1]] = add nuw nsw i32 [[PIV]], 1 ; NPGSO-NEXT: [[COND:%.*]] = icmp ult i32 [[PIV]], 510 -; NPGSO-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP23:![0-9]+]] +; NPGSO-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP24:![0-9]+]] ; NPGSO: [[EXIT]]: ; NPGSO-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] ; NPGSO-NEXT: ret i32 [[FOR_LCSSA]] @@ -640,7 +710,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1026 -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] ; CHECK: [[FOR_END]]: @@ -678,7 +748,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; PGSO-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) ; PGSO-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1026 -; PGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; PGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_END:.*]] ; PGSO: [[FOR_END]]: @@ -716,7 +786,7 @@ define void @stride1(ptr noalias %B, i32 
%BStride) optsize { ; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; NPGSO-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) ; NPGSO-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1026 -; NPGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; NPGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[FOR_END:.*]] ; NPGSO: [[FOR_END]]: @@ -757,7 +827,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; CHECK-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: @@ -770,7 +840,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -789,7 +859,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; PGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; PGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[SCALAR_PH]] ; PGSO: [[SCALAR_PH]]: @@ -802,7 +872,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; PGSO: [[FOR_END]]: ; PGSO-NEXT: ret void ; @@ -821,7 +891,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; NPGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; NPGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[SCALAR_PH]] ; NPGSO: [[SCALAR_PH]]: @@ -834,7 +904,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq 
i32 [[IV_NEXT]], 1025 -; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; NPGSO: [[FOR_END]]: ; NPGSO-NEXT: ret void ; @@ -1020,7 +1090,9 @@ exit: ; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]} ; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]} ; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} -; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} +; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]], [[META17]]} +; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} +; CHECK: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]} ;. ; PGSO: [[PROF14]] = !{!"function_entry_count", i64 0} ; PGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]} @@ -1029,7 +1101,9 @@ exit: ; PGSO: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]} ; PGSO: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]} ; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} -; PGSO: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} +; PGSO: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]], [[META17]]} +; PGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} +; PGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]} ;. ; NPGSO: [[PROF14]] = !{!"function_entry_count", i64 0} ; NPGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]} @@ -1040,8 +1114,9 @@ exit: ; NPGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} ; NPGSO: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} ; NPGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} -; NPGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META17]], [[META16]]} -; NPGSO: [[LOOP24]] = distinct !{[[LOOP24]], [[META16]], [[META17]]} +; NPGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]], [[META17]]} +; NPGSO: [[LOOP24]] = distinct !{[[LOOP24]], [[META17]], [[META16]]} ; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META16]], [[META17]]} -; NPGSO: [[LOOP26]] = distinct !{[[LOOP26]], [[META16]]} +; NPGSO: [[LOOP26]] = distinct !{[[LOOP26]], [[META16]], [[META17]]} +; NPGSO: [[LOOP27]] = distinct !{[[LOOP27]], [[META16]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/pr43166-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr43166-fold-tail-by-masking.ll index cbc9fccebb881..960065b09cfd1 100644 --- a/llvm/test/Transforms/LoopVectorize/pr43166-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr43166-fold-tail-by-masking.ll @@ -39,22 +39,24 @@ define i64 @test1(i64 %y) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[Y:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i64> [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i64> splat (i64 3), [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP2]], <4 x i64> splat (i64 77), <4 x i64> [[TMP1]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[COND_END:%.*]] ] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[Y:%.*]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[COND_END]], label [[COND_FALSE:%.*]] -; CHECK: cond.false: -; CHECK-NEXT: [[DIV:%.*]] = xor i64 3, [[Y]] -; CHECK-NEXT: br label [[COND_END]] -; CHECK: cond.end: -; CHECK-NEXT: [[COND:%.*]] = phi i64 [ [[DIV]], [[COND_FALSE]] ], [ 77, [[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 3 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; CHECK: vector.body: +; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] +; CHECK: middle.block: +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> , i1 true) +; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[PREDPHI]], i64 [[TMP4]] +; CHECK-NEXT: br label [[COND_END:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i64 [ [[COND]], [[COND_END]] ] -; CHECK-NEXT: ret i64 [[COND_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP5]] ; entry: br label %for.body @@ -84,21 +86,23 @@ for.cond.cleanup: define i64 @test2(i64 %y) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[Y:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i64> [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP1]], <4 x i64> splat (i64 77), <4 x i64> splat (i64 55) ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[COND_END:%.*]] ] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[Y:%.*]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[COND_END]], label [[COND_FALSE:%.*]] -; CHECK: cond.false: -; CHECK-NEXT: br label [[COND_END]] -; CHECK: cond.end: -; CHECK-NEXT: [[COND:%.*]] = phi i64 [ 55, [[COND_FALSE]] ], [ 77, [[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 3 -; CHECK-NEXT: br i1 [[EXITCOND]], 
label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; CHECK: vector.body: +; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] +; CHECK: middle.block: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> , i1 true) +; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[PREDPHI]], i64 [[TMP3]] +; CHECK-NEXT: br label [[COND_END:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i64 [ [[COND]], [[COND_END]] ] -; CHECK-NEXT: ret i64 [[COND_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP4]] ; entry: br label %for.body @@ -127,21 +131,23 @@ for.cond.cleanup: define i32 @test3(i64 %y) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[Y:%.*]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i64> [[BROADCAST_SPLAT]], zeroinitializer ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[COND_END:%.*]] ] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[Y:%.*]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[COND_END]], label [[COND_FALSE:%.*]] -; CHECK: cond.false: -; CHECK-NEXT: br label [[COND_END]] -; CHECK: cond.end: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ 55, [[COND_FALSE]] ], [ [[I]], [[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 3 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; CHECK: vector.body: +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 +; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP1]], <4 x i32> , <4 x i32> splat (i32 55) +; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] +; CHECK: middle.block: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> , i1 true) +; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[PREDPHI]], i64 [[TMP3]] +; CHECK-NEXT: br label [[COND_END:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], [[COND_END]] ] -; CHECK-NEXT: ret i32 [[COND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/use-scalar-epilogue-if-tp-fails.ll b/llvm/test/Transforms/LoopVectorize/use-scalar-epilogue-if-tp-fails.ll index 3b34b75a4c511..52dbe931db8bc 100644 --- a/llvm/test/Transforms/LoopVectorize/use-scalar-epilogue-if-tp-fails.ll +++ b/llvm/test/Transforms/LoopVectorize/use-scalar-epilogue-if-tp-fails.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue < %s | FileCheck %s +; RUN: opt -S -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue < %s | FileCheck --check-prefix=FORCED-TF %s ; RUN: opt -S -passes=loop-vectorize < %s | FileCheck %s ; This tests should produce the same result as with default options, and when tail folding @@ -13,6 +13,24 @@ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" define void @basic_loop(ptr nocapture readonly %ptr, i32 %size, ptr %pos) { +; FORCED-TF-LABEL: @basic_loop( +; FORCED-TF-NEXT: header: +; 
FORCED-TF-NEXT: [[PTR0:%.*]] = load ptr, ptr [[POS:%.*]], align 4 +; FORCED-TF-NEXT: br label [[BODY:%.*]] +; FORCED-TF: body: +; FORCED-TF-NEXT: [[DEC66:%.*]] = phi i32 [ [[DEC:%.*]], [[BODY]] ], [ [[SIZE:%.*]], [[HEADER:%.*]] ] +; FORCED-TF-NEXT: [[BUFF:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BODY]] ], [ [[PTR:%.*]], [[HEADER]] ] +; FORCED-TF-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[BUFF]], i32 1 +; FORCED-TF-NEXT: [[DEC]] = add nsw i32 [[DEC66]], -1 +; FORCED-TF-NEXT: [[TMP0:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1 +; FORCED-TF-NEXT: store i8 [[TMP0]], ptr [[BUFF]], align 1 +; FORCED-TF-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0 +; FORCED-TF-NEXT: br i1 [[TOBOOL11]], label [[END:%.*]], label [[BODY]] +; FORCED-TF: end: +; FORCED-TF-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR]], [[BODY]] ] +; FORCED-TF-NEXT: store ptr [[INCDEC_PTR_LCSSA]], ptr [[POS]], align 4 +; FORCED-TF-NEXT: ret void +; ; CHECK-LABEL: @basic_loop( ; CHECK-NEXT: header: ; CHECK-NEXT: [[PTR0:%.*]] = load ptr, ptr [[POS:%.*]], align 4 @@ -21,36 +39,36 @@ define void @basic_loop(ptr nocapture readonly %ptr, i32 %size, ptr %pos) { ; CHECK: vector.ph: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[SIZE]], 4 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[SIZE]], [[N_MOD_VF]] -; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[SIZE]], [[N_VEC]] -; CHECK-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[N_VEC]] +; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[SIZE]], [[N_VEC]] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[N_VEC]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1 ; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[NEXT_GEP]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SIZE]], [[HEADER:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ [[PTR]], [[HEADER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP0]], [[MIDDLE_BLOCK]] ], [ [[SIZE]], [[HEADER:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[TMP1]], [[MIDDLE_BLOCK]] ], [ [[PTR]], [[HEADER]] ] ; CHECK-NEXT: br label [[BODY:%.*]] ; CHECK: body: ; CHECK-NEXT: [[DEC66:%.*]] = phi i32 [ [[DEC:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[BUFF:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[BUFF:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BODY]] ], [ 
[[BC_RESUME_VAL1]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[BUFF]], i32 1 ; CHECK-NEXT: [[DEC]] = add nsw i32 [[DEC66]], -1 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1 -; CHECK-NEXT: store i8 [[TMP5]], ptr [[BUFF]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1 +; CHECK-NEXT: store i8 [[TMP4]], ptr [[BUFF]], align 1 ; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0 ; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: end: -; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END1]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR]], [[BODY]] ], [ [[TMP1]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: store ptr [[INCDEC_PTR_LCSSA]], ptr [[POS]], align 4 ; CHECK-NEXT: ret void ; @@ -74,45 +92,162 @@ end: } define void @metadata(ptr nocapture readonly %ptr, i32 %size, ptr %pos) { +; FORCED-TF-LABEL: @metadata( +; FORCED-TF-NEXT: header: +; FORCED-TF-NEXT: [[PTR0:%.*]] = load ptr, ptr [[POS:%.*]], align 4 +; FORCED-TF-NEXT: br label [[VECTOR_PH:%.*]] +; FORCED-TF: vector.ph: +; FORCED-TF-NEXT: [[N_RND_UP:%.*]] = add i32 [[SIZE:%.*]], 3 +; FORCED-TF-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4 +; FORCED-TF-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] +; FORCED-TF-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[SIZE]], 1 +; FORCED-TF-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0 +; FORCED-TF-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; FORCED-TF-NEXT: br label [[VECTOR_BODY:%.*]] +; FORCED-TF: vector.body: +; FORCED-TF-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE11:%.*]] ] +; FORCED-TF-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 +; FORCED-TF-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 1 +; FORCED-TF-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 2 +; FORCED-TF-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 3 +; FORCED-TF-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[TMP0]] +; FORCED-TF-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP1]] +; FORCED-TF-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP2]] +; FORCED-TF-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP3]] +; FORCED-TF-NEXT: [[TMP4:%.*]] = insertelement <4 x ptr> poison, ptr [[NEXT_GEP]], i32 0 +; FORCED-TF-NEXT: [[TMP5:%.*]] = insertelement <4 x ptr> [[TMP4]], ptr [[NEXT_GEP1]], i32 1 +; FORCED-TF-NEXT: [[TMP6:%.*]] = insertelement <4 x ptr> [[TMP5]], ptr [[NEXT_GEP2]], i32 2 +; FORCED-TF-NEXT: [[TMP7:%.*]] = insertelement <4 x ptr> [[TMP6]], ptr [[NEXT_GEP3]], i32 3 +; FORCED-TF-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[INDEX]], i64 0 +; FORCED-TF-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer +; FORCED-TF-NEXT: [[VEC_IV:%.*]] = add <4 x i32> [[BROADCAST_SPLAT5]], +; FORCED-TF-NEXT: [[TMP8:%.*]] = icmp ule <4 x i32> [[VEC_IV]], [[BROADCAST_SPLAT]] +; FORCED-TF-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1 +; FORCED-TF-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP1]], i32 1 +; FORCED-TF-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i32 1 +; FORCED-TF-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, 
ptr [[NEXT_GEP3]], i32 1 +; FORCED-TF-NEXT: [[TMP13:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP9]], i32 0 +; FORCED-TF-NEXT: [[TMP14:%.*]] = insertelement <4 x ptr> [[TMP13]], ptr [[TMP10]], i32 1 +; FORCED-TF-NEXT: [[TMP15:%.*]] = insertelement <4 x ptr> [[TMP14]], ptr [[TMP11]], i32 2 +; FORCED-TF-NEXT: [[TMP16:%.*]] = insertelement <4 x ptr> [[TMP15]], ptr [[TMP12]], i32 3 +; FORCED-TF-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0 +; FORCED-TF-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; FORCED-TF: pred.store.if: +; FORCED-TF-NEXT: [[TMP18:%.*]] = load i8, ptr [[TMP9]], align 1 +; FORCED-TF-NEXT: store i8 [[TMP18]], ptr [[NEXT_GEP]], align 1 +; FORCED-TF-NEXT: br label [[PRED_STORE_CONTINUE]] +; FORCED-TF: pred.store.continue: +; FORCED-TF-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1 +; FORCED-TF-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]] +; FORCED-TF: pred.store.if6: +; FORCED-TF-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP10]], align 1 +; FORCED-TF-NEXT: store i8 [[TMP20]], ptr [[NEXT_GEP1]], align 1 +; FORCED-TF-NEXT: br label [[PRED_STORE_CONTINUE7]] +; FORCED-TF: pred.store.continue7: +; FORCED-TF-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2 +; FORCED-TF-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]] +; FORCED-TF: pred.store.if8: +; FORCED-TF-NEXT: [[TMP22:%.*]] = load i8, ptr [[TMP11]], align 1 +; FORCED-TF-NEXT: store i8 [[TMP22]], ptr [[NEXT_GEP2]], align 1 +; FORCED-TF-NEXT: br label [[PRED_STORE_CONTINUE9]] +; FORCED-TF: pred.store.continue9: +; FORCED-TF-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3 +; FORCED-TF-NEXT: br i1 [[TMP23]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11]] +; FORCED-TF: pred.store.if10: +; FORCED-TF-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP12]], align 1 +; FORCED-TF-NEXT: store i8 [[TMP24]], ptr [[NEXT_GEP3]], align 1 +; FORCED-TF-NEXT: br label [[PRED_STORE_CONTINUE11]] +; FORCED-TF: pred.store.continue11: +; FORCED-TF-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; FORCED-TF-NEXT: [[TMP25:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; FORCED-TF-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; FORCED-TF: middle.block: +; FORCED-TF-NEXT: [[TMP26:%.*]] = xor <4 x i1> [[TMP8]], splat (i1 true) +; FORCED-TF-NEXT: [[TMP27:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP26]], i1 true) +; FORCED-TF-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; FORCED-TF-NEXT: [[TMP29:%.*]] = extractelement <4 x ptr> [[TMP16]], i64 [[TMP28]] +; FORCED-TF-NEXT: br label [[END:%.*]] +; FORCED-TF: end: +; FORCED-TF-NEXT: store ptr [[TMP29]], ptr [[POS]], align 4 +; FORCED-TF-NEXT: ret void +; ; CHECK-LABEL: @metadata( ; CHECK-NEXT: header: ; CHECK-NEXT: [[PTR0:%.*]] = load ptr, ptr [[POS:%.*]], align 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[SIZE:%.*]], 4 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[SIZE]], 4 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[SIZE]], [[N_MOD_VF]] -; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[SIZE]], [[N_VEC]] -; CHECK-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[N_VEC]] +; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[SIZE:%.*]], 3 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4 
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] +; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[SIZE]], 1 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[INDEX]] -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 -; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[NEXT_GEP]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE11:%.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 3 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 [[TMP0]] +; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP1]] +; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP2]] +; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR]], i32 [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x ptr> poison, ptr [[NEXT_GEP]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x ptr> [[TMP4]], ptr [[NEXT_GEP1]], i32 1 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x ptr> [[TMP5]], ptr [[NEXT_GEP2]], i32 2 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x ptr> [[TMP6]], ptr [[NEXT_GEP3]], i32 3 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[INDEX]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[VEC_IV:%.*]] = add <4 x i32> [[BROADCAST_SPLAT5]], +; CHECK-NEXT: [[TMP8:%.*]] = icmp ule <4 x i32> [[VEC_IV]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i32 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP1]], i32 1 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i32 1 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP3]], i32 1 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP9]], i32 0 +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x ptr> [[TMP13]], ptr [[TMP10]], i32 1 +; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x ptr> [[TMP14]], ptr [[TMP11]], i32 2 +; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x ptr> [[TMP15]], ptr [[TMP12]], i32 3 +; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0 +; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; CHECK: pred.store.if: +; CHECK-NEXT: [[TMP18:%.*]] = load i8, ptr [[TMP9]], align 1 +; CHECK-NEXT: store i8 [[TMP18]], ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]] +; CHECK: pred.store.continue: +; CHECK-NEXT: 
[[TMP19:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1 +; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]] +; CHECK: pred.store.if6: +; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP10]], align 1 +; CHECK-NEXT: store i8 [[TMP20]], ptr [[NEXT_GEP1]], align 1 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE7]] +; CHECK: pred.store.continue7: +; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2 +; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]] +; CHECK: pred.store.if8: +; CHECK-NEXT: [[TMP22:%.*]] = load i8, ptr [[TMP11]], align 1 +; CHECK-NEXT: store i8 [[TMP22]], ptr [[NEXT_GEP2]], align 1 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE9]] +; CHECK: pred.store.continue9: +; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3 +; CHECK-NEXT: br i1 [[TMP23]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11]] +; CHECK: pred.store.if10: +; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[TMP12]], align 1 +; CHECK-NEXT: store i8 [[TMP24]], ptr [[NEXT_GEP3]], align 1 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE11]] +; CHECK: pred.store.continue11: +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[SIZE]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SIZE]], [[HEADER:%.*]] ] -; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ [[PTR]], [[HEADER]] ] -; CHECK-NEXT: br label [[BODY:%.*]] -; CHECK: body: -; CHECK-NEXT: [[DEC66:%.*]] = phi i32 [ [[DEC:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[BUFF:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[BODY]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[BUFF]], i32 1 -; CHECK-NEXT: [[DEC]] = add nsw i32 [[DEC66]], -1 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1 -; CHECK-NEXT: store i8 [[TMP5]], ptr [[BUFF]], align 1 -; CHECK-NEXT: [[TOBOOL11:%.*]] = icmp eq i32 [[DEC]], 0 -; CHECK-NEXT: br i1 [[TOBOOL11]], label [[END]], label [[BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: [[TMP26:%.*]] = xor <4 x i1> [[TMP8]], splat (i1 true) +; CHECK-NEXT: [[TMP27:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP26]], i1 true) +; CHECK-NEXT: [[TMP28:%.*]] = sub i64 [[TMP27]], 1 +; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x ptr> [[TMP16]], i64 [[TMP28]] +; CHECK-NEXT: br label [[END:%.*]] ; CHECK: end: -; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR]], [[BODY]] ], [ [[IND_END1]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: store ptr [[INCDEC_PTR_LCSSA]], ptr [[POS]], align 4 +; CHECK-NEXT: store ptr [[TMP29]], ptr [[POS]], align 4 ; CHECK-NEXT: ret void ; header: From b6dd511f70dcaa874cc0ff4dcd7cd32463da5ba2 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Wed, 12 Nov 2025 15:20:38 +0000 Subject: [PATCH 13/34] [X86] AVX512 optimised CTLZ/CTTZ implementations for i256/i512 scalars (#164671) Make use of AVX512 VPLZCNT/VPOPCNT to perform the big integer bit counts per vector element and then use VPCOMPRESS to extract the first non-zero element result. 
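For intuition, the equivalent scalar computation looks like the following C++ sketch (illustration only, not code from this patch; U512, ctlz512 and cttz512 are made-up names): split the wide integer into i64 lanes, take each lane's bit count plus that lane's bit offset, and keep the first non-zero lane's result, falling back to the full bit width when every lane is zero.

  #include <array>
  #include <bit>
  #include <cstdint>

  using U512 = std::array<uint64_t, 8>; // Lanes[0] = least significant 64 bits

  uint32_t ctlz512(const U512 &Lanes) {
    // Walk lanes top-down: this mirrors the reversing shuffle the combine
    // emits, so the most significant non-zero lane compresses to slot 0.
    for (int I = 7; I >= 0; --I)
      if (Lanes[I] != 0)
        return (7 - I) * 64 + std::countl_zero(Lanes[I]);
    return 512; // all-zero input: the compress passthru value
  }

  uint32_t cttz512(const U512 &Lanes) {
    // CTTZ scans bottom-up, so no lane reversal is needed.
    for (int I = 0; I != 8; ++I)
      if (Lanes[I] != 0)
        return I * 64 + std::countr_zero(Lanes[I]);
    return 512;
  }

The vector form does roughly the same work in parallel: VPLZCNT/VPOPCNT produce all eight lane counts at once, a vector add applies the 0,64,...,448 bit offsets, and VPCOMPRESS with the non-zero-lane mask moves the first valid result into element 0 for a single extract.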
There's more we can do here (widen/split other vector widths etc.) - but this is a good starting point. --- llvm/lib/Target/X86/X86ISelLowering.cpp | 67 ++ llvm/test/CodeGen/X86/bitcnt-big-integer.ll | 721 ++++++++------------ 2 files changed, 370 insertions(+), 418 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index fa3dce256046f..6483e07afadee 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2654,6 +2654,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, ISD::AVGCEILU, ISD::AVGFLOORS, ISD::AVGFLOORU, + ISD::CTLZ, + ISD::CTTZ, + ISD::CTLZ_ZERO_UNDEF, + ISD::CTTZ_ZERO_UNDEF, ISD::BITREVERSE, ISD::ADD, ISD::FADD, @@ -55162,6 +55166,65 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG, return combineFneg(N, DAG, DCI, Subtarget); } +// Fold i256/i512 CTLZ/CTTZ patterns to make use of AVX512 +// vXi64 CTLZ/CTTZ and VECTOR_COMPRESS. +// Compute the CTLZ/CTTZ of each element, add the element's bit offset, compress +// the result to remove all zero elements (passthru is set to scalar bitwidth if +// all elements are zero) and extract the lowest compressed element. +static SDValue combineCTZ(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const X86Subtarget &Subtarget) { + EVT VT = N->getValueType(0); + SDValue N0 = N->getOperand(0); + unsigned Opc = N->getOpcode(); + unsigned SizeInBits = VT.getSizeInBits(); + assert((Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF || Opc == ISD::CTTZ || + Opc == ISD::CTTZ_ZERO_UNDEF) && + "Unsupported bit count"); + + if (VT.isScalarInteger() && Subtarget.hasCDI() && + ((SizeInBits == 512 && Subtarget.useAVX512Regs()) || + (SizeInBits == 256 && Subtarget.hasVLX() && + X86::mayFoldLoad(N0, Subtarget)))) { + MVT VecVT = MVT::getVectorVT(MVT::i64, SizeInBits / 64); + MVT BoolVT = VecVT.changeVectorElementType(MVT::i1); + SDValue Vec = DAG.getBitcast(VecVT, N0); + SDLoc DL(N); + + SmallVector RevMask; + SmallVector Offsets; + for (unsigned I = 0, E = VecVT.getVectorNumElements(); I != E; ++I) { + RevMask.push_back((int)((E - 1) - I)); + Offsets.push_back(DAG.getConstant(I * 64, DL, MVT::i64)); + } + + // CTLZ - reverse the elements as we want the top non-zero element at the + // bottom for compression. 
+ unsigned VecOpc = ISD::CTTZ; + if (Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF) { + VecOpc = ISD::CTLZ; + Vec = DAG.getVectorShuffle(VecVT, DL, Vec, Vec, RevMask); + } + + SDValue PassThrough = DAG.getUNDEF(VecVT); + if (Opc == ISD::CTLZ || Opc == ISD::CTTZ) + PassThrough = DAG.getConstant(SizeInBits, DL, VecVT); + + SDValue IsNonZero = DAG.getSetCC(DL, BoolVT, Vec, + DAG.getConstant(0, DL, VecVT), ISD::SETNE); + SDValue Cnt = DAG.getNode(VecOpc, DL, VecVT, Vec); + Cnt = DAG.getNode(ISD::ADD, DL, VecVT, Cnt, + DAG.getBuildVector(VecVT, DL, Offsets)); + Cnt = DAG.getNode(ISD::VECTOR_COMPRESS, DL, VecVT, Cnt, IsNonZero, + PassThrough); + Cnt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cnt, + DAG.getVectorIdxConstant(0, DL)); + return DAG.getZExtOrTrunc(Cnt, DL, VT); + } + + return SDValue(); +} + static SDValue combineBITREVERSE(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) { @@ -60885,6 +60948,10 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget); case ISD::OR: return combineOr(N, DAG, DCI, Subtarget); case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget); + case ISD::CTLZ: + case ISD::CTTZ: + case ISD::CTLZ_ZERO_UNDEF: + case ISD::CTTZ_ZERO_UNDEF:return combineCTZ(N, DAG, DCI, Subtarget); case ISD::BITREVERSE: return combineBITREVERSE(N, DAG, DCI, Subtarget); case ISD::AVGCEILS: case ISD::AVGCEILU: diff --git a/llvm/test/CodeGen/X86/bitcnt-big-integer.ll b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll index fe3fbf141682a..330c978d2a9f7 100644 --- a/llvm/test/CodeGen/X86/bitcnt-big-integer.ll +++ b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,SSE ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,AVX2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 -mattr=+avx512vpopcntdq | FileCheck %s --check-prefixes=CHECK,AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 -mattr=+avx512vpopcntdq | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512POPCNT ; ; CTPOP @@ -712,23 +712,15 @@ define i32 @load_ctlz_i256(ptr %p0) nounwind { ; ; AVX512-LABEL: load_ctlz_i256: ; AVX512: # %bb.0: -; AVX512-NEXT: movq 8(%rdi), %rcx -; AVX512-NEXT: movq 16(%rdi), %rdx -; AVX512-NEXT: movq 24(%rdi), %rsi -; AVX512-NEXT: lzcntq %rsi, %rax -; AVX512-NEXT: lzcntq %rdx, %r8 -; AVX512-NEXT: addl $64, %r8d -; AVX512-NEXT: testq %rsi, %rsi -; AVX512-NEXT: cmovnel %eax, %r8d -; AVX512-NEXT: lzcntq %rcx, %r9 -; AVX512-NEXT: lzcntq (%rdi), %rax -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %r9d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %rsi, %rdx -; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: vpermq {{.*#+}} ymm0 = mem[3,2,1,0] +; AVX512-NEXT: vplzcntq %ymm0, %ymm1 +; AVX512-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX512-NEXT: vptestmq %ymm0, %ymm0, %k1 +; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm0 = [256,256,256,256] +; AVX512-NEXT: vpcompressq %ymm1, %ymm0 {%k1} +; AVX512-NEXT: vmovq %xmm0, 
%rax ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %a0 = load i256, ptr %p0 %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 0) @@ -845,47 +837,28 @@ define i32 @test_ctlz_i512(i512 %a0) nounwind { ; ; AVX512-LABEL: test_ctlz_i512: ; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r15 -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 -; AVX512-NEXT: lzcntq %r11, %rax -; AVX512-NEXT: lzcntq %r10, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq %r9, %rax -; AVX512-NEXT: lzcntq %r8, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %r9, %r9 -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: subl $-128, %ebx -; AVX512-NEXT: movq %r10, %rax -; AVX512-NEXT: orq %r11, %rax -; AVX512-NEXT: cmovnel %r14d, %ebx -; AVX512-NEXT: lzcntq %rcx, %rax -; AVX512-NEXT: lzcntq %rdx, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq %rsi, %r15 -; AVX512-NEXT: lzcntq %rdi, %rax -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %rsi, %rsi -; AVX512-NEXT: cmovnel %r15d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %rcx, %rdx -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %r11, %r9 -; AVX512-NEXT: orq %r10, %r8 -; AVX512-NEXT: orq %r9, %r8 -; AVX512-NEXT: cmovnel %ebx, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: popq %r15 +; AVX512-NEXT: vmovq %rdi, %xmm0 +; AVX512-NEXT: vmovq %rsi, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vmovq %rdx, %xmm1 +; AVX512-NEXT: vmovq %rcx, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512-NEXT: vmovq %r8, %xmm1 +; AVX512-NEXT: vmovq %r9, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX512-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] +; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512-NEXT: vplzcntq %zmm0, %zmm1 +; AVX512-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm0 = [512,512,512,512,512,512,512,512] +; AVX512-NEXT: vpcompressq %zmm1, %zmm0 {%k1} +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 0) %res = trunc i512 %cnt to i32 @@ -1010,50 +983,16 @@ define i32 @load_ctlz_i512(ptr %p0) nounwind { ; ; AVX512-LABEL: load_ctlz_i512: ; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq 8(%rdi), %r11 -; AVX512-NEXT: movq 16(%rdi), %r9 -; AVX512-NEXT: movq 24(%rdi), %r10 -; AVX512-NEXT: movq 32(%rdi), %rcx -; AVX512-NEXT: movq 40(%rdi), %rdx -; AVX512-NEXT: movq 48(%rdi), %rsi -; AVX512-NEXT: movq 56(%rdi), %r8 -; AVX512-NEXT: lzcntq %r8, %rax -; AVX512-NEXT: lzcntq %rsi, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r8, %r8 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq %rdx, %rax -; AVX512-NEXT: lzcntq %rcx, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: 
cmovnel %eax, %ebx -; AVX512-NEXT: subl $-128, %ebx -; AVX512-NEXT: movq %rsi, %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: cmovnel %r14d, %ebx -; AVX512-NEXT: lzcntq %r10, %rax -; AVX512-NEXT: lzcntq %r9, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r10, %r10 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq (%rdi), %rax -; AVX512-NEXT: lzcntq %r11, %rdi -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %edi, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %r10, %r9 -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %r8, %rdx -; AVX512-NEXT: orq %rsi, %rcx -; AVX512-NEXT: orq %rdx, %rcx -; AVX512-NEXT: cmovnel %ebx, %eax +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,6,5,4,3,2,1,0] +; AVX512-NEXT: vpermq (%rdi), %zmm0, %zmm0 +; AVX512-NEXT: vplzcntq %zmm0, %zmm1 +; AVX512-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm0 = [512,512,512,512,512,512,512,512] +; AVX512-NEXT: vpcompressq %zmm1, %zmm0 {%k1} +; AVX512-NEXT: vmovq %xmm0, %rax ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %a0 = load i512, ptr %p0 %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 0) @@ -2002,23 +1941,14 @@ define i32 @load_ctlz_undef_i256(ptr %p0) nounwind { ; ; AVX512-LABEL: load_ctlz_undef_i256: ; AVX512: # %bb.0: -; AVX512-NEXT: movq 8(%rdi), %rcx -; AVX512-NEXT: movq 16(%rdi), %rdx -; AVX512-NEXT: movq 24(%rdi), %rsi -; AVX512-NEXT: lzcntq %rsi, %rax -; AVX512-NEXT: lzcntq %rdx, %r8 -; AVX512-NEXT: addl $64, %r8d -; AVX512-NEXT: testq %rsi, %rsi -; AVX512-NEXT: cmovnel %eax, %r8d -; AVX512-NEXT: lzcntq %rcx, %r9 -; AVX512-NEXT: lzcntq (%rdi), %rax -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %r9d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %rsi, %rdx -; AVX512-NEXT: cmovnel %r8d, %eax +; AVX512-NEXT: vpermq {{.*#+}} ymm0 = mem[3,2,1,0] +; AVX512-NEXT: vptestmq %ymm0, %ymm0, %k1 +; AVX512-NEXT: vplzcntq %ymm0, %ymm0 +; AVX512-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512-NEXT: vpcompressq %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vmovq %xmm0, %rax ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %a0 = load i256, ptr %p0 %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 -1) @@ -2134,47 +2064,27 @@ define i32 @test_ctlz_undef_i512(i512 %a0) nounwind { ; ; AVX512-LABEL: test_ctlz_undef_i512: ; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r15 -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 -; AVX512-NEXT: lzcntq %r11, %rax -; AVX512-NEXT: lzcntq %r10, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq %r9, %rax -; AVX512-NEXT: lzcntq %r8, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %r9, %r9 -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: subl $-128, %ebx -; AVX512-NEXT: movq %r10, %rax -; AVX512-NEXT: orq %r11, %rax -; AVX512-NEXT: cmovnel %r14d, %ebx -; AVX512-NEXT: lzcntq %rcx, %rax -; AVX512-NEXT: lzcntq %rdx, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq %rsi, %r15 -; AVX512-NEXT: lzcntq %rdi, %rax -; 
AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %rsi, %rsi -; AVX512-NEXT: cmovnel %r15d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %rcx, %rdx -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %r11, %r9 -; AVX512-NEXT: orq %r10, %r8 -; AVX512-NEXT: orq %r9, %r8 -; AVX512-NEXT: cmovnel %ebx, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: popq %r15 +; AVX512-NEXT: vmovq %rdi, %xmm0 +; AVX512-NEXT: vmovq %rsi, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vmovq %rdx, %xmm1 +; AVX512-NEXT: vmovq %rcx, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512-NEXT: vmovq %r8, %xmm1 +; AVX512-NEXT: vmovq %r9, %xmm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX512-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] +; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 +; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: vpcompressq %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 -1) %res = trunc i512 %cnt to i32 @@ -2298,50 +2208,15 @@ define i32 @load_ctlz_undef_i512(ptr %p0) nounwind { ; ; AVX512-LABEL: load_ctlz_undef_i512: ; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq 8(%rdi), %r11 -; AVX512-NEXT: movq 16(%rdi), %r9 -; AVX512-NEXT: movq 24(%rdi), %r10 -; AVX512-NEXT: movq 32(%rdi), %rcx -; AVX512-NEXT: movq 40(%rdi), %rdx -; AVX512-NEXT: movq 48(%rdi), %rsi -; AVX512-NEXT: movq 56(%rdi), %r8 -; AVX512-NEXT: lzcntq %r8, %rax -; AVX512-NEXT: lzcntq %rsi, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r8, %r8 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq %rdx, %rax -; AVX512-NEXT: lzcntq %rcx, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: subl $-128, %ebx -; AVX512-NEXT: movq %rsi, %rax -; AVX512-NEXT: orq %r8, %rax -; AVX512-NEXT: cmovnel %r14d, %ebx -; AVX512-NEXT: lzcntq %r10, %rax -; AVX512-NEXT: lzcntq %r9, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r10, %r10 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: lzcntq (%rdi), %rax -; AVX512-NEXT: lzcntq %r11, %rdi -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %edi, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %r10, %r9 -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %r8, %rdx -; AVX512-NEXT: orq %rsi, %rcx -; AVX512-NEXT: orq %rdx, %rcx -; AVX512-NEXT: cmovnel %ebx, %eax +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,6,5,4,3,2,1,0] +; AVX512-NEXT: vpermq (%rdi), %zmm0, %zmm0 +; AVX512-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512-NEXT: vplzcntq %zmm0, %zmm0 +; AVX512-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: vpcompressq %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: vmovq %xmm0, %rax ; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 +; AVX512-NEXT: vzeroupper ; 
AVX512-NEXT: retq %a0 = load i512, ptr %p0 %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 -1) @@ -3282,26 +3157,38 @@ define i32 @load_cttz_i256(ptr %p0) nounwind { ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_i256: -; AVX512: # %bb.0: -; AVX512-NEXT: movq 16(%rdi), %rcx -; AVX512-NEXT: movq (%rdi), %rdx -; AVX512-NEXT: movq 8(%rdi), %rsi -; AVX512-NEXT: tzcntq %rdx, %rax -; AVX512-NEXT: tzcntq %rsi, %r8 -; AVX512-NEXT: addl $64, %r8d -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %r8d -; AVX512-NEXT: tzcntq %rcx, %r9 -; AVX512-NEXT: tzcntq 24(%rdi), %rax -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %r9d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %rsi, %rdx -; AVX512-NEXT: cmovnel %r8d, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: retq +; AVX512F-LABEL: load_cttz_i256: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovdqu (%rdi), %ymm0 +; AVX512F-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512F-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; AVX512F-NEXT: vpandn %ymm1, %ymm0, %ymm1 +; AVX512F-NEXT: vplzcntq %ymm1, %ymm1 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [64,128,192,256] +; AVX512F-NEXT: vpsubq %ymm1, %ymm2, %ymm1 +; AVX512F-NEXT: vptestmq %ymm0, %ymm0, %k1 +; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm0 = [256,256,256,256] +; AVX512F-NEXT: vpcompressq %ymm1, %ymm0 {%k1} +; AVX512F-NEXT: vmovq %xmm0, %rax +; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512POPCNT-LABEL: load_cttz_i256: +; AVX512POPCNT: # %bb.0: +; AVX512POPCNT-NEXT: vmovdqu (%rdi), %ymm0 +; AVX512POPCNT-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; AVX512POPCNT-NEXT: vpandn %ymm1, %ymm0, %ymm1 +; AVX512POPCNT-NEXT: vpopcntq %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vptestmq %ymm0, %ymm0, %k1 +; AVX512POPCNT-NEXT: vpbroadcastq {{.*#+}} ymm0 = [256,256,256,256] +; AVX512POPCNT-NEXT: vpcompressq %ymm1, %ymm0 {%k1} +; AVX512POPCNT-NEXT: vmovq %xmm0, %rax +; AVX512POPCNT-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512POPCNT-NEXT: vzeroupper +; AVX512POPCNT-NEXT: retq %a0 = load i256, ptr %p0 %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0) %res = trunc i256 %cnt to i32 @@ -3399,47 +3286,58 @@ define i32 @test_cttz_i512(i512 %a0) nounwind { ; AVX2-NEXT: popq %r14 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_cttz_i512: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 -; AVX512-NEXT: tzcntq %rdi, %rax -; AVX512-NEXT: tzcntq %rsi, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %rdi, %rdi -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: tzcntq %rdx, %rax -; AVX512-NEXT: tzcntq %rcx, %r10 -; AVX512-NEXT: addl $64, %r10d -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %r10d -; AVX512-NEXT: subl $-128, %r10d -; AVX512-NEXT: movq %rdi, %rax -; AVX512-NEXT: orq %rsi, %rax -; AVX512-NEXT: cmovnel %ebx, %r10d -; AVX512-NEXT: tzcntq %r8, %rax -; AVX512-NEXT: tzcntq %r9, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %r8, %r8 -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: tzcntq %r11, %r14 -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %r9, %r8 -; AVX512-NEXT: 
cmovnel %ebx, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %rcx, %rsi -; AVX512-NEXT: orq %rdx, %rdi -; AVX512-NEXT: orq %rsi, %rdi -; AVX512-NEXT: cmovnel %r10d, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_cttz_i512: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovq %rcx, %xmm0 +; AVX512F-NEXT: vmovq %rdx, %xmm1 +; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512F-NEXT: vmovq %rsi, %xmm1 +; AVX512F-NEXT: vmovq %rdi, %xmm2 +; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512F-NEXT: vmovq %r9, %xmm1 +; AVX512F-NEXT: vmovq %r8, %xmm2 +; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512F-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512F-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vplzcntq %zmm1, %zmm1 +; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [64,128,192,256,320,384,448,512] +; AVX512F-NEXT: vpsubq %zmm1, %zmm2, %zmm1 +; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512F-NEXT: vpbroadcastq {{.*#+}} zmm0 = [512,512,512,512,512,512,512,512] +; AVX512F-NEXT: vpcompressq %zmm1, %zmm0 {%k1} +; AVX512F-NEXT: vmovd %xmm0, %eax +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512POPCNT-LABEL: test_cttz_i512: +; AVX512POPCNT: # %bb.0: +; AVX512POPCNT-NEXT: vmovq %rcx, %xmm0 +; AVX512POPCNT-NEXT: vmovq %rdx, %xmm1 +; AVX512POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512POPCNT-NEXT: vmovq %rsi, %xmm1 +; AVX512POPCNT-NEXT: vmovq %rdi, %xmm2 +; AVX512POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512POPCNT-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512POPCNT-NEXT: vmovq %r9, %xmm1 +; AVX512POPCNT-NEXT: vmovq %r8, %xmm2 +; AVX512POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512POPCNT-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512POPCNT-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512POPCNT-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpopcntq %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512POPCNT-NEXT: vpbroadcastq {{.*#+}} zmm0 = [512,512,512,512,512,512,512,512] +; AVX512POPCNT-NEXT: vpcompressq %zmm1, %zmm0 {%k1} +; AVX512POPCNT-NEXT: vmovd %xmm0, %eax +; AVX512POPCNT-NEXT: vzeroupper +; AVX512POPCNT-NEXT: retq %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0) %res = trunc i512 %cnt to i32 ret i32 %res @@ -3553,53 +3451,38 @@ define i32 @load_cttz_i512(ptr %p0) nounwind { ; AVX2-NEXT: popq %r15 ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_i512: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq 48(%rdi), %r11 -; AVX512-NEXT: movq 40(%rdi), %r9 -; AVX512-NEXT: movq 32(%rdi), %r10 -; AVX512-NEXT: movq 24(%rdi), %r8 -; AVX512-NEXT: movq 16(%rdi), %rdx -; AVX512-NEXT: movq (%rdi), %rcx -; AVX512-NEXT: movq 8(%rdi), %rsi -; AVX512-NEXT: tzcntq %rcx, %rax -; AVX512-NEXT: tzcntq %rsi, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: tzcntq %rdx, %rax -; AVX512-NEXT: tzcntq %r8, %rbx -; AVX512-NEXT: 
addl $64, %ebx -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: subl $-128, %ebx -; AVX512-NEXT: movq %rcx, %rax -; AVX512-NEXT: orq %rsi, %rax -; AVX512-NEXT: cmovnel %r14d, %ebx -; AVX512-NEXT: tzcntq %r10, %rax -; AVX512-NEXT: tzcntq %r9, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r10, %r10 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: tzcntq 56(%rdi), %rax -; AVX512-NEXT: tzcntq %r11, %rdi -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %edi, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %r9, %r10 -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %r8, %rsi -; AVX512-NEXT: orq %rdx, %rcx -; AVX512-NEXT: orq %rsi, %rcx -; AVX512-NEXT: cmovnel %ebx, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: retq +; AVX512F-LABEL: load_cttz_i512: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512F-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vplzcntq %zmm1, %zmm1 +; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [64,128,192,256,320,384,448,512] +; AVX512F-NEXT: vpsubq %zmm1, %zmm2, %zmm1 +; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512F-NEXT: vpbroadcastq {{.*#+}} zmm0 = [512,512,512,512,512,512,512,512] +; AVX512F-NEXT: vpcompressq %zmm1, %zmm0 {%k1} +; AVX512F-NEXT: vmovq %xmm0, %rax +; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512POPCNT-LABEL: load_cttz_i512: +; AVX512POPCNT: # %bb.0: +; AVX512POPCNT-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512POPCNT-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512POPCNT-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpopcntq %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512POPCNT-NEXT: vpbroadcastq {{.*#+}} zmm0 = [512,512,512,512,512,512,512,512] +; AVX512POPCNT-NEXT: vpcompressq %zmm1, %zmm0 {%k1} +; AVX512POPCNT-NEXT: vmovq %xmm0, %rax +; AVX512POPCNT-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512POPCNT-NEXT: vzeroupper +; AVX512POPCNT-NEXT: retq %a0 = load i512, ptr %p0 %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0) %res = trunc i512 %cnt to i32 @@ -4492,26 +4375,36 @@ define i32 @load_cttz_undef_i256(ptr %p0) nounwind { ; AVX2-NEXT: # kill: def $eax killed $eax killed $rax ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_undef_i256: -; AVX512: # %bb.0: -; AVX512-NEXT: movq 16(%rdi), %rcx -; AVX512-NEXT: movq (%rdi), %rdx -; AVX512-NEXT: movq 8(%rdi), %rsi -; AVX512-NEXT: tzcntq %rdx, %rax -; AVX512-NEXT: tzcntq %rsi, %r8 -; AVX512-NEXT: addl $64, %r8d -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %r8d -; AVX512-NEXT: tzcntq %rcx, %r9 -; AVX512-NEXT: tzcntq 24(%rdi), %rax -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %r9d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %rsi, %rdx -; AVX512-NEXT: cmovnel %r8d, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: retq +; AVX512F-LABEL: load_cttz_undef_i256: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovdqu (%rdi), %ymm0 +; AVX512F-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512F-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; AVX512F-NEXT: vpandn %ymm1, %ymm0, 
%ymm1 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [64,128,192,256] +; AVX512F-NEXT: vplzcntq %ymm1, %ymm1 +; AVX512F-NEXT: vpsubq %ymm1, %ymm2, %ymm1 +; AVX512F-NEXT: vptestmq %ymm0, %ymm0, %k1 +; AVX512F-NEXT: vpcompressq %ymm1, %ymm0 {%k1} {z} +; AVX512F-NEXT: vmovq %xmm0, %rax +; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512POPCNT-LABEL: load_cttz_undef_i256: +; AVX512POPCNT: # %bb.0: +; AVX512POPCNT-NEXT: vmovdqu (%rdi), %ymm0 +; AVX512POPCNT-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; AVX512POPCNT-NEXT: vpandn %ymm1, %ymm0, %ymm1 +; AVX512POPCNT-NEXT: vpopcntq %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX512POPCNT-NEXT: vptestmq %ymm0, %ymm0, %k1 +; AVX512POPCNT-NEXT: vpcompressq %ymm1, %ymm0 {%k1} {z} +; AVX512POPCNT-NEXT: vmovq %xmm0, %rax +; AVX512POPCNT-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512POPCNT-NEXT: vzeroupper +; AVX512POPCNT-NEXT: retq %a0 = load i256, ptr %p0 %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 -1) %res = trunc i256 %cnt to i32 @@ -4608,47 +4501,56 @@ define i32 @test_cttz_undef_i512(i512 %a0) nounwind { ; AVX2-NEXT: popq %r14 ; AVX2-NEXT: retq ; -; AVX512-LABEL: test_cttz_undef_i512: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 -; AVX512-NEXT: tzcntq %rdi, %rax -; AVX512-NEXT: tzcntq %rsi, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %rdi, %rdi -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: tzcntq %rdx, %rax -; AVX512-NEXT: tzcntq %rcx, %r10 -; AVX512-NEXT: addl $64, %r10d -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %r10d -; AVX512-NEXT: subl $-128, %r10d -; AVX512-NEXT: movq %rdi, %rax -; AVX512-NEXT: orq %rsi, %rax -; AVX512-NEXT: cmovnel %ebx, %r10d -; AVX512-NEXT: tzcntq %r8, %rax -; AVX512-NEXT: tzcntq %r9, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %r8, %r8 -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: tzcntq %r11, %r14 -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %r9, %r8 -; AVX512-NEXT: cmovnel %ebx, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %rcx, %rsi -; AVX512-NEXT: orq %rdx, %rdi -; AVX512-NEXT: orq %rsi, %rdi -; AVX512-NEXT: cmovnel %r10d, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: retq +; AVX512F-LABEL: test_cttz_undef_i512: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovq %rcx, %xmm0 +; AVX512F-NEXT: vmovq %rdx, %xmm1 +; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512F-NEXT: vmovq %rsi, %xmm1 +; AVX512F-NEXT: vmovq %rdi, %xmm2 +; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512F-NEXT: vmovq %r9, %xmm1 +; AVX512F-NEXT: vmovq %r8, %xmm2 +; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512F-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512F-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [64,128,192,256,320,384,448,512] +; AVX512F-NEXT: vplzcntq %zmm1, %zmm1 +; AVX512F-NEXT: vpsubq %zmm1, %zmm2, %zmm1 
+; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512F-NEXT: vpcompressq %zmm1, %zmm0 {%k1} {z} +; AVX512F-NEXT: vmovd %xmm0, %eax +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512POPCNT-LABEL: test_cttz_undef_i512: +; AVX512POPCNT: # %bb.0: +; AVX512POPCNT-NEXT: vmovq %rcx, %xmm0 +; AVX512POPCNT-NEXT: vmovq %rdx, %xmm1 +; AVX512POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512POPCNT-NEXT: vmovq %rsi, %xmm1 +; AVX512POPCNT-NEXT: vmovq %rdi, %xmm2 +; AVX512POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512POPCNT-NEXT: vmovq %r9, %xmm2 +; AVX512POPCNT-NEXT: vmovq %r8, %xmm3 +; AVX512POPCNT-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] +; AVX512POPCNT-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm2, %ymm2 +; AVX512POPCNT-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512POPCNT-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512POPCNT-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512POPCNT-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpopcntq %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512POPCNT-NEXT: vpcompressq %zmm1, %zmm0 {%k1} {z} +; AVX512POPCNT-NEXT: vmovd %xmm0, %eax +; AVX512POPCNT-NEXT: vzeroupper +; AVX512POPCNT-NEXT: retq %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 -1) %res = trunc i512 %cnt to i32 ret i32 %res @@ -4761,53 +4663,36 @@ define i32 @load_cttz_undef_i512(ptr %p0) nounwind { ; AVX2-NEXT: popq %r15 ; AVX2-NEXT: retq ; -; AVX512-LABEL: load_cttz_undef_i512: -; AVX512: # %bb.0: -; AVX512-NEXT: pushq %r14 -; AVX512-NEXT: pushq %rbx -; AVX512-NEXT: movq 48(%rdi), %r11 -; AVX512-NEXT: movq 40(%rdi), %r9 -; AVX512-NEXT: movq 32(%rdi), %r10 -; AVX512-NEXT: movq 24(%rdi), %r8 -; AVX512-NEXT: movq 16(%rdi), %rdx -; AVX512-NEXT: movq (%rdi), %rcx -; AVX512-NEXT: movq 8(%rdi), %rsi -; AVX512-NEXT: tzcntq %rcx, %rax -; AVX512-NEXT: tzcntq %rsi, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %rcx, %rcx -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: tzcntq %rdx, %rax -; AVX512-NEXT: tzcntq %r8, %rbx -; AVX512-NEXT: addl $64, %ebx -; AVX512-NEXT: testq %rdx, %rdx -; AVX512-NEXT: cmovnel %eax, %ebx -; AVX512-NEXT: subl $-128, %ebx -; AVX512-NEXT: movq %rcx, %rax -; AVX512-NEXT: orq %rsi, %rax -; AVX512-NEXT: cmovnel %r14d, %ebx -; AVX512-NEXT: tzcntq %r10, %rax -; AVX512-NEXT: tzcntq %r9, %r14 -; AVX512-NEXT: addl $64, %r14d -; AVX512-NEXT: testq %r10, %r10 -; AVX512-NEXT: cmovnel %eax, %r14d -; AVX512-NEXT: tzcntq 56(%rdi), %rax -; AVX512-NEXT: tzcntq %r11, %rdi -; AVX512-NEXT: addl $64, %eax -; AVX512-NEXT: testq %r11, %r11 -; AVX512-NEXT: cmovnel %edi, %eax -; AVX512-NEXT: subl $-128, %eax -; AVX512-NEXT: orq %r9, %r10 -; AVX512-NEXT: cmovnel %r14d, %eax -; AVX512-NEXT: addl $256, %eax # imm = 0x100 -; AVX512-NEXT: orq %r8, %rsi -; AVX512-NEXT: orq %rdx, %rcx -; AVX512-NEXT: orq %rsi, %rcx -; AVX512-NEXT: cmovnel %ebx, %eax -; AVX512-NEXT: # kill: def $eax killed $eax killed $rax -; AVX512-NEXT: popq %rbx -; AVX512-NEXT: popq %r14 -; AVX512-NEXT: retq +; AVX512F-LABEL: load_cttz_undef_i512: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512F-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [64,128,192,256,320,384,448,512] +; AVX512F-NEXT: vplzcntq %zmm1, %zmm1 +; AVX512F-NEXT: vpsubq %zmm1, %zmm2, %zmm1 +; AVX512F-NEXT: 
vptestmq %zmm0, %zmm0, %k1 +; AVX512F-NEXT: vpcompressq %zmm1, %zmm0 {%k1} {z} +; AVX512F-NEXT: vmovq %xmm0, %rax +; AVX512F-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512POPCNT-LABEL: load_cttz_undef_i512: +; AVX512POPCNT: # %bb.0: +; AVX512POPCNT-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512POPCNT-NEXT: vpternlogd {{.*#+}} zmm1 = -1 +; AVX512POPCNT-NEXT: vpaddq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpandnq %zmm1, %zmm0, %zmm1 +; AVX512POPCNT-NEXT: vpopcntq %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512POPCNT-NEXT: vptestmq %zmm0, %zmm0, %k1 +; AVX512POPCNT-NEXT: vpcompressq %zmm1, %zmm0 {%k1} {z} +; AVX512POPCNT-NEXT: vmovq %xmm0, %rax +; AVX512POPCNT-NEXT: # kill: def $eax killed $eax killed $rax +; AVX512POPCNT-NEXT: vzeroupper +; AVX512POPCNT-NEXT: retq %a0 = load i512, ptr %p0 %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 -1) %res = trunc i512 %cnt to i32 From 1deaedd972b21b9eff03e1b89207329b71b2824c Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Wed, 12 Nov 2025 07:21:13 -0800 Subject: [PATCH 14/34] [ADT] Make DenseMapBase::swap the public entry point (#167650) Without this patch, DenseMap::swap and SmallDenseMap::swap are inconsistent because DenseMap::swap increments the epoch while SmallDenseMap::swap does not. This patch solves the inconsistency by making DenseMapBase::swap the public entry point and renaming the existing swap to swapImpl. To ease the review process, this patch does not move or group functions according to access specifiers like private: and protected:. --- llvm/include/llvm/ADT/DenseMap.h | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index 22ef7ed64451e..d5b13e7731550 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -360,6 +360,12 @@ class DenseMapBase : public DebugEpochBase { return getBuckets(); } + void swap(DerivedT &RHS) { + this->incrementEpoch(); + RHS.incrementEpoch(); + derived().swapImpl(RHS); + } + protected: DenseMapBase() = default; @@ -736,7 +742,7 @@ class DenseMap : public DenseMapBase, DenseMap(DenseMap &&other) : BaseT() { init(0); - swap(other); + this->swap(other); } template DenseMap(const InputIt &I, const InputIt &E) { @@ -756,15 +762,15 @@ class DenseMap : public DenseMapBase, deallocateBuckets(); } - void swap(DenseMap &RHS) { - this->incrementEpoch(); - RHS.incrementEpoch(); +private: + void swapImpl(DenseMap &RHS) { std::swap(Buckets, RHS.Buckets); std::swap(NumEntries, RHS.NumEntries); std::swap(NumTombstones, RHS.NumTombstones); std::swap(NumBuckets, RHS.NumBuckets); } +public: DenseMap &operator=(const DenseMap &other) { if (&other != this) this->copyFrom(other); @@ -775,7 +781,7 @@ class DenseMap : public DenseMapBase, this->destroyAll(); deallocateBuckets(); init(0); - swap(other); + this->swap(other); return *this; } @@ -895,7 +901,7 @@ class SmallDenseMap SmallDenseMap(SmallDenseMap &&other) : BaseT() { init(0); - swap(other); + this->swap(other); } template @@ -916,7 +922,8 @@ class SmallDenseMap deallocateBuckets(); } - void swap(SmallDenseMap &RHS) { +private: + void swapImpl(SmallDenseMap &RHS) { unsigned TmpNumEntries = RHS.NumEntries; RHS.NumEntries = NumEntries; NumEntries = TmpNumEntries; @@ -987,6 +994,7 @@ class SmallDenseMap new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep)); } +public: SmallDenseMap &operator=(const SmallDenseMap &other) { if 
(&other != this)
      this->copyFrom(other);
@@ -997,7 +1005,7 @@ class SmallDenseMap
     this->destroyAll();
     deallocateBuckets();
     init(0);
-    swap(other);
+    this->swap(other);
     return *this;
   }

From 834a3cca3145775e948e9c081db9741152b11392 Mon Sep 17 00:00:00 2001
From: Steven Perron
Date: Wed, 12 Nov 2025 10:27:31 -0500
Subject: [PATCH 15/34] [SPIRV] Handle ptrcast between array and vector types
 (#166418)

This commit adds support for legalizing pointer casts between array and
vector types within the SPIRV backend. This is necessary to handle cases
where a vector is loaded from or stored to an array, which can occur with
HLSL matrix types.

The following changes are included:
- Added loadVectorFromArray to load a vector from an array.
- Added storeArrayFromVector to store a vector to an array.
- Added the test case load-store-vec-from-array.ll to verify the
  functionality.
---
 .../Target/SPIRV/SPIRVLegalizePointerCast.cpp | 80 +++++++++++++++++++
 .../pointers/load-store-vec-from-array.ll     | 54 +++++++++++++
 2 files changed, 134 insertions(+)
 create mode 100644 llvm/test/CodeGen/SPIRV/pointers/load-store-vec-from-array.ll

diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
index 65dffc7908b78..4ce871b6f5e5d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
@@ -116,6 +116,81 @@ class SPIRVLegalizePointerCast : public FunctionPass {
     return LI;
   }
 
+  // Loads elements from an array and constructs a vector.
+  Value *loadVectorFromArray(IRBuilder<> &B, FixedVectorType *TargetType,
+                             Value *Source) {
+    // Load each element of the array.
+    SmallVector<Value *> LoadedElements;
+    for (unsigned i = 0; i < TargetType->getNumElements(); ++i) {
+      // Create a GEP to access the i-th element of the array.
+      SmallVector<Type *> Types = {Source->getType(), Source->getType()};
+      SmallVector<Value *> Args;
+      Args.push_back(B.getInt1(false));
+      Args.push_back(Source);
+      Args.push_back(B.getInt32(0));
+      Args.push_back(ConstantInt::get(B.getInt32Ty(), i));
+      auto *ElementPtr = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
+      GR->buildAssignPtr(B, TargetType->getElementType(), ElementPtr);
+
+      // Load the value from the element pointer.
+      Value *Load = B.CreateLoad(TargetType->getElementType(), ElementPtr);
+      buildAssignType(B, TargetType->getElementType(), Load);
+      LoadedElements.push_back(Load);
+    }
+
+    // Build the vector from the loaded elements.
+    Value *NewVector = PoisonValue::get(TargetType);
+    buildAssignType(B, TargetType, NewVector);
+
+    for (unsigned i = 0; i < TargetType->getNumElements(); ++i) {
+      Value *Index = B.getInt32(i);
+      SmallVector<Type *> Types = {TargetType, TargetType,
+                                   TargetType->getElementType(),
+                                   Index->getType()};
+      SmallVector<Value *> Args = {NewVector, LoadedElements[i], Index};
+      NewVector = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
+      buildAssignType(B, TargetType, NewVector);
+    }
+    return NewVector;
+  }
+
+  // Stores elements from a vector into an array.
+  void storeArrayFromVector(IRBuilder<> &B, Value *SrcVector,
+                            Value *DstArrayPtr, ArrayType *ArrTy,
+                            Align Alignment) {
+    auto *VecTy = cast<FixedVectorType>(SrcVector->getType());
+
+    // Ensure the element types of the array and vector are the same.
+    assert(VecTy->getElementType() == ArrTy->getElementType() &&
+           "Element types of array and vector must be the same.");
+
+    for (unsigned i = 0; i < VecTy->getNumElements(); ++i) {
+      // Create a GEP to access the i-th element of the array.
+      SmallVector<Type *> Types = {DstArrayPtr->getType(),
+                                   DstArrayPtr->getType()};
+      SmallVector<Value *> Args;
+      Args.push_back(B.getInt1(false));
+      Args.push_back(DstArrayPtr);
+      Args.push_back(B.getInt32(0));
+      Args.push_back(ConstantInt::get(B.getInt32Ty(), i));
+      auto *ElementPtr = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
+      GR->buildAssignPtr(B, ArrTy->getElementType(), ElementPtr);
+
+      // Extract the element from the vector and store it.
+      Value *Index = B.getInt32(i);
+      SmallVector<Type *> EltTypes = {VecTy->getElementType(), VecTy,
+                                      Index->getType()};
+      SmallVector<Value *> EltArgs = {SrcVector, Index};
+      Value *Element =
+          B.CreateIntrinsic(Intrinsic::spv_extractelt, {EltTypes}, {EltArgs});
+      buildAssignType(B, VecTy->getElementType(), Element);
+
+      Types = {Element->getType(), ElementPtr->getType()};
+      Args = {Element, ElementPtr, B.getInt16(2), B.getInt8(Alignment.value())};
+      B.CreateIntrinsic(Intrinsic::spv_store, {Types}, {Args});
+    }
+  }
+
   // Replaces the load instruction to get rid of the ptrcast used as source
   // operand.
   void transformLoad(IRBuilder<> &B, LoadInst *LI, Value *CastedOperand,
@@ -154,6 +229,8 @@ class SPIRVLegalizePointerCast : public FunctionPass {
     //  - float v = s.m;
     else if (SST && SST->getTypeAtIndex(0u) == ToTy)
       Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI);
+    else if (SAT && DVT && SAT->getElementType() == DVT->getElementType())
+      Output = loadVectorFromArray(B, DVT, OriginalOperand);
     else
       llvm_unreachable("Unimplemented implicit down-cast from load.");
 
@@ -288,6 +365,7 @@ class SPIRVLegalizePointerCast : public FunctionPass {
     auto *S_VT = dyn_cast<FixedVectorType>(FromTy);
     auto *D_ST = dyn_cast<StructType>(ToTy);
     auto *D_VT = dyn_cast<FixedVectorType>(ToTy);
+    auto *D_AT = dyn_cast<ArrayType>(ToTy);
     B.SetInsertPoint(BadStore);
 
     if (D_ST && isTypeFirstElementAggregate(FromTy, D_ST))
@@ -296,6 +374,8 @@ class SPIRVLegalizePointerCast : public FunctionPass {
       storeVectorFromVector(B, Src, Dst, Alignment);
     else if (D_VT && !S_VT && FromTy == D_VT->getElementType())
       storeToFirstValueAggregate(B, Src, Dst, D_VT, Alignment);
+    else if (D_AT && S_VT && S_VT->getElementType() == D_AT->getElementType())
+      storeArrayFromVector(B, Src, Dst, D_AT, Alignment);
     else
      llvm_unreachable("Unsupported ptrcast use in store.
Please fix."); diff --git a/llvm/test/CodeGen/SPIRV/pointers/load-store-vec-from-array.ll b/llvm/test/CodeGen/SPIRV/pointers/load-store-vec-from-array.ll new file mode 100644 index 0000000000000..917bb27afad00 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/pointers/load-store-vec-from-array.ll @@ -0,0 +1,54 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: [[FLOAT:%[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: [[VEC4FLOAT:%[0-9]+]] = OpTypeVector [[FLOAT]] 4 +; CHECK-DAG: [[UINT_TYPE:%[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: [[UINT4:%[0-9]+]] = OpConstant [[UINT_TYPE]] 4 +; CHECK-DAG: [[ARRAY4FLOAT:%[0-9]+]] = OpTypeArray [[FLOAT]] [[UINT4]] +; CHECK-DAG: [[PTR_ARRAY4FLOAT:%[0-9]+]] = OpTypePointer Private [[ARRAY4FLOAT]] +; CHECK-DAG: [[G_IN:%[0-9]+]] = OpVariable [[PTR_ARRAY4FLOAT]] Private +; CHECK-DAG: [[G_OUT:%[0-9]+]] = OpVariable [[PTR_ARRAY4FLOAT]] Private +; CHECK-DAG: [[UINT0:%[0-9]+]] = OpConstant [[UINT_TYPE]] 0 +; CHECK-DAG: [[UINT1:%[0-9]+]] = OpConstant [[UINT_TYPE]] 1 +; CHECK-DAG: [[UINT2:%[0-9]+]] = OpConstant [[UINT_TYPE]] 2 +; CHECK-DAG: [[UINT3:%[0-9]+]] = OpConstant [[UINT_TYPE]] 3 +; CHECK-DAG: [[PTR_FLOAT:%[0-9]+]] = OpTypePointer Private [[FLOAT]] +; CHECK-DAG: [[UNDEF_VEC:%[0-9]+]] = OpUndef [[VEC4FLOAT]] + +@G_in = internal addrspace(10) global [4 x float] zeroinitializer +@G_out = internal addrspace(10) global [4 x float] zeroinitializer + +define spir_func void @main() { +entry: +; CHECK: [[GEP0:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_IN]] [[UINT0]] +; CHECK-NEXT: [[LOAD0:%[0-9]+]] = OpLoad [[FLOAT]] [[GEP0]] +; CHECK-NEXT: [[GEP1:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_IN]] [[UINT1]] +; CHECK-NEXT: [[LOAD1:%[0-9]+]] = OpLoad [[FLOAT]] [[GEP1]] +; CHECK-NEXT: [[GEP2:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_IN]] [[UINT2]] +; CHECK-NEXT: [[LOAD2:%[0-9]+]] = OpLoad [[FLOAT]] [[GEP2]] +; CHECK-NEXT: [[GEP3:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_IN]] [[UINT3]] +; CHECK-NEXT: [[LOAD3:%[0-9]+]] = OpLoad [[FLOAT]] [[GEP3]] +; CHECK-NEXT: [[VEC_INSERT0:%[0-9]+]] = OpCompositeInsert [[VEC4FLOAT]] [[LOAD0]] [[UNDEF_VEC]] 0 +; CHECK-NEXT: [[VEC_INSERT1:%[0-9]+]] = OpCompositeInsert [[VEC4FLOAT]] [[LOAD1]] [[VEC_INSERT0]] 1 +; CHECK-NEXT: [[VEC_INSERT2:%[0-9]+]] = OpCompositeInsert [[VEC4FLOAT]] [[LOAD2]] [[VEC_INSERT1]] 2 +; CHECK-NEXT: [[VEC:%[0-9]+]] = OpCompositeInsert [[VEC4FLOAT]] [[LOAD3]] [[VEC_INSERT2]] 3 + %0 = load <4 x float>, ptr addrspace(10) @G_in, align 64 + +; CHECK-NEXT: [[GEP_OUT0:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_OUT]] [[UINT0]] +; CHECK-NEXT: [[VEC_EXTRACT0:%[0-9]+]] = OpCompositeExtract [[FLOAT]] [[VEC]] 0 +; CHECK-NEXT: OpStore [[GEP_OUT0]] [[VEC_EXTRACT0]] +; CHECK-NEXT: [[GEP_OUT1:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_OUT]] [[UINT1]] +; CHECK-NEXT: [[VEC_EXTRACT1:%[0-9]+]] = OpCompositeExtract [[FLOAT]] [[VEC]] 1 +; CHECK-NEXT: OpStore [[GEP_OUT1]] [[VEC_EXTRACT1]] +; CHECK-NEXT: [[GEP_OUT2:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_OUT]] [[UINT2]] +; CHECK-NEXT: [[VEC_EXTRACT2:%[0-9]+]] = OpCompositeExtract [[FLOAT]] [[VEC]] 2 +; CHECK-NEXT: OpStore [[GEP_OUT2]] [[VEC_EXTRACT2]] +; CHECK-NEXT: [[GEP_OUT3:%[0-9]+]] = OpAccessChain [[PTR_FLOAT]] [[G_OUT]] [[UINT3]] +; CHECK-NEXT: [[VEC_EXTRACT3:%[0-9]+]] = OpCompositeExtract [[FLOAT]] [[VEC]] 3 +; CHECK-NEXT: OpStore [[GEP_OUT3]] [[VEC_EXTRACT3]] + store <4 x float> %0, ptr addrspace(10) @G_out, 
align 64
+
+; CHECK-NEXT: OpReturn
+  ret void
+}

From 5932477af4948d01ee317e8326954b371d0f0a4f Mon Sep 17 00:00:00 2001
From: Victor Campos
Date: Wed, 12 Nov 2025 15:33:41 +0000
Subject: [PATCH 16/34] [libc] Add support for MVE to Arm startup code
 (#167338)

In order to have MVE support, the same bits of the CPACR register that
enable the floating-point extension must be set.
---
 libc/startup/baremetal/arm/start.cpp | 27 ++++++++++++++++-----------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/libc/startup/baremetal/arm/start.cpp b/libc/startup/baremetal/arm/start.cpp
index 4740067722022..db89828a0b45e 100644
--- a/libc/startup/baremetal/arm/start.cpp
+++ b/libc/startup/baremetal/arm/start.cpp
@@ -131,20 +131,30 @@ namespace LIBC_NAMESPACE_DECL {
   __arm_wsr("CPSR_c", 0x13); // SVC
 #endif
 
-#ifdef __ARM_FP
-// Enable FPU
-#if __ARM_ARCH_PROFILE == 'M'
+#if __ARM_ARCH_PROFILE == 'M' && \
+    (defined(__ARM_FP) || defined(__ARM_FEATURE_MVE))
+  // Enable FPU and MVE. They can't be enabled independently: the two are
+  // governed by the same bits in CPACR.
   // Based on
   // https://developer.arm.com/documentation/dui0646/c/Cortex-M7-Peripherals/Floating-Point-Unit/Enabling-the-FPU
-  // Set CPACR cp10 and cp11
-  auto cpacr = (volatile uint32_t *const)0xE000ED88;
+  // Set CPACR cp10 and cp11.
+  auto cpacr = reinterpret_cast<volatile uint32_t *>(0xE000ED88);
   *cpacr |= (0xF << 20);
   __dsb(0xF);
   __isb(0xF);
-#elif __ARM_ARCH_PROFILE == 'A' || __ARM_ARCH_PROFILE == 'R'
+#if defined(__ARM_FEATURE_MVE)
+  // Initialize low-overhead-loop tail predication to its neutral state
+  uint32_t fpscr;
+  __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(fpscr) : :);
+  fpscr |= (0x4 << 16);
+  __asm__ __volatile__("vmsr FPSCR, %0" : : "r"(fpscr) :);
+#endif
+#elif (__ARM_ARCH_PROFILE == 'A' || __ARM_ARCH_PROFILE == 'R') && \
+    defined(__ARM_FP)
+  // Enable FPU.
   // Based on
   // https://developer.arm.com/documentation/dui0472/m/Compiler-Coding-Practices/Enabling-NEON-and-FPU-for-bare-metal
-  // Set CPACR cp10 and cp11
+  // Set CPACR cp10 and cp11.
   uint32_t cpacr = __arm_rsr("p15:0:c1:c0:2");
   cpacr |= (0xF << 20);
   __arm_wsr("p15:0:c1:c0:2", cpacr);
@@ -152,9 +162,8 @@ namespace LIBC_NAMESPACE_DECL {
   // Set FPEXC.EN
   uint32_t fpexc;
   __asm__ __volatile__("vmrs %0, FPEXC" : "=r"(fpexc) : :);
-  fpexc |= (1 << 30);
+  fpexc |= (0x1 << 30);
   __asm__ __volatile__("vmsr FPEXC, %0" : : "r"(fpexc) :);
-#endif
 #endif
 
   // Perform the equivalent of scatterloading

From 8280070a7306b6d939b616e34407b53ceac73b92 Mon Sep 17 00:00:00 2001
From: Julian Nagele
Date: Wed, 12 Nov 2025 15:35:03 +0000
Subject: [PATCH 17/34] [VectorCombine] Try to scalarize vector loads feeding
 bitcast instructions. (#164682)

This change converts vector loads to scalar loads when their only users
convert them back to scalars anyway.
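As an illustrative sketch (editorial, not part of the committed diff; it
mirrors the load_v4i8_bitcast_to_i32 test added below), a load such as

  %v = load <4 x i8>, ptr %p, align 4
  %r = bitcast <4 x i8> %v to i32

where every user is a bitcast to the same scalar type of matching bit width
can instead be emitted directly as

  %r = load i32, ptr %p, align 4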
alive2 proof: https://alive2.llvm.org/ce/z/U_rvht
---
 .../Transforms/Vectorize/VectorCombine.cpp    | 142 ++++++++++++++----
 .../AArch64/scalarize-load-ext-extract.ll     |  32 ++++
 .../AArch64/load-bitcast-scalarization.ll     | 136 +++++++++++++++++
 3 files changed, 284 insertions(+), 26 deletions(-)
 create mode 100644 llvm/test/Transforms/PhaseOrdering/AArch64/scalarize-load-ext-extract.ll
 create mode 100644 llvm/test/Transforms/VectorCombine/AArch64/load-bitcast-scalarization.ll

diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index ed3a0a0ab023d..f1890e4f5fb95 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -129,7 +129,9 @@ class VectorCombine {
   bool foldExtractedCmps(Instruction &I);
   bool foldBinopOfReductions(Instruction &I);
   bool foldSingleElementStore(Instruction &I);
-  bool scalarizeLoadExtract(Instruction &I);
+  bool scalarizeLoad(Instruction &I);
+  bool scalarizeLoadExtract(LoadInst *LI, VectorType *VecTy, Value *Ptr);
+  bool scalarizeLoadBitcast(LoadInst *LI, VectorType *VecTy, Value *Ptr);
   bool scalarizeExtExtract(Instruction &I);
   bool foldConcatOfBoolMasks(Instruction &I);
   bool foldPermuteOfBinops(Instruction &I);
@@ -1852,11 +1854,9 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {
   return false;
 }
 
-/// Try to scalarize vector loads feeding extractelement instructions.
-bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
-  if (!TTI.allowVectorElementIndexingUsingGEP())
-    return false;
-
+/// Try to scalarize vector loads feeding extractelement or bitcast
+/// instructions.
+bool VectorCombine::scalarizeLoad(Instruction &I) {
   Value *Ptr;
   if (!match(&I, m_Load(m_Value(Ptr))))
     return false;
@@ -1866,35 +1866,30 @@
   if (LI->isVolatile() || !DL->typeSizeEqualsStoreSize(VecTy->getScalarType()))
     return false;
 
-  InstructionCost OriginalCost =
-      TTI.getMemoryOpCost(Instruction::Load, VecTy, LI->getAlign(),
-                          LI->getPointerAddressSpace(), CostKind);
-  InstructionCost ScalarizedCost = 0;
-
+  bool AllExtracts = true;
+  bool AllBitcasts = true;
   Instruction *LastCheckedInst = LI;
   unsigned NumInstChecked = 0;
-  DenseMap<ExtractElementInst *, ScalarizationResult> NeedFreeze;
-  auto FailureGuard = make_scope_exit([&]() {
-    // If the transform is aborted, discard the ScalarizationResults.
-    for (auto &Pair : NeedFreeze)
-      Pair.second.discard();
-  });
-  // Check if all users of the load are extracts with no memory modifications
-  // between the load and the extract. Compute the cost of both the original
-  // code and the scalarized version.
+  // Check what type of users we have (must either all be extracts or
+  // bitcasts) and ensure no memory modifications between the load and
+  // its users.
   for (User *U : LI->users()) {
-    auto *UI = dyn_cast<ExtractElementInst>(U);
+    auto *UI = dyn_cast<Instruction>(U);
     if (!UI || UI->getParent() != LI->getParent())
       return false;
 
-    // If any extract is waiting to be erased, then bail out as this will
+    // If any user is waiting to be erased, then bail out as this will
     // distort the cost calculation and possibly lead to infinite loops.
     if (UI->use_empty())
       return false;
 
-    // Check if any instruction between the load and the extract may modify
-    // memory.
+    if (!isa<ExtractElementInst>(UI))
+      AllExtracts = false;
+    if (!isa<BitCastInst>(UI))
+      AllBitcasts = false;
+
+    // Check if any instruction between the load and the user may modify memory.
    if (LastCheckedInst->comesBefore(UI)) {
       for (Instruction &I :
            make_range(std::next(LI->getIterator()), UI->getIterator())) {
@@ -1906,6 +1901,35 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
       }
       LastCheckedInst = UI;
     }
+  }
+
+  if (AllExtracts)
+    return scalarizeLoadExtract(LI, VecTy, Ptr);
+  if (AllBitcasts)
+    return scalarizeLoadBitcast(LI, VecTy, Ptr);
+  return false;
+}
+
+/// Try to scalarize vector loads feeding extractelement instructions.
+bool VectorCombine::scalarizeLoadExtract(LoadInst *LI, VectorType *VecTy,
+                                         Value *Ptr) {
+  if (!TTI.allowVectorElementIndexingUsingGEP())
+    return false;
+
+  DenseMap<ExtractElementInst *, ScalarizationResult> NeedFreeze;
+  auto FailureGuard = make_scope_exit([&]() {
+    // If the transform is aborted, discard the ScalarizationResults.
+    for (auto &Pair : NeedFreeze)
+      Pair.second.discard();
+  });
+
+  InstructionCost OriginalCost =
+      TTI.getMemoryOpCost(Instruction::Load, VecTy, LI->getAlign(),
+                          LI->getPointerAddressSpace(), CostKind);
+  InstructionCost ScalarizedCost = 0;
+
+  for (User *U : LI->users()) {
+    auto *UI = cast<ExtractElementInst>(U);
     auto ScalarIdx =
         canScalarizeAccess(VecTy, UI->getIndexOperand(), LI, AC, DT);
@@ -1927,7 +1951,7 @@
                                       nullptr, nullptr, CostKind);
   }
 
-  LLVM_DEBUG(dbgs() << "Found all extractions of a vector load: " << I
+  LLVM_DEBUG(dbgs() << "Found all extractions of a vector load: " << *LI
                     << "\n  LoadExtractCost: " << OriginalCost
                     << " vs ScalarizedCost: " << ScalarizedCost << "\n");
 
@@ -1973,6 +1997,72 @@
   return true;
 }
 
+/// Try to scalarize vector loads feeding bitcast instructions.
+bool VectorCombine::scalarizeLoadBitcast(LoadInst *LI, VectorType *VecTy,
+                                         Value *Ptr) {
+  InstructionCost OriginalCost =
+      TTI.getMemoryOpCost(Instruction::Load, VecTy, LI->getAlign(),
+                          LI->getPointerAddressSpace(), CostKind);
+
+  Type *TargetScalarType = nullptr;
+  unsigned VecBitWidth = DL->getTypeSizeInBits(VecTy);
+
+  for (User *U : LI->users()) {
+    auto *BC = cast<BitCastInst>(U);
+
+    Type *DestTy = BC->getDestTy();
+    if (!DestTy->isIntegerTy() && !DestTy->isFloatingPointTy())
+      return false;
+
+    unsigned DestBitWidth = DL->getTypeSizeInBits(DestTy);
+    if (DestBitWidth != VecBitWidth)
+      return false;
+
+    // All bitcasts must target the same scalar type.
+    if (!TargetScalarType)
+      TargetScalarType = DestTy;
+    else if (TargetScalarType != DestTy)
+      return false;
+
+    OriginalCost +=
+        TTI.getCastInstrCost(Instruction::BitCast, TargetScalarType, VecTy,
+                             TTI.getCastContextHint(BC), CostKind, BC);
+  }
+
+  if (!TargetScalarType)
+    return false;
+
+  assert(!LI->user_empty() && "Unexpected load without bitcast users");
+  InstructionCost ScalarizedCost =
+      TTI.getMemoryOpCost(Instruction::Load, TargetScalarType, LI->getAlign(),
+                          LI->getPointerAddressSpace(), CostKind);
+
+  LLVM_DEBUG(dbgs() << "Found vector load feeding only bitcasts: " << *LI
+                    << "\n  OriginalCost: " << OriginalCost
+                    << " vs ScalarizedCost: " << ScalarizedCost << "\n");
+
+  if (ScalarizedCost >= OriginalCost)
+    return false;
+
+  // Ensure we add the load back to the worklist BEFORE its users so they can
+  // be erased in the correct order.
+  Worklist.push(LI);
+
+  Builder.SetInsertPoint(LI);
+  auto *ScalarLoad =
+      Builder.CreateLoad(TargetScalarType, Ptr, LI->getName() + ".scalar");
+  ScalarLoad->setAlignment(LI->getAlign());
+  ScalarLoad->copyMetadata(*LI);
+
+  // Replace all bitcast users with the scalar load.
+  for (User *U : LI->users()) {
+    auto *BC = cast<BitCastInst>(U);
+    replaceValue(*BC, *ScalarLoad, false);
+  }
+
+  return true;
+}
+
 bool VectorCombine::scalarizeExtExtract(Instruction &I) {
   if (!TTI.allowVectorElementIndexingUsingGEP())
     return false;
@@ -4585,7 +4675,7 @@ bool VectorCombine::run() {
     if (IsVectorType) {
       if (scalarizeOpOrCmp(I))
         return true;
-      if (scalarizeLoadExtract(I))
+      if (scalarizeLoad(I))
         return true;
       if (scalarizeExtExtract(I))
         return true;
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/scalarize-load-ext-extract.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/scalarize-load-ext-extract.ll
new file mode 100644
index 0000000000000..f7918b0e0a798
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/scalarize-load-ext-extract.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -O3 -mtriple=arm64-apple-darwinos -S %s | FileCheck %s
+
+define noundef i32 @load_ext_extract(ptr %src) {
+; CHECK-LABEL: define noundef range(i32 0, 1021) i32 @load_ext_extract(
+; CHECK-SAME: ptr readonly captures(none) [[SRC:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[SRC]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = lshr i32 [[TMP14]], 24
+; CHECK-NEXT: [[TMP16:%.*]] = lshr i32 [[TMP14]], 16
+; CHECK-NEXT: [[TMP17:%.*]] = and i32 [[TMP16]], 255
+; CHECK-NEXT: [[TMP18:%.*]] = lshr i32 [[TMP14]], 8
+; CHECK-NEXT: [[TMP19:%.*]] = and i32 [[TMP18]], 255
+; CHECK-NEXT: [[TMP20:%.*]] = and i32 [[TMP14]], 255
+; CHECK-NEXT: [[ADD1:%.*]] = add nuw nsw i32 [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[ADD2:%.*]] = add nuw nsw i32 [[ADD1]], [[TMP17]]
+; CHECK-NEXT: [[ADD3:%.*]] = add nuw nsw i32 [[ADD2]], [[TMP15]]
+; CHECK-NEXT: ret i32 [[ADD3]]
+;
+entry:
+  %x = load <4 x i8>, ptr %src, align 4
+  %ext = zext nneg <4 x i8> %x to <4 x i32>
+  %ext.0 = extractelement <4 x i32> %ext, i64 0
+  %ext.1 = extractelement <4 x i32> %ext, i64 1
+  %ext.2 = extractelement <4 x i32> %ext, i64 2
+  %ext.3 = extractelement <4 x i32> %ext, i64 3
+
+  %add1 = add i32 %ext.0, %ext.1
+  %add2 = add i32 %add1, %ext.2
+  %add3 = add i32 %add2, %ext.3
+  ret i32 %add3
+}
diff --git a/llvm/test/Transforms/VectorCombine/AArch64/load-bitcast-scalarization.ll b/llvm/test/Transforms/VectorCombine/AArch64/load-bitcast-scalarization.ll
new file mode 100644
index 0000000000000..ca3df3310a795
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/AArch64/load-bitcast-scalarization.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=vector-combine -mtriple=arm64-apple-darwinos -S %s | FileCheck %s
+
+define i32 @load_v4i8_bitcast_to_i32(ptr %x) {
+; CHECK-LABEL: define i32 @load_v4i8_bitcast_to_i32(
+; CHECK-SAME: ptr [[X:%.*]]) {
+; CHECK-NEXT: [[R_SCALAR:%.*]] = load i32, ptr [[X]], align 4
+; CHECK-NEXT: ret i32 [[R_SCALAR]]
+;
+  %lv = load <4 x i8>, ptr %x
+  %r = bitcast <4 x i8> %lv to i32
+  ret i32 %r
+}
+
+define i64 @load_v2i32_bitcast_to_i64(ptr %x) {
+; CHECK-LABEL: define i64 @load_v2i32_bitcast_to_i64(
+; CHECK-SAME: ptr [[X:%.*]]) {
+; CHECK-NEXT: [[R_SCALAR:%.*]] = load i64, ptr [[X]], align 8
+; CHECK-NEXT: ret i64 [[R_SCALAR]]
+;
+  %lv = load <2 x i32>, ptr %x
+  %r = bitcast <2 x i32> %lv to i64
+  ret i64 %r
+}
+
+define float @load_v4i8_bitcast_to_float(ptr %x) {
+; CHECK-LABEL: define float @load_v4i8_bitcast_to_float(
+; CHECK-SAME: ptr [[X:%.*]]) {
+; CHECK-NEXT: [[R_SCALAR:%.*]] = load
float, ptr [[X]], align 4 +; CHECK-NEXT: ret float [[R_SCALAR]] +; + %lv = load <4 x i8>, ptr %x + %r = bitcast <4 x i8> %lv to float + ret float %r +} + +define float @load_v2i16_bitcast_to_float(ptr %x) { +; CHECK-LABEL: define float @load_v2i16_bitcast_to_float( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[R_SCALAR:%.*]] = load float, ptr [[X]], align 4 +; CHECK-NEXT: ret float [[R_SCALAR]] +; + %lv = load <2 x i16>, ptr %x + %r = bitcast <2 x i16> %lv to float + ret float %r +} + +define double @load_v4i16_bitcast_to_double(ptr %x) { +; CHECK-LABEL: define double @load_v4i16_bitcast_to_double( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[LV:%.*]] = load <4 x i16>, ptr [[X]], align 8 +; CHECK-NEXT: [[R_SCALAR:%.*]] = bitcast <4 x i16> [[LV]] to double +; CHECK-NEXT: ret double [[R_SCALAR]] +; + %lv = load <4 x i16>, ptr %x + %r = bitcast <4 x i16> %lv to double + ret double %r +} + +define double @load_v2i32_bitcast_to_double(ptr %x) { +; CHECK-LABEL: define double @load_v2i32_bitcast_to_double( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[LV:%.*]] = load <2 x i32>, ptr [[X]], align 8 +; CHECK-NEXT: [[R_SCALAR:%.*]] = bitcast <2 x i32> [[LV]] to double +; CHECK-NEXT: ret double [[R_SCALAR]] +; + %lv = load <2 x i32>, ptr %x + %r = bitcast <2 x i32> %lv to double + ret double %r +} + +; Multiple users with the same bitcast type should be scalarized. +define i32 @load_v4i8_bitcast_multiple_users_same_type(ptr %x) { +; CHECK-LABEL: define i32 @load_v4i8_bitcast_multiple_users_same_type( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[LV_SCALAR:%.*]] = load i32, ptr [[X]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LV_SCALAR]], [[LV_SCALAR]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %lv = load <4 x i8>, ptr %x + %r1 = bitcast <4 x i8> %lv to i32 + %r2 = bitcast <4 x i8> %lv to i32 + %add = add i32 %r1, %r2 + ret i32 %add +} + +; Different bitcast types should not be scalarized. +define i32 @load_v4i8_bitcast_multiple_users_different_types(ptr %x) { +; CHECK-LABEL: define i32 @load_v4i8_bitcast_multiple_users_different_types( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[LV:%.*]] = load <4 x i8>, ptr [[X]], align 4 +; CHECK-NEXT: [[R1:%.*]] = bitcast <4 x i8> [[LV]] to i32 +; CHECK-NEXT: [[R2:%.*]] = bitcast <4 x i8> [[LV]] to float +; CHECK-NEXT: [[R2_INT:%.*]] = bitcast float [[R2]] to i32 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[R1]], [[R2_INT]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %lv = load <4 x i8>, ptr %x + %r1 = bitcast <4 x i8> %lv to i32 + %r2 = bitcast <4 x i8> %lv to float + %r2.int = bitcast float %r2 to i32 + %add = add i32 %r1, %r2.int + ret i32 %add +} + +; Bitcast to vector should not be scalarized. +define <2 x i16> @load_v4i8_bitcast_to_vector(ptr %x) { +; CHECK-LABEL: define <2 x i16> @load_v4i8_bitcast_to_vector( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[LV:%.*]] = load <4 x i8>, ptr [[X]], align 4 +; CHECK-NEXT: [[R:%.*]] = bitcast <4 x i8> [[LV]] to <2 x i16> +; CHECK-NEXT: ret <2 x i16> [[R]] +; + %lv = load <4 x i8>, ptr %x + %r = bitcast <4 x i8> %lv to <2 x i16> + ret <2 x i16> %r +} + +; Load with both bitcast users and other users should not be scalarized. 
+define i32 @load_v4i8_mixed_users(ptr %x) { +; CHECK-LABEL: define i32 @load_v4i8_mixed_users( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[LV:%.*]] = load <4 x i8>, ptr [[X]], align 4 +; CHECK-NEXT: [[R1:%.*]] = bitcast <4 x i8> [[LV]] to i32 +; CHECK-NEXT: [[R2:%.*]] = extractelement <4 x i8> [[LV]], i32 0 +; CHECK-NEXT: [[R2_EXT:%.*]] = zext i8 [[R2]] to i32 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[R1]], [[R2_EXT]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %lv = load <4 x i8>, ptr %x + %r1 = bitcast <4 x i8> %lv to i32 + %r2 = extractelement <4 x i8> %lv, i32 0 + %r2.ext = zext i8 %r2 to i32 + %add = add i32 %r1, %r2.ext + ret i32 %add +} From d4b43f0124c74daafca77626adb5fc5d3a81da39 Mon Sep 17 00:00:00 2001 From: Asher Mancinelli Date: Wed, 12 Nov 2025 07:49:11 -0800 Subject: [PATCH 18/34] [MLIR][NFC] Fix minor spelling issues (#167606) Fixes minor spelling issues in the shape dialect's passes tablegen file. --- mlir/include/mlir/Dialect/Shape/Transforms/Passes.td | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td b/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td index 588b5ebc5e37a..0b8c465fdcbbe 100644 --- a/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Shape/Transforms/Passes.td @@ -16,7 +16,7 @@ def OutlineShapeComputationPass let summary = "Using shape.func to preserve shape computation"; let description = [{ This pass outlines the shape computation part in high level IR by adding - shape.func and populate corresponding mapping infoemation into + shape.func and populate corresponding mapping information into ShapeMappingAnalysis. The shape computation part is usually introduced by shape reification, and each single dynamic shape is denoted by shape.with_shape. @@ -80,12 +80,12 @@ def OutlineShapeComputationPass For the above example, the shape computation is inlined in the input IR, which is used for two values' (test.abs and test.concat) shape. And the shape - compuatation part is outlined in the output IR. + computation part is outlined in the output IR. - And the shape mapping infomation will be: + And the shape mapping information will be: ``` - // ---- Shape Mapping Infomation ----- + // ---- Shape Mapping Information ----- // - Shape for: %0 = "test.abs"(%arg0) : (tensor) -> tensor :: @shape_cal_0( of type 'tensor' at index: 0) // - Shape for: %1 = "test.concat"(%0, %arg1) {axis = 0 : i64} : (tensor, tensor<2x4x?xf32>) -> tensor :: @shape_cal_1( of type 'tensor' at index: 0) ``` From 0df5dee28a11e516e7b9e1ab8b50d916174a8f50 Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Wed, 12 Nov 2025 09:55:57 -0600 Subject: [PATCH 19/34] [OpenMP] Apply COLLAPSE to innermost leaf that allows it (#167565) As per the wording from 5.2, the COLLAPSE clause applies once to the entire construct. The 6.0 spec has a somewhat similar wording with the same intent. In practice, apply the clause to the innermost leaf constituent that allows it, without requiring it to be the exact innermost leaf. 
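For illustration (editorial, not part of this patch): given a combined
construct such as

  #pragma omp target teams distribute parallel for collapse(2)
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < m; ++j)
      work(i, j);   /* loop body elided; names illustrative */

the decomposition now attaches collapse(2) to the innermost leaf constituent
that accepts the clause (the worksharing-loop here), instead of checking only
the very last leaf and rejecting the clause otherwise.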
---
 .../llvm/Frontend/OpenMP/ConstructDecompositionT.h | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h
index 3918cecfc1e65..c8eebbf42a68e 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h
@@ -501,18 +501,7 @@
 template <typename C, typename H>
 bool ConstructDecompositionT<C, H>::applyClause(
     const tomp::clause::CollapseT<TypeTy, IdTy, ExprTy> &clause,
     const ClauseTy *node) {
-  // Apply "collapse" to the innermost directive. If it's not one that
-  // allows it flag an error.
-  if (!leafs.empty()) {
-    auto &last = leafs.back();
-
-    if (llvm::omp::isAllowedClauseForDirective(last.id, node->id, version)) {
-      last.clauses.push_back(node);
-      return true;
-    }
-  }
-
-  return false;
+  return applyToInnermost(node);
 }
 
 // DEFAULT

From 7647fc8bde18d43d5e540d767ab876cf4eb24e79 Mon Sep 17 00:00:00 2001
From: Utkarsh Saxena
Date: Wed, 12 Nov 2025 16:58:16 +0100
Subject: [PATCH 20/34] [LifetimeSafety] Add support for conditional operators
 (#167240)

Added support for conditional operators to the lifetime safety analysis:
a `VisitConditionalOperator` method on the `FactsGenerator` class now
handles the ternary operator (`?:`) by merging the origins of both
branches.

Fixes https://github.com/llvm/llvm-project/issues/157108
---
 .../Analyses/LifetimeSafety/FactsGenerator.h  |  1 +
 .../LifetimeSafety/FactsGenerator.cpp         |  9 +++
 .../Sema/warn-lifetime-safety-dataflow.cpp    | 17 +++++
 clang/test/Sema/warn-lifetime-safety.cpp      | 73 +++++++++++++++++++
 .../unittests/Analysis/LifetimeSafetyTest.cpp |  3 +-
 5 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/clang/include/clang/Analysis/Analyses/LifetimeSafety/FactsGenerator.h b/clang/include/clang/Analysis/Analyses/LifetimeSafety/FactsGenerator.h
index 5e58abee2bbb3..4c8ab3f859a49 100644
--- a/clang/include/clang/Analysis/Analyses/LifetimeSafety/FactsGenerator.h
+++ b/clang/include/clang/Analysis/Analyses/LifetimeSafety/FactsGenerator.h
@@ -43,6 +43,7 @@ class FactsGenerator : public ConstStmtVisitor<FactsGenerator> {
   void VisitUnaryOperator(const UnaryOperator *UO);
   void VisitReturnStmt(const ReturnStmt *RS);
   void VisitBinaryOperator(const BinaryOperator *BO);
+  void VisitConditionalOperator(const ConditionalOperator *CO);
   void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *OCE);
   void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *FCE);
   void VisitInitListExpr(const InitListExpr *ILE);
diff --git a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp
index bec8e1dabb0b5..381ff99aae420 100644
--- a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp
+++ b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp
@@ -176,6 +176,15 @@ void FactsGenerator::VisitBinaryOperator(const BinaryOperator *BO) {
     handleAssignment(BO->getLHS(), BO->getRHS());
 }
 
+void FactsGenerator::VisitConditionalOperator(const ConditionalOperator *CO) {
+  if (hasOrigin(CO)) {
+    // Merge origins from both branches of the conditional operator.
+    // We kill to clear the initial state and merge both origins into it.
+    killAndFlowOrigin(*CO, *CO->getTrueExpr());
+    flowOrigin(*CO, *CO->getFalseExpr());
+  }
+}
+
 void FactsGenerator::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *OCE) {
   // Assignment operators have special "kill-then-propagate" semantics
   // and are handled separately.
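// Illustrative note (editorial, not part of the diff): for code like
//   MyObj *p = cond ? &x : &y;
// the kill-and-merge above leaves the origin of p holding loans to both
// x and y, which is exactly what the updated tests below expect on each
// branch.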
diff --git a/clang/test/Sema/warn-lifetime-safety-dataflow.cpp b/clang/test/Sema/warn-lifetime-safety-dataflow.cpp index 31148b990d6bd..e9515b5d61006 100644 --- a/clang/test/Sema/warn-lifetime-safety-dataflow.cpp +++ b/clang/test/Sema/warn-lifetime-safety-dataflow.cpp @@ -414,3 +414,20 @@ void test_use_lifetimebound_call() { // CHECK: Expire ([[L_Y]] (Path: y)) // CHECK: Expire ([[L_X]] (Path: x)) } +// CHECK-LABEL: Function: test_conditional_operator +void test_conditional_operator(bool cond) { + MyObj x, y; + MyObj *p = cond ? &x : &y; +// CHECK: Block B{{[0-9]+}}: +// CHECK: Issue ([[L_X:[0-9]+]] (Path: x), ToOrigin: [[O_DRE_X:[0-9]+]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_X]] (Expr: DeclRefExpr)) +// CHECK: Block B{{[0-9]+}}: +// CHECK: Issue ([[L_Y:[0-9]+]] (Path: y), ToOrigin: [[O_DRE_Y:[0-9]+]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_Y:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_Y]] (Expr: DeclRefExpr)) +// CHECK: Block B{{[0-9]+}}: +// CHECK: OriginFlow (Dest: [[O_COND_OP:[0-9]+]] (Expr: ConditionalOperator), Src: [[O_ADDR_X]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_COND_OP]] (Expr: ConditionalOperator), Src: [[O_ADDR_Y]] (Expr: UnaryOperator), Merge) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_COND_OP]] (Expr: ConditionalOperator)) +// CHECK: Expire ([[L_Y]] (Path: y)) +// CHECK: Expire ([[L_X]] (Path: x)) +} diff --git a/clang/test/Sema/warn-lifetime-safety.cpp b/clang/test/Sema/warn-lifetime-safety.cpp index 4f234f0ac6e2d..3460a8675bf04 100644 --- a/clang/test/Sema/warn-lifetime-safety.cpp +++ b/clang/test/Sema/warn-lifetime-safety.cpp @@ -440,6 +440,7 @@ void no_error_loan_from_current_iteration(bool cond) { //===----------------------------------------------------------------------===// View Identity(View v [[clang::lifetimebound]]); +MyObj* Identity(MyObj* v [[clang::lifetimebound]]); View Choose(bool cond, View a [[clang::lifetimebound]], View b [[clang::lifetimebound]]); MyObj* GetPointer(const MyObj& obj [[clang::lifetimebound]]); @@ -582,3 +583,75 @@ void lifetimebound_ctor() { } (void)v; } + +// Conditional operator. +void conditional_operator_one_unsafe_branch(bool cond) { + MyObj safe; + MyObj* p = &safe; + { + MyObj temp; + p = cond ? &temp // expected-warning {{object whose reference is captured may not live long enough}} + : &safe; + } // expected-note {{destroyed here}} + + // This is not a use-after-free for any value of `cond` but the analysis + // cannot reason about this and marks the above as a false positive. This + // ensures safety regardless of cond's value. + if (cond) + p = &safe; + (void)*p; // expected-note {{later used here}} +} + +void conditional_operator_two_unsafe_branches(bool cond) { + MyObj* p; + { + MyObj a, b; + p = cond ? &a // expected-warning {{object whose reference is captured does not live long enough}} + : &b; // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note 2 {{destroyed here}} + (void)*p; // expected-note 2 {{later used here}} +} + +void conditional_operator_nested(bool cond) { + MyObj* p; + { + MyObj a, b, c, d; + p = cond ? cond ? &a // expected-warning {{object whose reference is captured does not live long enough}}. + : &b // expected-warning {{object whose reference is captured does not live long enough}}. + : cond ? &c // expected-warning {{object whose reference is captured does not live long enough}}.
+ : &d; // expected-warning {{object whose reference is captured does not live long enough}}. + } // expected-note 4 {{destroyed here}} + (void)*p; // expected-note 4 {{later used here}} +} + +void conditional_operator_lifetimebound(bool cond) { + MyObj* p; + { + MyObj a, b; + p = Identity(cond ? &a // expected-warning {{object whose reference is captured does not live long enough}} + : &b); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note 2 {{destroyed here}} + (void)*p; // expected-note 2 {{later used here}} +} + +void conditional_operator_lifetimebound_nested(bool cond) { + MyObj* p; + { + MyObj a, b; + p = Identity(cond ? Identity(&a) // expected-warning {{object whose reference is captured does not live long enough}} + : Identity(&b)); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note 2 {{destroyed here}} + (void)*p; // expected-note 2 {{later used here}} +} + +void conditional_operator_lifetimebound_nested_deep(bool cond) { + MyObj* p; + { + MyObj a, b, c, d; + p = Identity(cond ? Identity(cond ? &a // expected-warning {{object whose reference is captured does not live long enough}} + : &b) // expected-warning {{object whose reference is captured does not live long enough}} + : Identity(cond ? &c // expected-warning {{object whose reference is captured does not live long enough}} + : &d)); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note 4 {{destroyed here}} + (void)*p; // expected-note 4 {{later used here}} +} diff --git a/clang/unittests/Analysis/LifetimeSafetyTest.cpp b/clang/unittests/Analysis/LifetimeSafetyTest.cpp index 34af476843c0d..9d61d56e078e3 100644 --- a/clang/unittests/Analysis/LifetimeSafetyTest.cpp +++ b/clang/unittests/Analysis/LifetimeSafetyTest.cpp @@ -689,7 +689,6 @@ TEST_F(LifetimeAnalysisTest, GslPointerConstructFromView) { EXPECT_THAT(Origin("q"), HasLoansTo({"a"}, "p1")); } -// FIXME: Handle loans in ternary operator! TEST_F(LifetimeAnalysisTest, GslPointerInConditionalOperator) { SetupTest(R"( void target(bool cond) { @@ -698,7 +697,7 @@ TEST_F(LifetimeAnalysisTest, GslPointerInConditionalOperator) { POINT(p1); } )"); - EXPECT_THAT(Origin("v"), HasLoansTo({}, "p1")); + EXPECT_THAT(Origin("v"), HasLoansTo({"a", "b"}, "p1")); } // FIXME: Handle temporaries. 
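The updated unit-test expectation corresponds to source of roughly this shape (a sketch; `View` in the test is a GSL pointer type, approximated here with `std::string_view`):

```
#include <string>
#include <string_view>

// The analysis now records that `v` may borrow from either owner, so its
// origin holds loans to both `a` and `b` at the test's program point.
std::string_view pick(bool cond, const std::string &a, const std::string &b) {
  std::string_view v = cond ? std::string_view(a) : std::string_view(b);
  return v;
}
```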
From f240a73e8368896eefce92689cbee16eb0efdfc9 Mon Sep 17 00:00:00 2001 From: Amr Hesham Date: Wed, 12 Nov 2025 17:19:27 +0100 Subject: [PATCH 21/34] [CIR] Upstream Load/Store Complex with volatile qualifier (#167216) Upstream support for Load/Store ops for Complex with the volatile qualifier --- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 4 +- .../CodeGen/complex-compound-assignment.cpp | 16 +- clang/test/CIR/CodeGen/complex.cpp | 143 ++++++++++++++++++ 3 files changed, 153 insertions(+), 10 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 047f3599eed03..9ed920085c8c6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -339,7 +339,7 @@ mlir::Value ComplexExprEmitter::emitLoadOfLValue(LValue lv, cgf.cgm.errorNYI(loc, "emitLoadOfLValue with Atomic LV"); const Address srcAddr = lv.getAddress(); - return builder.createLoad(cgf.getLoc(loc), srcAddr); + return builder.createLoad(cgf.getLoc(loc), srcAddr, lv.isVolatileQualified()); } /// EmitStoreOfComplex - Store the specified real/imag parts into the @@ -353,7 +353,7 @@ void ComplexExprEmitter::emitStoreOfComplex(mlir::Location loc, mlir::Value val, } const Address destAddr = lv.getAddress(); - builder.createStore(loc, val, destAddr); + builder.createStore(loc, val, destAddr, lv.isVolatileQualified()); } //===----------------------------------------------------------------------===// diff --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp index a5070f51fad63..f2dbb3cc76ad2 100644 --- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp +++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp @@ -237,18 +237,18 @@ void foo4() { // CXX_CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] // CXX_CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b"] // CXX_CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["c", init] -// CXX_CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex -// CXX_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex +// CXX_CIR: %[[TMP_A:.*]] = cir.load volatile {{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CXX_CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex // CXX_CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_B]], %[[TMP_A]] : !cir.complex -// CXX_CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex, !cir.ptr> -// CXX_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex +// CXX_CIR: cir.store volatile {{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex, !cir.ptr> +// CXX_CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex // CXX_CIR: cir.store{{.*}} %[[TMP_B]], %[[C_ADDR]] : !cir.complex, !cir.ptr // CXX_LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 // CXX_LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 // CXX_LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 -// CXX_LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4 -// CXX_LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4 +// CXX_LLVM: %[[TMP_A:.*]] = load volatile { i32, i32 }, ptr %[[A_ADDR]], align 4 +// CXX_LLVM: %[[TMP_B:.*]] = load volatile { i32, i32 }, ptr %[[B_ADDR]], align 4 // CXX_LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0 // CXX_LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1 // CXX_LLVM: %[[A_REAL:.*]]
= extractvalue { i32, i32 } %[[TMP_A]], 0 @@ -257,8 +257,8 @@ void foo4() { // CXX_LLVM: %[[ADD_IMAG:.*]] = add i32 %[[B_IMAG]], %[[A_IMAG]] // CXX_LLVM: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0 // CXX_LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[ADD_IMAG]], 1 -// CXX_LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4 -// CXX_LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4 +// CXX_LLVM: store volatile { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4 +// CXX_LLVM: %[[TMP_B:.*]] = load volatile { i32, i32 }, ptr %[[B_ADDR]], align 4 // CXX_LLVM: store { i32, i32 } %[[TMP_B]], ptr %[[C_ADDR]], align 4 // CXX_OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4 diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp index 4eab3999dfc42..82c9f2d7aaf26 100644 --- a/clang/test/CIR/CodeGen/complex.cpp +++ b/clang/test/CIR/CodeGen/complex.cpp @@ -1534,3 +1534,146 @@ void imag_literal_gnu_extension() { // OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1 // OGCG: store i32 0, ptr %[[C_REAL_PTR]], align 4 // OGCG: store i32 3, ptr %[[C_IMAG_PTR]], align 4 + +void load_store_volatile() { + volatile double _Complex a; + volatile double _Complex b; + a = b; + + volatile int _Complex c; + volatile int _Complex d; + c = d; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b"] +// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["c"] +// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["d"] +// CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store volatile {{.*}} %[[TMP_B]], %[[A_ADDR]] : !cir.complex, !cir.ptr> +// CIR: %[[TMP_D:.*]] = cir.load volatile {{.*}} %[[D_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[C_ADDR]] : !cir.complex, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8 +// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8 +// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 +// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 +// LLVM: %[[TMP_B:.*]] = load volatile { double, double }, ptr %[[B_ADDR]], align 8 +// LLVM: store volatile { double, double } %[[TMP_B]], ptr %[[A_ADDR]], align 8 +// LLVM: %[[TMP_D:.*]] = load volatile { i32, i32 }, ptr %[[D_ADDR]], align 4 +// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[C_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8 +// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, align 8 +// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4 +// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4 +// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0 +// OGCG: %[[B_REAL:.*]] = load volatile double, ptr %[[B_REAL_PTR]], align 8 +// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1 +// OGCG: %[[B_IMAG:.*]] = load volatile double, ptr %[[B_IMAG_PTR]], align 8 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: store volatile double %[[B_REAL]], ptr %[[A_REAL_PTR]], align 8 +// OGCG: store volatile double %[[B_IMAG]], ptr 
%[[A_IMAG_PTR]], align 8 +// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0 +// OGCG: %[[D_REAL:.*]] = load volatile i32, ptr %[[D_REAL_PTR]], align 4 +// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1 +// OGCG: %[[D_IMAG:.*]] = load volatile i32, ptr %[[D_IMAG_PTR]], align 4 +// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0 +// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1 +// OGCG: store volatile i32 %[[D_REAL]], ptr %[[C_REAL_PTR]], align 4 +// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[C_IMAG_PTR]], align 4 + + +void load_store_volatile_2() { + volatile double _Complex av; + double _Complex a; + av = a; + + double _Complex b; + volatile double _Complex bv; + b = bv; + + int _Complex c; + volatile int _Complex cv; + c = cv; + + volatile int _Complex dv; + int _Complex d; + dv = d; +} + +// CIR: %[[AV_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["av"] +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b"] +// CIR: %[[BV_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["bv"] +// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["c"] +// CIR: %[[CV_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["cv"] +// CIR: %[[DV_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["dv"] +// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["d"] +// CIR: %[[TMP_A:.*]] = cir.load {{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store volatile {{.*}} %[[TMP_A]], %[[AV_ADDR]] : !cir.complex, !cir.ptr> +// CIR: %[[TMP_BV:.*]] = cir.load volatile {{.*}} %[[BV_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store {{.*}} %[[TMP_BV]], %[[B_ADDR]] : !cir.complex, !cir.ptr> +// CIR: %[[TMP_CV:.*]] = cir.load volatile {{.*}} %[[CV_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store {{.*}} %[[TMP_CV]], %[[C_ADDR]] : !cir.complex, !cir.ptr> +// CIR: %[[TMP_D:.*]] = cir.load {{.*}} %[[D_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[DV_ADDR]] : !cir.complex, !cir.ptr> + +// LLVM: %[[AV_ADDR:.*]] = alloca { double, double }, i64 1, align 8 +// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8 +// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8 +// LLVM: %[[BV_ADDR:.*]] = alloca { double, double }, i64 1, align 8 +// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 +// LLVM: %[[CV_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 +// LLVM: %[[DV_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 +// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load { double, double }, ptr %[[A_ADDR]], align 8 +// LLVM: store volatile { double, double } %[[TMP_A]], ptr %[[AV_ADDR]], align 8 +// LLVM: %[[TMP_BV:.*]] = load volatile { double, double }, ptr %[[BV_ADDR]], align 8 +// LLVM: store { double, double } %[[TMP_BV]], ptr %[[B_ADDR]], align 8 +// LLVM: %[[TMP_CV:.*]] = load volatile { i32, i32 }, ptr %[[CV_ADDR]], align 4 +// LLVM: store { i32, i32 } %[[TMP_CV]], ptr %[[C_ADDR]], align 4 +// LLVM: %[[TMP_D:.*]] = load { i32, i32 }, ptr %[[D_ADDR]], align 4 +// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[DV_ADDR]], align 4 + +// OGCG: %[[AV_ADDR:.*]] = alloca { double, double }, align 8 +// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8 +// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, 
align 8 +// OGCG: %[[BV_ADDR:.*]] = alloca { double, double }, align 8 +// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4 +// OGCG: %[[CV_ADDR:.*]] = alloca { i32, i32 }, align 4 +// OGCG: %[[DV_ADDR:.*]] = alloca { i32, i32 }, align 4 +// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load double, ptr %[[A_REAL_PTR]], align 8 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load double, ptr %[[A_IMAG_PTR]], align 8 +// OGCG: %[[AV_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[AV_ADDR]], i32 0, i32 0 +// OGCG: %[[AV_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[AV_ADDR]], i32 0, i32 1 +// OGCG: store volatile double %[[A_REAL]], ptr %[[AV_REAL_PTR]], align 8 +// OGCG: store volatile double %[[A_IMAG]], ptr %[[AV_IMAG_PTR]], align 8 +// OGCG: %[[BV_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[BV_ADDR]], i32 0, i32 0 +// OGCG: %[[BV_REAL:.*]] = load volatile double, ptr %[[BV_REAL_PTR]], align 8 +// OGCG: %[[BV_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[BV_ADDR]], i32 0, i32 1 +// OGCG: %[[BV_IMAG:.*]] = load volatile double, ptr %[[BV_IMAG_PTR]], align 8 +// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0 +// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1 +// OGCG: store double %[[BV_REAL]], ptr %[[B_REAL_PTR]], align 8 +// OGCG: store double %[[BV_IMAG]], ptr %[[B_IMAG_PTR]], align 8 +// OGCG: %[[CV_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CV_ADDR]], i32 0, i32 0 +// OGCG: %[[CV_REAL:.*]] = load volatile i32, ptr %[[CV_REAL_PTR]], align 4 +// OGCG: %[[CV_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CV_ADDR]], i32 0, i32 1 +// OGCG: %[[CV_IMAG:.*]] = load volatile i32, ptr %[[CV_IMAG_PTR]], align 4 +// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0 +// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1 +// OGCG: store i32 %[[CV_REAL]], ptr %[[C_REAL_PTR]], align 4 +// OGCG: store i32 %[[CV_IMAG]], ptr %[[C_IMAG_PTR]], align 4 +// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0 +// OGCG: %[[D_REAL:.*]] = load i32, ptr %[[D_REAL_PTR]], align 4 +// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1 +// OGCG: %[[D_IMAG:.*]] = load i32, ptr %[[D_IMAG_PTR]], align 4 +// OGCG: %[[DV_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[DV_ADDR]], i32 0, i32 0 +// OGCG: %[[DV_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[DV_ADDR]], i32 0, i32 1 +// OGCG: store volatile i32 %[[D_REAL]], ptr %[[DV_REAL_PTR]], align 4 +// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[DV_IMAG_PTR]], align 4 From ec085e5201f013d75104124179c429156bfb6258 Mon Sep 17 00:00:00 2001 From: Luke Hutton Date: Wed, 12 Nov 2025 16:23:02 +0000 Subject: [PATCH 22/34] [mlir][tosa] Add missing ext-mxfp description (#167665) Add a missing description. 
--- mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td | 1 + 1 file changed, 1 insertion(+) diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td index 5b595dd8eb32a..cc23955f31f23 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td @@ -240,6 +240,7 @@ class Tosa_I32EnumAttr; From 4d1f2492d26f8c2fad0eee2a141c7e0bbbc4c868 Mon Sep 17 00:00:00 2001 From: David Green Date: Wed, 12 Nov 2025 16:26:21 +0000 Subject: [PATCH 23/34] [ARM] Use TargetMachine over Subtarget in ARMAsmPrinter (#166329) The subtarget may not be set if no functions are present in the module. Attempt to use the TargetMachine directly in more cases. Fixes #165422 Fixes #167577 --- llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 21 +++++++++++---------- llvm/lib/Target/ARM/ARMSubtarget.cpp | 12 +----------- llvm/lib/Target/ARM/ARMTargetMachine.h | 14 ++++++++++++++ llvm/test/CodeGen/ARM/xxstructor-nodef.ll | 7 +++++++ 4 files changed, 33 insertions(+), 21 deletions(-) create mode 100644 llvm/test/CodeGen/ARM/xxstructor-nodef.ll diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 36b99087e0a32..2d2e62c80c702 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -97,7 +97,8 @@ void ARMAsmPrinter::emitXXStructor(const DataLayout &DL, const Constant *CV) { const MCExpr *E = MCSymbolRefExpr::create( GetARMGVSymbol(GV, ARMII::MO_NO_FLAG), - (Subtarget->isTargetELF() ? ARM::S_TARGET1 : ARM::S_None), OutContext); + (TM.getTargetTriple().isOSBinFormatELF() ? ARM::S_TARGET1 : ARM::S_None), + OutContext); OutStreamer->emitValue(E, Size); } @@ -595,8 +596,7 @@ void ARMAsmPrinter::emitEndOfAsmFile(Module &M) { ARMTargetStreamer &ATS = static_cast(TS); if (OptimizationGoals > 0 && - (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() || - Subtarget->isTargetMuslAEABI())) + (TT.isTargetAEABI() || TT.isTargetGNUAEABI() || TT.isTargetMuslAEABI())) ATS.emitAttribute(ARMBuildAttrs::ABI_optimization_goals, OptimizationGoals); OptimizationGoals = -1; @@ -884,9 +884,10 @@ static uint8_t getModifierSpecifier(ARMCP::ARMCPModifier Modifier) { MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV, unsigned char TargetFlags) { - if (Subtarget->isTargetMachO()) { + const Triple &TT = TM.getTargetTriple(); + if (TT.isOSBinFormatMachO()) { bool IsIndirect = - (TargetFlags & ARMII::MO_NONLAZY) && Subtarget->isGVIndirectSymbol(GV); + (TargetFlags & ARMII::MO_NONLAZY) && getTM().isGVIndirectSymbol(GV); if (!IsIndirect) return getSymbol(GV); @@ -903,9 +904,8 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV, StubSym = MachineModuleInfoImpl::StubValueTy(getSymbol(GV), !GV->hasInternalLinkage()); return MCSym; - } else if (Subtarget->isTargetCOFF()) { - assert(Subtarget->isTargetWindows() && - "Windows is the only supported COFF target"); + } else if (TT.isOSBinFormatCOFF()) { + assert(TT.isOSWindows() && "Windows is the only supported COFF target"); bool IsIndirect = (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)); @@ -932,7 +932,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV, } return MCSym; - } else if (Subtarget->isTargetELF()) { + } else if (TT.isOSBinFormatELF()) { return getSymbolPreferLocal(*GV); } llvm_unreachable("unexpected target"); @@ -978,7 +978,8 @@ void ARMAsmPrinter::emitMachineConstantPoolValue( // On Darwin, const-pool entries may get the "FOO$non_lazy_ptr" mangling, so // flag the 
global as MO_NONLAZY. - unsigned char TF = Subtarget->isTargetMachO() ? ARMII::MO_NONLAZY : 0; + unsigned char TF = + TM.getTargetTriple().isOSBinFormatMachO() ? ARMII::MO_NONLAZY : 0; MCSym = GetARMGVSymbol(GV, TF); } else if (ACPV->isMachineBasicBlock()) { const MachineBasicBlock *MBB = cast(ACPV)->getMBB(); diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp index 58bc338b25856..7ec232ae9bac5 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.cpp +++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp @@ -318,17 +318,7 @@ bool ARMSubtarget::isRWPI() const { } bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const { - if (!TM.shouldAssumeDSOLocal(GV)) - return true; - - // 32 bit macho has no relocation for a-b if a is undefined, even if b is in - // the section that is being relocated. This means we have to use a load even - // for GVs that are known to be local to the dso. - if (isTargetMachO() && TM.isPositionIndependent() && - (GV->isDeclarationForLinker() || GV->hasCommonLinkage())) - return true; - - return false; + return TM.isGVIndirectSymbol(GV); } bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const { diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.h b/llvm/lib/Target/ARM/ARMTargetMachine.h index c417c4c8bae65..1f74e9fdd1dc9 100644 --- a/llvm/lib/Target/ARM/ARMTargetMachine.h +++ b/llvm/lib/Target/ARM/ARMTargetMachine.h @@ -98,6 +98,20 @@ class ARMBaseTargetMachine : public CodeGenTargetMachineImpl { return true; } + bool isGVIndirectSymbol(const GlobalValue *GV) const { + if (!shouldAssumeDSOLocal(GV)) + return true; + + // 32 bit macho has no relocation for a-b if a is undefined, even if b is in + // the section that is being relocated. This means we have to use a load + // even for GVs that are known to be local to the dso. + if (getTargetTriple().isOSBinFormatMachO() && isPositionIndependent() && + (GV->isDeclarationForLinker() || GV->hasCommonLinkage())) + return true; + + return false; + } + yaml::MachineFunctionInfo *createDefaultFuncInfoYAML() const override; yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override; diff --git a/llvm/test/CodeGen/ARM/xxstructor-nodef.ll b/llvm/test/CodeGen/ARM/xxstructor-nodef.ll new file mode 100644 index 0000000000000..db17b2b1c21ab --- /dev/null +++ b/llvm/test/CodeGen/ARM/xxstructor-nodef.ll @@ -0,0 +1,7 @@ +; RUN: llc -mtriple=arm-unknown-linux-gnueabihf < %s | FileCheck %s + +; This test contains a llvm.global_ctors with no other definitions. Make sure we do not crash in that case. +; CHECK: .section .init_array,"aw",%init_array + +declare ccc void @ghczmbignum_GHCziNumziBackendziSelected_init__prof_init() +@llvm.global_ctors = appending global [1 x {i32, void ()*, i8* }] [{i32, void ()*, i8* }{i32 65535, void ()* @ghczmbignum_GHCziNumziBackendziSelected_init__prof_init, i8* null } ] From 7d5c11f6e926b70be0c47c524657ded1c8d074b9 Mon Sep 17 00:00:00 2001 From: Victor Mustya Date: Wed, 12 Nov 2025 08:36:57 -0800 Subject: [PATCH 24/34] [Windows] Adjust exported symbols in static builds with plugin support (#165946) When building LLVM and Clang on Windows with plugin support enabled, some symbols are redundantly exported due to template instantiations and lambda functions. These symbols are not needed in the importing translation units and can be safely removed. At the same time, the global variables and static data members are needed for correct linking and runtime behavior, so they are added to the export list.
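As a concrete sketch of the classification (hypothetical symbol names, mangled per the MSVC scheme the script parses):

```
namespace llvm {
// Global variable: mangles to ?PluginHookCount@llvm@@3HA, which matches the
// new @llvm@@[0-3] pattern (3 = global) and is therefore kept.
int PluginHookCount;

// Public static data member: ?NumPlugins@Registry@llvm@@2HA
// (2 = public static member), also kept.
struct Registry {
  static int NumPlugins;
};

// Template instantiation: ??$identity@H@llvm@@YAHH@Z starts with ??$ and is
// dropped; the importing translation unit re-instantiates it as needed.
template <typename T> T identity(T V) { return V; }
int Instantiated = identity(1);
} // namespace llvm
```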
Also, the `llvm::<Type>::dump()` and `clang::<Type>::dump()` methods are not needed for linking in importing translation units, because they are only available in debug builds and should only be used for debugging purposes. Therefore, these methods are removed from the export list. --- llvm/utils/extract_symbols.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py index 0cbfd2e2910e1..5254d16e410d5 100755 --- a/llvm/utils/extract_symbols.py +++ b/llvm/utils/extract_symbols.py @@ -97,6 +97,16 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration): # don't elif symbol.startswith("??_G") or symbol.startswith("??_E"): return None + # Delete template instantiations. These start with ?$ and can be discarded + # because they will be instantiated in the importing translation unit if + # needed. + elif symbol.startswith("??$"): + return None + # Delete lambda object constructors and operator() functions. These start + # with ??R ::= exceptions are not allowed - elif re.search(r"(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$", symbol): + elif re.search(r"@(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$", symbol): + # Remove llvm::<Type>::dump and clang::<Type>::dump methods because + # they are used for debugging only. + if symbol.startswith("?dump@"): + return None + return symbol + # Keep mangled global variables and static class members in llvm:: namespace. + # These have a type mangling that looks like (this is derived from + # clang/lib/AST/MicrosoftMangle.cpp): + # <variable-encoding> ::= <storage-class> <variable-type> + # <storage-class> ::= 0 # private static member + # ::= 1 # protected static member + # ::= 2 # public static member + # ::= 3 # global + # ::= 4 # static local + # <variable-type> ::= <type> <cvr-qualifiers> + # ::= <type> <pointee-cvr-qualifiers> # pointers, references + elif re.search(r"@llvm@@[0-3].*$", symbol): return symbol return None From 0845c5a7c44c4b42f95832ec3703eb97e47e22b2 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 12 Nov 2025 08:43:55 -0800 Subject: [PATCH 25/34] [Mips] Remove implicit conversions of MCRegister to unsigned. NFC (#167645) --- .../Target/Mips/AsmParser/MipsAsmParser.cpp | 79 +++---- .../Mips/Disassembler/MipsDisassembler.cpp | 196 +++++++++--------- .../Mips/MCTargetDesc/MipsOptionRecord.cpp | 2 +- .../Mips/MCTargetDesc/MipsTargetStreamer.cpp | 54 ++--- .../Mips/MCTargetDesc/MipsTargetStreamer.h | 34 +-- llvm/lib/Target/Mips/MipsOptionRecord.h | 2 +- 6 files changed, 184 insertions(+), 183 deletions(-) diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index f588e56f2ea18..6b28531764db9 100644 --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -151,7 +151,7 @@ class MipsAsmParser : public MCTargetAsmParser { bool IsCpRestoreSet; bool CurForbiddenSlotAttr; int CpRestoreOffset; - unsigned GPReg; + MCRegister GPReg; unsigned CpSaveLocation; /// If true, then CpSaveLocation is a register, otherwise it's an offset.
bool CpSaveLocationIsRegister; @@ -823,7 +823,7 @@ class MipsOperand : public MCParsedAsmOperand { }; struct RegListOp { - SmallVector *List; + SmallVector *List; }; union { @@ -1377,15 +1377,15 @@ class MipsOperand : public MCParsedAsmOperand { if (Size < 2 || Size > 5) return false; - unsigned R0 = RegList.List->front(); - unsigned R1 = RegList.List->back(); + MCRegister R0 = RegList.List->front(); + MCRegister R1 = RegList.List->back(); if (!((R0 == Mips::S0 && R1 == Mips::RA) || (R0 == Mips::S0_64 && R1 == Mips::RA_64))) return false; - int PrevReg = *RegList.List->begin(); + MCRegister PrevReg = RegList.List->front(); for (int i = 1; i < Size - 1; i++) { - int Reg = (*(RegList.List))[i]; + MCRegister Reg = (*(RegList.List))[i]; if ( Reg != PrevReg + 1) return false; PrevReg = Reg; @@ -1447,7 +1447,7 @@ class MipsOperand : public MCParsedAsmOperand { return static_cast(getMemOff())->getValue(); } - const SmallVectorImpl &getRegList() const { + const SmallVectorImpl &getRegList() const { assert((Kind == k_RegList) && "Invalid access!"); return *(RegList.List); } @@ -1548,12 +1548,13 @@ class MipsOperand : public MCParsedAsmOperand { } static std::unique_ptr - CreateRegList(SmallVectorImpl &Regs, SMLoc StartLoc, SMLoc EndLoc, + CreateRegList(SmallVectorImpl &Regs, SMLoc StartLoc, SMLoc EndLoc, MipsAsmParser &Parser) { - assert(Regs.size() > 0 && "Empty list not allowed"); + assert(!Regs.empty() && "Empty list not allowed"); auto Op = std::make_unique(k_RegList, Parser); - Op->RegList.List = new SmallVector(Regs.begin(), Regs.end()); + Op->RegList.List = + new SmallVector(Regs.begin(), Regs.end()); Op->StartLoc = StartLoc; Op->EndLoc = EndLoc; return Op; @@ -1684,7 +1685,7 @@ class MipsOperand : public MCParsedAsmOperand { case k_RegList: OS << "RegList< "; for (auto Reg : (*RegList.List)) - OS << Reg << " "; + OS << Reg.id() << " "; OS << ">"; break; } @@ -6848,9 +6849,9 @@ ParseStatus MipsAsmParser::parseInvNum(OperandVector &Operands) { ParseStatus MipsAsmParser::parseRegisterList(OperandVector &Operands) { MCAsmParser &Parser = getParser(); - SmallVector Regs; - unsigned RegNo; - unsigned PrevReg = Mips::NoRegister; + SmallVector Regs; + MCRegister Reg; + MCRegister PrevReg; bool RegRange = false; SmallVector, 8> TmpOperands; @@ -6860,46 +6861,47 @@ ParseStatus MipsAsmParser::parseRegisterList(OperandVector &Operands) { SMLoc S = Parser.getTok().getLoc(); while (parseAnyRegister(TmpOperands).isSuccess()) { SMLoc E = getLexer().getLoc(); - MipsOperand &Reg = static_cast(*TmpOperands.back()); - RegNo = isGP64bit() ? Reg.getGPR64Reg() : Reg.getGPR32Reg(); + MipsOperand &RegOpnd = static_cast(*TmpOperands.back()); + Reg = isGP64bit() ? RegOpnd.getGPR64Reg() : RegOpnd.getGPR32Reg(); if (RegRange) { // Remove last register operand because registers from register range // should be inserted first. 
- if ((isGP64bit() && RegNo == Mips::RA_64) || - (!isGP64bit() && RegNo == Mips::RA)) { - Regs.push_back(RegNo); + if ((isGP64bit() && Reg == Mips::RA_64) || + (!isGP64bit() && Reg == Mips::RA)) { + Regs.push_back(Reg); } else { - unsigned TmpReg = PrevReg + 1; - while (TmpReg <= RegNo) { + MCRegister TmpReg = PrevReg + 1; + while (TmpReg <= Reg) { if ((((TmpReg < Mips::S0) || (TmpReg > Mips::S7)) && !isGP64bit()) || (((TmpReg < Mips::S0_64) || (TmpReg > Mips::S7_64)) && isGP64bit())) return Error(E, "invalid register operand"); PrevReg = TmpReg; - Regs.push_back(TmpReg++); + Regs.push_back(TmpReg); + TmpReg = TmpReg.id() + 1; } } RegRange = false; } else { - if ((PrevReg == Mips::NoRegister) && - ((isGP64bit() && (RegNo != Mips::S0_64) && (RegNo != Mips::RA_64)) || - (!isGP64bit() && (RegNo != Mips::S0) && (RegNo != Mips::RA)))) + if (!PrevReg.isValid() && + ((isGP64bit() && (Reg != Mips::S0_64) && (Reg != Mips::RA_64)) || + (!isGP64bit() && (Reg != Mips::S0) && (Reg != Mips::RA)))) return Error(E, "$16 or $31 expected"); - if (!(((RegNo == Mips::FP || RegNo == Mips::RA || - (RegNo >= Mips::S0 && RegNo <= Mips::S7)) && + if (!(((Reg == Mips::FP || Reg == Mips::RA || + (Reg >= Mips::S0 && Reg <= Mips::S7)) && !isGP64bit()) || - ((RegNo == Mips::FP_64 || RegNo == Mips::RA_64 || - (RegNo >= Mips::S0_64 && RegNo <= Mips::S7_64)) && + ((Reg == Mips::FP_64 || Reg == Mips::RA_64 || + (Reg >= Mips::S0_64 && Reg <= Mips::S7_64)) && isGP64bit()))) return Error(E, "invalid register operand"); - if ((PrevReg != Mips::NoRegister) && (RegNo != PrevReg + 1) && - ((RegNo != Mips::FP && RegNo != Mips::RA && !isGP64bit()) || - (RegNo != Mips::FP_64 && RegNo != Mips::RA_64 && isGP64bit()))) + if (PrevReg.isValid() && (Reg != PrevReg + 1) && + ((Reg != Mips::FP && Reg != Mips::RA && !isGP64bit()) || + (Reg != Mips::FP_64 && Reg != Mips::RA_64 && isGP64bit()))) return Error(E, "consecutive register numbers expected"); - Regs.push_back(RegNo); + Regs.push_back(Reg); } if (Parser.getTok().is(AsmToken::Minus)) @@ -6913,7 +6915,7 @@ ParseStatus MipsAsmParser::parseRegisterList(OperandVector &Operands) { if (Parser.getTok().isNot(AsmToken::Dollar)) break; - PrevReg = RegNo; + PrevReg = Reg; } SMLoc E = Parser.getTok().getLoc(); @@ -7780,7 +7782,7 @@ bool MipsAsmParser::parseDirectiveCpLocal(SMLoc Loc) { } getParser().Lex(); // Consume the EndOfStatement. 
- unsigned NewReg = RegOpnd.getGPR32Reg(); + MCRegister NewReg = RegOpnd.getGPR32Reg(); if (IsPicEnabled) GPReg = NewReg; @@ -7835,7 +7837,6 @@ bool MipsAsmParser::parseDirectiveCpRestore(SMLoc Loc) { bool MipsAsmParser::parseDirectiveCPSetup() { MCAsmParser &Parser = getParser(); - unsigned FuncReg; unsigned Save; bool SaveIsReg = true; @@ -7852,7 +7853,7 @@ bool MipsAsmParser::parseDirectiveCPSetup() { return false; } - FuncReg = FuncRegOpnd.getGPR32Reg(); + MCRegister FuncReg = FuncRegOpnd.getGPR32Reg(); TmpReg.clear(); if (!eatComma("unexpected token, expected comma")) @@ -7878,7 +7879,7 @@ bool MipsAsmParser::parseDirectiveCPSetup() { reportParseError(SaveOpnd.getStartLoc(), "invalid register"); return false; } - Save = SaveOpnd.getGPR32Reg(); + Save = SaveOpnd.getGPR32Reg().id(); } if (!eatComma("unexpected token, expected comma")) @@ -8696,7 +8697,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { "expected general purpose register"); return false; } - unsigned StackReg = StackRegOpnd.getGPR32Reg(); + MCRegister StackReg = StackRegOpnd.getGPR32Reg(); if (Parser.getTok().is(AsmToken::Comma)) Parser.Lex(); diff --git a/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp index 12e31c07aa15a..fd9eb9b8fe9a3 100644 --- a/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp +++ b/llvm/lib/Target/Mips/Disassembler/MipsDisassembler.cpp @@ -103,7 +103,7 @@ LLVMInitializeMipsDisassembler() { createMipselDisassembler); } -static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo) { +static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo) { const MCRegisterInfo *RegInfo = D->getContext().getRegisterInfo(); return RegInfo->getRegClass(RC).getRegister(RegNo); } @@ -123,7 +123,7 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 30 || RegNo % 2) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::AFGR64RegClassID, RegNo / 2); + MCRegister Reg = getReg(Decoder, Mips::AFGR64RegClassID, RegNo / 2); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -134,7 +134,7 @@ static DecodeStatus DecodeACC64DSPRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo >= 4) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::ACC64DSPRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::ACC64DSPRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -145,7 +145,7 @@ static DecodeStatus DecodeHI32DSPRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo >= 4) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::HI32DSPRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::HI32DSPRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -156,7 +156,7 @@ static DecodeStatus DecodeLO32DSPRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo >= 4) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::LO32DSPRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::LO32DSPRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -167,7 +167,7 @@ static DecodeStatus DecodeMSA128BRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::MSA128BRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::MSA128BRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); 
return MCDisassembler::Success; } @@ -178,7 +178,7 @@ static DecodeStatus DecodeMSA128HRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::MSA128HRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::MSA128HRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -189,7 +189,7 @@ static DecodeStatus DecodeMSA128WRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::MSA128WRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::MSA128WRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -200,7 +200,7 @@ static DecodeStatus DecodeMSA128DRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::MSA128DRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::MSA128DRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -211,7 +211,7 @@ static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 7) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::MSACtrlRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::MSACtrlRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -222,7 +222,7 @@ static DecodeStatus DecodeCOP0RegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::COP0RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::COP0RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -233,7 +233,7 @@ static DecodeStatus DecodeCOP2RegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::COP2RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::COP2RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -881,7 +881,7 @@ static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::GPR64RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::GPR64RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -891,7 +891,7 @@ static DecodeStatus DecodeGPRMM16RegisterClass(MCInst &Inst, unsigned RegNo, const MCDisassembler *Decoder) { if (RegNo > 7) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::GPRMM16RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::GPRMM16RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -901,7 +901,7 @@ DecodeGPRMM16ZeroRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, const MCDisassembler *Decoder) { if (RegNo > 7) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::GPRMM16ZeroRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::GPRMM16ZeroRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -911,7 +911,7 @@ DecodeGPRMM16MovePRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, const MCDisassembler *Decoder) { if (RegNo > 7) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::GPRMM16MovePRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, 
Mips::GPRMM16MovePRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -948,7 +948,7 @@ static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst, unsigned RegNo, const MCDisassembler *Decoder) { if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -974,7 +974,7 @@ static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::FGR64RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::FGR64RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -985,7 +985,7 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::FGR32RegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::FGR32RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -995,7 +995,7 @@ static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst, unsigned RegNo, const MCDisassembler *Decoder) { if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::CCRRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::CCRRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -1005,7 +1005,7 @@ static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst, unsigned RegNo, const MCDisassembler *Decoder) { if (RegNo > 7) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::FCCRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::FCCRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -1016,7 +1016,7 @@ static DecodeStatus DecodeFGRCCRegisterClass(MCInst &Inst, unsigned RegNo, if (RegNo > 31) return MCDisassembler::Fail; - unsigned Reg = getReg(Decoder, Mips::FGRCCRegClassID, RegNo); + MCRegister Reg = getReg(Decoder, Mips::FGRCCRegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -1024,11 +1024,11 @@ static DecodeStatus DecodeFGRCCRegisterClass(MCInst &Inst, unsigned RegNo, static DecodeStatus DecodeMem(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Reg = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned RegNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); if (Inst.getOpcode() == Mips::SC || Inst.getOpcode() == Mips::SC64 || Inst.getOpcode() == Mips::SCD) @@ -1044,14 +1044,14 @@ static DecodeStatus DecodeMem(MCInst &Inst, unsigned Insn, uint64_t Address, static DecodeStatus DecodeMemEVA(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<9>(Insn >> 7); - unsigned Reg = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned RegNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = 
fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); - if (Inst.getOpcode() == Mips::SCE) - Inst.addOperand(MCOperand::createReg(Reg)); + if (Inst.getOpcode() == Mips::SCE) + Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1064,11 +1064,11 @@ static DecodeStatus DecodeLoadByte15(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Base = fieldFromInstruction(Insn, 16, 5); - unsigned Reg = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); + unsigned RegNo = fieldFromInstruction(Insn, 21, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1081,9 +1081,9 @@ static DecodeStatus DecodeCacheOp(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); unsigned Hint = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Offset)); @@ -1096,10 +1096,10 @@ static DecodeStatus DecodeCacheOpMM(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<12>(Insn & 0xfff); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); unsigned Hint = fieldFromInstruction(Insn, 21, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Offset)); @@ -1112,10 +1112,10 @@ static DecodeStatus DecodePrefeOpMM(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<9>(Insn & 0x1ff); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); unsigned Hint = fieldFromInstruction(Insn, 21, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Offset)); @@ -1129,9 +1129,9 @@ static DecodeStatus DecodeCacheeOp_CacheOpR6(MCInst &Inst, unsigned Insn, const MCDisassembler *Decoder) { int Offset = SignExtend32<9>(Insn >> 7); unsigned Hint = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Offset)); @@ -1143,9 +1143,9 @@ static DecodeStatus DecodeCacheeOp_CacheOpR6(MCInst 
&Inst, unsigned Insn, static DecodeStatus DecodeSyncI(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Offset)); @@ -1157,9 +1157,9 @@ static DecodeStatus DecodeSyncI_MM(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Offset)); @@ -1170,9 +1170,9 @@ static DecodeStatus DecodeSyncI_MM(MCInst &Inst, unsigned Insn, static DecodeStatus DecodeSynciR6(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Immediate = SignExtend32<16>(Insn & 0xffff); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Base)); Inst.addOperand(MCOperand::createImm(Immediate)); @@ -1184,11 +1184,11 @@ static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<10>(fieldFromInstruction(Insn, 16, 10)); - unsigned Reg = fieldFromInstruction(Insn, 6, 5); - unsigned Base = fieldFromInstruction(Insn, 11, 5); + unsigned RegNo = fieldFromInstruction(Insn, 6, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 11, 5); - Reg = getReg(Decoder, Mips::MSA128BRegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::MSA128BRegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1288,9 +1288,9 @@ static DecodeStatus DecodeMemMMSPImm5Lsl2(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { unsigned Offset = Insn & 0x1F; - unsigned Reg = fieldFromInstruction(Insn, 5, 5); + unsigned RegNo = fieldFromInstruction(Insn, 5, 5); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Mips::SP)); @@ -1303,9 +1303,9 @@ static DecodeStatus DecodeMemMMGPImm7Lsl2(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { unsigned Offset = Insn & 0x7F; - unsigned Reg = fieldFromInstruction(Insn, 7, 3); + unsigned RegNo = fieldFromInstruction(Insn, 7, 3); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Mips::GP)); @@ -1342,11 +1342,11 @@ static DecodeStatus DecodeMemMMImm9(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<9>(Insn & 0x1ff); - unsigned Reg = fieldFromInstruction(Insn, 21, 
5); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned RegNo = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); if (Inst.getOpcode() == Mips::SCE_MM || Inst.getOpcode() == Mips::SC_MMR6) Inst.addOperand(MCOperand::createReg(Reg)); @@ -1362,11 +1362,11 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<12>(Insn & 0x0fff); - unsigned Reg = fieldFromInstruction(Insn, 21, 5); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned RegNo = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); switch (Inst.getOpcode()) { case Mips::SWM32_MM: @@ -1396,11 +1396,11 @@ static DecodeStatus DecodeMemMMImm16(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Reg = fieldFromInstruction(Insn, 21, 5); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned RegNo = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); - Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1412,11 +1412,11 @@ static DecodeStatus DecodeMemMMImm16(MCInst &Inst, unsigned Insn, static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Reg = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned RegNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::FGR64RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1431,11 +1431,11 @@ static DecodeStatus DecodeFMemMMR2(MCInst &Inst, unsigned Insn, // This function is the same as DecodeFMem but with the Reg and Base fields // swapped according to microMIPS spec. 
int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Base = fieldFromInstruction(Insn, 16, 5); - unsigned Reg = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); + unsigned RegNo = fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::FGR64RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1447,11 +1447,11 @@ static DecodeStatus DecodeFMemMMR2(MCInst &Inst, unsigned Insn, static DecodeStatus DecodeFMem2(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Reg = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned RegNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::COP2RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::COP2RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1463,11 +1463,11 @@ static DecodeStatus DecodeFMem2(MCInst &Inst, unsigned Insn, uint64_t Address, static DecodeStatus DecodeFMem3(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<16>(Insn & 0xffff); - unsigned Reg = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned RegNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Reg = getReg(Decoder, Mips::COP3RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::COP3RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1480,11 +1480,11 @@ static DecodeStatus DecodeFMemCop2R6(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<11>(Insn & 0x07ff); - unsigned Reg = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 11, 5); + unsigned RegNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 11, 5); - Reg = getReg(Decoder, Mips::COP2RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::COP2RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1497,11 +1497,11 @@ static DecodeStatus DecodeFMemCop2MMR6(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int Offset = SignExtend32<11>(Insn & 0x07ff); - unsigned Reg = fieldFromInstruction(Insn, 21, 5); - unsigned Base = fieldFromInstruction(Insn, 16, 5); + unsigned RegNo = fieldFromInstruction(Insn, 21, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 16, 5); - Reg = getReg(Decoder, Mips::COP2RegClassID, Reg); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Reg = getReg(Decoder, Mips::COP2RegClassID, RegNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, 
BaseNo); Inst.addOperand(MCOperand::createReg(Reg)); Inst.addOperand(MCOperand::createReg(Base)); @@ -1514,11 +1514,11 @@ static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { int64_t Offset = SignExtend64<9>((Insn >> 7) & 0x1ff); - unsigned Rt = fieldFromInstruction(Insn, 16, 5); - unsigned Base = fieldFromInstruction(Insn, 21, 5); + unsigned RtNo = fieldFromInstruction(Insn, 16, 5); + unsigned BaseNo = fieldFromInstruction(Insn, 21, 5); - Rt = getReg(Decoder, Mips::GPR32RegClassID, Rt); - Base = getReg(Decoder, Mips::GPR32RegClassID, Base); + MCRegister Rt = getReg(Decoder, Mips::GPR32RegClassID, RtNo); + MCRegister Base = getReg(Decoder, Mips::GPR32RegClassID, BaseNo); if(Inst.getOpcode() == Mips::SC_R6 || Inst.getOpcode() == Mips::SCD_R6){ Inst.addOperand(MCOperand::createReg(Rt)); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp index 6b013de274772..fd8eb33e20b26 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp @@ -67,7 +67,7 @@ void MipsRegInfoRecord::EmitMipsOptionRecord() { Streamer->popSection(); } -void MipsRegInfoRecord::SetPhysRegUsed(unsigned Reg, +void MipsRegInfoRecord::SetPhysRegUsed(MCRegister Reg, const MCRegisterInfo *MCRegInfo) { unsigned Value = 0; diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp index 1e1b9703d8062..01f18acf050d7 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp @@ -126,9 +126,9 @@ void MipsTargetStreamer::emitDirectiveSetDspr2() { forbidModuleDirective(); } void MipsTargetStreamer::emitDirectiveSetNoDsp() { forbidModuleDirective(); } void MipsTargetStreamer::emitDirectiveSetMips3D() { forbidModuleDirective(); } void MipsTargetStreamer::emitDirectiveSetNoMips3D() { forbidModuleDirective(); } -void MipsTargetStreamer::emitDirectiveCpAdd(unsigned RegNo) {} -void MipsTargetStreamer::emitDirectiveCpLoad(unsigned RegNo) {} -void MipsTargetStreamer::emitDirectiveCpLocal(unsigned RegNo) { +void MipsTargetStreamer::emitDirectiveCpAdd(MCRegister Reg) {} +void MipsTargetStreamer::emitDirectiveCpLoad(MCRegister Reg) {} +void MipsTargetStreamer::emitDirectiveCpLocal(MCRegister Reg) { // .cplocal $reg // This directive forces to use the alternate register for context pointer. // For example @@ -141,17 +141,17 @@ void MipsTargetStreamer::emitDirectiveCpLocal(unsigned RegNo) { if (!getABI().IsN32() && !getABI().IsN64()) return; - GPReg = RegNo; + GPReg = Reg; forbidModuleDirective(); } bool MipsTargetStreamer::emitDirectiveCpRestore( - int Offset, function_ref<unsigned()> GetATReg, SMLoc IDLoc, + int Offset, function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI) { forbidModuleDirective(); return true; } -void MipsTargetStreamer::emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset, +void MipsTargetStreamer::emitDirectiveCpsetup(MCRegister Reg, int RegOrOffset, const MCSymbol &Sym, bool IsReg) { } void MipsTargetStreamer::emitDirectiveCpreturn(unsigned SaveLocation, @@ -324,7 +324,7 @@ void MipsTargetStreamer::emitGPRestore(int Offset, SMLoc IDLoc, /// Emit a store instruction with an immediate offset.
void MipsTargetStreamer::emitStoreWithImmOffset( unsigned Opcode, MCRegister SrcReg, MCRegister BaseReg, int64_t Offset, - function_ref<unsigned()> GetATReg, SMLoc IDLoc, + function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI) { if (isInt<16>(Offset)) { emitRRI(Opcode, SrcReg, BaseReg, Offset, IDLoc, STI); @@ -729,38 +729,38 @@ void MipsTargetAsmStreamer::emitFMask(unsigned FPUBitmask, OS << "," << FPUTopSavedRegOff << '\n'; } -void MipsTargetAsmStreamer::emitDirectiveCpAdd(unsigned RegNo) { +void MipsTargetAsmStreamer::emitDirectiveCpAdd(MCRegister Reg) { OS << "\t.cpadd\t$" - << StringRef(MipsInstPrinter::getRegisterName(RegNo)).lower() << "\n"; + << StringRef(MipsInstPrinter::getRegisterName(Reg)).lower() << "\n"; forbidModuleDirective(); } -void MipsTargetAsmStreamer::emitDirectiveCpLoad(unsigned RegNo) { +void MipsTargetAsmStreamer::emitDirectiveCpLoad(MCRegister Reg) { OS << "\t.cpload\t$" - << StringRef(MipsInstPrinter::getRegisterName(RegNo)).lower() << "\n"; + << StringRef(MipsInstPrinter::getRegisterName(Reg)).lower() << "\n"; forbidModuleDirective(); } -void MipsTargetAsmStreamer::emitDirectiveCpLocal(unsigned RegNo) { +void MipsTargetAsmStreamer::emitDirectiveCpLocal(MCRegister Reg) { OS << "\t.cplocal\t$" - << StringRef(MipsInstPrinter::getRegisterName(RegNo)).lower() << "\n"; - MipsTargetStreamer::emitDirectiveCpLocal(RegNo); + << StringRef(MipsInstPrinter::getRegisterName(Reg)).lower() << "\n"; + MipsTargetStreamer::emitDirectiveCpLocal(Reg); } bool MipsTargetAsmStreamer::emitDirectiveCpRestore( - int Offset, function_ref<unsigned()> GetATReg, SMLoc IDLoc, + int Offset, function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI) { MipsTargetStreamer::emitDirectiveCpRestore(Offset, GetATReg, IDLoc, STI); OS << "\t.cprestore\t" << Offset << "\n"; return true; } -void MipsTargetAsmStreamer::emitDirectiveCpsetup(unsigned RegNo, +void MipsTargetAsmStreamer::emitDirectiveCpsetup(MCRegister Reg, int RegOrOffset, const MCSymbol &Sym, bool IsReg) { OS << "\t.cpsetup\t$" - << StringRef(MipsInstPrinter::getRegisterName(RegNo)).lower() << ", "; + << StringRef(MipsInstPrinter::getRegisterName(Reg)).lower() << ", "; if (IsReg) OS << "$" @@ -1229,18 +1229,18 @@ void MipsTargetELFStreamer::emitFMask(unsigned FPUBitmask, FPROffset = FPUTopSavedRegOff; } -void MipsTargetELFStreamer::emitDirectiveCpAdd(unsigned RegNo) { +void MipsTargetELFStreamer::emitDirectiveCpAdd(MCRegister Reg) { // .cpadd $reg // This directive inserts code to add $gp to the argument's register // when support for position independent code is enabled.
if (!Pic) return; - emitAddu(RegNo, RegNo, GPReg, getABI().IsN64(), &STI); + emitAddu(Reg, Reg, GPReg, getABI().IsN64(), &STI); forbidModuleDirective(); } -void MipsTargetELFStreamer::emitDirectiveCpLoad(unsigned RegNo) { +void MipsTargetELFStreamer::emitDirectiveCpLoad(MCRegister Reg) { // .cpload $reg // This directive expands to: // lui $gp, %hi(_gp_disp) @@ -1283,19 +1283,19 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsigned RegNo) { TmpInst.setOpcode(Mips::ADDu); TmpInst.addOperand(MCOperand::createReg(GPReg)); TmpInst.addOperand(MCOperand::createReg(GPReg)); - TmpInst.addOperand(MCOperand::createReg(RegNo)); + TmpInst.addOperand(MCOperand::createReg(Reg)); getStreamer().emitInstruction(TmpInst, STI); forbidModuleDirective(); } -void MipsTargetELFStreamer::emitDirectiveCpLocal(unsigned RegNo) { +void MipsTargetELFStreamer::emitDirectiveCpLocal(MCRegister Reg) { if (Pic) - MipsTargetStreamer::emitDirectiveCpLocal(RegNo); + MipsTargetStreamer::emitDirectiveCpLocal(Reg); } bool MipsTargetELFStreamer::emitDirectiveCpRestore( - int Offset, function_ref<unsigned()> GetATReg, SMLoc IDLoc, + int Offset, function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI) { MipsTargetStreamer::emitDirectiveCpRestore(Offset, GetATReg, IDLoc, STI); // .cprestore offset @@ -1315,7 +1315,7 @@ bool MipsTargetELFStreamer::emitDirectiveCpRestore( return true; } -void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo, +void MipsTargetELFStreamer::emitDirectiveCpsetup(MCRegister Reg, int RegOrOffset, const MCSymbol &Sym, bool IsReg) { @@ -1353,9 +1353,9 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo, // (d)addu $gp, $gp, $funcreg if (getABI().IsN32()) - emitRRR(Mips::ADDu, GPReg, GPReg, RegNo, SMLoc(), &STI); + emitRRR(Mips::ADDu, GPReg, GPReg, Reg, SMLoc(), &STI); else - emitRRR(Mips::DADDu, GPReg, GPReg, RegNo, SMLoc(), &STI); + emitRRR(Mips::DADDu, GPReg, GPReg, Reg, SMLoc(), &STI); } void MipsTargetELFStreamer::emitDirectiveCpreturn(unsigned SaveLocation, diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.h index b726a80ce6b72..71b5d165a9cb3 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.h @@ -98,13 +98,13 @@ class MipsTargetStreamer : public MCTargetStreamer { virtual void emitDirectiveSetHardFloat(); // PIC support - virtual void emitDirectiveCpAdd(unsigned RegNo); - virtual void emitDirectiveCpLoad(unsigned RegNo); - virtual void emitDirectiveCpLocal(unsigned RegNo); + virtual void emitDirectiveCpAdd(MCRegister Reg); + virtual void emitDirectiveCpLoad(MCRegister Reg); + virtual void emitDirectiveCpLocal(MCRegister Reg); virtual bool emitDirectiveCpRestore(int Offset, - function_ref<unsigned()> GetATReg, + function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI); - virtual void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset, + virtual void emitDirectiveCpsetup(MCRegister Reg, int RegOrOffset, const MCSymbol &Sym, bool IsReg); virtual void emitDirectiveCpreturn(unsigned SaveLocation, bool SaveLocationIsRegister); @@ -164,7 +164,7 @@ class MipsTargetStreamer : public MCTargetStreamer { /// by reporting an error.
void emitStoreWithImmOffset(unsigned Opcode, MCRegister SrcReg, MCRegister BaseReg, int64_t Offset, - function_ref<unsigned()> GetATReg, SMLoc IDLoc, + function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI); void emitLoadWithImmOffset(unsigned Opcode, MCRegister DstReg, MCRegister BaseReg, int64_t Offset, @@ -205,7 +205,7 @@ class MipsTargetStreamer : public MCTargetStreamer { bool FrameInfoSet; int FrameOffset; unsigned FrameReg; - unsigned GPReg; + MCRegister GPReg; unsigned ReturnReg; private: @@ -290,9 +290,9 @@ class MipsTargetAsmStreamer : public MipsTargetStreamer { void emitDirectiveSetHardFloat() override; // PIC support - void emitDirectiveCpAdd(unsigned RegNo) override; - void emitDirectiveCpLoad(unsigned RegNo) override; - void emitDirectiveCpLocal(unsigned RegNo) override; + void emitDirectiveCpAdd(MCRegister Reg) override; + void emitDirectiveCpLoad(MCRegister Reg) override; + void emitDirectiveCpLocal(MCRegister Reg) override; /// Emit a .cprestore directive. If the offset is out of range then it will /// be synthesized using the assembler temporary. @@ -301,9 +301,9 @@ class MipsTargetAsmStreamer : public MipsTargetStreamer { /// temporary and is only called when the assembler temporary is required. It /// must handle the case where no assembler temporary is available (typically /// by reporting an error). - bool emitDirectiveCpRestore(int Offset, function_ref<unsigned()> GetATReg, + bool emitDirectiveCpRestore(int Offset, function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI) override; - void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset, + void emitDirectiveCpsetup(MCRegister Reg, int RegOrOffset, const MCSymbol &Sym, bool IsReg) override; void emitDirectiveCpreturn(unsigned SaveLocation, bool SaveLocationIsRegister) override; @@ -370,12 +370,12 @@ class MipsTargetELFStreamer : public MipsTargetStreamer { void emitFMask(unsigned FPUBitmask, int FPUTopSavedRegOff) override; // PIC support - void emitDirectiveCpAdd(unsigned RegNo) override; - void emitDirectiveCpLoad(unsigned RegNo) override; - void emitDirectiveCpLocal(unsigned RegNo) override; - bool emitDirectiveCpRestore(int Offset, function_ref<unsigned()> GetATReg, + void emitDirectiveCpAdd(MCRegister Reg) override; + void emitDirectiveCpLoad(MCRegister Reg) override; + void emitDirectiveCpLocal(MCRegister Reg) override; + bool emitDirectiveCpRestore(int Offset, function_ref<MCRegister()> GetATReg, SMLoc IDLoc, const MCSubtargetInfo *STI) override; - void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset, + void emitDirectiveCpsetup(MCRegister Reg, int RegOrOffset, const MCSymbol &Sym, bool IsReg) override; void emitDirectiveCpreturn(unsigned SaveLocation, bool SaveLocationIsRegister) override; diff --git a/llvm/lib/Target/Mips/MipsOptionRecord.h b/llvm/lib/Target/Mips/MipsOptionRecord.h index 7897095ef8941..2107baf9f14e5 100644 --- a/llvm/lib/Target/Mips/MipsOptionRecord.h +++ b/llvm/lib/Target/Mips/MipsOptionRecord.h @@ -58,7 +58,7 @@ class MipsRegInfoRecord : public MipsOptionRecord { ~MipsRegInfoRecord() override = default; void EmitMipsOptionRecord() override; - void SetPhysRegUsed(unsigned Reg, const MCRegisterInfo *MCRegInfo); + void SetPhysRegUsed(MCRegister Reg, const MCRegisterInfo *MCRegInfo); private: MipsELFStreamer *Streamer; From d4847f7c10932a6b2636cd8ca6b518605e63945e Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 12 Nov 2025 08:44:13 -0800 Subject: [PATCH 26/34] [X86] Remove implicit conversions of MCRegister to unsigned.
NFC (#167648) --- llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 2 +- llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp | 2 +- llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp | 2 +- .../lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp | 5 +++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp index 74de51c7eb1cc..e67b138afafec 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -1391,7 +1391,7 @@ class DarwinX86AsmBackend : public X86AsmBackend { return CU::UNWIND_MODE_DWARF; MCRegister Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true); - SavedRegs[SavedRegIdx++] = Reg; + SavedRegs[SavedRegIdx++] = Reg.id(); StackAdjust += OffsetSize; MinAbsOffset = std::min(MinAbsOffset, std::abs(Inst.getOffset())); InstrOffset += PushInstrSize(Reg); diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp index 759d95e5a18ea..88dd5431f586b 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp @@ -451,7 +451,7 @@ void X86InstPrinterCommon::printVKPair(const MCInst *MI, unsigned OpNo, // the assembly would look something like: // "vp2intersect %zmm5, %zmm7, {%k2, %k3}" // but this can work too. - switch (MI->getOperand(OpNo).getReg()) { + switch (MI->getOperand(OpNo).getReg().id()) { case X86::K0_K1: printRegName(OS, X86::K0); return; diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp index af5a69899844c..0c874b7e6d674 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp @@ -535,7 +535,7 @@ bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI, const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID); const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID); - auto ClearsSuperReg = [=](unsigned RegID) { + auto ClearsSuperReg = [=](MCRegister RegID) { // On X86-64, a general purpose integer register is viewed as a 64-bit // register internal to the processor. // An update to the lower 32 bits of a 64 bit integer register is diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp index 9c442319c220f..b722964a571b3 100644 --- a/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFTargetStreamer.cpp @@ -55,6 +55,7 @@ struct FPOInstruction { StackAlign, SetFrame, } Op; + // FIXME: This should be a union of MCRegister and unsigned. 
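// A hypothetical sketch of what that FIXME could look like (not the patch's
// code): tag the payload by Op so the register-valued cases (PushReg,
// SetFrame, as used below) stay typed separately from the offset-valued ones.
//
//   union {
//     MCRegister Reg;  // PushReg, SetFrame
//     unsigned Offset; // the stack-adjustment cases
//   };
//
// Until then, callers convert explicitly through Reg.id(), as in the hunks
// below.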
unsigned RegOrOffset; }; @@ -215,7 +216,7 @@ bool X86WinCOFFTargetStreamer::emitFPOSetFrame(MCRegister Reg, SMLoc L) { FPOInstruction Inst; Inst.Label = emitFPOLabel(); Inst.Op = FPOInstruction::SetFrame; - Inst.RegOrOffset = Reg; + Inst.RegOrOffset = Reg.id(); CurFPOData->Instructions.push_back(Inst); return false; } @@ -226,7 +227,7 @@ bool X86WinCOFFTargetStreamer::emitFPOPushReg(MCRegister Reg, SMLoc L) { FPOInstruction Inst; Inst.Label = emitFPOLabel(); Inst.Op = FPOInstruction::PushReg; - Inst.RegOrOffset = Reg; + Inst.RegOrOffset = Reg.id(); CurFPOData->Instructions.push_back(Inst); return false; } From 33a352f2d58f399a4968dc67c43650eafb200997 Mon Sep 17 00:00:00 2001 From: Pranav Bhandarkar Date: Wed, 12 Nov 2025 10:56:50 -0600 Subject: [PATCH 27/34] [Flang][OpenMP] - Fix the mapping flags used on descriptors mapped by MapsForPrivatizedSymbolsPass (#167554) The descriptors of a variable that has been privatized should be mapped `tofrom` instead of `to`. --- .../OpenMP/MapsForPrivatizedSymbols.cpp | 24 ++++++++---- .../target-private-allocatable.f90 | 2 +- .../Lower/OpenMP/optional-argument-map-2.f90 | 2 +- .../omp-maps-for-privatized-symbols.fir | 37 +++++++------------ 4 files changed, 32 insertions(+), 33 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp b/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp index 0972861b8450a..6404e1892ca5d 100644 --- a/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp +++ b/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp @@ -104,21 +104,31 @@ class MapsForPrivatizedSymbolsPass llvm::SmallVector boundsOps; if (needsBoundsOps(varPtr)) genBoundsOps(builder, varPtr, boundsOps); + mlir::Type varType = varPtr.getType(); mlir::omp::VariableCaptureKind captureKind = mlir::omp::VariableCaptureKind::ByRef; - if (fir::isa_trivial(fir::unwrapRefType(varPtr.getType())) || - fir::isa_char(fir::unwrapRefType(varPtr.getType()))) { - if (canPassByValue(fir::unwrapRefType(varPtr.getType()))) { + if (fir::isa_trivial(fir::unwrapRefType(varType)) || + fir::isa_char(fir::unwrapRefType(varType))) { + if (canPassByValue(fir::unwrapRefType(varType))) { captureKind = mlir::omp::VariableCaptureKind::ByCopy; } } + // Use tofrom if what we are mapping is not a trivial type. In all + // likelihood, it is a descriptor + mlir::omp::ClauseMapFlags mapFlag; + if (fir::isa_trivial(fir::unwrapRefType(varType)) || + fir::isa_char(fir::unwrapRefType(varType))) + mapFlag = mlir::omp::ClauseMapFlags::to; + else + mapFlag = mlir::omp::ClauseMapFlags::to | mlir::omp::ClauseMapFlags::from; + return omp::MapInfoOp::create( - builder, loc, varPtr.getType(), varPtr, - TypeAttr::get(llvm::cast(varPtr.getType()) - .getElementType()), - builder.getAttr(omp::ClauseMapFlags::to), + builder, loc, varType, varPtr, + TypeAttr::get( + llvm::cast(varType).getElementType()), + builder.getAttr(mapFlag), builder.getAttr(captureKind), /*varPtrPtr=*/Value{}, /*members=*/SmallVector{}, diff --git a/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 b/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 index 272f34fc0fd1a..cfe42367b051b 100644 --- a/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 +++ b/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 @@ -72,7 +72,7 @@ end subroutine target_allocatable ! CPU-SAME: {bindc_name = "alloc_var", {{.*}}} ! CPU: %[[VAR_DECL:.*]]:2 = hlfir.declare %[[VAR_ALLOC]] ! 
CPU: %[[BASE_ADDR:.*]] = fir.box_offset %[[VAR_DECL]]#0 base_addr : (!fir.ref>>) -> [[MEMBER_TYPE:.*]] -! CPU: %[[MEMBER:.*]] = omp.map.info var_ptr(%[[VAR_DECL]]#0 : [[TYPE]], i32) map_clauses(to) capture(ByRef) var_ptr_ptr(%[[BASE_ADDR]] : [[MEMBER_TYPE:.*]]) -> {{.*}} +! CPU: %[[MEMBER:.*]] = omp.map.info var_ptr(%[[VAR_DECL]]#0 : [[TYPE]], i32) map_clauses(tofrom) capture(ByRef) var_ptr_ptr(%[[BASE_ADDR]] : [[MEMBER_TYPE:.*]]) -> {{.*}} ! CPU: %[[MAP_VAR:.*]] = omp.map.info var_ptr(%[[VAR_DECL]]#0 : [[TYPE]], [[DESC_TYPE]]) map_clauses(to) capture(ByRef) members(%[[MEMBER]] : [0] : !fir.llvm_ptr>) -> !fir.ref>> ! CPU: omp.target map_entries(%[[MAP_VAR]] -> %arg0, %[[MEMBER]] -> %arg1 : [[TYPE]], [[MEMBER_TYPE]]) private( diff --git a/flang/test/Lower/OpenMP/optional-argument-map-2.f90 b/flang/test/Lower/OpenMP/optional-argument-map-2.f90 index 86f2e662a3f30..2eb18448e901f 100644 --- a/flang/test/Lower/OpenMP/optional-argument-map-2.f90 +++ b/flang/test/Lower/OpenMP/optional-argument-map-2.f90 @@ -72,7 +72,7 @@ end module mod ! CHECK-FPRIV: %[[VAL_13:.*]] = arith.subi %[[VAL_12]]#1, %[[VAL_11]] : index ! CHECK-FPRIV: %[[VAL_14:.*]] = omp.map.bounds lower_bound(%[[VAL_10]] : index) upper_bound(%[[VAL_13]] : index) extent(%[[VAL_12]]#1 : index) stride(%[[VAL_11]] : index) start_idx(%[[VAL_10]] : index) {stride_in_bytes = true} ! CHECK-FPRIV: %[[VAL_16:.*]] = fir.box_offset %[[VAL_0]] base_addr : (!fir.ref>) -> !fir.llvm_ptr>> -! CHECK-FPRIV: %[[VAL_17:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref>, !fir.char<1,?>) map_clauses(to) capture(ByRef) var_ptr_ptr(%[[VAL_16]] : !fir.llvm_ptr>>) bounds(%[[VAL_14]]) -> !fir.llvm_ptr>> {name = ""} +! CHECK-FPRIV: %[[VAL_17:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref>, !fir.char<1,?>) map_clauses(tofrom) capture(ByRef) var_ptr_ptr(%[[VAL_16]] : !fir.llvm_ptr>>) bounds(%[[VAL_14]]) -> !fir.llvm_ptr>> {name = ""} ! CHECK-FPRIV: %[[VAL_18:.*]] = omp.map.info var_ptr(%[[VAL_0]] : !fir.ref>, !fir.boxchar<1>) map_clauses(to) capture(ByRef) members(%[[VAL_17]] : [0] : !fir.llvm_ptr>>) -> !fir.ref> ! CHECK-FPRIV: omp.target map_entries(%[[VAL_7]] -> %[[VAL_19:.*]], %[[VAL_18]] -> %[[VAL_20:.*]], %[[VAL_17]] -> %[[VAL_21:.*]] : !fir.ref>, !fir.ref>, !fir.llvm_ptr>>) private(@_QMmodFroutine_boxcharEa_firstprivate_boxchar_c8xU %[[VAL_3]]#0 -> %[[VAL_22:.*]] [map_idx=1] : !fir.boxchar<1>) { ! 
CHECK-FPRIV: %[[VAL_23:.*]] = arith.constant 4 : index diff --git a/flang/test/Transforms/omp-maps-for-privatized-symbols.fir b/flang/test/Transforms/omp-maps-for-privatized-symbols.fir index 10a76126ed054..6054c70a2700d 100644 --- a/flang/test/Transforms/omp-maps-for-privatized-symbols.fir +++ b/flang/test/Transforms/omp-maps-for-privatized-symbols.fir @@ -6,7 +6,12 @@ module attributes {omp.is_target_device = false} { // extract box address, see if it is null, etc omp.yield(%arg1: !fir.ref>>) } - + omp.private {type = firstprivate} @_QFtarget_simpleEfp_int_firstprivate_i32 : i32 copy { + ^bb0(%arg0: !fir.ref, %arg1: !fir.ref): + %0 = fir.load %arg0 : !fir.ref + hlfir.assign %0 to %arg1 : i32, !fir.ref + omp.yield(%arg1 : !fir.ref) + } func.func @_QPtarget_simple() { %0 = fir.alloca i32 {bindc_name = "a", uniq_name = "_QFtarget_simpleEa"} %1:2 = hlfir.declare %0 {uniq_name = "_QFtarget_simpleEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) @@ -15,34 +20,18 @@ module attributes {omp.is_target_device = false} { %4 = fir.embox %3 : (!fir.heap) -> !fir.box> fir.store %4 to %2 : !fir.ref>> %5:2 = hlfir.declare %2 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtarget_simpleEsimple_var"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) + %6 = fir.alloca i32 {bindc_name = "fp_int", uniq_name = "_QFtarget_simpleEfp_int"} + %7:2 = hlfir.declare %6 {uniq_name = "_QFtarget_simpleEfp_int"} : (!fir.ref) -> (!fir.ref, !fir.ref) %c2_i32 = arith.constant 2 : i32 hlfir.assign %c2_i32 to %1#0 : i32, !fir.ref - %6 = omp.map.info var_ptr(%1#1 : !fir.ref, i32) map_clauses(to) capture(ByRef) -> !fir.ref {name = "a"} - omp.target map_entries(%6 -> %arg0 : !fir.ref) private(@_QFtarget_simpleEsimple_var_private_ref_box_heap_i32 %5#0 -> %arg1 : !fir.ref>>) { - %11:2 = hlfir.declare %arg0 {uniq_name = "_QFtarget_simpleEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) - %12:2 = hlfir.declare %arg1 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtarget_simpleEsimple_var"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) - %c10_i32 = arith.constant 10 : i32 - %13 = fir.load %11#0 : !fir.ref - %14 = arith.addi %c10_i32, %13 : i32 - hlfir.assign %14 to %12#0 realloc : i32, !fir.ref>> + %8 = omp.map.info var_ptr(%1#1 : !fir.ref, i32) map_clauses(to) capture(ByRef) -> !fir.ref {name = "a"} + omp.target map_entries(%8 -> %arg0 : !fir.ref) private(@_QFtarget_simpleEsimple_var_private_ref_box_heap_i32 %5#0 -> %arg1, @_QFtarget_simpleEfp_int_firstprivate_i32 %7#0 -> %arg2 : !fir.ref>>, !fir.ref) { omp.terminator } - %7 = fir.load %5#1 : !fir.ref>> - %8 = fir.box_addr %7 : (!fir.box>) -> !fir.heap - %9 = fir.convert %8 : (!fir.heap) -> i64 - %c0_i64 = arith.constant 0 : i64 - %10 = arith.cmpi ne, %9, %c0_i64 : i64 - fir.if %10 { - %11 = fir.load %5#1 : !fir.ref>> - %12 = fir.box_addr %11 : (!fir.box>) -> !fir.heap - fir.freemem %12 : !fir.heap - %13 = fir.zero_bits !fir.heap - %14 = fir.embox %13 : (!fir.heap) -> !fir.box> - fir.store %14 to %5#1 : !fir.ref>> - } return } } // CHECK: %[[MAP0:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref, i32) map_clauses(to) capture(ByRef) -> !fir.ref {name = "a"} -// CHECK: %[[MAP1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref>>, !fir.box>) map_clauses(to) capture(ByRef) -> !fir.ref>> -// CHECK: omp.target map_entries(%[[MAP0]] -> %arg0, %[[MAP1]] -> %arg1 : !fir.ref, !fir.ref>>) +// CHECK: %[[MAP1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref>>, !fir.box>) map_clauses(tofrom) capture(ByRef) -> !fir.ref>> +// CHECK: %[[MAP2:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref, i32) map_clauses(to) capture(ByCopy) -> 
!fir.ref +// CHECK: omp.target map_entries(%[[MAP0]] -> %arg0, %[[MAP1]] -> %arg1, %[[MAP2]] -> %arg2 : !fir.ref, !fir.ref>>, !fir.ref) From 5b56816dffd4e66632a751cb8d094083b18e201a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= Date: Wed, 12 Nov 2025 17:59:38 +0100 Subject: [PATCH 28/34] [NFC][SPIRV][IRTranslator] Replace leftover `MF->getTarget().getTargetTriple().isSPIRV() ` with `targetSupportsBF16Type(MF)` (#167704) --- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index d656f1071400d..53c831b203cae 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -3482,7 +3482,7 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U, bool IRTranslator::translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) { - if (!MF->getTarget().getTargetTriple().isSPIRV() && containsBF16Type(U)) + if (containsBF16Type(U) && !targetSupportsBF16Type(MF)) return false; const AtomicRMWInst &I = cast<AtomicRMWInst>(U); From 3f7ac6e757c37ebe20f3c6bfc1789e6f20330244 Mon Sep 17 00:00:00 2001 From: "Mikhail R. Gadelha" Date: Wed, 12 Nov 2025 14:02:48 -0300 Subject: [PATCH 29/34] [RISCV] Update SpacemiT-X60 vector permutation instructions latencies (#152738) This PR adds hardware-measured latencies for all instructions defined in Section 16 of the RVV specification: "Vector Permutation Instructions" to the SpacemiT-X60 scheduling model. --------- Signed-off-by: Mikhail R. Gadelha --- .../lib/Target/RISCV/RISCVSchedSpacemitX60.td | 83 +- .../tools/llvm-mca/RISCV/SpacemitX60/rvv-fp.s | 122 +- .../RISCV/SpacemitX60/rvv-permutation.s | 1120 ++++++++--------- 3 files changed, 684 insertions(+), 641 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td b/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td index 41071b29e5c9e..4271a6816e05b 100644 --- a/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td +++ b/llvm/lib/Target/RISCV/RISCVSchedSpacemitX60.td @@ -750,39 +750,82 @@ foreach mx = SchedMxList in { } // 16. Vector Permutation Instructions +// Slide foreach mx = SchedMxList in { defvar IsWorstCase = SMX60IsWorstCaseMX.c; - defm "" : LMULWriteResMX<"WriteVSlideI", [SMX60_VIEU], mx, IsWorstCase>; + // Latency for slide up: 4/4/8/16, ReleaseAtCycles is 2/4/8/16 + defvar VSlideUpLat = ConstValueUntilLMULThenDouble<"M2", 4, mx>.c; + defvar VSlideUpOcc = ConstOneUntilMF2ThenDouble.c; + let Latency = VSlideUpLat, ReleaseAtCycles =[VSlideUpOcc] in { + defm "" : LMULWriteResMX<"WriteVSlideUpX", [SMX60_VIEU], mx, IsWorstCase>; + } - defm "" : LMULWriteResMX<"WriteVISlide1X", [SMX60_VIEU], mx, IsWorstCase>; - defm "" : LMULWriteResMX<"WriteVFSlide1F", [SMX60_VFP], mx, IsWorstCase>; + // Latency for slide down: 4/5/9/17, ReleaseAtCycles is 3/5/9/17 + defvar VSlideDownLat = GetLMULValue<[4, 4, 4, 4, 5, 9, 17], mx>.c; + defvar VSlideDownOcc = GetLMULValue<[1, 1, 1, 3, 5, 9, 17], mx>.c; + let Latency = VSlideDownLat, ReleaseAtCycles =[VSlideDownOcc] in { + defm "" : LMULWriteResMX<"WriteVSlideDownX", [SMX60_VIEU], mx, IsWorstCase>; + } + // The following classes group the slide-up and slide-down forms together, so + // we use the worst-case (slide down) numbers for all of them.
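// A worked reading of the helpers above, assuming GetLMULValue indexes its
// list in LMUL order [MF8, MF4, MF2, M1, M2, M4, M8]:
//
//   VSlideDownLat = GetLMULValue<[4, 4, 4, 4, 5, 9, 17], mx>.c
//     mx = M1 -> 4, M2 -> 5, M4 -> 9, M8 -> 17 cycles
//
// which matches the measured 4/5/9/17 slide-down latencies quoted in the
// comment: fractional LMULs stay at the 4-cycle floor, and from M2 upwards
// the latency grows with the number of registers moved (roughly 2 * LMUL + 1).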
+ let Latency = VSlideDownLat, ReleaseAtCycles =[VSlideDownOcc] in { + defm "" : LMULWriteResMX<"WriteVSlideI", [SMX60_VIEU], mx, IsWorstCase>; + defm "" : LMULWriteResMX<"WriteVISlide1X", [SMX60_VIEU], mx, IsWorstCase>; - defm "" : LMULWriteResMX<"WriteVSlideUpX", [SMX60_VIEU], mx, IsWorstCase>; - defm "" : LMULWriteResMX<"WriteVSlideDownX", [SMX60_VIEU], mx, IsWorstCase>; + defm "" : LMULWriteResMX<"WriteVFSlide1F", [SMX60_VFP], mx, IsWorstCase>; + } } -def : WriteRes; -def : WriteRes; - -def : WriteRes; -def : WriteRes; +// ReleaseAtCycles is 2/2/2/2/2/3/6, but we can't set based on MX for now +// TODO: Split this into separate WriteRes for each MX +let Latency = 6, ReleaseAtCycles = [6] in { + def : WriteRes; +} -// Gather and Compress -foreach mx = SchedMxList in { - foreach sew = SchedSEWSet.val in { - defvar IsWorstCase = SMX60IsWorstCaseMXSEW.c; - defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SMX60_VIEU], mx, sew, IsWorstCase>; - defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SMX60_VIEU], mx, sew, IsWorstCase>; - defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SMX60_VIEU], mx, sew, IsWorstCase>; - } +// ReleaseAtCycles is 1/1/1/1/1/2/4, but we can't set based on MX for now +// TODO: Split this into separate WriteRes for each MX +let Latency = 4, ReleaseAtCycles = [4] in { + def : WriteRes; + def : WriteRes; + def : WriteRes; } +// Integer LMUL Gather and Compress foreach mx = SchedMxList in { defvar IsWorstCase = SMX60IsWorstCaseMX.c; - defm "" : LMULWriteResMX<"WriteVRGatherVX", [SMX60_VIEU], mx, IsWorstCase>; - defm "" : LMULWriteResMX<"WriteVRGatherVI", [SMX60_VIEU], mx, IsWorstCase>; + defvar VRGatherLat = ConstValueUntilLMULThenDouble<"M2", 4, mx>.c; + let Latency = VRGatherLat, ReleaseAtCycles = [ConstOneUntilMF2ThenDouble.c] in { + defm "" : LMULWriteResMX<"WriteVRGatherVX", [SMX60_VIEU], mx, IsWorstCase>; + defm "" : LMULWriteResMX<"WriteVRGatherVI", [SMX60_VIEU], mx, IsWorstCase>; + } + + foreach sew = SchedSEWSet.val in { + defvar IsWorstCaseSEW = SMX60IsWorstCaseMXSEW.c; + + defvar VRGatherVVLat = GetLMULValue<[4, 4, 4, 4, 16, 64, 256], mx>.c; + defvar VRGatherVVOcc = GetLMULValue<[1, 1, 1, 4, 16, 64, 256], mx>.c; + let Latency = VRGatherVVLat, ReleaseAtCycles = [VRGatherVVOcc] in { + defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SMX60_VIEU], mx, sew, IsWorstCaseSEW>; + } + // For sew == 8, latency is half of the other cases, except for the fractional LMULs (const 4 cycles) + defvar VRGatherEI16Lat = !if(!eq(sew, 8), + GetLMULValue<[4, 4, 4, 8, 32, 128, 256], mx>.c, + GetLMULValue<[4, 4, 4, 4, 16, 64, 256], mx>.c); + defvar VRGatherEI16Occ = !if(!eq(sew, 8), + GetLMULValue<[1, 1, 2, 8, 32, 128, 256], mx>.c, + GetLMULValue<[1, 1, 1, 4, 16, 64, 256], mx>.c); + let Latency = VRGatherEI16Lat, ReleaseAtCycles = [VRGatherEI16Occ] in { + defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SMX60_VIEU], mx, sew, IsWorstCaseSEW>; + } + + defvar VCompressVLat = GetLMULValue<[4, 4, 4, 4, 10, 36, 136], mx>.c; + defvar VCompressVOcc = GetLMULValue<[1, 1, 1, 3, 10, 36, 136], mx>.c; + let Latency = VCompressVLat, ReleaseAtCycles = [VCompressVOcc] in { + defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SMX60_VIEU], mx, sew, IsWorstCaseSEW>; + } + } } // Others diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-fp.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-fp.s index f59c7987b615b..311a13c9427b1 100644 --- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-fp.s +++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-fp.s @@ -2911,65 +2911,65 @@ 
vfwsub.wv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFMADD_VV vfmadd.vv v8, v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_F_S vfmv.f.s fs0, v8 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_F_S vfmv.f.s fs0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, 
mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VFMV_S_F vfmv.s.f v8, fs0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VFMV_S_F vfmv.s.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFMV_V_F vfmv.v.f v8, fs0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu @@ -3763,7 +3763,7 @@ vfwsub.wv v8, v16, v24 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] 
[2] [3.0] [3.1] [4] [5] [6] -# CHECK-NEXT: - 915.00 - - - 885.00 30.00 - +# CHECK-NEXT: - 915.00 - - - 885.00 120.00 - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions: @@ -4758,65 +4758,65 @@ vfwsub.wv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfmadd.vv v8, v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.f.s fs0, v8 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.f.s fs0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, 
fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vfmv.s.f v8, fs0 +# CHECK-NEXT: - - - - - - 4.00 - vfmv.s.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfmv.v.f v8, fs0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu diff --git a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-permutation.s b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-permutation.s index 5ae0d43b42d10..de1a5971fcd1d 100644 --- a/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-permutation.s +++ b/llvm/test/tools/llvm-mca/RISCV/SpacemitX60/rvv-permutation.s @@ -1330,93 +1330,93 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: 1 16 4.00 16 SMX60_VIEU[4] VMV_V_I vmv.v.i v8, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, 
tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 
1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_X_S vmv.x.s s0, v8 +# CHECK-NEXT: 1 6 6.00 6 SMX60_VIEU[6] VMV_X_S vmv.x.s s0, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# 
CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV_S_X vmv.s.x v8, s0 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMV_S_X vmv.s.x v8, s0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VMV1R_V vmv1r.v v8, v8 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu @@ -1638,487 +1638,487 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VIOTA_M viota.m v8, v16 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 10 10.00 10 SMX60_VIEU[10] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 36 36.00 36 SMX60_VIEU[36] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 136 136.00 136 SMX60_VIEU[136] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI 
vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 10 10.00 10 SMX60_VIEU[10] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 36 36.00 36 SMX60_VIEU[36] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 136 136.00 136 SMX60_VIEU[136] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 10 10.00 10 SMX60_VIEU[10] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 36 36.00 36 SMX60_VIEU[36] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 136 136.00 136 SMX60_VIEU[136] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 10 10.00 10 SMX60_VIEU[10] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 36 36.00 36 SMX60_VIEU[36] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VCOMPRESS_VM 
vcompress.vm v8, v16, v24 +# CHECK-NEXT: 1 136 136.00 136 SMX60_VIEU[136] VCOMPRESS_VM vcompress.vm v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA 
VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1UP_VX vslide1up.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1UP_VX vslide1up.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx 
v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 9 
9.00 9 SMX60_VIEU[9] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDE1DOWN_VX vslide1down.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU 
VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VX vslideup.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VSLIDEUP_VX vslideup.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 
1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, 
zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEUP_VI vslideup.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEUP_VI vslideup.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# 
CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VX vslidedown.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 
SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VIEU[3] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VIEU[5] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 
SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VIEU[9] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VIEU[17] VSLIDEDOWN_VI vslidedown.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 
1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VV vrgather.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHER_VV vrgather.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 
SMX60_VIEU[8] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx 
v8, v16, t5 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VX vrgather.vx v8, v16, t5 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VX vrgather.vx v8, v16, t5 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 
SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHER_VI vrgather.vi v8, v16, 12 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHER_VI vrgather.vi v8, v16, 12 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 2.00 4 SMX60_VIEU[2] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 8 8.00 8 SMX60_VIEU[8] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 32 32.00 32 SMX60_VIEU[32] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 128 128.00 128 SMX60_VIEU[128] 
VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 16 16.00 16 SMX60_VIEU[16] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU 
VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 64 64.00 64 SMX60_VIEU[64] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VIEU VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: 1 256 256.00 256 SMX60_VIEU[256] VRGATHEREI16_VV vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: 1 4 4.00 4 SMX60_VIEU[4] VMERGE_VIM vmerge.vim v8, v8, 12, v0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e8, mf4, tu, mu @@ -2282,65 +2282,65 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFMERGE_VFM vfmerge.vfm v8, v8, ft0, v0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VFP[3] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VFP[5] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VFP[9] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VFP[17] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VFP[3] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VFP[5] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VFP[9] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP 
VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VFP[17] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VFP[3] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VFP[5] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VFP[9] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VFP[17] VFSLIDE1DOWN_VF vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VFP[3] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VFP[5] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VFP[9] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VFP[17] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 1.00 4 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VFP[3] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VFP[5] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# 
CHECK-NEXT: 1 9 9.00 9 SMX60_VFP[9] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VFP[17] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 4 3.00 4 SMX60_VFP[3] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 5 5.00 5 SMX60_VFP[5] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 9 9.00 9 SMX60_VFP[9] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: 1 1 1.00 U 1 SMX60_IEU,SMX60_IEUA VSETVLI vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: 1 1 1.00 1 SMX60_VFP VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: 1 17 17.00 17 SMX60_VFP[17] VFSLIDE1UP_VF vfslide1up.vf v8, v16, ft0 # CHECK: Resources: # CHECK-NEXT: [0] - SMX60_FP @@ -2354,7 +2354,7 @@ vfslide1up.vf v8, v16, ft0 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] -# CHECK-NEXT: - 572.00 - - - 45.00 923.00 - +# CHECK-NEXT: - 572.00 - - - 225.00 5253.00 - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3.0] [3.1] [4] [5] [6] Instructions: @@ -2491,93 +2491,93 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu # CHECK-NEXT: - - - - - - 4.00 - vmv.v.i v8, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - 
- vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.x.s s0, v8 +# CHECK-NEXT: - - - - - - 6.00 - vmv.x.s s0, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# 
CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vmv.s.x v8, s0 +# CHECK-NEXT: - - - - - - 4.00 - vmv.s.x v8, s0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vmv1r.v v8, v8 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -2805,43 +2805,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 3.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 10.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 36.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 136.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 3.00 - 
vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 10.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 36.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 136.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 3.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 10.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 36.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 136.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 3.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 10.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 36.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vcompress.vm v8, v16, v24 +# CHECK-NEXT: - - - - - - 136.00 - vcompress.vm v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -2849,43 +2849,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - 
vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1up.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1up.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -2893,43 +2893,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, 
mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslide1down.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslide1down.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -2937,43 +2937,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli 
t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vslideup.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -2981,43 +2981,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - 
vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslideup.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslideup.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -3025,43 +3025,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - 
vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -3069,43 +3069,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 
- vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 3.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 5.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 9.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vslidedown.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 17.00 - vslidedown.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -3113,43 +3113,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 
1.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 64.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 64.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 64.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 64.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgather.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, 
zero, e8, mf4, tu, mu @@ -3157,43 +3157,43 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vx v8, v16, t5 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vx v8, v16, t5 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, 
mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -3201,85 +3201,85 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 16.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 2.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 4.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 +# CHECK-NEXT: - - - - - - 8.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgather.vi v8, v16, 12 
+# CHECK-NEXT: - - - - - - 16.00 - vrgather.vi v8, v16, 12 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 2.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf8, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 8.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 32.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 128.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 64.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 64.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 4.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 16.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# 
CHECK-NEXT: - - - - - - 64.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - - 1.00 - vrgatherei16.vv v8, v16, v24 +# CHECK-NEXT: - - - - - - 256.00 - vrgatherei16.vv v8, v16, v24 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf2, tu, mu # CHECK-NEXT: - - - - - - 4.00 - vmerge.vim v8, v8, 12, v0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e8, mf4, tu, mu @@ -3447,58 +3447,58 @@ vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 3.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 5.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 9.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 17.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 3.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 5.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 9.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 17.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 3.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 5.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 9.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1down.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 17.00 - - vfslide1down.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf2, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, mf4, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m1, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 3.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - 
- vsetvli t3, zero, e16, m2, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 5.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m4, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 9.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e16, m8, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 17.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, mf2, tu, mu # CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m1, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 3.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m2, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 5.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m4, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 9.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e32, m8, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 17.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m1, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 3.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m2, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 5.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m4, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 9.00 - - vfslide1up.vf v8, v16, ft0 # CHECK-NEXT: - 1.00 - - - - - - vsetvli t3, zero, e64, m8, tu, mu -# CHECK-NEXT: - - - - - 1.00 - - vfslide1up.vf v8, v16, ft0 +# CHECK-NEXT: - - - - - 17.00 - - vfslide1up.vf v8, v16, ft0 From 0bbf644a01c1dfdf1ab8ae7fcc816e98264a8672 Mon Sep 17 00:00:00 2001 From: serge-sans-paille Date: Wed, 12 Nov 2025 18:21:06 +0100 Subject: [PATCH 30/34] Remove unused standard headers: memory, unordered_* (#167297) --- llvm/include/llvm/ADT/ArrayRef.h | 3 +-- llvm/include/llvm/ADT/FunctionExtras.h | 1 - llvm/include/llvm/Analysis/TensorSpec.h | 1 - llvm/include/llvm/CodeGen/MachineDominators.h | 1 - llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFCFIProgram.h | 1 - .../llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h | 1 - .../llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h | 2 -- llvm/include/llvm/MCA/Instruction.h | 2 -- llvm/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h | 1 - llvm/include/llvm/ProfileData/SampleProfReader.h | 1 - llvm/include/llvm/Support/Jobserver.h | 1 - llvm/include/llvm/Support/LSP/Logging.h | 1 - llvm/include/llvm/Support/ThreadPool.h | 1 - llvm/include/llvm/Support/Timer.h | 1 - llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h | 2 ++ llvm/lib/Analysis/StackSafetyAnalysis.cpp | 1 - llvm/lib/CodeGen/VLIWMachineScheduler.cpp | 1 - llvm/lib/DebugInfo/MSF/MSFBuilder.cpp | 1 - llvm/lib/MC/MCPseudoProbe.cpp | 1 - llvm/lib/ObjCopy/MachO/MachOObject.cpp | 1 - llvm/lib/TableGen/TGLexer.h | 1 - llvm/lib/Target/DirectX/DXILShaderFlags.h | 1 - llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h | 1 - 
.../Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.cpp | 1 + .../lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.h | 1 - llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCTargetDesc.h | 1 - llvm/lib/Transforms/IPO/LowerTypeTests.cpp | 1 - llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp | 2 ++ llvm/tools/llvm-exegesis/lib/PerfHelper.h | 1 - llvm/tools/llvm-exegesis/lib/SnippetGenerator.h | 1 - llvm/tools/llvm-pdbutil/PrettyClassDefinitionDumper.h | 2 -- llvm/unittests/Support/LockFileManagerTest.cpp | 1 - llvm/unittests/Transforms/IPO/AttributorTest.cpp | 1 - .../tools/llvm-exegesis/Mips/RegisterAliasingTest.cpp | 1 - llvm/unittests/tools/llvm-exegesis/Mips/TargetTest.cpp | 1 - llvm/unittests/tools/llvm-exegesis/RISCV/TargetTest.cpp | 1 - .../unittests/tools/llvm-exegesis/X86/RegisterAliasingTest.cpp | 1 - .../tools/llvm-exegesis/X86/SchedClassResolutionTest.cpp | 1 - .../unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp | 2 -- llvm/unittests/tools/llvm-exegesis/X86/TargetTest.cpp | 1 - 40 files changed, 6 insertions(+), 42 deletions(-) diff --git a/llvm/include/llvm/ADT/ArrayRef.h b/llvm/include/llvm/ADT/ArrayRef.h index 450f4d04c97fc..d7ed2c78749f0 100644 --- a/llvm/include/llvm/ADT/ArrayRef.h +++ b/llvm/include/llvm/ADT/ArrayRef.h @@ -10,8 +10,8 @@ #define LLVM_ADT_ARRAYREF_H #include "llvm/ADT/Hashing.h" -#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include #include @@ -19,7 +19,6 @@ #include #include #include -#include #include #include diff --git a/llvm/include/llvm/ADT/FunctionExtras.h b/llvm/include/llvm/ADT/FunctionExtras.h index 2498cb7796f1f..807a2e769999c 100644 --- a/llvm/include/llvm/ADT/FunctionExtras.h +++ b/llvm/include/llvm/ADT/FunctionExtras.h @@ -39,7 +39,6 @@ #include "llvm/Support/MemAlloc.h" #include "llvm/Support/type_traits.h" #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h index d432ce8a203c4..8b19b6bb976ec 100644 --- a/llvm/include/llvm/Analysis/TensorSpec.h +++ b/llvm/include/llvm/Analysis/TensorSpec.h @@ -15,7 +15,6 @@ #include "llvm/ADT/StringMap.h" #include "llvm/IR/LLVMContext.h" -#include #include #include diff --git a/llvm/include/llvm/CodeGen/MachineDominators.h b/llvm/include/llvm/CodeGen/MachineDominators.h index 41df86468aa37..faea0b7de525f 100644 --- a/llvm/include/llvm/CodeGen/MachineDominators.h +++ b/llvm/include/llvm/CodeGen/MachineDominators.h @@ -24,7 +24,6 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/GenericDomTree.h" #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFCFIProgram.h b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFCFIProgram.h index 0a1300b4acaa4..e636296b058fd 100644 --- a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFCFIProgram.h +++ b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFCFIProgram.h @@ -17,7 +17,6 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/Error.h" #include "llvm/TargetParser/Triple.h" -#include #include namespace llvm { diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h index fc41641fd5cff..79cfc4832fe9a 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryResolver.h @@ -21,7 +21,6 @@ #include #include -#include namespace llvm { 
namespace orc { diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h index d1c201306bf54..61aefbda35337 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/LibraryScanner.h @@ -27,8 +27,6 @@ #include #include #include -#include -#include namespace llvm { namespace orc { diff --git a/llvm/include/llvm/MCA/Instruction.h b/llvm/include/llvm/MCA/Instruction.h index 3cdbf84748c79..b6b5b5979dec9 100644 --- a/llvm/include/llvm/MCA/Instruction.h +++ b/llvm/include/llvm/MCA/Instruction.h @@ -26,8 +26,6 @@ #include "llvm/Support/raw_ostream.h" #endif -#include - namespace llvm { namespace mca { diff --git a/llvm/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h b/llvm/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h index 25ba27c7c7a22..a70c2388c5168 100644 --- a/llvm/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h +++ b/llvm/include/llvm/ObjectYAML/CodeViewYAMLTypeHashing.h @@ -21,7 +21,6 @@ #include "llvm/Support/Error.h" #include "llvm/Support/YAMLTraits.h" #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/ProfileData/SampleProfReader.h b/llvm/include/llvm/ProfileData/SampleProfReader.h index 799938ab901c1..67834f72c2400 100644 --- a/llvm/include/llvm/ProfileData/SampleProfReader.h +++ b/llvm/include/llvm/ProfileData/SampleProfReader.h @@ -244,7 +244,6 @@ #include #include #include -#include #include namespace llvm { diff --git a/llvm/include/llvm/Support/Jobserver.h b/llvm/include/llvm/Support/Jobserver.h index 3c0c04537735d..1fd4f7ed007af 100644 --- a/llvm/include/llvm/Support/Jobserver.h +++ b/llvm/include/llvm/Support/Jobserver.h @@ -67,7 +67,6 @@ #define LLVM_SUPPORT_JOBSERVER_H #include "llvm/ADT/StringRef.h" -#include namespace llvm { diff --git a/llvm/include/llvm/Support/LSP/Logging.h b/llvm/include/llvm/Support/LSP/Logging.h index fe65899b1d4ce..f19cc49dbb606 100644 --- a/llvm/include/llvm/Support/LSP/Logging.h +++ b/llvm/include/llvm/Support/LSP/Logging.h @@ -11,7 +11,6 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/FormatVariadic.h" -#include #include namespace llvm { diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h index d3276a18dc2c6..1be7779f2c72c 100644 --- a/llvm/include/llvm/Support/ThreadPool.h +++ b/llvm/include/llvm/Support/ThreadPool.h @@ -27,7 +27,6 @@ #include #include #include -#include #include #include diff --git a/llvm/include/llvm/Support/Timer.h b/llvm/include/llvm/Support/Timer.h index 527d67f3b360c..097eaf3422ca3 100644 --- a/llvm/include/llvm/Support/Timer.h +++ b/llvm/include/llvm/Support/Timer.h @@ -15,7 +15,6 @@ #include "llvm/Support/DataTypes.h" #include "llvm/Support/Mutex.h" #include -#include #include #include diff --git a/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h b/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h index 3bdcf9a18fe40..c695784641b4e 100644 --- a/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h +++ b/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h @@ -17,6 +17,8 @@ #include "llvm/ADT/StringSet.h" #include "llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h" +#include + namespace llvm { using AnchorList = std::vector>; diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp index 5e92ca1d38e70..fbe74d21c7199 100644 --- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp +++ 
b/llvm/lib/Analysis/StackSafetyAnalysis.cpp @@ -30,7 +30,6 @@ #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/raw_ostream.h" #include -#include #include using namespace llvm; diff --git a/llvm/lib/CodeGen/VLIWMachineScheduler.cpp b/llvm/lib/CodeGen/VLIWMachineScheduler.cpp index 2fd1dd5f84a91..53d166d277cb8 100644 --- a/llvm/lib/CodeGen/VLIWMachineScheduler.cpp +++ b/llvm/lib/CodeGen/VLIWMachineScheduler.cpp @@ -34,7 +34,6 @@ #include #include #include -#include #include using namespace llvm; diff --git a/llvm/lib/DebugInfo/MSF/MSFBuilder.cpp b/llvm/lib/DebugInfo/MSF/MSFBuilder.cpp index bb3411bb9568e..7890bcce6c7ca 100644 --- a/llvm/lib/DebugInfo/MSF/MSFBuilder.cpp +++ b/llvm/lib/DebugInfo/MSF/MSFBuilder.cpp @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/llvm/lib/MC/MCPseudoProbe.cpp b/llvm/lib/MC/MCPseudoProbe.cpp index b493337b39317..11e42118a29ef 100644 --- a/llvm/lib/MC/MCPseudoProbe.cpp +++ b/llvm/lib/MC/MCPseudoProbe.cpp @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/llvm/lib/ObjCopy/MachO/MachOObject.cpp b/llvm/lib/ObjCopy/MachO/MachOObject.cpp index 8d2c02dc37c99..e45cc547ee446 100644 --- a/llvm/lib/ObjCopy/MachO/MachOObject.cpp +++ b/llvm/lib/ObjCopy/MachO/MachOObject.cpp @@ -9,7 +9,6 @@ #include "MachOObject.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/SystemZ/zOSSupport.h" -#include using namespace llvm; using namespace llvm::objcopy::macho; diff --git a/llvm/lib/TableGen/TGLexer.h b/llvm/lib/TableGen/TGLexer.h index 753470dfb5374..a0ade6412024e 100644 --- a/llvm/lib/TableGen/TGLexer.h +++ b/llvm/lib/TableGen/TGLexer.h @@ -19,7 +19,6 @@ #include "llvm/Support/DataTypes.h" #include "llvm/Support/SMLoc.h" #include -#include #include #include diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.h b/llvm/lib/Target/DirectX/DXILShaderFlags.h index f94f7997436ac..a0820572e5fed 100644 --- a/llvm/lib/Target/DirectX/DXILShaderFlags.h +++ b/llvm/lib/Target/DirectX/DXILShaderFlags.h @@ -22,7 +22,6 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include -#include namespace llvm { class Module; diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h index c5e57d0df22a7..712bdbe2af187 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h @@ -21,7 +21,6 @@ #include "llvm/TargetParser/SubtargetFeature.h" #include #include -#include namespace llvm { diff --git a/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.cpp b/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.cpp index 3e4a58a20f942..0798483462e18 100644 --- a/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.cpp @@ -21,6 +21,7 @@ #include "llvm/Transforms/Utils/LoopSimplify.h" #include #include +#include #define DEBUG_TYPE "spirv-convergence-region-analysis" diff --git a/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.h b/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.h index ed0a1e10562a8..7f4e1a1791e9e 100644 --- a/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.h +++ b/llvm/lib/Target/SPIRV/Analysis/SPIRVConvergenceRegionAnalysis.h @@ -20,7 +20,6 @@ #include "llvm/Analysis/LoopInfo.h" #include "llvm/IR/Dominators.h" #include -#include namespace llvm { class IntrinsicInst; diff --git 
a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCTargetDesc.h b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCTargetDesc.h index f9ba5e2d55cba..d36453a4f078d 100644 --- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCTargetDesc.h +++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCTargetDesc.h @@ -15,7 +15,6 @@ #include "llvm/Support/DataTypes.h" #include -#include namespace llvm { class MCAsmBackend; diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp index aa1346d9ee56a..94663ff928a0b 100644 --- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp +++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp @@ -78,7 +78,6 @@ #include #include #include -#include #include #include #include diff --git a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp index 70b8614826826..b9fb7a3ae4b5b 100644 --- a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp @@ -18,6 +18,8 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/Utils/LongestCommonSequence.h" +#include + using namespace llvm; using namespace sampleprof; diff --git a/llvm/tools/llvm-exegesis/lib/PerfHelper.h b/llvm/tools/llvm-exegesis/lib/PerfHelper.h index 4a825b293b716..744e3c2994515 100644 --- a/llvm/tools/llvm-exegesis/lib/PerfHelper.h +++ b/llvm/tools/llvm-exegesis/lib/PerfHelper.h @@ -21,7 +21,6 @@ #include #include -#include #ifdef _MSC_VER typedef int pid_t; diff --git a/llvm/tools/llvm-exegesis/lib/SnippetGenerator.h b/llvm/tools/llvm-exegesis/lib/SnippetGenerator.h index 770e4e8d1f42d..1ef0beb6d7c5a 100644 --- a/llvm/tools/llvm-exegesis/lib/SnippetGenerator.h +++ b/llvm/tools/llvm-exegesis/lib/SnippetGenerator.h @@ -25,7 +25,6 @@ #include "llvm/MC/MCInst.h" #include "llvm/Support/Error.h" #include -#include #include namespace llvm { diff --git a/llvm/tools/llvm-pdbutil/PrettyClassDefinitionDumper.h b/llvm/tools/llvm-pdbutil/PrettyClassDefinitionDumper.h index 50c8f5dde0cd4..9e492a4fb7f1e 100644 --- a/llvm/tools/llvm-pdbutil/PrettyClassDefinitionDumper.h +++ b/llvm/tools/llvm-pdbutil/PrettyClassDefinitionDumper.h @@ -15,8 +15,6 @@ #include "llvm/DebugInfo/PDB/PDBSymbolData.h" #include "llvm/DebugInfo/PDB/PDBSymbolFunc.h" -#include - namespace llvm { class BitVector; diff --git a/llvm/unittests/Support/LockFileManagerTest.cpp b/llvm/unittests/Support/LockFileManagerTest.cpp index 627b2daef650c..bd61b6c36efb3 100644 --- a/llvm/unittests/Support/LockFileManagerTest.cpp +++ b/llvm/unittests/Support/LockFileManagerTest.cpp @@ -12,7 +12,6 @@ #include "llvm/Testing/Support/Error.h" #include "llvm/Testing/Support/SupportHelpers.h" #include "gtest/gtest.h" -#include using namespace llvm; using llvm::unittest::TempDir; diff --git a/llvm/unittests/Transforms/IPO/AttributorTest.cpp b/llvm/unittests/Transforms/IPO/AttributorTest.cpp index e345c60f781d2..8d90b308f840c 100644 --- a/llvm/unittests/Transforms/IPO/AttributorTest.cpp +++ b/llvm/unittests/Transforms/IPO/AttributorTest.cpp @@ -17,7 +17,6 @@ #include "llvm/Testing/Support/Error.h" #include "llvm/Transforms/Utils/CallGraphUpdater.h" #include "gtest/gtest.h" -#include namespace llvm { diff --git a/llvm/unittests/tools/llvm-exegesis/Mips/RegisterAliasingTest.cpp b/llvm/unittests/tools/llvm-exegesis/Mips/RegisterAliasingTest.cpp index 9cce106cc138e..62a7ee5407707 100644 --- a/llvm/unittests/tools/llvm-exegesis/Mips/RegisterAliasingTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/Mips/RegisterAliasingTest.cpp @@ -9,7 +9,6 @@ #include "RegisterAliasing.h" 
#include -#include #include "MipsInstrInfo.h" #include "TestBase.h" diff --git a/llvm/unittests/tools/llvm-exegesis/Mips/TargetTest.cpp b/llvm/unittests/tools/llvm-exegesis/Mips/TargetTest.cpp index 1f458c0ef1b34..d18ece99d4a6b 100644 --- a/llvm/unittests/tools/llvm-exegesis/Mips/TargetTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/Mips/TargetTest.cpp @@ -9,7 +9,6 @@ #include "Target.h" #include -#include #include "MCTargetDesc/MipsMCTargetDesc.h" #include "TestBase.h" diff --git a/llvm/unittests/tools/llvm-exegesis/RISCV/TargetTest.cpp b/llvm/unittests/tools/llvm-exegesis/RISCV/TargetTest.cpp index 13a1e5a22228e..b45adc6a85a9d 100644 --- a/llvm/unittests/tools/llvm-exegesis/RISCV/TargetTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/RISCV/TargetTest.cpp @@ -9,7 +9,6 @@ #include "Target.h" #include -#include #include "MCTargetDesc/RISCVMCTargetDesc.h" #include "TestBase.h" diff --git a/llvm/unittests/tools/llvm-exegesis/X86/RegisterAliasingTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/RegisterAliasingTest.cpp index e24c8b8140f71..418c97f2238f6 100644 --- a/llvm/unittests/tools/llvm-exegesis/X86/RegisterAliasingTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/X86/RegisterAliasingTest.cpp @@ -10,7 +10,6 @@ #include "RegisterAliasing.h" #include -#include #include "TestBase.h" #include "X86InstrInfo.h" diff --git a/llvm/unittests/tools/llvm-exegesis/X86/SchedClassResolutionTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/SchedClassResolutionTest.cpp index 97b9a1997e0d0..20a55d8c4f0de 100644 --- a/llvm/unittests/tools/llvm-exegesis/X86/SchedClassResolutionTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/X86/SchedClassResolutionTest.cpp @@ -9,7 +9,6 @@ #include "SchedClassResolution.h" #include -#include #include "TestBase.h" #include "llvm/MC/TargetRegistry.h" diff --git a/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp index 5953f4e6df04a..3e0b1f4b131c5 100644 --- a/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/X86/SnippetGeneratorTest.cpp @@ -16,8 +16,6 @@ #include "X86InstrInfo.h" #include "llvm/ADT/SetOperations.h" -#include - namespace llvm { namespace exegesis { namespace { diff --git a/llvm/unittests/tools/llvm-exegesis/X86/TargetTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/TargetTest.cpp index 5a21a6929f640..e37585666a09f 100644 --- a/llvm/unittests/tools/llvm-exegesis/X86/TargetTest.cpp +++ b/llvm/unittests/tools/llvm-exegesis/X86/TargetTest.cpp @@ -9,7 +9,6 @@ #include "Target.h" #include -#include #include "MCTargetDesc/X86MCTargetDesc.h" #include "MmapUtils.h" From ca72e8d0b70c6b793afa6d7e9589cf37c4fc1a62 Mon Sep 17 00:00:00 2001 From: Piotr Fusik Date: Wed, 12 Nov 2025 18:24:37 +0100 Subject: [PATCH 31/34] [RISCV] Expand multiplication by `2^N * 3/5/9 + 1` with SHL_ADD (#166933) --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 15 ++- llvm/test/CodeGen/RISCV/rv64zba.ll | 100 ++++++++++++++++++++ 2 files changed, 113 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 637f1943b8511..5a081d54d0726 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -16878,12 +16878,23 @@ static SDValue expandMulToShlAddShlAdd(SDNode *N, SelectionDAG &DAG, break; } - // 2/4/8 * 3/5/9 + 1 -> (shXadd (shYadd X, X), X) int ShX; if (int ShY = isShifted359(MulAmt - 1, ShX)) { assert(ShX != 0 && 
"MulAmt=4,6,10 handled before"); + // 2/4/8 * 3/5/9 + 1 -> (shXadd (shYadd X, X), X) if (ShX <= 3) return getShlAddShlAdd(N, DAG, ShX, ShY, /*AddX=*/true, Shift); + // 2^N * 3/5/9 + 1 -> (add (shYadd (shl X, N), (shl X, N)), X) + if (Shift == 0) { + SDLoc DL(N); + EVT VT = N->getValueType(0); + SDValue X = N->getOperand(0); + SDValue Shl = + DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(ShX, DL, VT)); + SDValue ShlAdd = DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Shl, + DAG.getTargetConstant(ShY, DL, VT), Shl); + return DAG.getNode(ISD::ADD, DL, VT, ShlAdd, X); + } } return SDValue(); } @@ -16944,7 +16955,7 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG, DAG.getTargetConstant(Shift, DL, VT), Shift1); } - // TODO: 2^(C1>3) * 3,5,9 +/- 1 + // TODO: 2^(C1>3) * 3/5/9 - 1 // 2^n + 2/4/8 + 1 -> (add (shl X, C1), (shXadd X, X)) if (MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) { diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll index e56c7b41d43ce..156599fb72877 100644 --- a/llvm/test/CodeGen/RISCV/rv64zba.ll +++ b/llvm/test/CodeGen/RISCV/rv64zba.ll @@ -944,6 +944,58 @@ define i64 @addmul146(i64 %a, i64 %b) { ret i64 %d } +define i64 @mul49(i64 %a) { +; RV64I-LABEL: mul49: +; RV64I: # %bb.0: +; RV64I-NEXT: li a1, 49 +; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBA-LABEL: mul49: +; RV64ZBA: # %bb.0: +; RV64ZBA-NEXT: slli a1, a0, 4 +; RV64ZBA-NEXT: sh1add a1, a1, a1 +; RV64ZBA-NEXT: add a0, a1, a0 +; RV64ZBA-NEXT: ret +; +; RV64XANDESPERF-LABEL: mul49: +; RV64XANDESPERF: # %bb.0: +; RV64XANDESPERF-NEXT: slli a1, a0, 4 +; RV64XANDESPERF-NEXT: nds.lea.h a1, a1, a1 +; RV64XANDESPERF-NEXT: add a0, a1, a0 +; RV64XANDESPERF-NEXT: ret + %c = mul i64 %a, 49 + ret i64 %c +} + +define i64 @zext_mul49(i32 signext %a) { +; RV64I-LABEL: zext_mul49: +; RV64I: # %bb.0: +; RV64I-NEXT: li a1, 49 +; RV64I-NEXT: slli a1, a1, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: mulhu a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBA-LABEL: zext_mul49: +; RV64ZBA: # %bb.0: +; RV64ZBA-NEXT: slli.uw a1, a0, 4 +; RV64ZBA-NEXT: sh1add a1, a1, a1 +; RV64ZBA-NEXT: add.uw a0, a0, a1 +; RV64ZBA-NEXT: ret +; +; RV64XANDESPERF-LABEL: zext_mul49: +; RV64XANDESPERF: # %bb.0: +; RV64XANDESPERF-NEXT: slli a1, a0, 32 +; RV64XANDESPERF-NEXT: srli a1, a1, 28 +; RV64XANDESPERF-NEXT: nds.lea.h a1, a1, a1 +; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a1, a0 +; RV64XANDESPERF-NEXT: ret + %b = zext i32 %a to i64 + %c = mul i64 %b, 49 + ret i64 %c +} + define i64 @mul50(i64 %a) { ; RV64I-LABEL: mul50: ; RV64I: # %bb.0: @@ -1044,6 +1096,54 @@ define i64 @addmul100(i64 %a, i64 %b) { ret i64 %d } +define i64 @mul145(i64 %a) { +; RV64I-LABEL: mul145: +; RV64I: # %bb.0: +; RV64I-NEXT: li a1, 145 +; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBA-LABEL: mul145: +; RV64ZBA: # %bb.0: +; RV64ZBA-NEXT: slli a1, a0, 4 +; RV64ZBA-NEXT: sh3add a1, a1, a1 +; RV64ZBA-NEXT: add a0, a1, a0 +; RV64ZBA-NEXT: ret +; +; RV64XANDESPERF-LABEL: mul145: +; RV64XANDESPERF: # %bb.0: +; RV64XANDESPERF-NEXT: slli a1, a0, 4 +; RV64XANDESPERF-NEXT: nds.lea.d a1, a1, a1 +; RV64XANDESPERF-NEXT: add a0, a1, a0 +; RV64XANDESPERF-NEXT: ret + %c = mul i64 %a, 145 + ret i64 %c +} + +define i64 @mul161(i64 %a) { +; RV64I-LABEL: mul161: +; RV64I: # %bb.0: +; RV64I-NEXT: li a1, 161 +; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: ret +; +; RV64ZBA-LABEL: mul161: +; RV64ZBA: # %bb.0: +; RV64ZBA-NEXT: slli a1, a0, 5 +; RV64ZBA-NEXT: sh2add a1, a1, a1 +; RV64ZBA-NEXT: add a0, a1, a0 +; RV64ZBA-NEXT: ret +; +; 
RV64XANDESPERF-LABEL: mul161: +; RV64XANDESPERF: # %bb.0: +; RV64XANDESPERF-NEXT: slli a1, a0, 5 +; RV64XANDESPERF-NEXT: nds.lea.w a1, a1, a1 +; RV64XANDESPERF-NEXT: add a0, a1, a0 +; RV64XANDESPERF-NEXT: ret + %c = mul i64 %a, 161 + ret i64 %c +} + define i64 @mul162(i64 %a) { ; RV64I-LABEL: mul162: ; RV64I: # %bb.0: From a3058d5ccd681d57076d6bbdc0881b1ee503230a Mon Sep 17 00:00:00 2001 From: Chenguang Wang Date: Wed, 12 Nov 2025 09:25:08 -0800 Subject: [PATCH 32/34] [libc++] Guard fileno() and isatty() usage correctly for Newlib. (#166668) Including unistd.h does not expose fileno() on Newlib. --- libcxx/src/print.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/libcxx/src/print.cpp b/libcxx/src/print.cpp index 3f2baa6dcc60b..82cf2afd052e2 100644 --- a/libcxx/src/print.cpp +++ b/libcxx/src/print.cpp @@ -22,6 +22,14 @@ # include #elif __has_include() # include +# if defined(_NEWLIB_VERSION) +# if defined(_POSIX_C_SOURCE) && __has_include() +# include +# define HAS_FILENO_AND_ISATTY +# endif +# else +# define HAS_FILENO_AND_ISATTY +# endif #endif _LIBCPP_BEGIN_NAMESPACE_STD @@ -56,7 +64,7 @@ __write_to_windows_console([[maybe_unused]] FILE* __stream, [[maybe_unused]] wst } # endif // _LIBCPP_HAS_WIDE_CHARACTERS -#elif __has_include() // !_LIBCPP_WIN32API +#elif defined(HAS_FILENO_AND_ISATTY) // !_LIBCPP_WIN32API _LIBCPP_EXPORTED_FROM_ABI bool __is_posix_terminal(FILE* __stream) { return isatty(fileno(__stream)); } #endif From c0ac0c47e43f4e35548272bbc6ed8a3e6ec004e6 Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Wed, 12 Nov 2025 12:25:49 -0500 Subject: [PATCH 33/34] [PowerPC] Add intrinsic support for xvrlw (#167349) --- llvm/include/llvm/IR/IntrinsicsPowerPC.td | 7 ++ llvm/lib/Target/PowerPC/PPCInstrAltivec.td | 1 + llvm/lib/Target/PowerPC/PPCInstrFuture.td | 10 +- llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll | 108 ++++++++++++++++++ llvm/test/CodeGen/PowerPC/vec_rotate_lw.ll | 22 ++++ llvm/test/CodeGen/PowerPC/vector-rotates.ll | 41 +++++++ 6 files changed, 187 insertions(+), 2 deletions(-) create mode 100644 llvm/test/CodeGen/PowerPC/vec_rotate_lw.ll diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td index 636e88898a55e..3907e864bed1e 100644 --- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td +++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td @@ -387,6 +387,12 @@ class PowerPC_VSX_Sca_DDD_Intrinsic [llvm_double_ty], [llvm_double_ty, llvm_double_ty], [IntrNoMem]>; +/// PowerPC_VSX_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32 +/// vectors and returns one. These intrinsics have no side effects. +class PowerPC_VSX_WWW_Intrinsic + : PowerPC_VSX_Intrinsic; //===----------------------------------------------------------------------===// // PowerPC Altivec Intrinsic Definitions. @@ -1214,6 +1220,7 @@ def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">; def int_ppc_altivec_vrlb : PowerPC_Vec_BBB_Intrinsic<"vrlb">; def int_ppc_altivec_vrlh : PowerPC_Vec_HHH_Intrinsic<"vrlh">; def int_ppc_altivec_vrlw : PowerPC_Vec_WWW_Intrinsic<"vrlw">; +def int_ppc_vsx_xvrlw : PowerPC_VSX_WWW_Intrinsic<"xvrlw">; def int_ppc_altivec_vrld : PowerPC_Vec_DDD_Intrinsic<"vrld">; let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.". 
diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td index 23d6d8853800f..fe1eea2b33615 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td @@ -889,6 +889,7 @@ def : Pat<(v16i8 (rotl v16i8:$vA, v16i8:$vB)), (v16i8 (VRLB v16i8:$vA, v16i8:$vB))>; def : Pat<(v8i16 (rotl v8i16:$vA, v8i16:$vB)), (v8i16 (VRLH v8i16:$vA, v8i16:$vB))>; +let Predicates = [IsNotISAFuture] in def : Pat<(v4i32 (rotl v4i32:$vA, v4i32:$vB)), (v4i32 (VRLW v4i32:$vA, v4i32:$vB))>; diff --git a/llvm/lib/Target/PowerPC/PPCInstrFuture.td b/llvm/lib/Target/PowerPC/PPCInstrFuture.td index dfbbba0116f25..e417ffe6d3677 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrFuture.td +++ b/llvm/lib/Target/PowerPC/PPCInstrFuture.td @@ -420,8 +420,10 @@ let Predicates = [HasVSX, IsISAFuture] in { : VXForm_VRTAB5<323, (outs vrrc:$VRT), (ins vrrc:$VRA, vrrc:$VRB), "vucmprlh $VRT, $VRA, $VRB", []>; - def XVRLW: XX3Form_XTAB6<60, 184, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), - "xvrlw $XT, $XA, $XB", []>; + def XVRLW : XX3Form_XTAB6<60, 184, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvrlw $XT, $XA, $XB", + [(set v4i32:$XT, (int_ppc_vsx_xvrlw v4i32:$XA, + v4i32:$XB))]>; // AES Acceleration Instructions def XXAESENCP : XX3Form_XTABp5_M2<194, (outs vsrprc:$XTp), @@ -550,6 +552,10 @@ def : Pat<(int_ppc_vsx_stxvprl v256i1:$XTp, addr:$RA, i64:$RB), (STXVPRL $XTp, $RA, $RB)>; def : Pat<(int_ppc_vsx_stxvprll v256i1:$XTp, addr:$RA, i64:$RB), (STXVPRLL $XTp, $RA, $RB)>; +let Predicates = [HasVSX, IsISAFuture] in { + def : Pat<(v4i32 (rotl v4i32:$vA, v4i32:$vB)), (v4i32 (XVRLW v4i32:$vA, + v4i32:$vB))>; +} //---------------------------- Instruction aliases ---------------------------// // Predicate combinations available: diff --git a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll index 12078adbbc2f3..383dcdb06c331 100644 --- a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll +++ b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll @@ -2,6 +2,7 @@ ; RUN: llc < %s -mtriple=ppc32-- | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_32 ; RUN: llc < %s -mtriple=ppc32-- -mcpu=ppc64 | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_64 ; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s --check-prefixes=CHECK,CHECK64 +; RUN: llc < %s -mcpu=future -mtriple=powerpc64le-- | FileCheck %s --check-prefix=FUTURE declare i8 @llvm.fshl.i8(i8, i8, i8) declare i16 @llvm.fshl.i16(i16, i16, i16) @@ -24,6 +25,13 @@ define i8 @rotl_i8_const_shift(i8 %x) { ; CHECK-NEXT: rlwimi 4, 3, 3, 0, 28 ; CHECK-NEXT: mr 3, 4 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotl_i8_const_shift: +; FUTURE: # %bb.0: +; FUTURE-NEXT: rotlwi 4, 3, 27 +; FUTURE-NEXT: rlwimi 4, 3, 3, 0, 28 +; FUTURE-NEXT: mr 3, 4 +; FUTURE-NEXT: blr %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3) ret i8 %f } @@ -43,6 +51,11 @@ define i64 @rotl_i64_const_shift(i64 %x) { ; CHECK64: # %bb.0: ; CHECK64-NEXT: rotldi 3, 3, 3 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotl_i64_const_shift: +; FUTURE: # %bb.0: +; FUTURE-NEXT: rotldi 3, 3, 3 +; FUTURE-NEXT: blr %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3) ret i64 %f } @@ -60,6 +73,17 @@ define i16 @rotl_i16(i16 %x, i16 %z) { ; CHECK-NEXT: srw 4, 5, 4 ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotl_i16: +; FUTURE: # %bb.0: +; FUTURE-NEXT: clrlwi 6, 4, 28 +; FUTURE-NEXT: neg 4, 4 +; FUTURE-NEXT: clrlwi 5, 3, 16 +; FUTURE-NEXT: clrlwi 4, 4, 28 +; FUTURE-NEXT: slw 3, 3, 6 +; FUTURE-NEXT: srw 4, 5, 4 +; FUTURE-NEXT: 
or 3, 3, 4 +; FUTURE-NEXT: blr %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z) ret i16 %f } @@ -69,6 +93,11 @@ define i32 @rotl_i32(i32 %x, i32 %z) { ; CHECK: # %bb.0: ; CHECK-NEXT: rotlw 3, 3, 4 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotl_i32: +; FUTURE: # %bb.0: +; FUTURE-NEXT: rotlw 3, 3, 4 +; FUTURE-NEXT: blr %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z) ret i32 %f } @@ -100,6 +129,11 @@ define i64 @rotl_i64(i64 %x, i64 %z) { ; CHECK64: # %bb.0: ; CHECK64-NEXT: rotld 3, 3, 4 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotl_i64: +; FUTURE: # %bb.0: +; FUTURE-NEXT: rotld 3, 3, 4 +; FUTURE-NEXT: blr %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z) ret i64 %f } @@ -124,6 +158,11 @@ define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) { ; CHECK64: # %bb.0: ; CHECK64-NEXT: vrlw 2, 2, 3 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotl_v4i32: +; FUTURE: # %bb.0: +; FUTURE-NEXT: xvrlw 34, 34, 35 +; FUTURE-NEXT: blr %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z) ret <4 x i32> %f } @@ -150,6 +189,12 @@ define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) { ; CHECK64-NEXT: vspltisw 3, 3 ; CHECK64-NEXT: vrlw 2, 2, 3 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotl_v4i32_const_shift: +; FUTURE: # %bb.0: +; FUTURE-NEXT: vspltisw 3, 3 +; FUTURE-NEXT: xvrlw 34, 34, 35 +; FUTURE-NEXT: blr %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> ) ret <4 x i32> %f } @@ -163,6 +208,13 @@ define i8 @rotr_i8_const_shift(i8 %x) { ; CHECK-NEXT: rlwimi 4, 3, 5, 0, 26 ; CHECK-NEXT: mr 3, 4 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotr_i8_const_shift: +; FUTURE: # %bb.0: +; FUTURE-NEXT: rotlwi 4, 3, 29 +; FUTURE-NEXT: rlwimi 4, 3, 5, 0, 26 +; FUTURE-NEXT: mr 3, 4 +; FUTURE-NEXT: blr %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3) ret i8 %f } @@ -172,6 +224,11 @@ define i32 @rotr_i32_const_shift(i32 %x) { ; CHECK: # %bb.0: ; CHECK-NEXT: rotlwi 3, 3, 29 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotr_i32_const_shift: +; FUTURE: # %bb.0: +; FUTURE-NEXT: rotlwi 3, 3, 29 +; FUTURE-NEXT: blr %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3) ret i32 %f } @@ -189,6 +246,17 @@ define i16 @rotr_i16(i16 %x, i16 %z) { ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: or 3, 5, 3 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotr_i16: +; FUTURE: # %bb.0: +; FUTURE-NEXT: clrlwi 6, 4, 28 +; FUTURE-NEXT: neg 4, 4 +; FUTURE-NEXT: clrlwi 5, 3, 16 +; FUTURE-NEXT: clrlwi 4, 4, 28 +; FUTURE-NEXT: srw 5, 5, 6 +; FUTURE-NEXT: slw 3, 3, 4 +; FUTURE-NEXT: or 3, 5, 3 +; FUTURE-NEXT: blr %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z) ret i16 %f } @@ -199,6 +267,12 @@ define i32 @rotr_i32(i32 %x, i32 %z) { ; CHECK-NEXT: neg 4, 4 ; CHECK-NEXT: rotlw 3, 3, 4 ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotr_i32: +; FUTURE: # %bb.0: +; FUTURE-NEXT: neg 4, 4 +; FUTURE-NEXT: rotlw 3, 3, 4 +; FUTURE-NEXT: blr %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z) ret i32 %f } @@ -231,6 +305,12 @@ define i64 @rotr_i64(i64 %x, i64 %z) { ; CHECK64-NEXT: neg 4, 4 ; CHECK64-NEXT: rotld 3, 3, 4 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotr_i64: +; FUTURE: # %bb.0: +; FUTURE-NEXT: neg 4, 4 +; FUTURE-NEXT: rotld 3, 3, 4 +; FUTURE-NEXT: blr %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z) ret i64 %f } @@ -263,6 +343,12 @@ define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) { ; CHECK64-NEXT: vsubuwm 3, 4, 3 ; CHECK64-NEXT: vrlw 2, 2, 3 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotr_v4i32: +; FUTURE: # %bb.0: +; FUTURE-NEXT: vnegw 3, 3 +; FUTURE-NEXT: xvrlw 34, 34, 35 +; FUTURE-NEXT: blr %f = call <4 x i32> 
@llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z) ret <4 x i32> %f } @@ -293,6 +379,12 @@ define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) { ; CHECK64-NEXT: vsubuwm 3, 4, 3 ; CHECK64-NEXT: vrlw 2, 2, 3 ; CHECK64-NEXT: blr +; +; FUTURE-LABEL: rotr_v4i32_const_shift: +; FUTURE: # %bb.0: +; FUTURE-NEXT: xxspltiw 0, 29 +; FUTURE-NEXT: xvrlw 34, 34, 0 +; FUTURE-NEXT: blr %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> ) ret <4 x i32> %f } @@ -301,6 +393,10 @@ define i32 @rotl_i32_shift_by_bitwidth(i32 %x) { ; CHECK-LABEL: rotl_i32_shift_by_bitwidth: ; CHECK: # %bb.0: ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotl_i32_shift_by_bitwidth: +; FUTURE: # %bb.0: +; FUTURE-NEXT: blr %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32) ret i32 %f } @@ -309,6 +405,10 @@ define i32 @rotr_i32_shift_by_bitwidth(i32 %x) { ; CHECK-LABEL: rotr_i32_shift_by_bitwidth: ; CHECK: # %bb.0: ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotr_i32_shift_by_bitwidth: +; FUTURE: # %bb.0: +; FUTURE-NEXT: blr %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32) ret i32 %f } @@ -317,6 +417,10 @@ define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) { ; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth: ; CHECK: # %bb.0: ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotl_v4i32_shift_by_bitwidth: +; FUTURE: # %bb.0: +; FUTURE-NEXT: blr %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> ) ret <4 x i32> %f } @@ -325,6 +429,10 @@ define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) { ; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth: ; CHECK: # %bb.0: ; CHECK-NEXT: blr +; +; FUTURE-LABEL: rotr_v4i32_shift_by_bitwidth: +; FUTURE: # %bb.0: +; FUTURE-NEXT: blr %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> ) ret <4 x i32> %f } diff --git a/llvm/test/CodeGen/PowerPC/vec_rotate_lw.ll b/llvm/test/CodeGen/PowerPC/vec_rotate_lw.ll new file mode 100644 index 0000000000000..03b1456f0c036 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/vec_rotate_lw.ll @@ -0,0 +1,22 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -mcpu=future -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \ +; RUN: FileCheck %s + +; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \ +; RUN: -mcpu=future -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \ +; RUN: FileCheck %s + +; RUN: llc -verify-machineinstrs -mtriple=powerpc-unknown-aix \ +; RUN: -mcpu=future -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \ +; RUN: FileCheck %s + +define <4 x i32> @testVRLWMI(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: testVRLWMI: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvrlw v2, v2, v3 +; CHECK-NEXT: blr +entry: + %0 = tail call <4 x i32> @llvm.ppc.vsx.xvrlw(<4 x i32> %a, <4 x i32> %b) + ret <4 x i32> %0 +} diff --git a/llvm/test/CodeGen/PowerPC/vector-rotates.ll b/llvm/test/CodeGen/PowerPC/vector-rotates.ll index 2de8804ba8e24..38e273634da2a 100644 --- a/llvm/test/CodeGen/PowerPC/vector-rotates.ll +++ b/llvm/test/CodeGen/PowerPC/vector-rotates.ll @@ -5,6 +5,9 @@ ; RUN: llc -O3 -mtriple=powerpc64-unknown-unknown -ppc-asm-full-reg-names \ ; RUN: -verify-machineinstrs -mcpu=pwr7 < %s | \ ; RUN: FileCheck --check-prefix=CHECK-P7 %s +; RUN: llc -O3 -mtriple=powerpc64-unknown-unknown -ppc-asm-full-reg-names \ +; RUN: -verify-machineinstrs -mcpu=future < %s | \ +; RUN: FileCheck --check-prefix=CHECK-FUTURE %s define <16 x i8> @rotl_v16i8(<16 x i8> %a) { 
; CHECK-P8-LABEL: rotl_v16i8:
@@ -23,6 +26,14 @@ define <16 x i8> @rotl_v16i8(<16 x i8> %a) {
; CHECK-P7-NEXT: lxvw4x vs35, 0, r3
; CHECK-P7-NEXT: vrlb v2, v2, v3
; CHECK-P7-NEXT: blr
+;
+; CHECK-FUTURE-LABEL: rotl_v16i8:
+; CHECK-FUTURE: # %bb.0: # %entry
+; CHECK-FUTURE-NEXT: addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-FUTURE-NEXT: addi r3, r3, .LCPI0_0@toc@l
+; CHECK-FUTURE-NEXT: lxv vs35, 0(r3)
+; CHECK-FUTURE-NEXT: vrlb v2, v2, v3
+; CHECK-FUTURE-NEXT: blr
 entry:
   %b = shl <16 x i8> %a, 
   %c = lshr <16 x i8> %a, 
@@ -47,6 +58,14 @@ define <8 x i16> @rotl_v8i16(<8 x i16> %a) {
; CHECK-P7-NEXT: lxvw4x vs35, 0, r3
; CHECK-P7-NEXT: vrlh v2, v2, v3
; CHECK-P7-NEXT: blr
+;
+; CHECK-FUTURE-LABEL: rotl_v8i16:
+; CHECK-FUTURE: # %bb.0: # %entry
+; CHECK-FUTURE-NEXT: addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-FUTURE-NEXT: addi r3, r3, .LCPI1_0@toc@l
+; CHECK-FUTURE-NEXT: lxv vs35, 0(r3)
+; CHECK-FUTURE-NEXT: vrlh v2, v2, v3
+; CHECK-FUTURE-NEXT: blr
 entry:
   %b = shl <8 x i16> %a, 
   %c = lshr <8 x i16> %a, 
@@ -71,6 +90,14 @@ define <4 x i32> @rotl_v4i32_0(<4 x i32> %a) {
; CHECK-P7-NEXT: lxvw4x vs35, 0, r3
; CHECK-P7-NEXT: vrlw v2, v2, v3
; CHECK-P7-NEXT: blr
+;
+; CHECK-FUTURE-LABEL: rotl_v4i32_0:
+; CHECK-FUTURE: # %bb.0: # %entry
+; CHECK-FUTURE-NEXT: addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-FUTURE-NEXT: addi r3, r3, .LCPI2_0@toc@l
+; CHECK-FUTURE-NEXT: lxv vs0, 0(r3)
+; CHECK-FUTURE-NEXT: xvrlw vs34, vs34, vs0
+; CHECK-FUTURE-NEXT: blr
 entry:
   %b = shl <4 x i32> %a, 
   %c = lshr <4 x i32> %a, 
@@ -94,6 +121,12 @@ define <4 x i32> @rotl_v4i32_1(<4 x i32> %a) {
; CHECK-P7-NEXT: vsubuwm v3, v4, v3
; CHECK-P7-NEXT: vrlw v2, v2, v3
; CHECK-P7-NEXT: blr
+;
+; CHECK-FUTURE-LABEL: rotl_v4i32_1:
+; CHECK-FUTURE: # %bb.0: # %entry
+; CHECK-FUTURE-NEXT: xxspltiw vs0, 23
+; CHECK-FUTURE-NEXT: xvrlw vs34, vs34, vs0
+; CHECK-FUTURE-NEXT: blr
 entry:
   %b = shl <4 x i32> %a, 
   %c = lshr <4 x i32> %a, 
@@ -124,6 +157,14 @@ define <2 x i64> @rotl_v2i64(<2 x i64> %a) {
; CHECK-P7-NEXT: addi r3, r1, -16
; CHECK-P7-NEXT: lxvd2x vs34, 0, r3
; CHECK-P7-NEXT: blr
+;
+; CHECK-FUTURE-LABEL: rotl_v2i64:
+; CHECK-FUTURE: # %bb.0: # %entry
+; CHECK-FUTURE-NEXT: addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-FUTURE-NEXT: addi r3, r3, .LCPI4_0@toc@l
+; CHECK-FUTURE-NEXT: lxv vs35, 0(r3)
+; CHECK-FUTURE-NEXT: vrld v2, v2, v3
+; CHECK-FUTURE-NEXT: blr
 entry:
   %b = shl <2 x i64> %a, 
   %c = lshr <2 x i64> %a, 

From 19043b2d507c080d2e2cc0950043d3f2d5386c4d Mon Sep 17 00:00:00 2001
From: Krzysztof Parzyszek
Date: Wed, 12 Nov 2025 11:33:23 -0600
Subject: [PATCH 34/34] [OpenMP] Report errors when construct decomposition
 fails (#167568)

Store the list of errors in the ConstructDecomposition class in addition
to the broken-up output. This is not used in flang yet, because the
splitting happens at a time when diagnostic messages can no longer be
emitted. Use unit tests to test this instead.
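
For illustration, a minimal sketch of how a consumer of this class could
surface the recorded errors. The AnyVersion, Helper, Clauses, and
OMPD_parallel_for names are assumptions borrowed from the unit tests
below; only ConstructDecomposition, output, errors, and ErrorCode come
from this patch:

  // Decompose the compound construct. Any clause that cannot be applied
  // to a leaf construct is recorded in Dec.errors rather than being
  // diagnosed on the spot.
  omp::ConstructDecomposition Dec(AnyVersion, Helper, OMPD_parallel_for,
                                  Clauses);
  for (const auto &Err : Dec.errors) {
    // Err.first is the rejected clause node, Err.second the reason,
    // e.g. ErrorCode::NoLeafAllowing. A frontend could map this pair
    // to a proper diagnostic at the clause's source location.
    llvm::errs() << "rejected clause, reason code "
                 << static_cast<int>(Err.second) << '\n';
  }

Keeping the errors in the class instead of emitting them directly leaves
the decomposition independent of any diagnostics engine, which is what
makes it usable at a point where flang can no longer emit messages.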
--- .../Frontend/OpenMP/ConstructDecompositionT.h | 104 +++++++++++------ .../Frontend/OpenMPDecompositionTest.cpp | 105 +++++++++++++++++- 2 files changed, 173 insertions(+), 36 deletions(-) diff --git a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h index c8eebbf42a68e..36b49e69650d8 100644 --- a/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h +++ b/llvm/include/llvm/Frontend/OpenMP/ConstructDecompositionT.h @@ -68,6 +68,13 @@ find_unique(Container &&container, Predicate &&pred) { namespace tomp { +enum struct ErrorCode : int { + NoLeafAllowing, // No leaf that allows this clause + NoLeafPrivatizing, // No leaf that has a privatizing clause + InvalidDirNameMod, // Invalid directive name modifier + RedModNotApplied, // Reduction modifier not applied +}; + // ClauseType: Either an instance of ClauseT, or a type derived from ClauseT. // This is the clause representation in the code using this infrastructure. // @@ -114,10 +121,16 @@ struct ConstructDecompositionT { } tomp::ListT> output; + llvm::SmallVector> errors; private: bool split(); + bool error(const ClauseTy *node, ErrorCode ec) { + errors.emplace_back(node, ec); + return false; + } + struct LeafReprInternal { llvm::omp::Directive id = llvm::omp::Directive::OMPD_unknown; tomp::type::ListT clauses; @@ -456,10 +469,9 @@ bool ConstructDecompositionT::applyClause(Specific &&specific, // S Some clauses are permitted only on a single leaf construct of the // S combined or composite construct, in which case the effect is as if // S the clause is applied to that specific construct. (p339, 31-33) - if (applyToUnique(node)) - return true; - - return false; + if (!applyToUnique(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // --- Specific clauses ----------------------------------------------- @@ -487,7 +499,9 @@ bool ConstructDecompositionT::applyClause( }); }); - return applied; + if (!applied) + return error(node, ErrorCode::NoLeafPrivatizing); + return true; } // COLLAPSE @@ -501,7 +515,9 @@ template bool ConstructDecompositionT::applyClause( const tomp::clause::CollapseT &clause, const ClauseTy *node) { - return applyToInnermost(node); + if (!applyToInnermost(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // DEFAULT @@ -516,7 +532,9 @@ bool ConstructDecompositionT::applyClause( const tomp::clause::DefaultT &clause, const ClauseTy *node) { // [5.2:340:31] - return applyToAll(node); + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // FIRSTPRIVATE @@ -644,7 +662,9 @@ bool ConstructDecompositionT::applyClause( applied = true; } - return applied; + if (!applied) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // IF @@ -679,10 +699,12 @@ bool ConstructDecompositionT::applyClause( hasDir->clauses.push_back(unmodified); return true; } - return false; + return error(node, ErrorCode::InvalidDirNameMod); } - return applyToAll(node); + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // LASTPRIVATE @@ -708,12 +730,9 @@ template bool ConstructDecompositionT::applyClause( const tomp::clause::LastprivateT &clause, const ClauseTy *node) { - bool applied = false; - // [5.2:340:21] - applied = applyToAll(node); - if (!applied) - return false; + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); auto inFirstprivate = [&](const ObjectTy &object) { if (ClauseSet *set = findClausesWith(object)) { @@ 
-739,7 +758,6 @@ bool ConstructDecompositionT::applyClause( llvm::omp::Clause::OMPC_shared, tomp::clause::SharedT{/*List=*/sharedObjects}); dirParallel->clauses.push_back(shared); - applied = true; } // [5.2:340:24] @@ -748,7 +766,6 @@ bool ConstructDecompositionT::applyClause( llvm::omp::Clause::OMPC_shared, tomp::clause::SharedT{/*List=*/sharedObjects}); dirTeams->clauses.push_back(shared); - applied = true; } } @@ -772,11 +789,10 @@ bool ConstructDecompositionT::applyClause( /*Mapper=*/std::nullopt, /*Iterator=*/std::nullopt, /*LocatorList=*/std::move(tofrom)}}); dirTarget->clauses.push_back(map); - applied = true; } } - return applied; + return true; } // LINEAR @@ -802,7 +818,7 @@ bool ConstructDecompositionT::applyClause( const ClauseTy *node) { // [5.2:341:15.1] if (!applyToInnermost(node)) - return false; + return error(node, ErrorCode::NoLeafAllowing); // [5.2:341:15.2], [5.2:341:19] auto dirSimd = findDirective(llvm::omp::Directive::OMPD_simd); @@ -847,7 +863,9 @@ template bool ConstructDecompositionT::applyClause( const tomp::clause::NowaitT &clause, const ClauseTy *node) { - return applyToOutermost(node); + if (!applyToOutermost(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // OMPX_ATTRIBUTE @@ -855,8 +873,9 @@ template bool ConstructDecompositionT::applyClause( const tomp::clause::OmpxAttributeT &clause, const ClauseTy *node) { - // ERROR: no leaf that allows clause - return applyToAll(node); + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // OMPX_BARE @@ -864,7 +883,9 @@ template bool ConstructDecompositionT::applyClause( const tomp::clause::OmpxBareT &clause, const ClauseTy *node) { - return applyToOutermost(node); + if (!applyToOutermost(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // ORDER @@ -879,7 +900,9 @@ bool ConstructDecompositionT::applyClause( const tomp::clause::OrderT &clause, const ClauseTy *node) { // [5.2:340:31] - return applyToAll(node); + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // PRIVATE @@ -894,7 +917,9 @@ template bool ConstructDecompositionT::applyClause( const tomp::clause::PrivateT &clause, const ClauseTy *node) { - return applyToInnermost(node); + if (!applyToInnermost(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // REDUCTION @@ -996,31 +1021,37 @@ bool ConstructDecompositionT::applyClause( /*List=*/objects}}); ReductionModifier effective = modifier.value_or(ReductionModifier::Default); - bool effectiveApplied = false; + bool modifierApplied = false; + bool allowingLeaf = false; // Walk over the leaf constructs starting from the innermost, and apply // the clause as required by the spec. for (auto &leaf : llvm::reverse(leafs)) { if (!llvm::omp::isAllowedClauseForDirective(leaf.id, node->id, version)) continue; + // Found a leaf that allows this clause. Keep track of this for better + // error reporting. + allowingLeaf = true; if (!applyToParallel && &leaf == dirParallel) continue; if (!applyToTeams && &leaf == dirTeams) continue; // Some form of the clause will be applied past this point. - if (isValidModifier(leaf.id, effective, effectiveApplied)) { + if (isValidModifier(leaf.id, effective, modifierApplied)) { // Apply clause with modifier. leaf.clauses.push_back(node); - effectiveApplied = true; + modifierApplied = true; } else { // Apply clause without modifier. leaf.clauses.push_back(unmodified); } // The modifier must be applied to some construct. 
- applied = effectiveApplied; + applied = modifierApplied; } + if (!allowingLeaf) + return error(node, ErrorCode::NoLeafAllowing); if (!applied) - return false; + return error(node, ErrorCode::RedModNotApplied); tomp::ObjectListT sharedObjects; llvm::transform(objects, std::back_inserter(sharedObjects), @@ -1067,11 +1098,10 @@ bool ConstructDecompositionT::applyClause( /*LocatorList=*/std::move(tofrom)}}); dirTarget->clauses.push_back(map); - applied = true; } } - return applied; + return true; } // SHARED @@ -1086,7 +1116,9 @@ bool ConstructDecompositionT::applyClause( const tomp::clause::SharedT &clause, const ClauseTy *node) { // [5.2:340:31] - return applyToAll(node); + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // THREAD_LIMIT @@ -1101,7 +1133,9 @@ bool ConstructDecompositionT::applyClause( const tomp::clause::ThreadLimitT &clause, const ClauseTy *node) { // [5.2:340:31] - return applyToAll(node); + if (!applyToAll(node)) + return error(node, ErrorCode::NoLeafAllowing); + return true; } // --- Splitting ------------------------------------------------------ diff --git a/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp b/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp index e0c6b3904310c..23c3c4d5d192c 100644 --- a/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp +++ b/llvm/unittests/Frontend/OpenMPDecompositionTest.cpp @@ -277,16 +277,42 @@ struct StringifyClause { std::string Str; }; +std::string stringify(const omp::Clause &C) { // + return StringifyClause(C).Str; +} + std::string stringify(const omp::DirectiveWithClauses &DWC) { std::stringstream Stream; Stream << getOpenMPDirectiveName(DWC.id, llvm::omp::FallbackVersion).str(); for (const omp::Clause &C : DWC.clauses) - Stream << ' ' << StringifyClause(C).Str; + Stream << ' ' << stringify(C); return Stream.str(); } +std::string stringify(tomp::ErrorCode E) { + switch (E) { + case tomp::ErrorCode::NoLeafAllowing: + return "no leaf that allows this clause"; + case tomp::ErrorCode::NoLeafPrivatizing: + return "no leaf with a privatizing clause"; + case tomp::ErrorCode::InvalidDirNameMod: + return "invalid directive name modifier"; + case tomp::ErrorCode::RedModNotApplied: + return "the reduction modifier cannot be applied"; + } + return "unrecognized error code " + std::to_string(llvm::to_underlying(E)); +} + +std::string stringify(std::pair &ER) { + std::stringstream Stream; + + Stream << "error while applying '" << stringify(*ER.first) + << "': " << stringify(ER.second); + return Stream.str(); +} + // --- Tests ---------------------------------------------------------- namespace red { @@ -1109,4 +1135,81 @@ TEST_F(OpenMPDecompositionTest, Misc1) { std::string Dir0 = stringify(Dec.output[0]); ASSERT_EQ(Dir0, "simd linear(, , (x)) lastprivate(, (x))"); } + +// --- Failure/error reporting tests + +TEST_F(OpenMPDecompositionTest, Error1) { + // "parallel for at(compilation)" is invalid because the "at" clause + // does not apply to either "parallel" or "for". 
+ + omp::List Clauses{ + {OMPC_at, omp::clause::At{omp::clause::At::ActionTime::Compilation}}, + }; + + omp::ConstructDecomposition Dec(AnyVersion, Helper, OMPD_parallel_for, + Clauses); + ASSERT_EQ(Dec.errors.size(), 1u); + std::string Err0 = stringify(Dec.errors[0]); + ASSERT_EQ(Err0, + "error while applying 'at(0)': no leaf that allows this clause"); +} + +TEST_F(OpenMPDecompositionTest, Error2) { + // "parallel loop allocate(x) private(x)" is invalid because "allocate" + // can only be applied to "parallel", while "private" is applied to "loop". + // This violates the requirement that the leaf with an "allocate" also has + // a privatizing clause. + + omp::Object x{"x"}; + + omp::List Clauses{ + {OMPC_allocate, omp::clause::Allocate{{std::nullopt, std::nullopt, {x}}}}, + {OMPC_private, omp::clause::Private{{x}}}, + }; + + omp::ConstructDecomposition Dec(AnyVersion, Helper, OMPD_parallel_loop, + Clauses); + ASSERT_EQ(Dec.errors.size(), 1u); + std::string Err0 = stringify(Dec.errors[0]); + ASSERT_EQ(Err0, "error while applying 'allocate(, , (x))': no leaf with a " + "privatizing clause"); +} + +TEST_F(OpenMPDecompositionTest, Error3) { + // "parallel for if(target: e)" is invalid because the "target" directive- + // name-modifier does not refer to a constituent directive. + + omp::ExprTy e; + + omp::List Clauses{ + {OMPC_if, omp::clause::If{{llvm::omp::Directive::OMPD_target, e}}}, + }; + + omp::ConstructDecomposition Dec(AnyVersion, Helper, OMPD_parallel_for, + Clauses); + ASSERT_EQ(Dec.errors.size(), 1u); + std::string Err0 = stringify(Dec.errors[0]); + ASSERT_EQ(Err0, "error while applying 'if(target, expr)': invalid directive " + "name modifier"); +} + +TEST_F(OpenMPDecompositionTest, Error4) { + // "masked taskloop reduction(+, task: x)" is invalid because the "task" + // modifier can only be applied to "parallel" or a worksharing directive. + + omp::Object x{"x"}; + auto Add = red::makeOp(omp::clause::DefinedOperator::IntrinsicOperator::Add); + auto TaskMod = omp::clause::Reduction::ReductionModifier::Task; + + omp::List Clauses{ + {OMPC_reduction, omp::clause::Reduction{{TaskMod, {Add}, {x}}}}, + }; + + omp::ConstructDecomposition Dec(AnyVersion, Helper, OMPD_masked_taskloop, + Clauses); + ASSERT_EQ(Dec.errors.size(), 1u); + std::string Err0 = stringify(Dec.errors[0]); + ASSERT_EQ(Err0, "error while applying 'reduction(2, (3), (x))': the " + "reduction modifier cannot be applied"); +} } // namespace