From a464e3856e36cc8d887aafdf382876c8675c03e8 Mon Sep 17 00:00:00 2001 From: Florian Hahn Date: Sun, 16 Nov 2025 12:15:05 +0000 Subject: [PATCH 01/17] [LV] Check debug location for more recipes in vplan-printing.ll. Extend test to check printing of debug locations to cover a range of wide and replicating recipes. Currently those do not print the debug metadata. --- .../LoopVectorize/vplan-printing.ll | 54 +++++++++++++------ 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll index 91e0037d12c61..84c6cc2675a80 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll @@ -329,8 +329,8 @@ for.end: ret void } -define void @debug_loc_vpinstruction(ptr nocapture %asd, ptr nocapture %bsd) !dbg !5 { -; CHECK-LABEL: Checking a loop in 'debug_loc_vpinstruction' +define void @recipe_debug_loc_location(ptr nocapture %src) !dbg !5 { +; CHECK-LABEL: Checking a loop in 'recipe_debug_loc_location' ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { ; CHECK-NEXT: Live-in vp<[[VF:%.]]> = VF ; CHECK-NEXT: Live-in vp<[[VFxUF:%.]]> = VF * UF @@ -347,14 +347,20 @@ define void @debug_loc_vpinstruction(ptr nocapture %asd, ptr nocapture %bsd) !db ; CHECK-NEXT: vector.body: ; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]> ; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>, vp<[[VF]]> -; CHECK-NEXT: CLONE ir<%isd> = getelementptr inbounds ir<%asd>, vp<[[STEPS]]> +; CHECK-NEXT: CLONE ir<%isd> = getelementptr inbounds ir<%src>, vp<[[STEPS]]> +; CHECK-NOT: !dbg ; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer ir<%isd> +; CHECK-NOT: !dbg ; CHECK-NEXT: WIDEN ir<%lsd> = load vp<[[VEC_PTR]]> +; CHECK-NOT: !dbg ; CHECK-NEXT: WIDEN ir<%psd> = add nuw nsw ir<%lsd>, ir<23> +; CHECK-NOT: !dbg ; CHECK-NEXT: WIDEN ir<%cmp1> = icmp slt ir<%lsd>, ir<100> -; CHECK-NEXT: EMIT vp<[[NOT1:%.+]]> = not ir<%cmp1>, !dbg /tmp/s.c:5:3 +; CHECK-NOT: !dbg +; CHECK-NEXT: EMIT vp<[[NOT1:%.+]]> = not ir<%cmp1>, !dbg /tmp/s.c:9:3 ; CHECK-NEXT: WIDEN ir<%cmp2> = icmp sge ir<%lsd>, ir<200> -; CHECK-NEXT: EMIT vp<[[SEL1:%.+]]> = logical-and vp<[[NOT1]]>, ir<%cmp2>, !dbg /tmp/s.c:5:21 +; CHECK-NOT: !dbg +; CHECK-NEXT: EMIT vp<[[SEL1:%.+]]> = logical-and vp<[[NOT1]]>, ir<%cmp2>, !dbg /tmp/s.c:11:3 ; CHECK-NEXT: EMIT vp<[[OR1:%.+]]> = or vp<[[SEL1]]>, ir<%cmp1> ; CHECK-NEXT: Successor(s): pred.sdiv ; CHECK-EMPTY: @@ -365,18 +371,23 @@ define void @debug_loc_vpinstruction(ptr nocapture %asd, ptr nocapture %bsd) !db ; CHECK-EMPTY: ; CHECK-NEXT: pred.sdiv.if: ; CHECK-NEXT: REPLICATE ir<%sd1> = sdiv ir<%psd>, ir<%lsd> (S->V) +; CHECK-NOT: !dbg ; CHECK-NEXT: Successor(s): pred.sdiv.continue ; CHECK-EMPTY: ; CHECK-NEXT: pred.sdiv.continue: ; CHECK-NEXT: PHI-PREDICATED-INSTRUCTION vp<[[PHI:%.+]]> = ir<%sd1> +; CHECK-NOT: !dbg ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): if.then.0 ; CHECK-EMPTY: ; CHECK-NEXT: if.then.0: ; CHECK-NEXT: BLEND ir<%ysd.0> = ir<%psd> vp<[[PHI]]>/vp<[[OR1]]> +; CHECK-NOT: !dbg ; CHECK-NEXT: vp<[[VEC_PTR2:%.+]]> = vector-pointer ir<%isd> +; CHECK-NOT: !dbg ; CHECK-NEXT: WIDEN store vp<[[VEC_PTR2]]>, ir<%ysd.0> +; CHECK-NOT: !dbg ; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]> ; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]> ; CHECK-NEXT: No successors @@ -406,23 +417,23 @@ entry: loop: %iv = phi i64 [ 0, %entry ], [ 
%iv.next, %if.end ] - %isd = getelementptr inbounds i32, ptr %asd, i64 %iv - %lsd = load i32, ptr %isd, align 4 - %psd = add nuw nsw i32 %lsd, 23 - %cmp1 = icmp slt i32 %lsd, 100 - br i1 %cmp1, label %if.then, label %check, !dbg !7 + %isd = getelementptr inbounds i32, ptr %src, i64 %iv, !dbg !7 + %lsd = load i32, ptr %isd, align 4, !dbg !8 + %psd = add nuw nsw i32 %lsd, 23, !dbg !9 + %cmp1 = icmp slt i32 %lsd, 100, !dbg !10 + br i1 %cmp1, label %if.then, label %check, !dbg !11 check: - %cmp2 = icmp sge i32 %lsd, 200 - br i1 %cmp2, label %if.then, label %if.end, !dbg !8 + %cmp2 = icmp sge i32 %lsd, 200, !dbg !12 + br i1 %cmp2, label %if.then, label %if.end, !dbg !13 if.then: - %sd1 = sdiv i32 %psd, %lsd + %sd1 = sdiv i32 %psd, %lsd, !dbg !14 br label %if.end if.end: - %ysd.0 = phi i32 [ %sd1, %if.then ], [ %psd, %check ] - store i32 %ysd.0, ptr %isd, align 4 + %ysd.0 = phi i32 [ %sd1, %if.then ], [ %psd, %check ], !dbg !16 + store i32 %ysd.0, ptr %isd, align 4, !dbg !17 %iv.next = add nuw nsw i64 %iv, 1 %exitcond = icmp eq i64 %iv.next, 128 br i1 %exitcond, label %exit, label %loop @@ -1078,4 +1089,15 @@ attributes #0 = { readonly nounwind "vector-function-abi-variant"="_ZGV_LLVM_M2v !5 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 4, type: !6, scopeLine: 4, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2) !6 = !DISubroutineType(types: !2) !7 = !DILocation(line: 5, column: 3, scope: !5) -!8 = !DILocation(line: 5, column: 21, scope: !5) +!8 = !DILocation(line: 6, column: 3, scope: !5) +!9 = !DILocation(line: 7, column: 3, scope: !5) +!10 = !DILocation(line: 8, column: 3, scope: !5) +!11 = !DILocation(line: 9, column: 3, scope: !5) +!12 = !DILocation(line: 10, column: 3, scope: !5) +!13 = !DILocation(line: 11, column: 3, scope: !5) +!14 = !DILocation(line: 12, column: 3, scope: !5) +!15 = !DILocation(line: 13, column: 3, scope: !5) +!16 = !DILocation(line: 14, column: 3, scope: !5) +!17 = !DILocation(line: 15, column: 3, scope: !5) +!18 = !DILocation(line: 16, column: 3, scope: !5) +!19 = !DILocation(line: 17, column: 3, scope: !5) From c61a4406db904bd33ca01db8eda67d52560265a3 Mon Sep 17 00:00:00 2001 From: Hui Date: Sun, 16 Nov 2025 14:11:40 +0000 Subject: [PATCH 02/17] [libc++][test] re-enable the inference test for clang (#168258) Fixes #168210 --- .../support.dynamic/hardware_inference_size.compile.pass.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libcxx/test/std/language.support/support.dynamic/hardware_inference_size.compile.pass.cpp b/libcxx/test/std/language.support/support.dynamic/hardware_inference_size.compile.pass.cpp index 2656f0595bf50..aed3d42161bae 100644 --- a/libcxx/test/std/language.support/support.dynamic/hardware_inference_size.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.dynamic/hardware_inference_size.compile.pass.cpp @@ -7,7 +7,6 @@ //===----------------------------------------------------------------------===// // UNSUPPORTED: c++03, c++11, c++14 -// UNSUPPORTED: (clang || apple-clang) && stdlib=libc++ #include From f12ad95991144c35e95d331afc29b9b065d4e0b8 Mon Sep 17 00:00:00 2001 From: Michael Kruse Date: Sun, 16 Nov 2025 15:12:52 +0100 Subject: [PATCH 03/17] [Polly] Remove ScopPass infrastructure (#125783) PR #125442 replaces the pass-based Polly architecture with a monolithic pass consisting of phases. Reasons listed in https://github.com/llvm/llvm-project/pull/125442. 
With this change, the SCoP passes become redundant implementations of the same functionality and are removed. --- polly/docs/ReleaseNotes.rst | 2 + polly/include/polly/CodeGen/CodeGeneration.h | 13 +- polly/include/polly/CodeGen/IslAst.h | 21 +- polly/include/polly/CodePreparation.h | 9 +- polly/include/polly/DeLICM.h | 19 +- polly/include/polly/DeadCodeElimination.h | 8 - polly/include/polly/DependenceInfo.h | 26 +- polly/include/polly/ForwardOpTree.h | 21 +- polly/include/polly/JSONExporter.h | 17 +- polly/include/polly/MaximalStaticExpansion.h | 20 -- polly/include/polly/Pass/PhaseManager.h | 6 +- polly/include/polly/PruneUnprofitable.h | 11 +- polly/include/polly/ScheduleOptimizer.h | 24 +- polly/include/polly/ScopGraphPrinter.h | 1 - polly/include/polly/ScopInfo.h | 1 - polly/include/polly/ScopPass.h | 264 ------------------ polly/include/polly/Simplify.h | 24 +- polly/lib/Analysis/DependenceInfo.cpp | 27 -- polly/lib/Analysis/PruneUnprofitable.cpp | 16 -- polly/lib/Analysis/ScopDetection.cpp | 1 - polly/lib/Analysis/ScopInfo.cpp | 1 - polly/lib/Analysis/ScopPass.cpp | 134 --------- polly/lib/CMakeLists.txt | 1 - polly/lib/CodeGen/CodeGeneration.cpp | 13 - polly/lib/CodeGen/IslAst.cpp | 19 -- polly/lib/Exchange/JSONExporter.cpp | 27 -- polly/lib/Pass/PhaseManager.cpp | 11 +- polly/lib/Support/PollyPasses.def | 44 --- polly/lib/Support/RegisterPasses.cpp | 126 --------- polly/lib/Transform/CodePreparation.cpp | 15 +- polly/lib/Transform/DeLICM.cpp | 43 --- polly/lib/Transform/DeadCodeElimination.cpp | 23 -- polly/lib/Transform/FlattenSchedule.cpp | 1 - polly/lib/Transform/ForwardOpTree.cpp | 42 --- polly/lib/Transform/MatmulOptimizer.cpp | 1 - .../lib/Transform/MaximalStaticExpansion.cpp | 42 --- polly/lib/Transform/ScheduleOptimizer.cpp | 39 +-- polly/lib/Transform/ScopInliner.cpp | 1 - polly/lib/Transform/Simplify.cpp | 35 --- ...invariant_load_base_pointer_conditional.ll | 2 +- polly/unittests/CMakeLists.txt | 1 - .../unittests/ScopPassManager/CMakeLists.txt | 7 - .../ScopPassManager/PassManagerTest.cpp | 66 ----- 43 files changed, 51 insertions(+), 1174 deletions(-) delete mode 100644 polly/include/polly/ScopPass.h delete mode 100644 polly/lib/Analysis/ScopPass.cpp delete mode 100644 polly/unittests/ScopPassManager/CMakeLists.txt delete mode 100644 polly/unittests/ScopPassManager/PassManagerTest.cpp diff --git a/polly/docs/ReleaseNotes.rst b/polly/docs/ReleaseNotes.rst index 215a802843304..618a4265f09cf 100644 --- a/polly/docs/ReleaseNotes.rst +++ b/polly/docs/ReleaseNotes.rst @@ -17,3 +17,5 @@ In Polly |version| the following important changes have been incorporated. * Polly's support for the legacy pass manager has been removed. + * The infrastructure around ScopPasses has been removed. + diff --git a/polly/include/polly/CodeGen/CodeGeneration.h b/polly/include/polly/CodeGen/CodeGeneration.h index 2340fbe016b49..bf0b8e69f46bb 100644 --- a/polly/include/polly/CodeGen/CodeGeneration.h +++ b/polly/include/polly/CodeGen/CodeGeneration.h @@ -10,12 +10,16 @@ #define POLLY_CODEGENERATION_H #include "polly/CodeGen/IRBuilder.h" -#include "polly/ScopPass.h" -#include "llvm/IR/PassManager.h" + +namespace llvm { +class RegionInfo; +} namespace polly { class IslAstInfo; +using llvm::BasicBlock; + enum VectorizerChoice { VECTORIZER_NONE, VECTORIZER_STRIPMINE, @@ -28,11 +32,6 @@ extern VectorizerChoice PollyVectorizerChoice; /// UnreachableInst. 
void markBlockUnreachable(BasicBlock &Block, PollyIRBuilder &Builder); -struct CodeGenerationPass final : PassInfoMixin { - PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &AR, SPMUpdater &U); -}; - extern bool PerfMonitoring; bool runCodeGeneration(Scop &S, llvm::RegionInfo &RI, IslAstInfo &AI); diff --git a/polly/include/polly/CodeGen/IslAst.h b/polly/include/polly/CodeGen/IslAst.h index 3e1ff2c8a24da..243ca46f9ba32 100644 --- a/polly/include/polly/CodeGen/IslAst.h +++ b/polly/include/polly/CodeGen/IslAst.h @@ -22,12 +22,11 @@ #define POLLY_ISLAST_H #include "polly/DependenceInfo.h" -#include "polly/ScopPass.h" #include "llvm/ADT/SmallPtrSet.h" -#include "llvm/IR/PassManager.h" #include "isl/ctx.h" namespace polly { +using llvm::raw_ostream; using llvm::SmallPtrSet; class Dependences; @@ -164,24 +163,6 @@ class IslAstInfo { ///} }; -struct IslAstAnalysis : AnalysisInfoMixin { - static AnalysisKey Key; - - using Result = IslAstInfo; - - IslAstInfo run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR); -}; - -struct IslAstPrinterPass final : PassInfoMixin { - IslAstPrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &, SPMUpdater &U); - - raw_ostream &OS; -}; - std::unique_ptr runIslAstGen(Scop &S, DependenceAnalysis::Result &DA); } // namespace polly diff --git a/polly/include/polly/CodePreparation.h b/polly/include/polly/CodePreparation.h index 1a15e3d4d5a29..35864957c6e0a 100644 --- a/polly/include/polly/CodePreparation.h +++ b/polly/include/polly/CodePreparation.h @@ -13,19 +13,16 @@ #ifndef POLLY_CODEPREPARATION_H #define POLLY_CODEPREPARATION_H -#include "llvm/IR/PassManager.h" - namespace llvm { class DominatorTree; +class Function; class LoopInfo; class RegionInfo; } // namespace llvm namespace polly { -struct CodePreparationPass final : llvm::PassInfoMixin { - llvm::PreservedAnalyses run(llvm::Function &F, - llvm::FunctionAnalysisManager &FAM); -}; +bool runCodePreparation(llvm::Function &F, llvm::DominatorTree *DT, + llvm::LoopInfo *LI, llvm::RegionInfo *RI); } // namespace polly #endif /* POLLY_CODEPREPARATION_H */ diff --git a/polly/include/polly/DeLICM.h b/polly/include/polly/DeLICM.h index 63fc509e0bd46..61f2218f8c2a8 100644 --- a/polly/include/polly/DeLICM.h +++ b/polly/include/polly/DeLICM.h @@ -17,7 +17,6 @@ #ifndef POLLY_DELICM_H #define POLLY_DELICM_H -#include "polly/ScopPass.h" #include "isl/isl-noexceptions.h" namespace llvm { @@ -25,23 +24,7 @@ class raw_ostream; } // namespace llvm namespace polly { - -struct DeLICMPass final : llvm::PassInfoMixin { - DeLICMPass() {} - - llvm::PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U); -}; - -struct DeLICMPrinterPass final : llvm::PassInfoMixin { - DeLICMPrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &, - ScopStandardAnalysisResults &SAR, SPMUpdater &); - -private: - llvm::raw_ostream &OS; -}; +class Scop; /// Determine whether two lifetimes are conflicting. 
/// diff --git a/polly/include/polly/DeadCodeElimination.h b/polly/include/polly/DeadCodeElimination.h index 4d8da56c76eec..e6aa900117274 100644 --- a/polly/include/polly/DeadCodeElimination.h +++ b/polly/include/polly/DeadCodeElimination.h @@ -14,17 +14,9 @@ #define POLLY_DEADCODEELIMINATION_H #include "polly/DependenceInfo.h" -#include "polly/ScopPass.h" namespace polly { -struct DeadCodeElimPass final : llvm::PassInfoMixin { - DeadCodeElimPass() {} - - llvm::PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U); -}; - bool runDeadCodeElim(Scop &S, DependenceAnalysis::Result &DA); } // namespace polly diff --git a/polly/include/polly/DependenceInfo.h b/polly/include/polly/DependenceInfo.h index 88ea468dd5473..c4d7b033e0245 100644 --- a/polly/include/polly/DependenceInfo.h +++ b/polly/include/polly/DependenceInfo.h @@ -22,11 +22,20 @@ #ifndef POLLY_DEPENDENCE_INFO_H #define POLLY_DEPENDENCE_INFO_H -#include "polly/ScopPass.h" +#include "llvm/ADT/DenseMap.h" #include "isl/ctx.h" #include "isl/isl-noexceptions.h" +namespace llvm { +class raw_ostream; +} + namespace polly { +class MemoryAccess; +class Scop; +class ScopStmt; + +using llvm::DenseMap; /// The accumulated dependence information for a SCoP. /// @@ -193,8 +202,7 @@ class Dependences final { extern Dependences::AnalysisLevel OptAnalysisLevel; -struct DependenceAnalysis final : public AnalysisInfoMixin { - static AnalysisKey Key; +struct DependenceAnalysis final { struct Result { Scop &S; std::unique_ptr D[Dependences::NumAnalysisLevels]; @@ -219,18 +227,6 @@ struct DependenceAnalysis final : public AnalysisInfoMixin { /// dependencies. void abandonDependences(); }; - Result run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR); -}; - -struct DependenceInfoPrinterPass final - : PassInfoMixin { - DependenceInfoPrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &, - ScopStandardAnalysisResults &, SPMUpdater &); - - raw_ostream &OS; }; DependenceAnalysis::Result runDependenceAnalysis(Scop &S); diff --git a/polly/include/polly/ForwardOpTree.h b/polly/include/polly/ForwardOpTree.h index 8b2ece1f08e15..0193a79208afd 100644 --- a/polly/include/polly/ForwardOpTree.h +++ b/polly/include/polly/ForwardOpTree.h @@ -13,27 +13,8 @@ #ifndef POLLY_FORWARDOPTREE_H #define POLLY_FORWARDOPTREE_H -#include "polly/ScopPass.h" - namespace polly { - -struct ForwardOpTreePass final : llvm::PassInfoMixin { - ForwardOpTreePass() {} - - llvm::PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U); -}; - -struct ForwardOpTreePrinterPass final - : llvm::PassInfoMixin { - ForwardOpTreePrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &, - ScopStandardAnalysisResults &SAR, SPMUpdater &); - -private: - llvm::raw_ostream &OS; -}; +class Scop; /// Pass that redirects scalar reads to array elements that are known to contain /// the same value. diff --git a/polly/include/polly/JSONExporter.h b/polly/include/polly/JSONExporter.h index 82a881c737064..821c0d70b67e3 100644 --- a/polly/include/polly/JSONExporter.h +++ b/polly/include/polly/JSONExporter.h @@ -10,26 +10,15 @@ #define POLLY_JSONEXPORTER_H #include "polly/DependenceInfo.h" -#include "polly/ScopPass.h" -#include "llvm/IR/PassManager.h" namespace polly { -/// This pass exports a scop to a jscop file. The filename is generated from the -/// concatenation of the function and scop name. 
-struct JSONExportPass final : llvm::PassInfoMixin { - llvm::PreservedAnalyses run(Scop &, ScopAnalysisManager &, - ScopStandardAnalysisResults &, SPMUpdater &); -}; - /// This pass imports a scop from a jscop file. The filename is deduced from the /// concatenation of the function and scop name. -struct JSONImportPass final : llvm::PassInfoMixin { - llvm::PreservedAnalyses run(Scop &, ScopAnalysisManager &, - ScopStandardAnalysisResults &, SPMUpdater &); -}; - void runImportJSON(Scop &S, DependenceAnalysis::Result &DA); + +/// This pass exports a scop to a jscop file. The filename is generated from the +/// concatenation of the function and scop name. void runExportJSON(Scop &S); } // namespace polly diff --git a/polly/include/polly/MaximalStaticExpansion.h b/polly/include/polly/MaximalStaticExpansion.h index 1f9fbcb1d6a70..974c35fc2953f 100644 --- a/polly/include/polly/MaximalStaticExpansion.h +++ b/polly/include/polly/MaximalStaticExpansion.h @@ -15,29 +15,9 @@ #define POLLY_MAXIMALSTATICEXPANSION_H #include "polly/DependenceInfo.h" -#include "polly/ScopPass.h" -#include "llvm/IR/PassManager.h" namespace polly { -class MaximalStaticExpansionPass - : public llvm::PassInfoMixin { -public: - llvm::PreservedAnalyses run(Scop &, ScopAnalysisManager &, - ScopStandardAnalysisResults &, SPMUpdater &); -}; - -struct MaximalStaticExpansionPrinterPass - : llvm::PassInfoMixin { - MaximalStaticExpansionPrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &, - ScopStandardAnalysisResults &SAR, SPMUpdater &); - -private: - llvm::raw_ostream &OS; -}; - void runMaximalStaticExpansion(Scop &S, DependenceAnalysis::Result &DI); } // namespace polly diff --git a/polly/include/polly/Pass/PhaseManager.h b/polly/include/polly/Pass/PhaseManager.h index 9ff9bbf02d71f..7f27a1c4fd930 100644 --- a/polly/include/polly/Pass/PhaseManager.h +++ b/polly/include/polly/Pass/PhaseManager.h @@ -17,14 +17,16 @@ #include "polly/DependenceInfo.h" #include "llvm/ADT/Bitset.h" +#include "llvm/IR/PassManager.h" #include namespace llvm { -class Function; -class Error; +template struct enum_iteration_traits; } // namespace llvm namespace polly { +using llvm::Function; +using llvm::StringRef; /// Phases (in execution order) within the Polly pass. 
enum class PassPhase { diff --git a/polly/include/polly/PruneUnprofitable.h b/polly/include/polly/PruneUnprofitable.h index 16b76cc62f1d2..16f08694e6445 100644 --- a/polly/include/polly/PruneUnprofitable.h +++ b/polly/include/polly/PruneUnprofitable.h @@ -13,17 +13,8 @@ #ifndef POLLY_PRUNEUNPROFITABLE_H #define POLLY_PRUNEUNPROFITABLE_H -#include "polly/ScopPass.h" - namespace polly { - -struct PruneUnprofitablePass final - : llvm::PassInfoMixin { - PruneUnprofitablePass() {} - - llvm::PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U); -}; +class Scop; bool runPruneUnprofitable(Scop &S); } // namespace polly diff --git a/polly/include/polly/ScheduleOptimizer.h b/polly/include/polly/ScheduleOptimizer.h index ac45572ba7ed5..00ac81654d8a7 100644 --- a/polly/include/polly/ScheduleOptimizer.h +++ b/polly/include/polly/ScheduleOptimizer.h @@ -10,28 +10,12 @@ #define POLLY_SCHEDULEOPTIMIZER_H #include "polly/DependenceInfo.h" -#include "polly/ScopPass.h" -namespace polly { - -struct IslScheduleOptimizerPass final - : llvm::PassInfoMixin { - IslScheduleOptimizerPass() {} - - llvm::PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U); -}; +namespace llvm { +class TargetTransformInfo; +} -struct IslScheduleOptimizerPrinterPass final - : llvm::PassInfoMixin { - IslScheduleOptimizerPrinterPass(raw_ostream &OS) : OS(OS) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &, - ScopStandardAnalysisResults &SAR, SPMUpdater &); - -private: - llvm::raw_ostream &OS; -}; +namespace polly { void runIslScheduleOptimizer(Scop &S, llvm::TargetTransformInfo *TTI, DependenceAnalysis::Result &Deps); diff --git a/polly/include/polly/ScopGraphPrinter.h b/polly/include/polly/ScopGraphPrinter.h index c4e669f0c3503..e85c237f9984e 100644 --- a/polly/include/polly/ScopGraphPrinter.h +++ b/polly/include/polly/ScopGraphPrinter.h @@ -22,7 +22,6 @@ #include "llvm/Analysis/RegionInfo.h" #include "llvm/Analysis/RegionIterator.h" #include "llvm/Analysis/RegionPrinter.h" -#include "llvm/IR/PassManager.h" namespace llvm { diff --git a/polly/include/polly/ScopInfo.h b/polly/include/polly/ScopInfo.h index 7541ddc21e39f..e426f283ddf9d 100644 --- a/polly/include/polly/ScopInfo.h +++ b/polly/include/polly/ScopInfo.h @@ -26,7 +26,6 @@ #include "llvm/IR/DebugLoc.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" -#include "llvm/IR/PassManager.h" #include "llvm/IR/ValueHandle.h" #include "isl/isl-noexceptions.h" #include diff --git a/polly/include/polly/ScopPass.h b/polly/include/polly/ScopPass.h deleted file mode 100644 index 80ccd5717f96c..0000000000000 --- a/polly/include/polly/ScopPass.h +++ /dev/null @@ -1,264 +0,0 @@ -//===--------- ScopPass.h - Pass for Static Control Parts --------*-C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines the ScopPass class. ScopPasses are just RegionPasses, -// except they operate on Polly IR (Scop and ScopStmt) built by ScopInfo Pass. -// Because they operate on Polly IR, not the LLVM IR, ScopPasses are not allowed -// to modify the LLVM IR. Due to this limitation, the ScopPass class takes -// care of declaring that no LLVM passes are invalidated. 
-// -//===----------------------------------------------------------------------===// - -#ifndef POLLY_SCOP_PASS_H -#define POLLY_SCOP_PASS_H - -#include "polly/ScopInfo.h" -#include "llvm/ADT/PriorityWorklist.h" -#include "llvm/Analysis/TargetTransformInfo.h" -#include "llvm/IR/PassManager.h" -#include "llvm/IR/PassManagerImpl.h" - -namespace polly { -using llvm::AllAnalysesOn; -using llvm::AnalysisManager; -using llvm::DominatorTreeAnalysis; -using llvm::InnerAnalysisManagerProxy; -using llvm::LoopAnalysis; -using llvm::OuterAnalysisManagerProxy; -using llvm::PassManager; -using llvm::RegionInfoAnalysis; -using llvm::ScalarEvolutionAnalysis; -using llvm::SmallPriorityWorklist; -using llvm::TargetIRAnalysis; -using llvm::TargetTransformInfo; - -class Scop; -class SPMUpdater; -struct ScopStandardAnalysisResults; - -using ScopAnalysisManager = - AnalysisManager; -using ScopAnalysisManagerFunctionProxy = - InnerAnalysisManagerProxy; -using FunctionAnalysisManagerScopProxy = - OuterAnalysisManagerProxy; -} // namespace polly - -namespace llvm { -using polly::Scop; -using polly::ScopAnalysisManager; -using polly::ScopAnalysisManagerFunctionProxy; -using polly::ScopInfo; -using polly::ScopStandardAnalysisResults; -using polly::SPMUpdater; - -template <> -class InnerAnalysisManagerProxy::Result { -public: - explicit Result(ScopAnalysisManager &InnerAM, ScopInfo &SI) - : InnerAM(&InnerAM), SI(&SI) {} - Result(Result &&R) : InnerAM(std::move(R.InnerAM)), SI(R.SI) { - R.InnerAM = nullptr; - } - Result &operator=(Result &&RHS) { - InnerAM = RHS.InnerAM; - SI = RHS.SI; - RHS.InnerAM = nullptr; - return *this; - } - ~Result() { - if (!InnerAM) - return; - InnerAM->clear(); - } - - ScopAnalysisManager &getManager() { return *InnerAM; } - - bool invalidate(Function &F, const PreservedAnalyses &PA, - FunctionAnalysisManager::Invalidator &Inv); - -private: - ScopAnalysisManager *InnerAM; - ScopInfo *SI; -}; - -// A partial specialization of the require analysis template pass to handle -// extra parameters -template -struct RequireAnalysisPass - : PassInfoMixin< - RequireAnalysisPass> { - PreservedAnalyses run(Scop &L, ScopAnalysisManager &AM, - ScopStandardAnalysisResults &AR, SPMUpdater &) { - (void)AM.template getResult(L, AR); - return PreservedAnalyses::all(); - } -}; - -template <> -InnerAnalysisManagerProxy::Result -InnerAnalysisManagerProxy::run( - Function &F, FunctionAnalysisManager &FAM); - -template <> -PreservedAnalyses -PassManager::run(Scop &InitialS, ScopAnalysisManager &AM, - ScopStandardAnalysisResults &, SPMUpdater &); -extern template class PassManager; -extern template class InnerAnalysisManagerProxy; -extern template class OuterAnalysisManagerProxy; -} // namespace llvm - -namespace polly { - -template -class OwningInnerAnalysisManagerProxy final - : public InnerAnalysisManagerProxy { -public: - OwningInnerAnalysisManagerProxy() - : InnerAnalysisManagerProxy(InnerAM) {} - using Result = typename InnerAnalysisManagerProxy::Result; - Result run(IRUnitT &IR, AnalysisManager &AM, - ExtraArgTs...) 
{ - return Result(InnerAM); - } - - AnalysisManagerT &getManager() { return InnerAM; } - -private: - AnalysisManagerT InnerAM; -}; - -template <> -OwningInnerAnalysisManagerProxy::Result -OwningInnerAnalysisManagerProxy::run( - Function &F, FunctionAnalysisManager &FAM); -extern template class OwningInnerAnalysisManagerProxy; - -using OwningScopAnalysisManagerFunctionProxy = - OwningInnerAnalysisManagerProxy; -using ScopPassManager = - PassManager; - -struct ScopStandardAnalysisResults { - DominatorTree &DT; - ScopInfo &SI; - ScalarEvolution &SE; - LoopInfo &LI; - RegionInfo &RI; - TargetTransformInfo &TTI; -}; - -class SPMUpdater final { -public: - SPMUpdater(SmallPriorityWorklist &Worklist, - ScopAnalysisManager &SAM) - : InvalidateCurrentScop(false), Worklist(Worklist), SAM(SAM) {} - - bool invalidateCurrentScop() const { return InvalidateCurrentScop; } - - void invalidateScop(Scop &S) { - if (&S == CurrentScop) - InvalidateCurrentScop = true; - - Worklist.erase(&S.getRegion()); - SAM.clear(S, S.getName()); - } - -private: - Scop *CurrentScop; - bool InvalidateCurrentScop; - SmallPriorityWorklist &Worklist; - ScopAnalysisManager &SAM; - template friend struct FunctionToScopPassAdaptor; -}; - -template -struct FunctionToScopPassAdaptor final - : PassInfoMixin> { - explicit FunctionToScopPassAdaptor(ScopPassT Pass) : Pass(std::move(Pass)) {} - - PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) { - ScopDetection &SD = AM.getResult(F); - ScopInfo &SI = AM.getResult(F); - if (SI.empty()) { - // With no scops having been detected, no IR changes have been made and - // therefore all analyses are preserved. However, we must still free the - // Scop analysis results which may hold AssertingVH that cause an error - // if its value is destroyed. - PreservedAnalyses PA = PreservedAnalyses::all(); - PA.abandon(); - PA.abandon(); - AM.invalidate(F, PA); - return PreservedAnalyses::all(); - } - - SmallPriorityWorklist Worklist; - for (auto &S : SI) - if (S.second) - Worklist.insert(S.first); - - ScopStandardAnalysisResults AR = {AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F)}; - - ScopAnalysisManager &SAM = - AM.getResult(F).getManager(); - - SPMUpdater Updater{Worklist, SAM}; - - while (!Worklist.empty()) { - Region *R = Worklist.pop_back_val(); - if (!SD.isMaxRegionInScop(*R, /*Verify=*/false)) - continue; - Scop *scop = SI.getScop(R); - if (!scop) - continue; - Updater.CurrentScop = scop; - Updater.InvalidateCurrentScop = false; - PreservedAnalyses PassPA = Pass.run(*scop, SAM, AR, Updater); - - SAM.invalidate(*scop, PassPA); - if (Updater.invalidateCurrentScop()) - SI.recompute(); - }; - - // FIXME: For the same reason as we add a BarrierNoopPass in the legacy pass - // manager, do not preserve any analyses. While CodeGeneration may preserve - // IR analyses sufficiently to process another Scop in the same function (it - // has to, otherwise the ScopDetection result itself would need to be - // invalidated), it is not sufficient for other purposes. For instance, - // CodeGeneration does not inform LoopInfo about new loops in the - // Polly-generated IR. 
- return PreservedAnalyses::none(); - } - -private: - ScopPassT Pass; -}; - -template -FunctionToScopPassAdaptor -createFunctionToScopPassAdaptor(ScopPassT Pass) { - return FunctionToScopPassAdaptor(std::move(Pass)); -} -} // namespace polly - -#endif diff --git a/polly/include/polly/Simplify.h b/polly/include/polly/Simplify.h index 4565eb26edaf0..c4703384a77dc 100644 --- a/polly/include/polly/Simplify.h +++ b/polly/include/polly/Simplify.h @@ -13,11 +13,11 @@ #ifndef POLLY_TRANSFORM_SIMPLIFY_H #define POLLY_TRANSFORM_SIMPLIFY_H -#include "polly/ScopPass.h" #include "llvm/ADT/SmallVector.h" namespace polly { class MemoryAccess; +class Scop; class ScopStmt; /// Return a vector that contains MemoryAccesses in the order in @@ -36,28 +36,6 @@ class ScopStmt; /// undefined. llvm::SmallVector getAccessesInOrder(ScopStmt &Stmt); -struct SimplifyPass final : PassInfoMixin { - SimplifyPass(int CallNo = 0) : CallNo(CallNo) {} - - llvm::PreservedAnalyses run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &AR, SPMUpdater &U); - -private: - int CallNo; -}; - -struct SimplifyPrinterPass final : PassInfoMixin { - SimplifyPrinterPass(raw_ostream &OS, int CallNo = 0) - : OS(OS), CallNo(CallNo) {} - - PreservedAnalyses run(Scop &S, ScopAnalysisManager &, - ScopStandardAnalysisResults &, SPMUpdater &); - -private: - raw_ostream &OS; - int CallNo; -}; - bool runSimplify(Scop &S, int CallNo); } // namespace polly diff --git a/polly/lib/Analysis/DependenceInfo.cpp b/polly/lib/Analysis/DependenceInfo.cpp index 5183fc5725ece..0f208ec74634b 100644 --- a/polly/lib/Analysis/DependenceInfo.cpp +++ b/polly/lib/Analysis/DependenceInfo.cpp @@ -858,33 +858,6 @@ void DependenceAnalysis::Result::abandonDependences() { Deps.release(); } -DependenceAnalysis::Result -DependenceAnalysis::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR) { - return {S, {}}; -} - -AnalysisKey DependenceAnalysis::Key; - -PreservedAnalyses -DependenceInfoPrinterPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - auto &DI = SAM.getResult(S, SAR); - - if (auto d = DI.D[OptAnalysisLevel].get()) { - d->print(OS); - return PreservedAnalyses::all(); - } - - // Otherwise create the dependences on-the-fly and print them - Dependences D(S.getSharedIslCtx(), OptAnalysisLevel); - D.calculateDependences(S); - D.print(OS); - - return PreservedAnalyses::all(); -} - DependenceAnalysis::Result polly::runDependenceAnalysis(Scop &S) { DependenceAnalysis::Result Result{S, {}}; return Result; diff --git a/polly/lib/Analysis/PruneUnprofitable.cpp b/polly/lib/Analysis/PruneUnprofitable.cpp index 40cc9178da0f3..7201d3d1e319f 100644 --- a/polly/lib/Analysis/PruneUnprofitable.cpp +++ b/polly/lib/Analysis/PruneUnprofitable.cpp @@ -13,7 +13,6 @@ #include "polly/PruneUnprofitable.h" #include "polly/ScopDetection.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/DebugLoc.h" #include "llvm/Support/Debug.h" @@ -79,18 +78,3 @@ bool polly::runPruneUnprofitable(Scop &S) { return false; } - -llvm::PreservedAnalyses -PruneUnprofitablePass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U) { - bool Changed = runPruneUnprofitable(S); - - if (!Changed) - return PreservedAnalyses::all(); - - PreservedAnalyses PA; - PA.preserveSet>(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; -} diff --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp index 
29e89348125f2..9e0b495b02e29 100644 --- a/polly/lib/Analysis/ScopDetection.cpp +++ b/polly/lib/Analysis/ScopDetection.cpp @@ -72,7 +72,6 @@ #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" -#include "llvm/IR/PassManager.h" #include "llvm/IR/Value.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Regex.h" diff --git a/polly/lib/Analysis/ScopInfo.cpp b/polly/lib/Analysis/ScopInfo.cpp index 70e184d3f897f..bf993a27dd17d 100644 --- a/polly/lib/Analysis/ScopInfo.cpp +++ b/polly/lib/Analysis/ScopInfo.cpp @@ -53,7 +53,6 @@ #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" -#include "llvm/IR/PassManager.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" #include "llvm/Support/Compiler.h" diff --git a/polly/lib/Analysis/ScopPass.cpp b/polly/lib/Analysis/ScopPass.cpp deleted file mode 100644 index 61417e799cfa5..0000000000000 --- a/polly/lib/Analysis/ScopPass.cpp +++ /dev/null @@ -1,134 +0,0 @@ -//===- ScopPass.cpp - The base class of Passes that operate on Polly IR ---===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file contains the definitions of the ScopPass members. -// -//===----------------------------------------------------------------------===// - -#include "polly/ScopPass.h" -#include "polly/ScopInfo.h" -#include "llvm/Analysis/BasicAliasAnalysis.h" -#include "llvm/Analysis/GlobalsModRef.h" -#include "llvm/Analysis/LazyBlockFrequencyInfo.h" -#include "llvm/Analysis/LazyBranchProbabilityInfo.h" -#include "llvm/Analysis/OptimizationRemarkEmitter.h" -#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" -#include "llvm/Analysis/TargetTransformInfo.h" -#include - -using namespace llvm; -using namespace polly; - -namespace polly { -template class OwningInnerAnalysisManagerProxy; -} - -namespace llvm { - -template class PassManager; -template class InnerAnalysisManagerProxy; -template class OuterAnalysisManagerProxy; - -template <> -PreservedAnalyses -PassManager::run(Scop &S, ScopAnalysisManager &AM, - ScopStandardAnalysisResults &AR, SPMUpdater &U) { - auto PA = PreservedAnalyses::all(); - for (auto &Pass : Passes) { - auto PassPA = Pass->run(S, AM, AR, U); - - AM.invalidate(S, PassPA); - PA.intersect(std::move(PassPA)); - } - - // All analyses for 'this' Scop have been invalidated above. - // If ScopPasses affect break other scops they have to propagate this - // information through the updater - PA.preserveSet>(); - return PA; -} - -bool ScopAnalysisManagerFunctionProxy::Result::invalidate( - Function &F, const PreservedAnalyses &PA, - FunctionAnalysisManager::Invalidator &Inv) { - - // First, check whether our ScopInfo is about to be invalidated - auto PAC = PA.getChecker(); - if (!(PAC.preserved() || PAC.preservedSet>()) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA)) { - - // As everything depends on ScopInfo, we must drop all existing results - for (auto &S : *SI) - if (auto *scop = S.second.get()) - if (InnerAM) - InnerAM->clear(*scop, scop->getName()); - - InnerAM = nullptr; - return true; // Invalidate the proxy result as well. 
- } - - bool allPreserved = PA.allAnalysesInSetPreserved>(); - - // Invalidate all non-preserved analyses - // Even if all analyses were preserved, we still need to run deferred - // invalidation - for (auto &S : *SI) { - std::optional InnerPA; - auto *scop = S.second.get(); - if (!scop) - continue; - - if (auto *OuterProxy = - InnerAM->getCachedResult(*scop)) { - for (const auto &InvPair : OuterProxy->getOuterInvalidations()) { - auto *OuterAnalysisID = InvPair.first; - const auto &InnerAnalysisIDs = InvPair.second; - - if (Inv.invalidate(OuterAnalysisID, F, PA)) { - if (!InnerPA) - InnerPA = PA; - for (auto *InnerAnalysisID : InnerAnalysisIDs) - InnerPA->abandon(InnerAnalysisID); - } - } - - if (InnerPA) { - InnerAM->invalidate(*scop, *InnerPA); - continue; - } - } - - if (!allPreserved) - InnerAM->invalidate(*scop, PA); - } - - return false; // This proxy is still valid -} - -template <> -ScopAnalysisManagerFunctionProxy::Result -ScopAnalysisManagerFunctionProxy::run(Function &F, - FunctionAnalysisManager &FAM) { - return Result(*InnerAM, FAM.getResult(F)); -} -} // namespace llvm - -namespace polly { -template <> -OwningScopAnalysisManagerFunctionProxy::Result -OwningScopAnalysisManagerFunctionProxy::run(Function &F, - FunctionAnalysisManager &FAM) { - return Result(InnerAM, FAM.getResult(F)); -} -} // namespace polly diff --git a/polly/lib/CMakeLists.txt b/polly/lib/CMakeLists.txt index e4f196f151c9e..7c609fda0a61a 100644 --- a/polly/lib/CMakeLists.txt +++ b/polly/lib/CMakeLists.txt @@ -48,7 +48,6 @@ add_llvm_pass_plugin(Polly Analysis/ScopInfo.cpp Analysis/ScopBuilder.cpp Analysis/ScopGraphPrinter.cpp - Analysis/ScopPass.cpp Analysis/PruneUnprofitable.cpp CodeGen/BlockGenerators.cpp ${ISL_CODEGEN_FILES} diff --git a/polly/lib/CodeGen/CodeGeneration.cpp b/polly/lib/CodeGen/CodeGeneration.cpp index 88eb09316bc08..5d2b6363ddf38 100644 --- a/polly/lib/CodeGen/CodeGeneration.cpp +++ b/polly/lib/CodeGen/CodeGeneration.cpp @@ -34,7 +34,6 @@ #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" -#include "llvm/IR/PassManager.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -318,18 +317,6 @@ static bool generateCode(Scop &S, IslAstInfo &AI, LoopInfo &LI, return true; } -PreservedAnalyses CodeGenerationPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &AR, - SPMUpdater &U) { - auto &AI = SAM.getResult(S, AR); - if (generateCode(S, AI, AR.LI, AR.DT, AR.SE, AR.RI)) { - U.invalidateScop(S); - return PreservedAnalyses::none(); - } - - return PreservedAnalyses::all(); -} - bool polly::runCodeGeneration(Scop &S, RegionInfo &RI, IslAstInfo &AI) { return generateCode(S, AI, *S.getLI(), *S.getDT(), *S.getSE(), RI); } diff --git a/polly/lib/CodeGen/IslAst.cpp b/polly/lib/CodeGen/IslAst.cpp index 3177cda225f1d..0ea14ae2fc2e0 100644 --- a/polly/lib/CodeGen/IslAst.cpp +++ b/polly/lib/CodeGen/IslAst.cpp @@ -32,7 +32,6 @@ #include "polly/Options.h" #include "polly/ScopDetection.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/GICHelper.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/Function.h" @@ -663,15 +662,6 @@ static std::unique_ptr runIslAst( return Ast; } -IslAstInfo IslAstAnalysis::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR) { - auto GetDeps = [&](Dependences::AnalysisLevel Lvl) -> const Dependences & { - return SAM.getResult(S, SAR).getDependences(Lvl); - }; - - return std::move(*runIslAst(S, GetDeps)); -} - 
static __isl_give isl_printer *cbPrintUser(__isl_take isl_printer *P, __isl_take isl_ast_print_options *O, __isl_keep isl_ast_node *Node, @@ -771,15 +761,6 @@ void IslAstInfo::print(raw_ostream &OS) { isl_printer_free(P); } -AnalysisKey IslAstAnalysis::Key; -PreservedAnalyses IslAstPrinterPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - auto &Ast = SAM.getResult(S, SAR); - Ast.print(OS); - return PreservedAnalyses::all(); -} - std::unique_ptr polly::runIslAstGen(Scop &S, DependenceAnalysis::Result &DA) { auto GetDeps = [&](Dependences::AnalysisLevel Lvl) -> const Dependences & { diff --git a/polly/lib/Exchange/JSONExporter.cpp b/polly/lib/Exchange/JSONExporter.cpp index 7d30c030aa6e1..e3920662ddd57 100644 --- a/polly/lib/Exchange/JSONExporter.cpp +++ b/polly/lib/Exchange/JSONExporter.cpp @@ -14,7 +14,6 @@ #include "polly/DependenceInfo.h" #include "polly/Options.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/ISLTools.h" #include "polly/Support/ScopLocation.h" #include "llvm/ADT/Statistic.h" @@ -716,32 +715,6 @@ static bool importScop(Scop &S, const Dependences &D, const DataLayout &DL, return true; } -PreservedAnalyses JSONExportPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &) { - exportScop(S); - return PreservedAnalyses::all(); -} - -PreservedAnalyses JSONImportPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &) { - const Dependences &D = - SAM.getResult(S, SAR).getDependences( - Dependences::AL_Statement); - const DataLayout &DL = S.getFunction().getParent()->getDataLayout(); - - if (!importScop(S, D, DL)) - report_fatal_error("Tried to import a malformed jscop file."); - - // This invalidates all analyses on Scop. - PreservedAnalyses PA; - PA.preserveSet>(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; -} - void polly::runImportJSON(Scop &S, DependenceAnalysis::Result &DA) { const Dependences &D = DA.getDependences(Dependences::AL_Statement); const DataLayout &DL = S.getFunction().getParent()->getDataLayout(); diff --git a/polly/lib/Pass/PhaseManager.cpp b/polly/lib/Pass/PhaseManager.cpp index 2ff6f24753097..330dfe8b1ef1e 100644 --- a/polly/lib/Pass/PhaseManager.cpp +++ b/polly/lib/Pass/PhaseManager.cpp @@ -25,8 +25,10 @@ #include "polly/ScopInfo.h" #include "polly/Simplify.h" #include "polly/Support/PollyDebug.h" +#include "llvm/ADT/PriorityWorklist.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Module.h" #define DEBUG_TYPE "polly-pass" @@ -77,10 +79,13 @@ class PhaseManager { // TODO: Setting ModifiedIR will invalidate any analysis, even if DT, LI are // preserved. 
if (Opts.isPhaseEnabled(PassPhase::Prepare)) { - PreservedAnalyses PA = CodePreparationPass().run(F, FAM); - FAM.invalidate(F, PA); - if (!PA.areAllPreserved()) + if (runCodePreparation(F, &DT, &LI, nullptr)) { + PreservedAnalyses PA; + PA.preserve(); + PA.preserve(); + FAM.invalidate(F, PA); ModifiedIR = true; + } } // Can't do anything without detection diff --git a/polly/lib/Support/PollyPasses.def b/polly/lib/Support/PollyPasses.def index 496839760a844..c95ffa36db488 100644 --- a/polly/lib/Support/PollyPasses.def +++ b/polly/lib/Support/PollyPasses.def @@ -11,53 +11,9 @@ MODULE_PASS("polly-custom", createModuleToFunctionPassAdaptor(PollyFunctionPass( CGSCC_PASS("polly-inline", ScopInlinerPass(), parseNoOptions) #undef CGSCC_PASS -#ifndef FUNCTION_ANALYSIS -#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) -#endif -FUNCTION_ANALYSIS("polly-detect", ScopAnalysis()) -FUNCTION_ANALYSIS("polly-function-scops", ScopInfoAnalysis()) -#undef FUNCTION_ANALYSIS - #ifndef FUNCTION_PASS #define FUNCTION_PASS(NAME, CREATE_PASS, PARSER) #endif -FUNCTION_PASS("polly-prepare", CodePreparationPass(), parseNoOptions) -FUNCTION_PASS("print", ScopAnalysisPrinterPass(llvm::errs()), parseNoOptions) -FUNCTION_PASS("print", ScopInfoPrinterPass(llvm::errs()), parseNoOptions) -FUNCTION_PASS("polly-scop-viewer", ScopViewer(), parseNoOptions) -FUNCTION_PASS("polly-scop-only-viewer", ScopOnlyViewer(), parseNoOptions) -FUNCTION_PASS("polly-scop-printer", ScopPrinter(), parseNoOptions) -FUNCTION_PASS("polly-scop-only-printer", ScopOnlyPrinter(), parseNoOptions) FUNCTION_PASS("polly", PollyFunctionPass(Opts), parsePollyDefaultOptions) FUNCTION_PASS("polly-custom", PollyFunctionPass(Opts), parsePollyCustomOptions) #undef FUNCTION_PASS - -#ifndef SCOP_ANALYSIS -#define SCOP_ANALYSIS(NAME, CREATE_PASS) -#endif -SCOP_ANALYSIS("pass-instrumentation", llvm::PassInstrumentationAnalysis(PIC)) -SCOP_ANALYSIS("polly-ast", IslAstAnalysis()) -SCOP_ANALYSIS("polly-dependences", DependenceAnalysis()) -#undef SCOP_ANALYSIS - -#ifndef SCOP_PASS -#define SCOP_PASS(NAME, CREATE_PASS) -#endif -SCOP_PASS("polly-export-jscop", JSONExportPass()) -SCOP_PASS("polly-import-jscop", JSONImportPass()) -SCOP_PASS("print", IslAstPrinterPass(llvm::outs())) -SCOP_PASS("print", DependenceInfoPrinterPass(llvm::outs())) -SCOP_PASS("polly-codegen", CodeGenerationPass()) -SCOP_PASS("polly-simplify", SimplifyPass()) -SCOP_PASS("print", SimplifyPrinterPass(llvm::outs())) -SCOP_PASS("polly-optree", ForwardOpTreePass()) -SCOP_PASS("print", ForwardOpTreePrinterPass(llvm::outs())) -SCOP_PASS("polly-delicm", DeLICMPass()) -SCOP_PASS("print", DeLICMPrinterPass(llvm::outs())) -SCOP_PASS("polly-prune-unprofitable", PruneUnprofitablePass()) -SCOP_PASS("polly-opt-isl", IslScheduleOptimizerPass()) -SCOP_PASS("print", IslScheduleOptimizerPrinterPass(llvm::outs())) -SCOP_PASS("polly-dce", DeadCodeElimPass()) -SCOP_PASS("polly-mse", MaximalStaticExpansionPass()) -SCOP_PASS("print", MaximalStaticExpansionPrinterPass(llvm::outs())) -#undef SCOP_PASS diff --git a/polly/lib/Support/RegisterPasses.cpp b/polly/lib/Support/RegisterPasses.cpp index 2c0f4df761fb1..a430beebae7b1 100644 --- a/polly/lib/Support/RegisterPasses.cpp +++ b/polly/lib/Support/RegisterPasses.cpp @@ -539,33 +539,6 @@ static llvm::Expected parseNoOptions(StringRef Params) { return std::monostate{}; } -static OwningScopAnalysisManagerFunctionProxy -createScopAnalyses(FunctionAnalysisManager &FAM, - PassInstrumentationCallbacks *PIC) { - OwningScopAnalysisManagerFunctionProxy Proxy; -#define SCOP_ANALYSIS(NAME, 
CREATE_PASS) \ - Proxy.getManager().registerPass([PIC] { \ - (void)PIC; \ - return CREATE_PASS; \ - }); -#include "PollyPasses.def" - - Proxy.getManager().registerPass( - [&FAM] { return FunctionAnalysisManagerScopProxy(FAM); }); - return Proxy; -} - -static void registerFunctionAnalyses(FunctionAnalysisManager &FAM, - PassInstrumentationCallbacks *PIC) { - -#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \ - FAM.registerPass([] { return CREATE_PASS; }); - -#include "PollyPasses.def" - - FAM.registerPass([&FAM, PIC] { return createScopAnalyses(FAM, PIC); }); -} - static llvm::Expected parseCGPipeline(StringRef Name, llvm::CGSCCPassManager &CGPM, PassInstrumentationCallbacks *PIC, @@ -587,15 +560,6 @@ static llvm::Expected parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM, PassInstrumentationCallbacks *PIC, ArrayRef Pipeline) { - if (llvm::parseAnalysisUtilityPasses( - "polly-scop-analyses", Name, FPM)) - return true; - -#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \ - if (llvm::parseAnalysisUtilityPasses< \ - std::remove_reference::type>(NAME, Name, \ - FPM)) \ - return true; #define FUNCTION_PASS(NAME, CREATE_PASS, PARSER) \ if (PassBuilder::checkParametrizedPassName(Name, NAME)) { \ @@ -612,83 +576,6 @@ parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM, return false; } -static bool parseScopPass(StringRef Name, ScopPassManager &SPM, - PassInstrumentationCallbacks *PIC) { -#define SCOP_ANALYSIS(NAME, CREATE_PASS) \ - if (llvm::parseAnalysisUtilityPasses< \ - std::remove_reference::type>(NAME, Name, \ - SPM)) \ - return true; - -#define SCOP_PASS(NAME, CREATE_PASS) \ - if (Name == NAME) { \ - SPM.addPass(CREATE_PASS); \ - return true; \ - } - -#include "PollyPasses.def" - - return false; -} - -static bool parseScopPipeline(StringRef Name, FunctionPassManager &FPM, - PassInstrumentationCallbacks *PIC, - ArrayRef Pipeline) { - if (Name != "scop") - return false; - if (!Pipeline.empty()) { - ScopPassManager SPM; - for (const auto &E : Pipeline) - if (!parseScopPass(E.Name, SPM, PIC)) - return false; - FPM.addPass(createFunctionToScopPassAdaptor(std::move(SPM))); - } - return true; -} - -static bool isScopPassName(StringRef Name) { -#define SCOP_ANALYSIS(NAME, CREATE_PASS) \ - if (Name == "require<" NAME ">") \ - return true; \ - if (Name == "invalidate<" NAME ">") \ - return true; - -#define SCOP_PASS(NAME, CREATE_PASS) \ - if (Name == NAME) \ - return true; - -#include "PollyPasses.def" - - return false; -} - -static bool -parseTopLevelPipeline(llvm::ModulePassManager &MPM, - PassInstrumentationCallbacks *PIC, - ArrayRef Pipeline) { - StringRef FirstName = Pipeline.front().Name; - - if (!isScopPassName(FirstName)) - return false; - - FunctionPassManager FPM; - ScopPassManager SPM; - - for (auto &Element : Pipeline) { - auto &Name = Element.Name; - auto &InnerPipeline = Element.InnerPipeline; - if (!InnerPipeline.empty()) // Scop passes don't have inner pipelines - return false; - if (!parseScopPass(Name, SPM, PIC)) - return false; - } - - FPM.addPass(createFunctionToScopPassAdaptor(std::move(SPM))); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - - return true; -} - static llvm::Expected parseModulePipeline(StringRef Name, llvm::ModulePassManager &MPM, PassInstrumentationCallbacks *PIC, @@ -758,20 +645,12 @@ void registerPollyPasses(PassBuilder &PB) { } #include "PollyPasses.def" - PB.registerAnalysisRegistrationCallback([PIC](FunctionAnalysisManager &FAM) { - registerFunctionAnalyses(FAM, PIC); - }); PB.registerPipelineParsingCallback( 
[PIC](StringRef Name, FunctionPassManager &FPM, ArrayRef Pipeline) -> bool { ExitOnError Err("Unable to parse Polly module pass: "); return Err(parseFunctionPipeline(Name, FPM, PIC, Pipeline)); }); - PB.registerPipelineParsingCallback( - [PIC](StringRef Name, FunctionPassManager &FPM, - ArrayRef Pipeline) -> bool { - return parseScopPipeline(Name, FPM, PIC, Pipeline); - }); PB.registerPipelineParsingCallback( [PIC](StringRef Name, CGSCCPassManager &CGPM, ArrayRef Pipeline) -> bool { @@ -784,11 +663,6 @@ void registerPollyPasses(PassBuilder &PB) { ExitOnError Err("Unable to parse Polly module pass: "); return Err(parseModulePipeline(Name, MPM, PIC, Pipeline)); }); - PB.registerParseTopLevelPipelineCallback( - [PIC](llvm::ModulePassManager &MPM, - ArrayRef Pipeline) -> bool { - return parseTopLevelPipeline(MPM, PIC, Pipeline); - }); switch (PassPosition) { case POSITION_EARLY: diff --git a/polly/lib/Transform/CodePreparation.cpp b/polly/lib/Transform/CodePreparation.cpp index 5b96c865ad80f..3e76dbdff1296 100644 --- a/polly/lib/Transform/CodePreparation.cpp +++ b/polly/lib/Transform/CodePreparation.cpp @@ -45,16 +45,7 @@ static bool runCodePreprationImpl(Function &F, DominatorTree *DT, LoopInfo *LI, return true; } -PreservedAnalyses CodePreparationPass::run(Function &F, - FunctionAnalysisManager &FAM) { - auto &DT = FAM.getResult(F); - auto &LI = FAM.getResult(F); - bool Changed = runCodePreprationImpl(F, &DT, &LI, nullptr); - if (!Changed) - return PreservedAnalyses::all(); - - PreservedAnalyses PA; - PA.preserve(); - PA.preserve(); - return PA; +bool polly::runCodePreparation(Function &F, DominatorTree *DT, LoopInfo *LI, + RegionInfo *RI) { + return runCodePreprationImpl(F, DT, LI, RI); } diff --git a/polly/lib/Transform/DeLICM.cpp b/polly/lib/Transform/DeLICM.cpp index e8f2d951404f3..4deace112f5b4 100644 --- a/polly/lib/Transform/DeLICM.cpp +++ b/polly/lib/Transform/DeLICM.cpp @@ -17,7 +17,6 @@ #include "polly/DeLICM.h" #include "polly/Options.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/GICHelper.h" #include "polly/Support/ISLOStream.h" #include "polly/Support/ISLTools.h" @@ -1394,50 +1393,8 @@ static std::unique_ptr runDeLICMImpl(Scop &S, LoopInfo &LI) { return Impl; } - -static PreservedAnalyses runDeLICMUsingNPM(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U, raw_ostream *OS) { - LoopInfo &LI = SAR.LI; - std::unique_ptr Impl = runDeLICMImpl(S, LI); - - if (OS) { - *OS << "Printing analysis 'Polly - DeLICM/DePRE' for region: '" - << S.getName() << "' in function '" << S.getFunction().getName() - << "':\n"; - if (Impl) { - assert(Impl->getScop() == &S); - - *OS << "DeLICM result:\n"; - Impl->print(*OS); - } - } - - if (!Impl->isModified()) - return PreservedAnalyses::all(); - - PreservedAnalyses PA; - PA.preserveSet>(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; -} } // anonymous namespace -llvm::PreservedAnalyses polly::DeLICMPass::run(Scop &S, - ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - return runDeLICMUsingNPM(S, SAM, SAR, U, nullptr); -} - -llvm::PreservedAnalyses DeLICMPrinterPass::run(Scop &S, - ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - return runDeLICMUsingNPM(S, SAM, SAR, U, &OS); -} - bool polly::isConflicting( isl::union_set ExistingOccupied, isl::union_set ExistingUnused, isl::union_map ExistingKnown, isl::union_map ExistingWrites, diff --git a/polly/lib/Transform/DeadCodeElimination.cpp 
b/polly/lib/Transform/DeadCodeElimination.cpp index df95e5190431c..7cb7400c4728f 100644 --- a/polly/lib/Transform/DeadCodeElimination.cpp +++ b/polly/lib/Transform/DeadCodeElimination.cpp @@ -143,26 +143,3 @@ bool polly::runDeadCodeElim(Scop &S, DependenceAnalysis::Result &DA) { return Changed; } - -llvm::PreservedAnalyses DeadCodeElimPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - DependenceAnalysis::Result &DA = SAM.getResult(S, SAR); - const Dependences &Deps = DA.getDependences(Dependences::AL_Statement); - - bool Changed = runDeadCodeElimination(S, DCEPreciseSteps, Deps); - - // FIXME: We can probably avoid the recomputation of all dependences by - // updating them explicitly. - if (Changed) - DA.recomputeDependences(Dependences::AL_Statement); - - if (!Changed) - return PreservedAnalyses::all(); - - PreservedAnalyses PA; - PA.preserveSet>(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; -} diff --git a/polly/lib/Transform/FlattenSchedule.cpp b/polly/lib/Transform/FlattenSchedule.cpp index 35a8ce6877036..3bb3c2ff761ea 100644 --- a/polly/lib/Transform/FlattenSchedule.cpp +++ b/polly/lib/Transform/FlattenSchedule.cpp @@ -16,7 +16,6 @@ #include "polly/FlattenAlgo.h" #include "polly/Options.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/ISLOStream.h" #include "polly/Support/ISLTools.h" #include "polly/Support/PollyDebug.h" diff --git a/polly/lib/Transform/ForwardOpTree.cpp b/polly/lib/Transform/ForwardOpTree.cpp index 24d4a4af6e681..cf0ce79efd63c 100644 --- a/polly/lib/Transform/ForwardOpTree.cpp +++ b/polly/lib/Transform/ForwardOpTree.cpp @@ -14,7 +14,6 @@ #include "polly/Options.h" #include "polly/ScopBuilder.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/GICHelper.h" #include "polly/Support/ISLOStream.h" #include "polly/Support/ISLTools.h" @@ -1070,49 +1069,8 @@ static std::unique_ptr runForwardOpTreeImpl(Scop &S, return Impl; } - -static PreservedAnalyses -runForwardOpTreeUsingNPM(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U, - raw_ostream *OS) { - LoopInfo &LI = SAR.LI; - - std::unique_ptr Impl = runForwardOpTreeImpl(S, LI); - if (OS) { - *OS << "Printing analysis 'Polly - Forward operand tree' for region: '" - << S.getName() << "' in function '" << S.getFunction().getName() - << "':\n"; - if (Impl) { - assert(Impl->getScop() == &S); - - Impl->print(*OS); - } - } - - if (!Impl->isModified()) - return PreservedAnalyses::all(); - - PreservedAnalyses PA; - PA.preserveSet>(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; -} } // namespace -llvm::PreservedAnalyses ForwardOpTreePass::run(Scop &S, - ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - return runForwardOpTreeUsingNPM(S, SAM, SAR, U, nullptr); -} - -llvm::PreservedAnalyses -ForwardOpTreePrinterPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U) { - return runForwardOpTreeUsingNPM(S, SAM, SAR, U, &OS); -} - bool polly::runForwardOpTree(Scop &S) { LoopInfo &LI = *S.getLI(); diff --git a/polly/lib/Transform/MatmulOptimizer.cpp b/polly/lib/Transform/MatmulOptimizer.cpp index 01d431a97e7db..7a6b3d25871c3 100644 --- a/polly/lib/Transform/MatmulOptimizer.cpp +++ b/polly/lib/Transform/MatmulOptimizer.cpp @@ -11,7 +11,6 @@ #include "polly/Options.h" #include "polly/ScheduleTreeTransform.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Simplify.h" 
#include "polly/Support/GICHelper.h" #include "polly/Support/ISLTools.h" diff --git a/polly/lib/Transform/MaximalStaticExpansion.cpp b/polly/lib/Transform/MaximalStaticExpansion.cpp index 62a4d251875c5..514a21f41688c 100644 --- a/polly/lib/Transform/MaximalStaticExpansion.cpp +++ b/polly/lib/Transform/MaximalStaticExpansion.cpp @@ -15,7 +15,6 @@ #include "polly/DependenceInfo.h" #include "polly/Options.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/ISLTools.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringRef.h" @@ -449,49 +448,8 @@ runMaximalStaticExpansionImpl(Scop &S, OptimizationRemarkEmitter &ORE, Impl->expand(); return Impl; } - -static PreservedAnalyses runMSEUsingNPM(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - raw_ostream *OS) { - OptimizationRemarkEmitter ORE(&S.getFunction()); - - auto &DI = SAM.getResult(S, SAR); - auto &D = DI.getDependences(Dependences::AL_Reference); - - std::unique_ptr Impl = - runMaximalStaticExpansionImpl(S, ORE, D); - - if (OS) { - *OS << "Printing analysis 'Polly - Maximal static expansion of SCoP' for " - "region: '" - << S.getName() << "' in function '" << S.getFunction().getName() - << "':\n"; - - if (Impl) { - *OS << "MSE result:\n"; - Impl->print(*OS); - } - } - - return PreservedAnalyses::all(); -} - } // namespace -PreservedAnalyses -MaximalStaticExpansionPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &) { - return runMSEUsingNPM(S, SAM, SAR, nullptr); -} - -PreservedAnalyses -MaximalStaticExpansionPrinterPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &) { - return runMSEUsingNPM(S, SAM, SAR, &OS); -} - void polly::runMaximalStaticExpansion(Scop &S, DependenceAnalysis::Result &DI) { OptimizationRemarkEmitter ORE(&S.getFunction()); diff --git a/polly/lib/Transform/ScheduleOptimizer.cpp b/polly/lib/Transform/ScheduleOptimizer.cpp index 551c4e97bc0b5..b9b9abbd85ae4 100644 --- a/polly/lib/Transform/ScheduleOptimizer.cpp +++ b/polly/lib/Transform/ScheduleOptimizer.cpp @@ -52,6 +52,7 @@ #include "polly/MatmulOptimizer.h" #include "polly/Options.h" #include "polly/ScheduleTreeTransform.h" +#include "polly/ScopInfo.h" #include "polly/Support/ISLOStream.h" #include "polly/Support/ISLTools.h" #include "llvm/ADT/Sequence.h" @@ -966,44 +967,6 @@ static void runScheduleOptimizerPrinter(raw_ostream &OS, } // namespace -static llvm::PreservedAnalyses -runIslScheduleOptimizerUsingNPM(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U, - raw_ostream *OS) { - DependenceAnalysis::Result &Deps = SAM.getResult(S, SAR); - auto GetDeps = [&Deps](Dependences::AnalysisLevel) -> const Dependences & { - return Deps.getDependences(Dependences::AL_Statement); - }; - OptimizationRemarkEmitter ORE(&S.getFunction()); - TargetTransformInfo *TTI = &SAR.TTI; - isl::schedule LastSchedule; - bool DepsChanged = false; - runIslScheduleOptimizerImpl(S, GetDeps, TTI, &ORE, LastSchedule, DepsChanged); - if (DepsChanged) - Deps.abandonDependences(); - - if (OS) { - *OS << "Printing analysis 'Polly - Optimize schedule of SCoP' for region: '" - << S.getName() << "' in function '" << S.getFunction().getName() - << "':\n"; - runScheduleOptimizerPrinter(*OS, LastSchedule); - } - return PreservedAnalyses::all(); -} - -llvm::PreservedAnalyses -IslScheduleOptimizerPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U) { - return 
runIslScheduleOptimizerUsingNPM(S, SAM, SAR, U, nullptr); -} - -llvm::PreservedAnalyses -IslScheduleOptimizerPrinterPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - return runIslScheduleOptimizerUsingNPM(S, SAM, SAR, U, &OS); -} - void polly::runIslScheduleOptimizer(Scop &S, TargetTransformInfo *TTI, DependenceAnalysis::Result &Deps) { auto GetDeps = [&Deps](Dependences::AnalysisLevel) -> const Dependences & { diff --git a/polly/lib/Transform/ScopInliner.cpp b/polly/lib/Transform/ScopInliner.cpp index 8e7a0dedaf533..794ba98dc543c 100644 --- a/polly/lib/Transform/ScopInliner.cpp +++ b/polly/lib/Transform/ScopInliner.cpp @@ -21,7 +21,6 @@ #include "llvm/Analysis/OptimizationRemarkEmitter.h" #include "llvm/Analysis/RegionInfo.h" #include "llvm/IR/Dominators.h" -#include "llvm/IR/PassManager.h" #include "llvm/Passes/PassBuilder.h" #include "llvm/Transforms/IPO/AlwaysInliner.h" diff --git a/polly/lib/Transform/Simplify.cpp b/polly/lib/Transform/Simplify.cpp index cf0f8c5ca5ef2..df88b5ea84559 100644 --- a/polly/lib/Transform/Simplify.cpp +++ b/polly/lib/Transform/Simplify.cpp @@ -13,7 +13,6 @@ #include "polly/Simplify.h" #include "polly/Options.h" #include "polly/ScopInfo.h" -#include "polly/ScopPass.h" #include "polly/Support/GICHelper.h" #include "polly/Support/ISLOStream.h" #include "polly/Support/ISLTools.h" @@ -761,42 +760,8 @@ void SimplifyImpl::printScop(raw_ostream &OS, Scop &S) const { printAccesses(OS); } -static llvm::PreservedAnalyses -runSimplifyUsingNPM(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U, int CallNo, - raw_ostream *OS) { - SimplifyImpl Impl(CallNo); - Impl.run(S, &SAR.LI); - if (OS) { - *OS << "Printing analysis 'Polly - Simplify' for region: '" << S.getName() - << "' in function '" << S.getFunction().getName() << "':\n"; - Impl.printScop(*OS, S); - } - - if (!Impl.isModified()) - return llvm::PreservedAnalyses::all(); - - PreservedAnalyses PA; - PA.preserveSet>(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; -} - } // anonymous namespace -llvm::PreservedAnalyses SimplifyPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, - SPMUpdater &U) { - return runSimplifyUsingNPM(S, SAM, SAR, U, CallNo, nullptr); -} - -llvm::PreservedAnalyses -SimplifyPrinterPass::run(Scop &S, ScopAnalysisManager &SAM, - ScopStandardAnalysisResults &SAR, SPMUpdater &U) { - return runSimplifyUsingNPM(S, SAM, SAR, U, CallNo, &OS); -} - SmallVector polly::getAccessesInOrder(ScopStmt &Stmt) { SmallVector Accesses; diff --git a/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll b/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll index 4dbcc3b3b049d..1b4b5ebebd8ef 100644 --- a/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll +++ b/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: %polly.preload.tmp6.merge = phi ptr [ %polly.access.BPLoc.load, %polly.preload.exec ], [ null, %polly.preload.cond ] ; ; CHECK-LABEL: polly.stmt.bb5: -; CHECK-NEXT: %[[offset:.*]] = shl nuw nsw i64 %polly.indvar6, 2 +; CHECK-NEXT: %[[offset:.*]] = shl nuw nsw i64 %polly.indvar16, 2 ; CHECK-NEXT: %{{.*}} = getelementptr i8, ptr %polly.preload.tmp6.merge, i64 %[[offset]] ; ; void f(int **BPLoc, int *A, int N) { diff --git a/polly/unittests/CMakeLists.txt b/polly/unittests/CMakeLists.txt index 093a2146f63c5..7b91fd8e52537 100644 --- a/polly/unittests/CMakeLists.txt +++ b/polly/unittests/CMakeLists.txt @@ -27,6 +27,5 @@ 
endfunction() add_subdirectory(Isl) add_subdirectory(Flatten) add_subdirectory(DeLICM) -add_subdirectory(ScopPassManager) add_subdirectory(ScheduleOptimizer) add_subdirectory(Support) diff --git a/polly/unittests/ScopPassManager/CMakeLists.txt b/polly/unittests/ScopPassManager/CMakeLists.txt deleted file mode 100644 index 88300144af352..0000000000000 --- a/polly/unittests/ScopPassManager/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -add_polly_unittest(ScopPassManagerTests - PassManagerTest.cpp - ) -if (NOT LLVM_LINK_LLVM_DYLIB) - llvm_map_components_to_libnames(llvm_libs Passes Core Analysis) - target_link_libraries(ScopPassManagerTests PRIVATE ${llvm_libs}) -endif() diff --git a/polly/unittests/ScopPassManager/PassManagerTest.cpp b/polly/unittests/ScopPassManager/PassManagerTest.cpp deleted file mode 100644 index 49299c2124d6e..0000000000000 --- a/polly/unittests/ScopPassManager/PassManagerTest.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#include "llvm/IR/PassManager.h" -#include "polly/CodeGen/IslAst.h" -#include "polly/DependenceInfo.h" -#include "polly/ScopPass.h" -#include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/CGSCCPassManager.h" -#include "llvm/Passes/PassBuilder.h" -#include "llvm/Transforms/Scalar/LoopPassManager.h" -#include "gtest/gtest.h" - -using namespace polly; -using namespace llvm; - -namespace { -class ScopPassRegistry : public ::testing::Test { -protected: - ModuleAnalysisManager MAM; - FunctionAnalysisManager FAM; - LoopAnalysisManager LAM; - CGSCCAnalysisManager CGAM; - ScopAnalysisManager SAM; - AAManager AM; - -public: - ScopPassRegistry(ScopPassRegistry &&) = delete; - ScopPassRegistry(const ScopPassRegistry &) = delete; - ScopPassRegistry &operator=(ScopPassRegistry &&) = delete; - ScopPassRegistry &operator=(const ScopPassRegistry &) = delete; - ScopPassRegistry() { - PassBuilder PB; - - AM = PB.buildDefaultAAPipeline(); - PB.registerModuleAnalyses(MAM); - PB.registerFunctionAnalyses(FAM); - PB.registerLoopAnalyses(LAM); - PB.registerCGSCCAnalyses(CGAM); - - FAM.registerPass([] { return ScopAnalysis(); }); - FAM.registerPass([] { return ScopInfoAnalysis(); }); - FAM.registerPass([this] { return ScopAnalysisManagerFunctionProxy(SAM); }); - - // SAM.registerPass([] { return IslAstAnalysis(); }); - // SAM.registerPass([] { return DependenceAnalysis(); }); - SAM.registerPass([this] { return FunctionAnalysisManagerScopProxy(FAM); }); - - PB.crossRegisterProxies(LAM, FAM, CGAM, MAM); - } -}; - -TEST_F(ScopPassRegistry, PrintScops) { - FunctionPassManager FPM; - FPM.addPass(ScopAnalysisPrinterPass(errs())); -} - -TEST_F(ScopPassRegistry, PrintScopInfo) { - FunctionPassManager FPM; - FPM.addPass(ScopInfoPrinterPass(errs())); -} - -TEST_F(ScopPassRegistry, PrinIslAstInfo) { - FunctionPassManager FPM; - ScopPassManager SPM; - // SPM.addPass(IslAstPrinterPass(errs())); - FPM.addPass(createFunctionToScopPassAdaptor(std::move(SPM))); -} -} // namespace From b8059e757fb95b1d3cd9b657e540bf2cd47dad82 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Sun, 16 Nov 2025 14:51:32 +0000 Subject: [PATCH 04/17] [X86] Avoid extra (PMADDUBSW(X,AND(Y)) in multiplication (#168262) On SSSE3 targets we use PMADDUBSW of odd/even with suitable masking to avoid having to extend/truncate with `` types and avoid additional Port0/5 pressure. However, lower i8 elements in the pair can safely use PMULLW directly without any pre-masking as we will only use the lower i8 bits of the result which is only affected by the lower i8 of the inputs. 
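For illustration only (not part of the patch): the correctness argument above reduces to a modular-arithmetic fact — the low 8 bits of a product depend only on the low 8 bits of each operand — so keeping just the low byte of an unmasked PMULLW lane matches what the previously pre-masked path produced. Below is a minimal standalone C++ sketch that checks this property; it assumes nothing beyond the standard library and is not a model of the actual lowering code.

```cpp
// Standalone sketch (not part of this patch): the low 8 bits of a 16-bit
// product depend only on the low 8 bits of each operand, so the lower i8
// element of each 16-bit lane can be multiplied without masking the inputs.
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A = 0; A <= 0xFFFF; ++A) {
    // Step 257 keeps the sample small while still covering every low byte of B.
    for (uint32_t B = 0; B <= 0xFFFF; B += 257) {
      uint16_t Full = uint16_t(A * B);                     // low 16 bits, as PMULLW produces per lane
      uint16_t Masked = uint16_t((A & 0xFF) * (B & 0xFF)); // product of the pre-masked low bytes
      assert((Full & 0xFF) == (Masked & 0xFF));            // the kept low byte is identical
    }
  }
  return 0;
}
```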
--- llvm/lib/Target/X86/X86ISelLowering.cpp | 4 +- llvm/test/CodeGen/X86/avx2-arith.ll | 11 +- llvm/test/CodeGen/X86/combine-mul.ll | 2 +- llvm/test/CodeGen/X86/gfni-shifts.ll | 64 ++-- llvm/test/CodeGen/X86/midpoint-int-vec-128.ll | 241 ++++++------- llvm/test/CodeGen/X86/midpoint-int-vec-256.ll | 324 ++++++++---------- .../CodeGen/X86/min-legal-vector-width.ll | 106 +++--- llvm/test/CodeGen/X86/pmul.ll | 217 ++++++------ .../CodeGen/X86/prefer-avx256-wide-mul.ll | 9 +- .../CodeGen/X86/srem-seteq-vec-nonsplat.ll | 22 +- llvm/test/CodeGen/X86/vector-fshr-128.ll | 4 +- llvm/test/CodeGen/X86/vector-fshr-256.ll | 12 +- llvm/test/CodeGen/X86/vector-fshr-512.ll | 12 +- llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll | 4 +- llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll | 6 +- llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll | 6 +- llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll | 4 +- llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll | 6 +- llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll | 6 +- llvm/test/CodeGen/X86/vector-mul.ll | 16 +- llvm/test/CodeGen/X86/vector-shift-shl-128.ll | 4 +- llvm/test/CodeGen/X86/vector-shift-shl-256.ll | 20 +- llvm/test/CodeGen/X86/vector-shift-shl-512.ll | 8 +- .../X86/vector-shuffle-combining-sse41.ll | 37 +- 24 files changed, 520 insertions(+), 625 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 66f5802a67465..593c7627a6575 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -29629,9 +29629,9 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget, } if (!(IsLoLaneAllZeroOrUndef || IsHiLaneAllZeroOrUndef)) { SDValue Mask = DAG.getBitcast(VT, DAG.getConstant(0x00FF, dl, ExVT)); - SDValue BLo = DAG.getNode(ISD::AND, dl, VT, Mask, B); SDValue BHi = DAG.getNode(X86ISD::ANDNP, dl, VT, Mask, B); - SDValue RLo = DAG.getNode(X86ISD::VPMADDUBSW, dl, ExVT, A, BLo); + SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, DAG.getBitcast(ExVT, A), + DAG.getBitcast(ExVT, B)); SDValue RHi = DAG.getNode(X86ISD::VPMADDUBSW, dl, ExVT, A, BHi); RLo = DAG.getNode(ISD::AND, dl, VT, DAG.getBitcast(VT, RLo), Mask); RHi = DAG.getNode(X86ISD::VSHLI, dl, ExVT, RHi, diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll index 1133cdfd083be..d21df472f06cb 100644 --- a/llvm/test/CodeGen/X86/avx2-arith.ll +++ b/llvm/test/CodeGen/X86/avx2-arith.ll @@ -121,14 +121,13 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone { define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone { ; CHECK-LABEL: mul_v32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-NEXT: vpand %ymm2, %ymm1, %ymm3 -; CHECK-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 -; CHECK-NEXT: vpand %ymm2, %ymm3, %ymm3 -; CHECK-NEXT: vpandn %ymm1, %ymm2, %ymm1 +; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-NEXT: vpand %ymm3, %ymm2, %ymm2 +; CHECK-NEXT: vpandn %ymm1, %ymm3, %ymm1 ; CHECK-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ; CHECK-NEXT: vpsllw $8, %ymm0, %ymm0 -; CHECK-NEXT: vpor %ymm0, %ymm3, %ymm0 +; CHECK-NEXT: vpor %ymm0, %ymm2, %ymm0 ; CHECK-NEXT: ret{{[l|q]}} %x = mul <32 x i8> %i, %j ret <32 x i8> %x diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll index 29c41cac222b2..15d187a5baeec 100644 --- 
a/llvm/test/CodeGen/X86/combine-mul.ll +++ b/llvm/test/CodeGen/X86/combine-mul.ll @@ -504,7 +504,7 @@ define <16 x i8> @PR35579(<16 x i8> %x) { ; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] ; SSE-NEXT: psllw $8, %xmm1 -; SSE-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,2,0,4,0,2,0,8,0,2,0,4,0,2,0] +; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,1,2,1,4,1,2,1,8,1,2,1,4,1,2,1] ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: retq diff --git a/llvm/test/CodeGen/X86/gfni-shifts.ll b/llvm/test/CodeGen/X86/gfni-shifts.ll index 30f1874c51fed..638d88481f071 100644 --- a/llvm/test/CodeGen/X86/gfni-shifts.ll +++ b/llvm/test/CodeGen/X86/gfni-shifts.ll @@ -388,7 +388,7 @@ define <16 x i8> @constant_shl_v16i8(<16 x i8> %a) nounwind { ; GFNISSE-NEXT: movdqa %xmm0, %xmm1 ; GFNISSE-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNISSE-NEXT: psllw $8, %xmm1 -; GFNISSE-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNISSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNISSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; GFNISSE-NEXT: por %xmm1, %xmm0 ; GFNISSE-NEXT: retq @@ -397,7 +397,7 @@ define <16 x i8> @constant_shl_v16i8(<16 x i8> %a) nounwind { ; GFNIAVX1: # %bb.0: ; GFNIAVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNIAVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; GFNIAVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNIAVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNIAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; GFNIAVX1-NEXT: retq @@ -1213,21 +1213,20 @@ define <32 x i8> @splatvar_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { define <32 x i8> @constant_shl_v32i8(<32 x i8> %a) nounwind { ; GFNISSE-LABEL: constant_shl_v32i8: ; GFNISSE: # %bb.0: -; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm2 = [1,4,16,64,128,32,8,2] +; GFNISSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNISSE-NEXT: movdqa %xmm0, %xmm3 -; GFNISSE-NEXT: pmaddubsw %xmm2, %xmm3 +; GFNISSE-NEXT: pmullw %xmm2, %xmm3 ; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] ; GFNISSE-NEXT: pand %xmm4, %xmm3 ; GFNISSE-NEXT: movdqa {{.*#+}} xmm5 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNISSE-NEXT: pmaddubsw %xmm5, %xmm0 ; GFNISSE-NEXT: psllw $8, %xmm0 ; GFNISSE-NEXT: por %xmm3, %xmm0 -; GFNISSE-NEXT: movdqa %xmm1, %xmm3 -; GFNISSE-NEXT: pmaddubsw %xmm2, %xmm3 -; GFNISSE-NEXT: pand %xmm4, %xmm3 +; GFNISSE-NEXT: pmullw %xmm1, %xmm2 +; GFNISSE-NEXT: pand %xmm4, %xmm2 ; GFNISSE-NEXT: pmaddubsw %xmm5, %xmm1 ; GFNISSE-NEXT: psllw $8, %xmm1 -; GFNISSE-NEXT: por %xmm3, %xmm1 +; GFNISSE-NEXT: por %xmm2, %xmm1 ; GFNISSE-NEXT: retq ; ; GFNIAVX1-LABEL: constant_shl_v32i8: @@ -1239,9 +1238,9 @@ define <32 x i8> @constant_shl_v32i8(<32 x i8> %a) nounwind { ; GFNIAVX1-NEXT: vpmaddubsw %xmm1, %xmm3, %xmm1 ; GFNIAVX1-NEXT: vpsllw $8, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = [1,4,16,64,128,32,8,2] -; GFNIAVX1-NEXT: vpmaddubsw %xmm2, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpmaddubsw %xmm2, %xmm0, 
%xmm0 +; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] +; GFNIAVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; GFNIAVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; GFNIAVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 @@ -1251,14 +1250,14 @@ define <32 x i8> @constant_shl_v32i8(<32 x i8> %a) nounwind { ; GFNIAVX2: # %bb.0: ; GFNIAVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNIAVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; GFNIAVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNIAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNIAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; GFNIAVX2-NEXT: retq ; ; GFNIAVX512VL-LABEL: constant_shl_v32i8: ; GFNIAVX512VL: # %bb.0: -; GFNIAVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNIAVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNIAVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNIAVX512VL-NEXT: vpsllw $8, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpternlogd {{.*#+}} ymm0 = ymm0 | (ymm1 & m32bcst) @@ -2521,9 +2520,9 @@ define <64 x i8> @splatvar_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; GFNISSE-LABEL: constant_shl_v64i8: ; GFNISSE: # %bb.0: -; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm4 = [1,4,16,64,128,32,8,2] +; GFNISSE-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNISSE-NEXT: movdqa %xmm0, %xmm6 -; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6 +; GFNISSE-NEXT: pmullw %xmm4, %xmm6 ; GFNISSE-NEXT: pmovzxbw {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] ; GFNISSE-NEXT: pand %xmm5, %xmm6 ; GFNISSE-NEXT: movdqa {{.*#+}} xmm7 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] @@ -2531,23 +2530,22 @@ define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; GFNISSE-NEXT: psllw $8, %xmm0 ; GFNISSE-NEXT: por %xmm6, %xmm0 ; GFNISSE-NEXT: movdqa %xmm1, %xmm6 -; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6 +; GFNISSE-NEXT: pmullw %xmm4, %xmm6 ; GFNISSE-NEXT: pand %xmm5, %xmm6 ; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm1 ; GFNISSE-NEXT: psllw $8, %xmm1 ; GFNISSE-NEXT: por %xmm6, %xmm1 ; GFNISSE-NEXT: movdqa %xmm2, %xmm6 -; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6 +; GFNISSE-NEXT: pmullw %xmm4, %xmm6 ; GFNISSE-NEXT: pand %xmm5, %xmm6 ; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm2 ; GFNISSE-NEXT: psllw $8, %xmm2 ; GFNISSE-NEXT: por %xmm6, %xmm2 -; GFNISSE-NEXT: movdqa %xmm3, %xmm6 -; GFNISSE-NEXT: pmaddubsw %xmm4, %xmm6 -; GFNISSE-NEXT: pand %xmm5, %xmm6 +; GFNISSE-NEXT: pmullw %xmm3, %xmm4 +; GFNISSE-NEXT: pand %xmm5, %xmm4 ; GFNISSE-NEXT: pmaddubsw %xmm7, %xmm3 ; GFNISSE-NEXT: psllw $8, %xmm3 -; GFNISSE-NEXT: por %xmm6, %xmm3 +; GFNISSE-NEXT: por %xmm4, %xmm3 ; GFNISSE-NEXT: retq ; ; GFNIAVX1-LABEL: constant_shl_v64i8: @@ -2559,9 +2557,9 @@ define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; 
GFNIAVX1-NEXT: vpmaddubsw %xmm2, %xmm4, %xmm5 ; GFNIAVX1-NEXT: vpsllw $8, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 -; GFNIAVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = [1,4,16,64,128,32,8,2] -; GFNIAVX1-NEXT: vpmaddubsw %xmm5, %xmm4, %xmm4 -; GFNIAVX1-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0 +; GFNIAVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] +; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 ; GFNIAVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] ; GFNIAVX1-NEXT: vandps %ymm4, %ymm0, %ymm0 @@ -2572,8 +2570,8 @@ define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; GFNIAVX1-NEXT: vpmaddubsw %xmm2, %xmm6, %xmm2 ; GFNIAVX1-NEXT: vpsllw $8, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 -; GFNIAVX1-NEXT: vpmaddubsw %xmm5, %xmm6, %xmm3 -; GFNIAVX1-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1 +; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm6, %xmm3 +; GFNIAVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 ; GFNIAVX1-NEXT: vandps %ymm4, %ymm1, %ymm1 ; GFNIAVX1-NEXT: vorps %ymm2, %ymm1, %ymm1 @@ -2581,9 +2579,9 @@ define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; ; GFNIAVX2-LABEL: constant_shl_v64i8: ; GFNIAVX2: # %bb.0: -; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNIAVX2-NEXT: # ymm2 = mem[0,1,0,1] -; GFNIAVX2-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm3 +; GFNIAVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm3 ; GFNIAVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; GFNIAVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] @@ -2591,7 +2589,7 @@ define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; GFNIAVX2-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsllw $8, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpor %ymm0, %ymm3, %ymm0 -; GFNIAVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm2 +; GFNIAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm2 ; GFNIAVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsllw $8, %ymm1, %ymm1 @@ -2601,10 +2599,10 @@ define <64 x i8> @constant_shl_v64i8(<64 x i8> %a) nounwind { ; GFNIAVX512VL-LABEL: constant_shl_v64i8: ; GFNIAVX512VL: # %bb.0: ; GFNIAVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNIAVX512VL-NEXT: # ymm2 = mem[0,1,0,1] -; GFNIAVX512VL-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm3 -; GFNIAVX512VL-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm2 +; GFNIAVX512VL-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; GFNIAVX512VL-NEXT: vpmullw %ymm2, %ymm0, %ymm2 ; GFNIAVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 ; GFNIAVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNIAVX512VL-NEXT: # ymm3 = mem[0,1,0,1] @@ -2618,7 +2616,7 @@ define <64 x i8> @constant_shl_v64i8(<64 
x i8> %a) nounwind { ; ; GFNIAVX512BW-LABEL: constant_shl_v64i8: ; GFNIAVX512BW: # %bb.0: -; GFNIAVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; GFNIAVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; GFNIAVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; GFNIAVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0 ; GFNIAVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 | (zmm1 & m32bcst) diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll index a798f4c38f68f..541ca9d4f4096 100644 --- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll +++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll @@ -2368,17 +2368,15 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin ; SSE41-NEXT: psubb %xmm3, %xmm1 ; SSE41-NEXT: psrlw $1, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm4 -; SSE41-NEXT: movdqa %xmm1, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm5 -; SSE41-NEXT: pand %xmm3, %xmm5 -; SSE41-NEXT: pandn %xmm2, %xmm3 -; SSE41-NEXT: pmaddubsw %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm3 +; SSE41-NEXT: pandn %xmm2, %xmm4 +; SSE41-NEXT: pmaddubsw %xmm4, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: por %xmm1, %xmm5 -; SSE41-NEXT: paddb %xmm5, %xmm0 +; SSE41-NEXT: por %xmm1, %xmm3 +; SSE41-NEXT: paddb %xmm3, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: vec128_i8_signed_reg_reg: @@ -2390,14 +2388,13 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin ; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4 -; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm3 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpandn %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -2429,12 +2426,10 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOP-FALLBACK-NEXT: 
vpand %xmm3, %xmm2, %xmm2 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOP-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOP-FALLBACK-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOP-FALLBACK-NEXT: retq ; @@ -2447,12 +2442,10 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOPAVX1-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOPAVX1-NEXT: retq ; @@ -2591,17 +2584,15 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw ; SSE41-NEXT: psubb %xmm2, %xmm1 ; SSE41-NEXT: psrlw $1, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm4, %xmm3 -; SSE41-NEXT: pand %xmm2, %xmm3 -; SSE41-NEXT: movdqa %xmm1, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm3, %xmm5 -; SSE41-NEXT: pand %xmm2, %xmm5 -; SSE41-NEXT: pandn %xmm4, %xmm2 -; SSE41-NEXT: pmaddubsw %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: pmullw %xmm4, %xmm2 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm3, %xmm2 +; SSE41-NEXT: pandn %xmm4, %xmm3 +; SSE41-NEXT: pmaddubsw %xmm3, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: por %xmm1, %xmm5 -; SSE41-NEXT: paddb %xmm5, %xmm0 +; SSE41-NEXT: por %xmm1, %xmm2 +; SSE41-NEXT: paddb %xmm2, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: vec128_i8_unsigned_reg_reg: @@ -2615,14 +2606,13 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw ; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm4 -; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm2 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpmaddubsw %xmm3, %xmm1, 
%xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -2656,12 +2646,10 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOP-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOP-FALLBACK-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOP-FALLBACK-NEXT: retq ; @@ -2674,12 +2662,10 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOPAVX1-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOPAVX1-NEXT: retq ; @@ -2822,16 +2808,14 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind ; SSE41-NEXT: psubb %xmm3, %xmm0 ; SSE41-NEXT: psrlw $1, %xmm0 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm4 -; SSE41-NEXT: movdqa %xmm0, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm5 -; SSE41-NEXT: pand %xmm3, %xmm5 -; SSE41-NEXT: pandn %xmm2, %xmm3 -; SSE41-NEXT: pmaddubsw %xmm3, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm3 +; SSE41-NEXT: pandn %xmm2, %xmm4 +; SSE41-NEXT: pmaddubsw %xmm4, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 -; SSE41-NEXT: por %xmm5, %xmm0 +; SSE41-NEXT: por %xmm3, %xmm0 ; SSE41-NEXT: paddb %xmm1, %xmm0 ; SSE41-NEXT: retq ; @@ -2845,14 +2829,13 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind ; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0 ; AVX1-NEXT: vpand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm0, %xmm4 -; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4 -; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm3 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpandn %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0 +; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -2886,12 +2869,10 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm0, %xmm0 -; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm0, %xmm4 -; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0 -; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2],xmm4[2],xmm0[4],xmm4[4],xmm0[6],xmm4[6],xmm0[8],xmm4[8],xmm0[10],xmm4[10],xmm0[12],xmm4[12],xmm0[14],xmm4[14] +; XOP-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOP-FALLBACK-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm3 +; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0 +; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2],xmm3[2],xmm0[4],xmm3[4],xmm0[6],xmm3[6],xmm0[8],xmm3[8],xmm0[10],xmm3[10],xmm0[12],xmm3[12],xmm0[14],xmm3[14] ; XOP-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; XOP-FALLBACK-NEXT: retq ; @@ -2905,12 +2886,10 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm0 -; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm0, %xmm4 -; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0 -; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2],xmm4[2],xmm0[4],xmm4[4],xmm0[6],xmm4[6],xmm0[8],xmm4[8],xmm0[10],xmm4[10],xmm0[12],xmm4[12],xmm0[14],xmm4[14] +; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOPAVX1-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm3 +; XOPAVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0 +; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2],xmm3[2],xmm0[4],xmm3[4],xmm0[6],xmm3[6],xmm0[8],xmm3[8],xmm0[10],xmm3[10],xmm0[12],xmm3[12],xmm0[14],xmm3[14] ; XOPAVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; XOPAVX1-NEXT: retq ; @@ -3053,16 +3032,14 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind ; SSE41-NEXT: psubb %xmm3, %xmm1 ; SSE41-NEXT: psrlw $1, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm4 -; SSE41-NEXT: movdqa %xmm1, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm5 -; SSE41-NEXT: pand %xmm3, %xmm5 -; SSE41-NEXT: pandn %xmm2, %xmm3 -; SSE41-NEXT: pmaddubsw %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; 
SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm3 +; SSE41-NEXT: pandn %xmm2, %xmm4 +; SSE41-NEXT: pmaddubsw %xmm4, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: por %xmm5, %xmm1 +; SSE41-NEXT: por %xmm3, %xmm1 ; SSE41-NEXT: paddb %xmm1, %xmm0 ; SSE41-NEXT: retq ; @@ -3076,14 +3053,13 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind ; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4 -; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm3 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpandn %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -3117,12 +3093,10 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOP-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOP-FALLBACK-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOP-FALLBACK-NEXT: retq ; @@ -3136,12 +3110,10 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOPAVX1-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOPAVX1-NEXT: retq ; @@ -3286,16 +3258,14 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) 
nounwind ; SSE41-NEXT: psubb %xmm3, %xmm0 ; SSE41-NEXT: psrlw $1, %xmm0 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm4 -; SSE41-NEXT: movdqa %xmm0, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm5 -; SSE41-NEXT: pand %xmm3, %xmm5 -; SSE41-NEXT: pandn %xmm2, %xmm3 -; SSE41-NEXT: pmaddubsw %xmm3, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm3 +; SSE41-NEXT: pandn %xmm2, %xmm4 +; SSE41-NEXT: pmaddubsw %xmm4, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 -; SSE41-NEXT: por %xmm5, %xmm0 +; SSE41-NEXT: por %xmm3, %xmm0 ; SSE41-NEXT: paddb %xmm1, %xmm0 ; SSE41-NEXT: retq ; @@ -3310,14 +3280,13 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4 -; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm3 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpandn %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -3353,12 +3322,10 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOP-FALLBACK-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOP-FALLBACK-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOP-FALLBACK-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOP-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOP-FALLBACK-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOP-FALLBACK-NEXT: retq ; @@ -3373,12 +3340,10 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1 -; XOPAVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; XOPAVX1-NEXT: vpandn %xmm2, %xmm3, %xmm4 -; XOPAVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm4 -; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOPAVX1-NEXT: vpmaddubsw %xmm2, %xmm1, %xmm1 -; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = 
xmm1[0],xmm4[0],xmm1[2],xmm4[2],xmm1[4],xmm4[4],xmm1[6],xmm4[6],xmm1[8],xmm4[8],xmm1[10],xmm4[10],xmm1[12],xmm4[12],xmm1[14],xmm4[14] +; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3 +; XOPAVX1-NEXT: vpmaddubsw %xmm3, %xmm1, %xmm3 +; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2],xmm3[2],xmm1[4],xmm3[4],xmm1[6],xmm3[6],xmm1[8],xmm3[8],xmm1[10],xmm3[10],xmm1[12],xmm3[12],xmm1[14],xmm3[14] ; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; XOPAVX1-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll index 7c9adaf31aff5..85791cd65163a 100644 --- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll +++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll @@ -1896,40 +1896,38 @@ define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwin define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwind { ; AVX1-LABEL: vec256_i8_signed_reg_reg: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm4 ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm5 ; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm6 ; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpminsb %xmm3, %xmm2, %xmm6 -; AVX1-NEXT: vpmaxsb %xmm3, %xmm2, %xmm3 -; AVX1-NEXT: vpsubb %xmm6, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 +; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm6 +; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8 -; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8 -; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8 -; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5 +; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm7 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpandn %xmm5, %xmm8, %xmm5 ; AVX1-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm7, %xmm1 ; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5 -; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5 -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm5 +; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vpandn %xmm4, %xmm8, %xmm4 +; AVX1-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -1943,14 +1941,13 @@ 
define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin ; AVX2-NEXT: vpsubb %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX2-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; @@ -1974,15 +1971,13 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin ; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8 ; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8 -; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5 -; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1 +; XOP-NEXT: vpmullw %xmm5, %xmm1, %xmm1 ; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30] ; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 ; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6 ; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6 -; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4 -; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2 +; XOP-NEXT: vpmullw %xmm4, %xmm2, %xmm2 ; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2 ; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2 ; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0 @@ -1998,14 +1993,13 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin ; AVX512F-NEXT: vpsubb %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX512F-NEXT: retq ; @@ -2087,19 +2081,17 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpor %xmm4, %xmm6, %xmm6 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm8 -; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8 -; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8 -; AVX1-NEXT: vpandn %xmm6, %xmm7, %xmm6 +; AVX1-NEXT: vpmullw %xmm6, %xmm1, %xmm7 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpandn %xmm6, %xmm8, %xmm6 
; AVX1-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm7, %xmm1 ; AVX1-NEXT: vpor %xmm4, %xmm5, %xmm4 -; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5 -; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5 -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4 +; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm5 +; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vpandn %xmm4, %xmm8, %xmm4 ; AVX1-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 @@ -2119,14 +2111,13 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw ; AVX2-NEXT: vpsubb %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm4 -; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX2-NEXT: vpand %ymm2, %ymm4, %ymm4 -; AVX2-NEXT: vpandn %ymm3, %ymm2, %ymm2 -; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm2 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX2-NEXT: vpandn %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX2-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; @@ -2150,15 +2141,13 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw ; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8 ; XOP-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8 -; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5 -; XOP-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1 +; XOP-NEXT: vpmullw %xmm5, %xmm1, %xmm1 ; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30] ; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm1, %xmm1 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 ; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6 ; XOP-NEXT: vpmaddubsw %xmm6, %xmm2, %xmm6 -; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4 -; XOP-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2 +; XOP-NEXT: vpmullw %xmm4, %xmm2, %xmm2 ; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm2, %xmm2 ; XOP-NEXT: vpaddb %xmm3, %xmm2, %xmm2 ; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0 @@ -2175,14 +2164,13 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw ; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm2, %ymm4, %ymm4 -; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 +; AVX512F-NEXT: vpmullw %ymm3, %ymm1, %ymm2 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm3, %ymm4, %ymm3 +; AVX512F-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1 ; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0 
; AVX512F-NEXT: retq ; @@ -2247,41 +2235,39 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind { ; AVX1-LABEL: vec256_i8_signed_mem_reg: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vmovdqa (%rdi), %xmm1 -; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2 -; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm4 ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm5 ; AVX1-NEXT: vpminsb %xmm0, %xmm1, %xmm6 ; AVX1-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpsubb %xmm6, %xmm0, %xmm0 -; AVX1-NEXT: vpminsb %xmm3, %xmm2, %xmm6 -; AVX1-NEXT: vpmaxsb %xmm3, %xmm2, %xmm3 -; AVX1-NEXT: vpsubb %xmm6, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 +; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm6 +; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8 -; AVX1-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8 -; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8 -; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5 +; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm7 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpandn %xmm5, %xmm8, %xmm5 ; AVX1-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpor %xmm0, %xmm8, %xmm0 +; AVX1-NEXT: vpor %xmm0, %xmm7, %xmm0 ; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5 -; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5 -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm5 +; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vpandn %xmm4, %xmm8, %xmm4 +; AVX1-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2 +; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -2296,14 +2282,13 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind ; AVX2-NEXT: vpsubb %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX2-NEXT: vpmaddubsw %ymm4, %ymm0, %ymm4 -; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm3 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: 
vpmaddubsw %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpsllw $8, %ymm0, %ymm0 -; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0 +; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0 ; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; @@ -2328,15 +2313,13 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind ; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8 ; XOP-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8 -; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5 -; XOP-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0 +; XOP-NEXT: vpmullw %xmm5, %xmm0, %xmm0 ; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30] ; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm0, %xmm0 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 ; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6 ; XOP-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm6 -; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4 -; XOP-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1 +; XOP-NEXT: vpmullw %xmm4, %xmm1, %xmm1 ; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm1, %xmm1 ; XOP-NEXT: vpaddb %xmm3, %xmm1, %xmm1 ; XOP-NEXT: vpaddb %xmm2, %xmm0, %xmm0 @@ -2353,14 +2336,13 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind ; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm0, %ymm4 -; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm3 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsllw $8, %ymm0, %ymm0 -; AVX512F-NEXT: vpor %ymm0, %ymm4, %ymm0 +; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0 ; AVX512F-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: retq ; @@ -2443,19 +2425,17 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind ; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8 -; AVX1-NEXT: vpmaddubsw %xmm8, %xmm2, %xmm8 -; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8 -; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5 +; AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm7 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpandn %xmm5, %xmm8, %xmm5 ; AVX1-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpor %xmm2, %xmm8, %xmm2 +; AVX1-NEXT: vpor %xmm2, %xmm7, %xmm2 ; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5 -; AVX1-NEXT: vpmaddubsw %xmm5, %xmm3, %xmm5 -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4 +; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm5 +; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vpandn %xmm4, %xmm8, %xmm4 ; AVX1-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 @@ -2474,14 +2454,13 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind ; AVX2-NEXT: vpsubb %ymm3, 
%ymm1, %ymm1 ; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX2-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; @@ -2506,15 +2485,13 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind ; XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8 ; XOP-NEXT: vpmaddubsw %xmm8, %xmm2, %xmm8 -; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5 -; XOP-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm2 +; XOP-NEXT: vpmullw %xmm5, %xmm2, %xmm2 ; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30] ; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm2, %xmm2 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 ; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6 ; XOP-NEXT: vpmaddubsw %xmm6, %xmm3, %xmm6 -; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4 -; XOP-NEXT: vpmaddubsw %xmm4, %xmm3, %xmm3 +; XOP-NEXT: vpmullw %xmm4, %xmm3, %xmm3 ; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm3, %xmm3 ; XOP-NEXT: vpaddb %xmm1, %xmm3, %xmm1 ; XOP-NEXT: vpaddb %xmm0, %xmm2, %xmm0 @@ -2531,14 +2508,13 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind ; AVX512F-NEXT: vpsubb %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX512F-NEXT: retq ; @@ -2603,44 +2579,42 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; AVX1-LABEL: vec256_i8_signed_mem_mem: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa (%rsi), %xmm1 -; AVX1-NEXT: vmovdqa 16(%rsi), %xmm2 -; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa (%rsi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 +; AVX1-NEXT: vmovdqa (%rdi), %xmm2 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3 -; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm4 -; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm5 -; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm6 -; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm4 +; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm5 +; AVX1-NEXT: vpminsb %xmm0, %xmm2, %xmm6 +; 
AVX1-NEXT: vpmaxsb %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpsubb %xmm6, %xmm0, %xmm0 +; AVX1-NEXT: vpminsb %xmm1, %xmm3, %xmm6 +; AVX1-NEXT: vpmaxsb %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm6 -; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm8 -; AVX1-NEXT: vpmaddubsw %xmm8, %xmm1, %xmm8 -; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8 -; AVX1-NEXT: vpandn %xmm5, %xmm7, %xmm5 -; AVX1-NEXT: vpmaddubsw %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm8, %xmm1 +; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm7 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7 +; AVX1-NEXT: vpandn %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpor %xmm0, %xmm7, %xmm0 ; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5 -; AVX1-NEXT: vpmaddubsw %xmm5, %xmm2, %xmm5 -; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpandn %xmm4, %xmm7, %xmm4 -; AVX1-NEXT: vpmaddubsw %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpor %xmm2, %xmm5, %xmm2 -; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm5 +; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vpandn %xmm4, %xmm8, %xmm4 +; AVX1-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpor %xmm1, %xmm5, %xmm1 +; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: vec256_i8_signed_mem_mem: @@ -2654,14 +2628,13 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; AVX2-NEXT: vpsubb %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX2-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX2-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; @@ -2687,15 +2660,13 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; 
XOP-NEXT: vbroadcastss {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255] ; XOP-NEXT: vpandn %xmm5, %xmm7, %xmm8 ; XOP-NEXT: vpmaddubsw %xmm8, %xmm0, %xmm8 -; XOP-NEXT: vpand %xmm7, %xmm5, %xmm5 -; XOP-NEXT: vpmaddubsw %xmm5, %xmm0, %xmm0 +; XOP-NEXT: vpmullw %xmm5, %xmm0, %xmm0 ; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,16,2,18,4,20,6,22,8,24,10,26,12,28,14,30] ; XOP-NEXT: vpperm %xmm5, %xmm8, %xmm0, %xmm0 ; XOP-NEXT: vpor %xmm6, %xmm4, %xmm4 ; XOP-NEXT: vpandn %xmm4, %xmm7, %xmm6 ; XOP-NEXT: vpmaddubsw %xmm6, %xmm1, %xmm6 -; XOP-NEXT: vpand %xmm7, %xmm4, %xmm4 -; XOP-NEXT: vpmaddubsw %xmm4, %xmm1, %xmm1 +; XOP-NEXT: vpmullw %xmm4, %xmm1, %xmm1 ; XOP-NEXT: vpperm %xmm5, %xmm6, %xmm1, %xmm1 ; XOP-NEXT: vpaddb %xmm3, %xmm1, %xmm1 ; XOP-NEXT: vpaddb %xmm2, %xmm0, %xmm0 @@ -2713,14 +2684,13 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind ; AVX512F-NEXT: vpsubb %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm4 -; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1 +; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX512F-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll index 04f0a65c99da8..aa2dd00237b07 100644 --- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll +++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll @@ -889,19 +889,17 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-SKX-NOVBMI-NEXT: vmovdqa 32(%rdi), %ymm1 ; CHECK-SKX-NOVBMI-NEXT: vmovdqa (%rsi), %ymm2 ; CHECK-SKX-NOVBMI-NEXT: vmovdqa 32(%rsi), %ymm3 -; CHECK-SKX-NOVBMI-NEXT: vpbroadcastd {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-SKX-NOVBMI-NEXT: vpand %ymm4, %ymm3, %ymm5 -; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5 -; CHECK-SKX-NOVBMI-NEXT: vpandn %ymm3, %ymm4, %ymm3 +; CHECK-SKX-NOVBMI-NEXT: vpmullw %ymm3, %ymm1, %ymm4 +; CHECK-SKX-NOVBMI-NEXT: vpbroadcastd {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-SKX-NOVBMI-NEXT: vpandn %ymm3, %ymm5, %ymm3 ; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; CHECK-SKX-NOVBMI-NEXT: vpsllw $8, %ymm1, %ymm1 -; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm5 & ymm4) -; CHECK-SKX-NOVBMI-NEXT: vpand %ymm4, %ymm2, %ymm3 -; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 -; CHECK-SKX-NOVBMI-NEXT: vpandn %ymm2, %ymm4, %ymm2 +; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm4 & ymm5) +; CHECK-SKX-NOVBMI-NEXT: vpmullw %ymm2, %ymm0, %ymm3 +; CHECK-SKX-NOVBMI-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 ; CHECK-SKX-NOVBMI-NEXT: vpsllw $8, %ymm0, %ymm0 -; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm3 & ymm4) +; CHECK-SKX-NOVBMI-NEXT: 
vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm3 & ymm5) ; CHECK-SKX-NOVBMI-NEXT: vmovdqa %ymm0, (%rdx) ; CHECK-SKX-NOVBMI-NEXT: vmovdqa %ymm1, 32(%rdx) ; CHECK-SKX-NOVBMI-NEXT: vzeroupper @@ -913,20 +911,18 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-SKX-VBMI-NEXT: vmovdqa 32(%rdi), %ymm1 ; CHECK-SKX-VBMI-NEXT: vmovdqa (%rsi), %ymm2 ; CHECK-SKX-VBMI-NEXT: vmovdqa 32(%rsi), %ymm3 -; CHECK-SKX-VBMI-NEXT: vpbroadcastd {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-SKX-VBMI-NEXT: vpandn %ymm3, %ymm4, %ymm5 -; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5 -; CHECK-SKX-VBMI-NEXT: vpand %ymm4, %ymm3, %ymm3 +; CHECK-SKX-VBMI-NEXT: vpmullw %ymm3, %ymm1, %ymm4 +; CHECK-SKX-VBMI-NEXT: vpbroadcastd {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-SKX-VBMI-NEXT: vpandn %ymm3, %ymm5, %ymm3 ; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; CHECK-SKX-VBMI-NEXT: vmovdqa {{.*#+}} ymm3 = [0,32,2,34,4,36,6,38,8,40,10,42,12,44,14,46,16,48,18,50,20,52,22,54,24,56,26,58,28,60,30,62] -; CHECK-SKX-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm1 -; CHECK-SKX-VBMI-NEXT: vpandn %ymm2, %ymm4, %ymm5 -; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm5 -; CHECK-SKX-VBMI-NEXT: vpand %ymm4, %ymm2, %ymm2 +; CHECK-SKX-VBMI-NEXT: vpermt2b %ymm1, %ymm3, %ymm4 +; CHECK-SKX-VBMI-NEXT: vpmullw %ymm2, %ymm0, %ymm1 +; CHECK-SKX-VBMI-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; CHECK-SKX-VBMI-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 -; CHECK-SKX-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm0 -; CHECK-SKX-VBMI-NEXT: vmovdqa %ymm0, (%rdx) -; CHECK-SKX-VBMI-NEXT: vmovdqa %ymm1, 32(%rdx) +; CHECK-SKX-VBMI-NEXT: vpermt2b %ymm0, %ymm3, %ymm1 +; CHECK-SKX-VBMI-NEXT: vmovdqa %ymm1, (%rdx) +; CHECK-SKX-VBMI-NEXT: vmovdqa %ymm4, 32(%rdx) ; CHECK-SKX-VBMI-NEXT: vzeroupper ; CHECK-SKX-VBMI-NEXT: retq ; @@ -936,19 +932,17 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-AVX512-NEXT: vmovdqa 32(%rdi), %ymm1 ; CHECK-AVX512-NEXT: vmovdqa (%rsi), %ymm2 ; CHECK-AVX512-NEXT: vmovdqa 32(%rsi), %ymm3 -; CHECK-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-AVX512-NEXT: vpand %ymm4, %ymm3, %ymm5 -; CHECK-AVX512-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5 -; CHECK-AVX512-NEXT: vpandn %ymm3, %ymm4, %ymm3 +; CHECK-AVX512-NEXT: vpmullw %ymm3, %ymm1, %ymm4 +; CHECK-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-AVX512-NEXT: vpandn %ymm3, %ymm5, %ymm3 ; CHECK-AVX512-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; CHECK-AVX512-NEXT: vpsllw $8, %ymm1, %ymm1 -; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm5 & ymm4) -; CHECK-AVX512-NEXT: vpand %ymm4, %ymm2, %ymm3 -; CHECK-AVX512-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 -; CHECK-AVX512-NEXT: vpandn %ymm2, %ymm4, %ymm2 +; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm4 & ymm5) +; CHECK-AVX512-NEXT: vpmullw %ymm2, %ymm0, %ymm3 +; CHECK-AVX512-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; CHECK-AVX512-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 ; CHECK-AVX512-NEXT: vpsllw $8, %ymm0, %ymm0 -; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm3 & ymm4) +; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm3 & ymm5) ; CHECK-AVX512-NEXT: vmovdqa %ymm0, (%rdx) ; CHECK-AVX512-NEXT: vmovdqa %ymm1, 32(%rdx) ; CHECK-AVX512-NEXT: vzeroupper @@ -960,20 +954,18 @@ define dso_local void @mul256(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" 
; CHECK-VBMI-NEXT: vmovdqa 32(%rdi), %ymm1 ; CHECK-VBMI-NEXT: vmovdqa (%rsi), %ymm2 ; CHECK-VBMI-NEXT: vmovdqa 32(%rsi), %ymm3 -; CHECK-VBMI-NEXT: vpbroadcastd {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-VBMI-NEXT: vpandn %ymm3, %ymm4, %ymm5 -; CHECK-VBMI-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm5 -; CHECK-VBMI-NEXT: vpand %ymm4, %ymm3, %ymm3 +; CHECK-VBMI-NEXT: vpmullw %ymm3, %ymm1, %ymm4 +; CHECK-VBMI-NEXT: vpbroadcastd {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-VBMI-NEXT: vpandn %ymm3, %ymm5, %ymm3 ; CHECK-VBMI-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; CHECK-VBMI-NEXT: vmovdqa {{.*#+}} ymm3 = [0,32,2,34,4,36,6,38,8,40,10,42,12,44,14,46,16,48,18,50,20,52,22,54,24,56,26,58,28,60,30,62] -; CHECK-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm1 -; CHECK-VBMI-NEXT: vpandn %ymm2, %ymm4, %ymm5 -; CHECK-VBMI-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm5 -; CHECK-VBMI-NEXT: vpand %ymm4, %ymm2, %ymm2 +; CHECK-VBMI-NEXT: vpermt2b %ymm1, %ymm3, %ymm4 +; CHECK-VBMI-NEXT: vpmullw %ymm2, %ymm0, %ymm1 +; CHECK-VBMI-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; CHECK-VBMI-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 -; CHECK-VBMI-NEXT: vpermt2b %ymm5, %ymm3, %ymm0 -; CHECK-VBMI-NEXT: vmovdqa %ymm0, (%rdx) -; CHECK-VBMI-NEXT: vmovdqa %ymm1, 32(%rdx) +; CHECK-VBMI-NEXT: vpermt2b %ymm0, %ymm3, %ymm1 +; CHECK-VBMI-NEXT: vmovdqa %ymm1, (%rdx) +; CHECK-VBMI-NEXT: vmovdqa %ymm4, 32(%rdx) ; CHECK-VBMI-NEXT: vzeroupper ; CHECK-VBMI-NEXT: retq %d = load <64 x i8>, ptr %a @@ -988,13 +980,12 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-SKX-NOVBMI: # %bb.0: ; CHECK-SKX-NOVBMI-NEXT: vmovdqa64 (%rdi), %zmm0 ; CHECK-SKX-NOVBMI-NEXT: vmovdqa64 (%rsi), %zmm1 -; CHECK-SKX-NOVBMI-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-SKX-NOVBMI-NEXT: vpandq %zmm2, %zmm1, %zmm3 -; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3 -; CHECK-SKX-NOVBMI-NEXT: vpandnq %zmm1, %zmm2, %zmm1 +; CHECK-SKX-NOVBMI-NEXT: vpmullw %zmm1, %zmm0, %zmm2 +; CHECK-SKX-NOVBMI-NEXT: vpbroadcastd {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-SKX-NOVBMI-NEXT: vpandnq %zmm1, %zmm3, %zmm1 ; CHECK-SKX-NOVBMI-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0 ; CHECK-SKX-NOVBMI-NEXT: vpsllw $8, %zmm0, %zmm0 -; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm3 & zmm2) +; CHECK-SKX-NOVBMI-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm2 & zmm3) ; CHECK-SKX-NOVBMI-NEXT: vmovdqa64 %zmm0, (%rdx) ; CHECK-SKX-NOVBMI-NEXT: vzeroupper ; CHECK-SKX-NOVBMI-NEXT: retq @@ -1003,13 +994,11 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-SKX-VBMI: # %bb.0: ; CHECK-SKX-VBMI-NEXT: vmovdqa64 (%rdi), %zmm0 ; CHECK-SKX-VBMI-NEXT: vmovdqa64 (%rsi), %zmm1 -; CHECK-SKX-VBMI-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-SKX-VBMI-NEXT: vpandnq %zmm1, %zmm2, %zmm3 -; CHECK-SKX-VBMI-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3 -; CHECK-SKX-VBMI-NEXT: vpandq %zmm2, %zmm1, %zmm1 +; CHECK-SKX-VBMI-NEXT: vpmullw %zmm1, %zmm0, %zmm2 +; CHECK-SKX-VBMI-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1 ; CHECK-SKX-VBMI-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0 ; CHECK-SKX-VBMI-NEXT: vmovdqa64 
{{.*#+}} zmm1 = [0,64,2,66,4,68,6,70,8,72,10,74,12,76,14,78,16,80,18,82,20,84,22,86,24,88,26,90,28,92,30,94,32,96,34,98,36,100,38,102,40,104,42,106,44,108,46,110,48,112,50,114,52,116,54,118,56,120,58,122,60,124,62,126] -; CHECK-SKX-VBMI-NEXT: vpermi2b %zmm3, %zmm0, %zmm1 +; CHECK-SKX-VBMI-NEXT: vpermi2b %zmm0, %zmm2, %zmm1 ; CHECK-SKX-VBMI-NEXT: vmovdqa64 %zmm1, (%rdx) ; CHECK-SKX-VBMI-NEXT: vzeroupper ; CHECK-SKX-VBMI-NEXT: retq @@ -1018,13 +1007,12 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-AVX512: # %bb.0: ; CHECK-AVX512-NEXT: vmovdqa64 (%rdi), %zmm0 ; CHECK-AVX512-NEXT: vmovdqa64 (%rsi), %zmm1 -; CHECK-AVX512-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-AVX512-NEXT: vpandq %zmm2, %zmm1, %zmm3 -; CHECK-AVX512-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3 -; CHECK-AVX512-NEXT: vpandnq %zmm1, %zmm2, %zmm1 +; CHECK-AVX512-NEXT: vpmullw %zmm1, %zmm0, %zmm2 +; CHECK-AVX512-NEXT: vpbroadcastd {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; CHECK-AVX512-NEXT: vpandnq %zmm1, %zmm3, %zmm1 ; CHECK-AVX512-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0 ; CHECK-AVX512-NEXT: vpsllw $8, %zmm0, %zmm0 -; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm3 & zmm2) +; CHECK-AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm2 & zmm3) ; CHECK-AVX512-NEXT: vmovdqa64 %zmm0, (%rdx) ; CHECK-AVX512-NEXT: vzeroupper ; CHECK-AVX512-NEXT: retq @@ -1033,13 +1021,11 @@ define dso_local void @mul512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"=" ; CHECK-VBMI: # %bb.0: ; CHECK-VBMI-NEXT: vmovdqa64 (%rdi), %zmm0 ; CHECK-VBMI-NEXT: vmovdqa64 (%rsi), %zmm1 -; CHECK-VBMI-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; CHECK-VBMI-NEXT: vpandnq %zmm1, %zmm2, %zmm3 -; CHECK-VBMI-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3 -; CHECK-VBMI-NEXT: vpandq %zmm2, %zmm1, %zmm1 +; CHECK-VBMI-NEXT: vpmullw %zmm1, %zmm0, %zmm2 +; CHECK-VBMI-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1 ; CHECK-VBMI-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0 ; CHECK-VBMI-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,64,2,66,4,68,6,70,8,72,10,74,12,76,14,78,16,80,18,82,20,84,22,86,24,88,26,90,28,92,30,94,32,96,34,98,36,100,38,102,40,104,42,106,44,108,46,110,48,112,50,114,52,116,54,118,56,120,58,122,60,124,62,126] -; CHECK-VBMI-NEXT: vpermi2b %zmm3, %zmm0, %zmm1 +; CHECK-VBMI-NEXT: vpermi2b %zmm0, %zmm2, %zmm1 ; CHECK-VBMI-NEXT: vmovdqa64 %zmm1, (%rdx) ; CHECK-VBMI-NEXT: vzeroupper ; CHECK-VBMI-NEXT: retq diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll index 00731fe3e9556..189c5aa9fee20 100644 --- a/llvm/test/CodeGen/X86/pmul.ll +++ b/llvm/test/CodeGen/X86/pmul.ll @@ -25,7 +25,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind { ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: por %xmm1, %xmm0 ; SSE41-NEXT: 
retq @@ -160,16 +160,14 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind { ; ; SSE41-LABEL: mul_v16i8: ; SSE41: # %bb.0: # %entry -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: pand %xmm2, %xmm3 -; SSE41-NEXT: movdqa %xmm0, %xmm4 -; SSE41-NEXT: pmaddubsw %xmm3, %xmm4 -; SSE41-NEXT: pand %xmm2, %xmm4 -; SSE41-NEXT: pandn %xmm1, %xmm2 -; SSE41-NEXT: pmaddubsw %xmm2, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmullw %xmm1, %xmm2 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm3, %xmm2 +; SSE41-NEXT: pandn %xmm1, %xmm3 +; SSE41-NEXT: pmaddubsw %xmm3, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 -; SSE41-NEXT: por %xmm4, %xmm0 +; SSE41-NEXT: por %xmm2, %xmm0 ; SSE41-NEXT: retq ; ; AVX2-LABEL: mul_v16i8: @@ -400,28 +398,27 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind { ; ; SSE41-LABEL: mul_v32i8c: ; SSE41: # %bb.0: # %entry -; SSE41-NEXT: pmovsxbw {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117] +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] ; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: pmaddubsw %xmm2, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] ; SSE41-NEXT: pand %xmm4, %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; SSE41-NEXT: pmaddubsw %xmm5, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 ; SSE41-NEXT: por %xmm3, %xmm0 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: pmaddubsw %xmm2, %xmm3 -; SSE41-NEXT: pand %xmm4, %xmm3 +; SSE41-NEXT: pmullw %xmm1, %xmm2 +; SSE41-NEXT: pand %xmm4, %xmm2 ; SSE41-NEXT: pmaddubsw %xmm5, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: por %xmm3, %xmm1 +; SSE41-NEXT: por %xmm2, %xmm1 ; SSE41-NEXT: retq ; ; AVX2-LABEL: mul_v32i8c: ; AVX2: # %bb.0: # %entry ; AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0] +; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -430,7 +427,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind { ; AVX512F: # %bb.0: # %entry ; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0] +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: retq @@ -584,49 +581,44 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind { ; ; 
SSE41-LABEL: mul_v32i8: ; SSE41: # %bb.0: # %entry -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm4, %xmm5 -; SSE41-NEXT: pandn %xmm2, %xmm5 -; SSE41-NEXT: pand %xmm4, %xmm2 -; SSE41-NEXT: movdqa %xmm0, %xmm6 -; SSE41-NEXT: pmaddubsw %xmm2, %xmm6 -; SSE41-NEXT: pand %xmm4, %xmm6 -; SSE41-NEXT: pmaddubsw %xmm5, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: pmullw %xmm2, %xmm4 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm5, %xmm4 +; SSE41-NEXT: movdqa %xmm5, %xmm6 +; SSE41-NEXT: pandn %xmm2, %xmm6 +; SSE41-NEXT: pmaddubsw %xmm6, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 -; SSE41-NEXT: por %xmm6, %xmm0 -; SSE41-NEXT: movdqa %xmm3, %xmm2 -; SSE41-NEXT: pand %xmm4, %xmm2 -; SSE41-NEXT: movdqa %xmm1, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm2, %xmm5 -; SSE41-NEXT: pand %xmm4, %xmm5 -; SSE41-NEXT: pandn %xmm3, %xmm4 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm1 +; SSE41-NEXT: por %xmm4, %xmm0 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: pmullw %xmm3, %xmm2 +; SSE41-NEXT: pand %xmm5, %xmm2 +; SSE41-NEXT: pandn %xmm3, %xmm5 +; SSE41-NEXT: pmaddubsw %xmm5, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: por %xmm5, %xmm1 +; SSE41-NEXT: por %xmm2, %xmm1 ; SSE41-NEXT: retq ; ; AVX2-LABEL: mul_v32i8: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3 -; AVX2-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 -; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm3 -; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1 +; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm2 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX2-NEXT: vpandn %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpsllw $8, %ymm0, %ymm0 -; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0 +; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: mul_v32i8: ; AVX512F: # %bb.0: # %entry -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm3 -; AVX512F-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 -; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm3 -; AVX512F-NEXT: vpandn %ymm1, %ymm2, %ymm1 +; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm2 +; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm1, %ymm3, %ymm1 ; AVX512F-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vpsllw $8, %ymm0, %ymm0 -; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0 +; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0 ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: mul_v32i8: @@ -773,9 +765,9 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; ; SSE41-LABEL: mul_v64i8c: ; SSE41: # %bb.0: # %entry -; SSE41-NEXT: pmovsxbw {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117] +; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] ; SSE41-NEXT: movdqa %xmm0, %xmm6 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm6 +; SSE41-NEXT: pmullw %xmm4, %xmm6 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] ; SSE41-NEXT: pand %xmm5, %xmm6 ; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] @@ -783,36 +775,35 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE41-NEXT: psllw $8, %xmm0 ; 
SSE41-NEXT: por %xmm6, %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm6 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm6 +; SSE41-NEXT: pmullw %xmm4, %xmm6 ; SSE41-NEXT: pand %xmm5, %xmm6 ; SSE41-NEXT: pmaddubsw %xmm7, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 ; SSE41-NEXT: por %xmm6, %xmm1 ; SSE41-NEXT: movdqa %xmm2, %xmm6 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm6 +; SSE41-NEXT: pmullw %xmm4, %xmm6 ; SSE41-NEXT: pand %xmm5, %xmm6 ; SSE41-NEXT: pmaddubsw %xmm7, %xmm2 ; SSE41-NEXT: psllw $8, %xmm2 ; SSE41-NEXT: por %xmm6, %xmm2 -; SSE41-NEXT: movdqa %xmm3, %xmm6 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm6 -; SSE41-NEXT: pand %xmm5, %xmm6 +; SSE41-NEXT: pmullw %xmm3, %xmm4 +; SSE41-NEXT: pand %xmm5, %xmm4 ; SSE41-NEXT: pmaddubsw %xmm7, %xmm3 ; SSE41-NEXT: psllw $8, %xmm3 -; SSE41-NEXT: por %xmm6, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: retq ; ; AVX2-LABEL: mul_v64i8c: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0] -; AVX2-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm3 +; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] +; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm3 ; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm5 = [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; AVX2-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm0 ; AVX2-NEXT: vpsllw $8, %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm0, %ymm3, %ymm0 -; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm2 ; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 ; AVX2-NEXT: vpmaddubsw %ymm5, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 @@ -822,9 +813,9 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; AVX512F-LABEL: mul_v64i8c: ; AVX512F: # %bb.0: # %entry ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm2 = [117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0] -; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm3 -; AVX512F-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm2 +; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] +; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm2 ; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 ; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; AVX512F-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm0 @@ -837,7 +828,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; ; AVX512BW-LABEL: mul_v64i8c: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0] +; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # 
[117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117] ; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117,0,117] ; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0 ; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 | (zmm1 & m32bcst) @@ -899,59 +890,52 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; ; SSE41-LABEL: mul_v64i8: ; SSE41: # %bb.0: # %entry +; SSE41-NEXT: movdqa %xmm0, %xmm9 +; SSE41-NEXT: pmullw %xmm4, %xmm9 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: movdqa %xmm8, %xmm9 -; SSE41-NEXT: pandn %xmm4, %xmm9 -; SSE41-NEXT: pand %xmm8, %xmm4 -; SSE41-NEXT: movdqa %xmm0, %xmm10 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm10 -; SSE41-NEXT: pand %xmm8, %xmm10 -; SSE41-NEXT: pmaddubsw %xmm9, %xmm0 -; SSE41-NEXT: psllw $8, %xmm0 -; SSE41-NEXT: por %xmm10, %xmm0 -; SSE41-NEXT: movdqa %xmm8, %xmm4 -; SSE41-NEXT: pandn %xmm5, %xmm4 -; SSE41-NEXT: pand %xmm8, %xmm5 -; SSE41-NEXT: movdqa %xmm1, %xmm9 -; SSE41-NEXT: pmaddubsw %xmm5, %xmm9 ; SSE41-NEXT: pand %xmm8, %xmm9 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm1 +; SSE41-NEXT: movdqa %xmm8, %xmm10 +; SSE41-NEXT: pandn %xmm4, %xmm10 +; SSE41-NEXT: pmaddubsw %xmm10, %xmm0 +; SSE41-NEXT: psllw $8, %xmm0 +; SSE41-NEXT: por %xmm9, %xmm0 +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: pmullw %xmm5, %xmm4 +; SSE41-NEXT: pand %xmm8, %xmm4 +; SSE41-NEXT: movdqa %xmm8, %xmm9 +; SSE41-NEXT: pandn %xmm5, %xmm9 +; SSE41-NEXT: pmaddubsw %xmm9, %xmm1 ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: por %xmm9, %xmm1 -; SSE41-NEXT: movdqa %xmm8, %xmm4 -; SSE41-NEXT: pandn %xmm6, %xmm4 -; SSE41-NEXT: pand %xmm8, %xmm6 -; SSE41-NEXT: movdqa %xmm2, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm6, %xmm5 -; SSE41-NEXT: pand %xmm8, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm2 +; SSE41-NEXT: por %xmm4, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: pmullw %xmm6, %xmm4 +; SSE41-NEXT: pand %xmm8, %xmm4 +; SSE41-NEXT: movdqa %xmm8, %xmm5 +; SSE41-NEXT: pandn %xmm6, %xmm5 +; SSE41-NEXT: pmaddubsw %xmm5, %xmm2 ; SSE41-NEXT: psllw $8, %xmm2 -; SSE41-NEXT: por %xmm5, %xmm2 -; SSE41-NEXT: movdqa %xmm7, %xmm4 +; SSE41-NEXT: por %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pmullw %xmm7, %xmm4 ; SSE41-NEXT: pand %xmm8, %xmm4 -; SSE41-NEXT: movdqa %xmm3, %xmm5 -; SSE41-NEXT: pmaddubsw %xmm4, %xmm5 -; SSE41-NEXT: pand %xmm8, %xmm5 ; SSE41-NEXT: pandn %xmm7, %xmm8 ; SSE41-NEXT: pmaddubsw %xmm8, %xmm3 ; SSE41-NEXT: psllw $8, %xmm3 -; SSE41-NEXT: por %xmm5, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: retq ; ; AVX2-LABEL: mul_v64i8: ; AVX2: # %bb.0: # %entry -; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm5 -; AVX2-NEXT: vpmaddubsw %ymm5, %ymm0, %ymm5 -; AVX2-NEXT: vpand %ymm4, %ymm5, %ymm5 -; AVX2-NEXT: vpandn %ymm2, %ymm4, %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm4 +; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX2-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX2-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; AVX2-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: 
vpsllw $8, %ymm0, %ymm0 -; AVX2-NEXT: vpor %ymm0, %ymm5, %ymm0 -; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm2 -; AVX2-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm2 -; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2 -; AVX2-NEXT: vpandn %ymm3, %ymm4, %ymm3 +; AVX2-NEXT: vpor %ymm0, %ymm4, %ymm0 +; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm2 +; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX2-NEXT: vpandn %ymm3, %ymm5, %ymm3 ; AVX2-NEXT: vpmaddubsw %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 ; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 @@ -959,33 +943,30 @@ define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind { ; ; AVX512F-LABEL: mul_v64i8: ; AVX512F: # %bb.0: # %entry -; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] -; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3 -; AVX512F-NEXT: vpand %ymm2, %ymm3, %ymm4 -; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm5 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm5, %ymm4 -; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm6 -; AVX512F-NEXT: vpmaddubsw %ymm6, %ymm0, %ymm6 -; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm6, %zmm4 -; AVX512F-NEXT: vpandn %ymm1, %ymm2, %ymm1 +; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 +; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm4 +; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm5 +; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4 +; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm5 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] +; AVX512F-NEXT: vpandn %ymm1, %ymm5, %ymm1 ; AVX512F-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vpsllw $8, %ymm0, %ymm0 -; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm1 -; AVX512F-NEXT: vpmaddubsw %ymm1, %ymm5, %ymm1 +; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm1 +; AVX512F-NEXT: vpmaddubsw %ymm1, %ymm3, %ymm1 ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 -; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm4 & zmm2) +; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm4 & zmm5) ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: mul_v64i8: ; AVX512BW: # %bb.0: # %entry -; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm3 -; AVX512BW-NEXT: vpmaddubsw %zmm3, %zmm0, %zmm3 -; AVX512BW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 +; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm2 +; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm3, %zmm1 ; AVX512BW-NEXT: vpmaddubsw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0 -; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm3 & zmm2) +; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | (zmm2 & zmm3) ; AVX512BW-NEXT: retq entry: %A = mul <64 x i8> %i, %j diff --git a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll index 59b03f8c02223..c9e48f817fb44 100644 --- a/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll +++ b/llvm/test/CodeGen/X86/prefer-avx256-wide-mul.ll @@ -58,13 +58,12 @@ define <32 x i8> @test_div7_32i8(<32 
x i8> %a) { define <32 x i8> @test_mul_32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX256BW-LABEL: test_mul_32i8: ; AVX256BW: # %bb.0: -; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX256BW-NEXT: vpand %ymm2, %ymm1, %ymm3 -; AVX256BW-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 -; AVX256BW-NEXT: vpandn %ymm1, %ymm2, %ymm1 +; AVX256BW-NEXT: vpmullw %ymm1, %ymm0, %ymm2 +; AVX256BW-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX256BW-NEXT: vpandn %ymm1, %ymm3, %ymm1 ; AVX256BW-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 ; AVX256BW-NEXT: vpsllw $8, %ymm0, %ymm0 -; AVX256BW-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm3 & ymm2) +; AVX256BW-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ymm3) ; AVX256BW-NEXT: retq ; ; AVX512BWVL-LABEL: test_mul_32i8: diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll index bb7245c31b326..ec94d003f10ea 100644 --- a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll +++ b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll @@ -2275,8 +2275,8 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-SSE41: # %bb.0: ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm4 ; CHECK-SSE41-NEXT: movq %rdi, %rax -; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm0 -; CHECK-SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [171,0,183,0,61,0,127,0,9,0,41,0,1,0,161,0] +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm0 = [171,103,183,171,61,1,127,183,9,0,41,183,1,1,161,221] +; CHECK-SSE41-NEXT: pmullw %xmm1, %xmm0 ; CHECK-SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] ; CHECK-SSE41-NEXT: pand %xmm5, %xmm0 ; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm6 @@ -2302,8 +2302,8 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-SSE41-NEXT: pcmpgtb %xmm6, %xmm1 ; CHECK-SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255] ; CHECK-SSE41-NEXT: pblendvb %xmm0, %xmm7, %xmm1 -; CHECK-SSE41-NEXT: movdqa %xmm4, %xmm0 -; CHECK-SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [197,0,27,0,1,0,1,0,223,0,205,0,161,0,171,0] +; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm0 = [197,205,27,241,1,1,1,163,223,223,205,183,161,1,171,239] +; CHECK-SSE41-NEXT: pmullw %xmm4, %xmm0 ; CHECK-SSE41-NEXT: pand %xmm5, %xmm0 ; CHECK-SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 # [0,205,0,241,0,1,0,163,0,223,0,183,0,1,0,239] ; CHECK-SSE41-NEXT: psllw $8, %xmm4 @@ -2341,7 +2341,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,79,0,103,0,27,0,121,0,129,0,129,0,129,0,47] ; CHECK-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; CHECK-AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm4 -; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm5 # [0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0] +; CHECK-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm5 # [0,0,0,0,1,1,1,0,1,1,0,1,0,1,0,1] ; CHECK-AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; CHECK-AVX1-NEXT: vpand %xmm3, %xmm5, %xmm5 ; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm6 # [0,0,0,0,0,1,0,0,0,1,0,1,0,1,0,1] @@ -2361,7 +2361,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; CHECK-AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 ; CHECK-AVX1-NEXT: vpaddb %xmm4, %xmm6, %xmm4 -; CHECK-AVX1-NEXT: vpmaddubsw 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm6 # [13,0,19,0,2,0,2,0,62,0,5,0,97,0,3,0] +; CHECK-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm6 # [13,5,19,34,2,8,2,88,62,62,5,7,97,2,3,60] ; CHECK-AVX1-NEXT: vpand %xmm3, %xmm6, %xmm6 ; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,5,0,34,0,8,0,88,0,62,0,7,0,2,0,60] ; CHECK-AVX1-NEXT: vpsllw $8, %xmm4, %xmm4 @@ -2375,7 +2375,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7, %xmm7 # [0,86,0,95,0,147,0,43,0,49,0,127,0,65,0,147] ; CHECK-AVX1-NEXT: vpsrlw $8, %xmm7, %xmm7 ; CHECK-AVX1-NEXT: vpackuswb %xmm6, %xmm7, %xmm6 -; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm7 # [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0] +; CHECK-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm7 # [0,0,1,0,0,255,0,1,0,1,0,1,1,1,0,1] ; CHECK-AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7 ; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm8 # [0,0,0,0,0,255,0,1,0,1,0,1,0,1,0,1] ; CHECK-AVX1-NEXT: vpsllw $8, %xmm8, %xmm8 @@ -2394,7 +2394,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6, %xmm6 ; CHECK-AVX1-NEXT: vpand %xmm5, %xmm6, %xmm5 ; CHECK-AVX1-NEXT: vpaddb %xmm5, %xmm7, %xmm5 -; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm6 # [3,0,7,0,84,0,127,0,114,0,50,0,2,0,97,0] +; CHECK-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm6 # [3,87,7,6,84,128,127,56,114,1,50,7,2,8,97,117] ; CHECK-AVX1-NEXT: vpand %xmm3, %xmm6, %xmm3 ; CHECK-AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5, %xmm5 # [0,87,0,6,0,128,0,56,0,1,0,7,0,8,0,117] ; CHECK-AVX1-NEXT: vpsllw $8, %xmm5, %xmm5 @@ -2423,7 +2423,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX2-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,79,0,103,0,27,0,121,0,129,0,129,0,129,0,47,0,86,0,95,0,147,0,43,0,49,0,127,0,65,0,147] ; CHECK-AVX2-NEXT: vpsrlw $8, %ymm4, %ymm4 ; CHECK-AVX2-NEXT: vpackuswb %ymm3, %ymm4, %ymm3 -; CHECK-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4 # [0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0] +; CHECK-AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm4 # [0,0,0,0,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,255,0,1,0,1,0,1,1,1,0,1] ; CHECK-AVX2-NEXT: vpbroadcastw {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; CHECK-AVX2-NEXT: vpand %ymm5, %ymm4, %ymm4 ; CHECK-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm6 # [0,0,0,0,0,1,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,255,0,1,0,1,0,1,0,1,0,1] @@ -2443,7 +2443,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 ; CHECK-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 ; CHECK-AVX2-NEXT: vpaddb %ymm3, %ymm4, %ymm3 -; CHECK-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4 # [13,0,19,0,2,0,2,0,62,0,5,0,97,0,3,0,3,0,7,0,84,0,127,0,114,0,50,0,2,0,97,0] +; CHECK-AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4 # [13,5,19,34,2,8,2,88,62,62,5,7,97,2,3,60,3,87,7,6,84,128,127,56,114,1,50,7,2,8,97,117] ; CHECK-AVX2-NEXT: vpand %ymm5, %ymm4, %ymm4 ; CHECK-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,5,0,34,0,8,0,88,0,62,0,7,0,2,0,60,0,87,0,6,0,128,0,56,0,1,0,7,0,8,0,117] ; CHECK-AVX2-NEXT: vpsllw $8, %ymm3, %ymm3 @@ 
-2458,7 +2458,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; ; CHECK-AVX512VL-LABEL: pr51133: ; CHECK-AVX512VL: # %bb.0: -; CHECK-AVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2 # [197,0,27,0,1,0,1,0,223,0,205,0,161,0,171,0,171,0,183,0,61,0,127,0,9,0,41,0,1,0,161,0] +; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2 # [197,205,27,241,1,1,1,163,223,223,205,183,161,1,171,239,171,103,183,171,61,1,127,183,9,0,41,183,1,1,161,221] ; CHECK-AVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3 # [0,205,0,241,0,1,0,163,0,223,0,183,0,1,0,239,0,103,0,171,0,1,0,183,0,0,0,183,0,1,0,221] ; CHECK-AVX512VL-NEXT: vpsllw $8, %ymm3, %ymm3 ; CHECK-AVX512VL-NEXT: vpternlogd {{.*#+}} ymm3 = ymm3 | (ymm2 & m32bcst) diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll index 33a6a7679bb9a..a5d6900f77f97 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -2014,7 +2014,7 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64] ; SSE41-NEXT: psllw $8, %xmm1 ; SSE41-NEXT: por %xmm3, %xmm1 -; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: por %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -2033,7 +2033,7 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64] ; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll index 217431be10d88..0cffa1b78a654 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll @@ -1631,9 +1631,9 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 ; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = [128,32,8,2,128,2,8,32] -; AVX1-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpmaddubsw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] +; AVX1-NEXT: vpmullw %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 @@ -1653,7 +1653,7 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2 # [0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64,0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64] ; AVX2-NEXT: vpsllw $8, %ymm2, %ymm2 ; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 -; AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, 
%ymm0 # [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0,128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64,128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -1672,7 +1672,7 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2 # [0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64,0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64] ; AVX512F-NEXT: vpsllw $8, %ymm2, %ymm2 ; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1 -; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0,128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64,128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: retq @@ -1690,7 +1690,7 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm0 ; AVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2 # [0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64,0,64,0,16,0,4,0,1,0,1,0,4,0,16,0,64] ; AVX512VL-NEXT: vpsllw $8, %ymm2, %ymm2 -; AVX512VL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0,128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64,128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0 ; AVX512VL-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | ymm1 | ymm2 ; AVX512VL-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-fshr-512.ll b/llvm/test/CodeGen/X86/vector-fshr-512.ll index 3a522ccb6214a..25f8f94eb834c 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-512.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-512.ll @@ -915,10 +915,10 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; AVX512F-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 ; AVX512F-NEXT: vpsllw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3 -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0,128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64,128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; AVX512F-NEXT: # ymm4 = mem[0,1,0,1] -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm0, %ymm0 -; AVX512F-NEXT: vpmaddubsw %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpmullw %ymm4, %ymm0, %ymm0 +; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2 ; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0 ; AVX512F-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 ; AVX512F-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | zmm1 | zmm3 @@ -957,10 +957,10 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; AVX512VL-NEXT: vpmaddubsw %ymm3, %ymm0, %ymm3 ; AVX512VL-NEXT: vpsllw $8, %ymm3, %ymm3 ; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm3 -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0,128,0,32,0,8,0,2,0,128,0,2,0,8,0,32,0] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = 
[128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64,128,64,32,16,8,4,2,1,128,1,2,4,8,16,32,64] ; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1] -; AVX512VL-NEXT: vpmaddubsw %ymm4, %ymm0, %ymm0 -; AVX512VL-NEXT: vpmaddubsw %ymm4, %ymm2, %ymm2 +; AVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm0 +; AVX512VL-NEXT: vpmullw %ymm4, %ymm2, %ymm2 ; AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0 ; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 ; AVX512VL-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 | zmm1 | zmm3 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll index e68d1d792c90a..9b7d66def8b5b 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -731,7 +731,7 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: movdqa %xmm2, %xmm1 ; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,8,0,10,0,12,0,14,0,13,0,11,0,9,0,7] ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [7,0,9,0,11,0,13,0,14,0,12,0,10,0,9,0] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [7,8,9,10,11,12,13,14,14,13,12,11,10,9,9,7] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE41-NEXT: por %xmm1, %xmm2 ; SSE41-NEXT: psubb %xmm2, %xmm0 @@ -762,7 +762,7 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2 # [0,8,0,10,0,12,0,14,0,13,0,11,0,9,0,7] ; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [7,0,9,0,11,0,13,0,14,0,12,0,10,0,9,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [7,8,9,10,11,12,13,14,14,13,12,11,10,9,9,7] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll index 7355f3683fc2e..fa5692aa9cef1 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll @@ -660,7 +660,7 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpackuswb %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm5 ; AVX1-NEXT: vpsubb %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm5 # [22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm5 # [22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255] ; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] @@ -686,7 +686,7 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22] ; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22] ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 @@ -720,7 +720,7 @@ define 
<32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 ; AVX2NOBW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 # [0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22,0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] ; AVX2NOBW-NEXT: vpsllw $8, %ymm2, %ymm2 -; AVX2NOBW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0,22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll index 5445330c82922..b11756a5e3b4e 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-512.ll @@ -544,7 +544,7 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3 ; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm5 ; AVX512F-NEXT: vpsubb %ymm5, %ymm3, %ymm3 -; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm5 # [38,0,36,0,34,0,32,0,30,0,28,0,26,0,24,0,22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm5 # [38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5 ; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [0,37,0,35,0,33,0,31,0,29,0,27,0,25,0,23,0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] @@ -570,7 +570,7 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpackuswb %ymm4, %ymm3, %ymm3 ; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpsubb %ymm1, %ymm3, %ymm1 -; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31,0,33,0,35,0,37,0] +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38] ; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22,0,24,0,26,0,28,0,30,0,32,0,34,0,36,0,38] ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 @@ -603,7 +603,7 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512BW-NEXT: vpmovb2m %zmm1, %k0 ; AVX512BW-NEXT: vpmovm2b %k0, %zmm2 ; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31,0,33,0,35,0,37,0,38,0,36,0,34,0,32,0,30,0,28,0,26,0,24,0,22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # 
[0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22,0,24,0,26,0,28,0,30,0,32,0,34,0,36,0,38,0,37,0,35,0,33,0,31,0,29,0,27,0,25,0,23,0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] ; AVX512BW-NEXT: vpsllw $8, %zmm1, %zmm1 ; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 | (zmm2 & m32bcst) diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 6cd5098504f91..ef255e598e4a1 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -840,7 +840,7 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: movdqa %xmm2, %xmm1 ; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,8,0,10,0,12,0,14,0,13,0,11,0,9,0,7] ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [7,0,9,0,11,0,13,0,14,0,12,0,10,0,9,0] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [7,8,9,10,11,12,13,14,14,13,12,11,10,9,9,7] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE41-NEXT: por %xmm1, %xmm2 ; SSE41-NEXT: psubb %xmm2, %xmm0 @@ -882,7 +882,7 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2 # [0,8,0,10,0,12,0,14,0,13,0,11,0,9,0,7] ; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [7,0,9,0,11,0,13,0,14,0,12,0,10,0,9,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [7,8,9,10,11,12,13,14,14,13,12,11,10,9,9,7] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll index 98ea87cbe18f3..ca57359183312 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -702,7 +702,7 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 # [32,16,16,128,64,16,256,32] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm4 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm5 # [22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm5 # [22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; AVX1-NEXT: vpand %xmm3, %xmm5, %xmm5 ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] @@ -739,7 +739,7 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4 # [64,256,128,32,32,32,64,64] ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; AVX1-NEXT: vpackuswb %xmm1, %xmm4, %xmm1 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm4 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm4 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22] ; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22] ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 @@ -781,7 +781,7 @@ define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpmaddubsw 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2 # [0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22,0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] ; AVX2NOBW-NEXT: vpsllw $8, %ymm2, %ymm2 -; AVX2NOBW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0,22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll index a11fa370a86b7..b8a131e628007 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -575,7 +575,7 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 # [64,16,32,8,8,8,256,16,32,16,16,128,64,16,256,32] ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512F-NEXT: vpackuswb %ymm4, %ymm3, %ymm4 -; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5 # [38,0,36,0,34,0,32,0,30,0,28,0,26,0,24,0,22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5 # [38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX512F-NEXT: vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm5 ; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4 # [0,37,0,35,0,33,0,31,0,29,0,27,0,25,0,23,0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] @@ -609,7 +609,7 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [64,256,128,32,32,32,64,64,16,16,64,32,128,256,16,16] ; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1 ; AVX512F-NEXT: vpackuswb %ymm5, %ymm1, %ymm1 -; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm4 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31,0,33,0,35,0,37,0] +; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm4 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38] ; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22,0,24,0,26,0,28,0,30,0,32,0,34,0,36,0,38] ; AVX512F-NEXT: vpsllw $8, %ymm1, %ymm1 @@ -648,7 +648,7 @@ define <64 x i8> @test_remconstant_64i8(<64 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1 ; AVX512BW-NEXT: vpackuswb %zmm3, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2 # [7,0,9,0,11,0,13,0,15,0,17,0,19,0,21,0,23,0,25,0,27,0,29,0,31,0,33,0,35,0,37,0,38,0,36,0,34,0,32,0,30,0,28,0,26,0,24,0,22,0,20,0,18,0,16,0,14,0,12,0,10,0,8,0] +; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2 # [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7] ; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # 
[0,8,0,10,0,12,0,14,0,16,0,18,0,20,0,22,0,24,0,26,0,28,0,30,0,32,0,34,0,36,0,38,0,37,0,35,0,33,0,31,0,29,0,27,0,25,0,23,0,21,0,19,0,17,0,15,0,13,0,11,0,9,0,7] ; AVX512BW-NEXT: vpsllw $8, %zmm1, %zmm1 ; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 | (zmm2 & m32bcst) diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll index d0bb90c5fc8ab..6d6f1c28ca282 100644 --- a/llvm/test/CodeGen/X86/vector-mul.ll +++ b/llvm/test/CodeGen/X86/vector-mul.ll @@ -265,7 +265,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw ; X86-SSE4-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [0,2,0,8,0,2,0,8,0,2,0,8,0,2,0,8] ; X86-SSE4-NEXT: psllw $8, %xmm1 -; X86-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,0,4,0,1,0,4,0,1,0,4,0,1,0,4,0] +; X86-SSE4-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8] ; X86-SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE4-NEXT: por %xmm1, %xmm0 ; X86-SSE4-NEXT: retl @@ -275,7 +275,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw ; X64-SSE4-NEXT: movdqa %xmm0, %xmm1 ; X64-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,2,0,8,0,2,0,8,0,2,0,8,0,2,0,8] ; X64-SSE4-NEXT: psllw $8, %xmm1 -; X64-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,0,4,0,1,0,4,0,1,0,4,0,1,0,4,0] +; X64-SSE4-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8] ; X64-SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE4-NEXT: por %xmm1, %xmm0 ; X64-SSE4-NEXT: retq @@ -1072,7 +1072,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> ; X86-SSE4-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [0,3,0,17,0,65,0,2,0,9,0,33,0,129,0,3] ; X86-SSE4-NEXT: psllw $8, %xmm1 -; X86-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2,0,9,0,33,0,129,0,3,0,17,0,65,0,2,0] +; X86-SSE4-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2,3,9,17,33,65,129,2,3,9,17,33,65,129,2,3] ; X86-SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE4-NEXT: por %xmm1, %xmm0 ; X86-SSE4-NEXT: retl @@ -1095,7 +1095,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> ; X64-SSE4-NEXT: movdqa %xmm0, %xmm1 ; X64-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,3,0,17,0,65,0,2,0,9,0,33,0,129,0,3] ; X64-SSE4-NEXT: psllw $8, %xmm1 -; X64-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,0,9,0,33,0,129,0,3,0,17,0,65,0,2,0] +; X64-SSE4-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,3,9,17,33,65,129,2,3,9,17,33,65,129,2,3] ; X64-SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE4-NEXT: por %xmm1, %xmm0 ; X64-SSE4-NEXT: retq @@ -1103,7 +1103,7 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> ; X64-XOP-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3: ; X64-XOP: # %bb.0: ; X64-XOP-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [0,3,0,17,0,65,0,2,0,9,0,33,0,129,0,3] -; X64-XOP-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2,0,9,0,33,0,129,0,3,0,17,0,65,0,2,0] +; X64-XOP-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [2,3,9,17,33,65,129,2,3,9,17,33,65,129,2,3] ; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2],xmm1[2],xmm0[4],xmm1[4],xmm0[6],xmm1[6],xmm0[8],xmm1[8],xmm0[10],xmm1[10],xmm0[12],xmm1[12],xmm0[14],xmm1[14] ; X64-XOP-NEXT: 
retq ; @@ -1847,7 +1847,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> ; X86-SSE4-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [0,1,0,7,0,31,0,127,0,1,0,7,0,31,0,127] ; X86-SSE4-NEXT: psllw $8, %xmm1 -; X86-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,0,3,0,15,0,63,0,0,0,3,0,15,0,63,0] +; X86-SSE4-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [0,1,3,7,15,31,63,127,0,1,3,7,15,31,63,127] ; X86-SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE4-NEXT: por %xmm1, %xmm0 ; X86-SSE4-NEXT: retl @@ -1857,7 +1857,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> ; X64-SSE4-NEXT: movdqa %xmm0, %xmm1 ; X64-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,1,0,7,0,31,0,127,0,1,0,7,0,31,0,127] ; X64-SSE4-NEXT: psllw $8, %xmm1 -; X64-SSE4-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,0,3,0,15,0,63,0,0,0,3,0,15,0,63,0] +; X64-SSE4-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [0,1,3,7,15,31,63,127,0,1,3,7,15,31,63,127] ; X64-SSE4-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE4-NEXT: por %xmm1, %xmm0 ; X64-SSE4-NEXT: retq @@ -1865,7 +1865,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> ; X64-XOP-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127: ; X64-XOP: # %bb.0: ; X64-XOP-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [0,1,0,7,0,31,0,127,0,1,0,7,0,31,0,127] -; X64-XOP-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,3,0,15,0,63,0,0,0,3,0,15,0,63,0] +; X64-XOP-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,1,3,7,15,31,63,127,0,1,3,7,15,31,63,127] ; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2],xmm1[2],xmm0[4],xmm1[4],xmm0[6],xmm1[6],xmm0[8],xmm1[8],xmm0[10],xmm1[10],xmm0[12],xmm1[12],xmm0[14],xmm1[14] ; X64-XOP-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 3085c325e0968..37b96b8f3f927 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -1165,7 +1165,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; SSE41-NEXT: psllw $8, %xmm1 -; SSE41-NEXT: pmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: por %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -1174,7 +1174,7 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind { ; AVX1: # %bb.0: ; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll index f9ccd1e8ca156..c7d2532e9acb2 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll 
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll @@ -1313,9 +1313,9 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpmaddubsw %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = [1,4,16,64,128,32,8,2] -; AVX1-NEXT: vpmaddubsw %xmm2, %xmm3, %xmm3 -; AVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] +; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm3 +; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 @@ -1325,7 +1325,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind { ; AVX2: # %bb.0: ; AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -1352,7 +1352,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind { ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; AVX512DQ-NEXT: vpsllw $8, %ymm1, %ymm1 -; AVX512DQ-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; AVX512DQ-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512DQ-NEXT: retq @@ -1366,7 +1366,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind { ; ; AVX512DQVL-LABEL: constant_shift_v32i8: ; AVX512DQVL: # %bb.0: -; AVX512DQVL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; AVX512DQVL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; AVX512DQVL-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; AVX512DQVL-NEXT: vpsllw $8, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpternlogd {{.*#+}} ymm0 = ymm0 | (ymm1 & m32bcst) @@ -1388,9 +1388,9 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind { ; X86-AVX1-NEXT: vpmaddubsw %xmm1, %xmm3, %xmm1 ; X86-AVX1-NEXT: vpsllw $8, %xmm1, %xmm1 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; X86-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = [1,4,16,64,128,32,8,2] -; X86-AVX1-NEXT: vpmaddubsw %xmm2, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0 +; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] +; X86-AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; 
X86-AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 @@ -1400,7 +1400,7 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind { ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; X86-AVX2-NEXT: vpsllw $8, %ymm1, %ymm1 -; X86-AVX2-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; X86-AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll index 41238acc4b74d..1e5f1b8729d47 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll @@ -307,10 +307,10 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind { ; AVX512DQ-LABEL: constant_shift_v64i8: ; AVX512DQ: # %bb.0: ; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; AVX512DQ-NEXT: # ymm2 = mem[0,1,0,1] -; AVX512DQ-NEXT: vpmaddubsw %ymm2, %ymm1, %ymm3 -; AVX512DQ-NEXT: vpmaddubsw %ymm2, %ymm0, %ymm2 +; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm3 +; AVX512DQ-NEXT: vpmullw %ymm2, %ymm0, %ymm2 ; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2 ; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; AVX512DQ-NEXT: # ymm3 = mem[0,1,0,1] @@ -324,7 +324,7 @@ define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind { ; ; AVX512BW-LABEL: constant_shift_v64i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0,1,0,4,0,16,0,64,0,128,0,32,0,8,0,2,0] +; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 # [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1,1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1] ; AVX512BW-NEXT: vpmaddubsw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1,0,2,0,8,0,32,0,128,0,64,0,16,0,4,0,1] ; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0 ; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 | (zmm1 & m32bcst) diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll index 3590c4d027be7..ac5830604461c 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll @@ -100,16 +100,14 @@ define <16 x i8> @PR50049(ptr %p1, ptr %p2) { ; SSE-NEXT: pshufb %xmm3, %xmm4 ; SSE-NEXT: pshufb %xmm8, %xmm1 ; SSE-NEXT: por %xmm4, %xmm1 -; SSE-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] -; SSE-NEXT: movdqa %xmm1, %xmm3 -; SSE-NEXT: pand %xmm2, %xmm3 -; SSE-NEXT: movdqa %xmm0, %xmm4 -; 
SSE-NEXT: pmaddubsw %xmm3, %xmm4 -; SSE-NEXT: pand %xmm2, %xmm4 -; SSE-NEXT: pandn %xmm1, %xmm2 -; SSE-NEXT: pmaddubsw %xmm2, %xmm0 +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: pmullw %xmm1, %xmm2 +; SSE-NEXT: pmovzxbw {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; SSE-NEXT: pand %xmm3, %xmm2 +; SSE-NEXT: pandn %xmm1, %xmm3 +; SSE-NEXT: pmaddubsw %xmm3, %xmm0 ; SSE-NEXT: psllw $8, %xmm0 -; SSE-NEXT: por %xmm4, %xmm0 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX1-LABEL: PR50049: @@ -129,21 +127,20 @@ define <16 x i8> @PR50049(ptr %p1, ptr %p2) { ; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4] -; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128] +; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm5 +; AVX1-NEXT: vpor %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm1 -; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm2 -; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3 -; AVX1-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3 -; AVX1-NEXT: vpandn %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm1 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmaddubsw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0 -; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0 +; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: PR50049: From e413343ca7ee65ecf04fc455abb55604c7191e34 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov Date: Sun, 16 Nov 2025 18:26:03 +0300 Subject: [PATCH 05/17] [SelectionDAG] Verify SDTCisVT and SDTCVecEltisVT constraints (#150125) Teach `SDNodeInfoEmitter` TableGen backend to process `SDTypeConstraint` records and emit tables for them. The tables are used by `SDNodeInfo::verifyNode()` to validate a node being created. This PR only adds validation code for `SDTCisVT` and `SDTCVecEltisVT` constraints to keep it smaller. 
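As a rough sketch of what the new checks cover (hypothetical target and node names, not part of this patch; the usual Target.td boilerplate is omitted, see the added hw-mode.td test for a complete example):

  def my_node : SDNode<"MyTargetISD::NODE",
                       SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
                                            SDTCVecEltisVT<1, i8>]>>;

With this change, a my_node whose result is not i32 or whose operand is not a vector with i8 elements is reported by SDNodeInfo::verifyNode(); an SDTCisVT constraint on iPTR is compared after resolving it to the target's pointer type. The other constraint kinds are emitted into the table but not verified yet.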
Pull Request: https://github.com/llvm/llvm-project/pull/150125 --- llvm/include/llvm/CodeGen/SDNodeInfo.h | 22 +++- llvm/lib/CodeGen/SelectionDAG/SDNodeInfo.cpp | 116 ++++++++++++++++++ llvm/lib/Target/AArch64/AArch64InstrInfo.td | 2 +- .../AArch64/AArch64SelectionDAGInfo.cpp | 37 ++---- llvm/lib/Target/M68k/M68kISelLowering.cpp | 2 +- llvm/lib/Target/M68k/M68kSelectionDAGInfo.cpp | 15 +++ llvm/lib/Target/M68k/M68kSelectionDAGInfo.h | 3 + .../Target/RISCV/RISCVSelectionDAGInfo.cpp | 19 +-- llvm/lib/Target/Sparc/SparcInstrInfo.td | 2 +- .../TableGen/SDNodeInfoEmitter/advanced.td | 40 +++--- .../ambiguous-constraints-1.td | 12 +- .../ambiguous-constraints-2.td | 12 +- .../TableGen/SDNodeInfoEmitter/hw-mode.td | 61 +++++++++ .../TableGen/SDNodeInfoEmitter/namespace.td | 18 +-- .../TableGen/SDNodeInfoEmitter/no-nodes.td | 10 +- .../SDNodeInfoEmitter/skipped-nodes.td | 6 +- .../SDNodeInfoEmitter/trivial-node.td | 12 +- .../TableGen/Basic/SequenceToOffsetTable.h | 3 +- llvm/utils/TableGen/Common/InfoByHwMode.h | 2 + llvm/utils/TableGen/SDNodeInfoEmitter.cpp | 72 +++++++++-- 20 files changed, 359 insertions(+), 107 deletions(-) create mode 100644 llvm/test/TableGen/SDNodeInfoEmitter/hw-mode.td diff --git a/llvm/include/llvm/CodeGen/SDNodeInfo.h b/llvm/include/llvm/CodeGen/SDNodeInfo.h index ba6c343ee1838..07909f226630f 100644 --- a/llvm/include/llvm/CodeGen/SDNodeInfo.h +++ b/llvm/include/llvm/CodeGen/SDNodeInfo.h @@ -48,11 +48,21 @@ enum SDNF { SDNFIsStrictFP, }; +struct VTByHwModePair { + uint8_t Mode; + MVT::SimpleValueType VT; +}; + struct SDTypeConstraint { SDTC Kind; - uint8_t OpNo; - uint8_t OtherOpNo; - MVT::SimpleValueType VT; + uint8_t ConstrainedValIdx; + uint8_t ConstrainingValIdx; + /// For Kind == SDTCisVT or SDTCVecEltisVT: + /// - if not using HwMode, NumHwModes == 0 and VT is MVT::SimpleValueType; + /// - otherwise, VT is offset into VTByHwModeTable and NumHwModes specifies + /// the number of entries. + uint8_t NumHwModes; + uint16_t VT; }; using SDNodeTSFlags = uint32_t; @@ -76,13 +86,15 @@ class SDNodeInfo final { unsigned NumOpcodes; const SDNodeDesc *Descs; StringTable Names; + const VTByHwModePair *VTByHwModeTable; const SDTypeConstraint *Constraints; public: constexpr SDNodeInfo(unsigned NumOpcodes, const SDNodeDesc *Descs, - StringTable Names, const SDTypeConstraint *Constraints) + StringTable Names, const VTByHwModePair *VTByHwModeTable, + const SDTypeConstraint *Constraints) : NumOpcodes(NumOpcodes), Descs(Descs), Names(Names), - Constraints(Constraints) {} + VTByHwModeTable(VTByHwModeTable), Constraints(Constraints) {} /// Returns true if there is a generated description for a node with the given /// target-specific opcode. 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SDNodeInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/SDNodeInfo.cpp index e3f6c98a9a90a..da763dfb212f6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SDNodeInfo.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SDNodeInfo.cpp @@ -7,7 +7,10 @@ //===----------------------------------------------------------------------===// #include "llvm/CodeGen/SDNodeInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/CodeGen/TargetLowering.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" using namespace llvm; @@ -40,6 +43,32 @@ static void checkOperandType(const SelectionDAG &DAG, const SDNode *N, ExpectedVT.getEVTString() + ", got " + ActualVT.getEVTString()); } +namespace { + +/// Similar to SDValue, but also records whether it is a result or an operand +/// of a node so we can provide more precise diagnostics. +class SDNodeValue { + const SDNode *N; + unsigned Idx; + bool IsRes; + +public: + SDNodeValue(const SDNode *N, unsigned Idx, bool IsRes) + : N(N), Idx(Idx), IsRes(IsRes) {} + + SDValue getValue() const { + return IsRes ? SDValue(const_cast<SDNode *>(N), Idx) : N->getOperand(Idx); + } + + EVT getValueType() const { return getValue().getValueType(); } + + friend raw_ostream &operator<<(raw_ostream &OS, const SDNodeValue &Op) { + return OS << (Op.IsRes ? "result" : "operand") << " #" << Op.Idx; + } +}; + +} // namespace + void SDNodeInfo::verifyNode(const SelectionDAG &DAG, const SDNode *N) const { const SDNodeDesc &Desc = getDesc(N->getOpcode()); bool HasChain = Desc.hasProperty(SDNPHasChain); @@ -125,4 +154,91 @@ void SDNodeInfo::verifyNode(const SelectionDAG &DAG, const SDNode *N) const { " must be Register or RegisterMask"); } } + + unsigned VTHwMode = + DAG.getSubtarget().getHwMode(MCSubtargetInfo::HwMode_ValueType); + + // Returns a constrained or constraining value (result or operand) of a node. + // ValIdx is the index of a node's value, as defined by SDTypeConstraint; + // that is, it indexes a node's operands after its results and ignores + // chain/glue values.
+ auto GetConstraintValue = [&](unsigned ValIdx) { + if (ValIdx < Desc.NumResults) + return SDNodeValue(N, ValIdx, /*IsRes=*/true); + return SDNodeValue(N, HasChain + (ValIdx - Desc.NumResults), + /*IsRes=*/false); + }; + + auto GetConstraintVT = [&](const SDTypeConstraint &C) { + if (!C.NumHwModes) + return static_cast<MVT::SimpleValueType>(C.VT); + for (auto [Mode, VT] : ArrayRef(&VTByHwModeTable[C.VT], C.NumHwModes)) + if (Mode == VTHwMode) + return VT; + llvm_unreachable("No value type for this HW mode"); + }; + + SmallString<128> ES; + raw_svector_ostream SS(ES); + + for (const SDTypeConstraint &C : getConstraints(N->getOpcode())) { + SDNodeValue Val = GetConstraintValue(C.ConstrainedValIdx); + EVT VT = Val.getValueType(); + + switch (C.Kind) { + case SDTCisVT: { + EVT ExpectedVT = GetConstraintVT(C); + + bool IsPtr = ExpectedVT == MVT::iPTR; + if (IsPtr) + ExpectedVT = + DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); + + if (VT != ExpectedVT) { + SS << Val << " must have type " << ExpectedVT; + if (IsPtr) + SS << " (iPTR)"; + SS << ", but has type " << VT; + reportNodeError(DAG, N, SS.str()); + } + break; + } + case SDTCisPtrTy: + break; + case SDTCisInt: + break; + case SDTCisFP: + break; + case SDTCisVec: + break; + case SDTCisSameAs: + break; + case SDTCisVTSmallerThanOp: + break; + case SDTCisOpSmallerThanOp: + break; + case SDTCisEltOfVec: + break; + case SDTCisSubVecOfVec: + break; + case SDTCVecEltisVT: { + EVT ExpectedVT = GetConstraintVT(C); + + if (!VT.isVector()) { + SS << Val << " must have vector type"; + reportNodeError(DAG, N, SS.str()); + } + if (VT.getVectorElementType() != ExpectedVT) { + SS << Val << " must have " << ExpectedVT << " element type, but has " + << VT.getVectorElementType() << " element type"; + reportNodeError(DAG, N, SS.str()); + } + break; + } + case SDTCisSameNumEltsAs: + break; + case SDTCisSameSizeAs: + break; + } + } } diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 094917b33bc17..50a3a4ab8d8b6 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -1184,7 +1184,7 @@ def AArch64msrr : SDNode<"AArch64ISD::MSRR", SDTCisVT<2, i64>]>, [SDNPHasChain]>; -def SD_AArch64rshrnb : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, SDTCisInt<2>]>; +def SD_AArch64rshrnb : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i32>]>; // Vector narrowing shift by immediate (bottom) def AArch64rshrnb : SDNode<"AArch64ISD::RSHRNB_I", SD_AArch64rshrnb>; def AArch64rshrnb_pf : PatFrags<(ops node:$rs, node:$i), diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp index d3b1aa621b61a..48e03ad853d26 100644 --- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp @@ -32,35 +32,21 @@ AArch64SelectionDAGInfo::AArch64SelectionDAGInfo() void AArch64SelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const { + switch (N->getOpcode()) { + case AArch64ISD::WrapperLarge: + // operand #0 must have type i32, but has type i64 + return; + } + SelectionDAGGenTargetInfo::verifyTargetNode(DAG, N); #ifndef NDEBUG // Some additional checks not yet implemented by verifyTargetNode.
- constexpr MVT FlagsVT = MVT::i32; switch (N->getOpcode()) { - case AArch64ISD::SUBS: - assert(N->getValueType(1) == FlagsVT); - break; - case AArch64ISD::ADC: - case AArch64ISD::SBC: - assert(N->getOperand(2).getValueType() == FlagsVT); - break; - case AArch64ISD::ADCS: - case AArch64ISD::SBCS: - assert(N->getValueType(1) == FlagsVT); - assert(N->getOperand(2).getValueType() == FlagsVT); - break; - case AArch64ISD::CSEL: - case AArch64ISD::CSINC: - case AArch64ISD::BRCOND: - assert(N->getOperand(3).getValueType() == FlagsVT); - break; case AArch64ISD::SADDWT: case AArch64ISD::SADDWB: case AArch64ISD::UADDWT: case AArch64ISD::UADDWB: { - assert(N->getNumValues() == 1 && "Expected one result!"); - assert(N->getNumOperands() == 2 && "Expected two operands!"); EVT VT = N->getValueType(0); EVT Op0VT = N->getOperand(0).getValueType(); EVT Op1VT = N->getOperand(1).getValueType(); @@ -80,8 +66,6 @@ void AArch64SelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, case AArch64ISD::SUNPKHI: case AArch64ISD::UUNPKLO: case AArch64ISD::UUNPKHI: { - assert(N->getNumValues() == 1 && "Expected one result!"); - assert(N->getNumOperands() == 1 && "Expected one operand!"); EVT VT = N->getValueType(0); EVT OpVT = N->getOperand(0).getValueType(); assert(OpVT.isVector() && VT.isVector() && OpVT.isInteger() && @@ -98,8 +82,6 @@ void AArch64SelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, case AArch64ISD::UZP2: case AArch64ISD::ZIP1: case AArch64ISD::ZIP2: { - assert(N->getNumValues() == 1 && "Expected one result!"); - assert(N->getNumOperands() == 2 && "Expected two operands!"); EVT VT = N->getValueType(0); EVT Op0VT = N->getOperand(0).getValueType(); EVT Op1VT = N->getOperand(1).getValueType(); @@ -109,11 +91,8 @@ void AArch64SelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, break; } case AArch64ISD::RSHRNB_I: { - assert(N->getNumValues() == 1 && "Expected one result!"); - assert(N->getNumOperands() == 2 && "Expected two operands!"); EVT VT = N->getValueType(0); EVT Op0VT = N->getOperand(0).getValueType(); - EVT Op1VT = N->getOperand(1).getValueType(); assert(VT.isVector() && VT.isInteger() && "Expected integer vector result type!"); assert(Op0VT.isVector() && Op0VT.isInteger() && @@ -122,8 +101,8 @@ void AArch64SelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, "Expected vectors of equal size!"); assert(VT.getVectorElementCount() == Op0VT.getVectorElementCount() * 2 && "Expected input vector with half the lanes of its result!"); - assert(Op1VT == MVT::i32 && isa<ConstantSDNode>(N->getOperand(1)) && - "Expected second operand to be a constant i32!"); + assert(isa<ConstantSDNode>(N->getOperand(1)) && + "Expected second operand to be a constant!"); break; } } diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp index f94380f27e8bd..35daa8ec8f263 100644 --- a/llvm/lib/Target/M68k/M68kISelLowering.cpp +++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -1664,7 +1664,7 @@ static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC, if (Src.getValueType() != BitNo.getValueType()) BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo); - SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo); + SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i8, Src, BitNo); // NOTE BTST sets CCR.Z flag if bit is 0, same as AND with bitmask M68k::CondCode Cond = CC == ISD::SETEQ ?
M68k::COND_EQ : M68k::COND_NE; diff --git a/llvm/lib/Target/M68k/M68kSelectionDAGInfo.cpp b/llvm/lib/Target/M68k/M68kSelectionDAGInfo.cpp index dd1bfdf00af8c..a402c7721129c 100644 --- a/llvm/lib/Target/M68k/M68kSelectionDAGInfo.cpp +++ b/llvm/lib/Target/M68k/M68kSelectionDAGInfo.cpp @@ -16,4 +16,19 @@ using namespace llvm; M68kSelectionDAGInfo::M68kSelectionDAGInfo() : SelectionDAGGenTargetInfo(M68kGenSDNodeInfo) {} +void M68kSelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, + const SDNode *N) const { + switch (N->getOpcode()) { + case M68kISD::ADD: + case M68kISD::SUBX: + // result #1 must have type i8, but has type i32 + return; + case M68kISD::SETCC: + // operand #1 must have type i8, but has type i32 + return; + } + + SelectionDAGGenTargetInfo::verifyTargetNode(DAG, N); +} + M68kSelectionDAGInfo::~M68kSelectionDAGInfo() = default; diff --git a/llvm/lib/Target/M68k/M68kSelectionDAGInfo.h b/llvm/lib/Target/M68k/M68kSelectionDAGInfo.h index 87a8c08d2591e..de4667f830d41 100644 --- a/llvm/lib/Target/M68k/M68kSelectionDAGInfo.h +++ b/llvm/lib/Target/M68k/M68kSelectionDAGInfo.h @@ -21,6 +21,9 @@ class M68kSelectionDAGInfo : public SelectionDAGGenTargetInfo { M68kSelectionDAGInfo(); ~M68kSelectionDAGInfo() override; + + void verifyTargetNode(const SelectionDAG &DAG, + const SDNode *N) const override; }; } // namespace llvm diff --git a/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp b/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp index 041dd07b48bf0..8b66aa12c9be4 100644 --- a/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVSelectionDAGInfo.cpp @@ -22,27 +22,22 @@ RISCVSelectionDAGInfo::~RISCVSelectionDAGInfo() = default; void RISCVSelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const { + SelectionDAGGenTargetInfo::verifyTargetNode(DAG, N); + #ifndef NDEBUG + // Some additional checks not yet implemented by verifyTargetNode. 
switch (N->getOpcode()) { - default: - return SelectionDAGGenTargetInfo::verifyTargetNode(DAG, N); case RISCVISD::TUPLE_EXTRACT: - assert(N->getNumOperands() == 2 && "Expected three operands!"); assert(N->getOperand(1).getOpcode() == ISD::TargetConstant && - N->getOperand(1).getValueType() == MVT::i32 && - "Expected index to be an i32 target constant!"); + "Expected index to be a target constant!"); break; case RISCVISD::TUPLE_INSERT: - assert(N->getNumOperands() == 3 && "Expected three operands!"); assert(N->getOperand(2).getOpcode() == ISD::TargetConstant && - N->getOperand(2).getValueType() == MVT::i32 && - "Expected index to be an i32 target constant!"); + "Expected index to be a target constant!"); break; case RISCVISD::VQDOT_VL: case RISCVISD::VQDOTU_VL: case RISCVISD::VQDOTSU_VL: { - assert(N->getNumValues() == 1 && "Expected one result!"); - assert(N->getNumOperands() == 5 && "Expected five operands!"); EVT VT = N->getValueType(0); assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i32 && "Expected result to be an i32 scalable vector"); @@ -52,13 +47,9 @@ void RISCVSelectionDAGInfo::verifyTargetNode(const SelectionDAG &DAG, "Expected result and first 3 operands to have the same type!"); EVT MaskVT = N->getOperand(3).getValueType(); assert(MaskVT.isScalableVector() && - MaskVT.getVectorElementType() == MVT::i1 && MaskVT.getVectorElementCount() == VT.getVectorElementCount() && "Expected mask VT to be an i1 scalable vector with same number of " "elements as the result"); - assert((N->getOperand(4).getValueType() == MVT::i32 || - N->getOperand(4).getValueType() == MVT::i64) && - "Expect VL operand to be i32 or i64"); break; } } diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td index f654f8b0671e4..107817fcab6df 100644 --- a/llvm/lib/Target/Sparc/SparcInstrInfo.td +++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td @@ -369,7 +369,7 @@ def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart, def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; -def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>; +def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>; def call : SDNode<"SPISD::CALL", SDT_SPCall, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>; diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/advanced.td b/llvm/test/TableGen/SDNodeInfoEmitter/advanced.td index d7eeaba9d8552..0c4a331be28f5 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/advanced.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/advanced.td @@ -65,22 +65,26 @@ def my_node_3 : SDNode< // CHECK-NEXT: "MyTargetISD::NODE_3\0" // CHECK-NEXT: ; -// CHECK: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// CHECK-NEXT: /* 0 */ {SDTCisVT, 1, 0, MVT::i2}, -// CHECK-SAME: {SDTCisVT, 0, 0, MVT::i1}, -// CHECK-NEXT: /* 2 */ {SDTCisSameSizeAs, 19, 18, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisSameNumEltsAs, 17, 16, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCVecEltisVT, 15, 0, MVT::i32}, -// CHECK-SAME: {SDTCisSubVecOfVec, 14, 13, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisEltOfVec, 12, 11, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisOpSmallerThanOp, 10, 9, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisVTSmallerThanOp, 8, 7, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisSameAs, 6, 5, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisVec, 4, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisFP, 3, 0, 
MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisInt, 2, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisPtrTy, 1, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, -// CHECK-SAME: {SDTCisVT, 0, 0, MVT::i1}, +// CHECK: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// CHECK-NEXT: /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { +// CHECK-NEXT: /* 0 */ {SDTCisVT, 1, 0, 0, MVT::i2}, +// CHECK-SAME: {SDTCisVT, 0, 0, 0, MVT::i1}, +// CHECK-NEXT: /* 2 */ {SDTCisSameSizeAs, 19, 18, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisSameNumEltsAs, 17, 16, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCVecEltisVT, 15, 0, 0, MVT::i32}, +// CHECK-SAME: {SDTCisSubVecOfVec, 14, 13, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisEltOfVec, 12, 11, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisOpSmallerThanOp, 10, 9, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisVTSmallerThanOp, 8, 7, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisSameAs, 6, 5, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisVec, 4, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisFP, 3, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisInt, 2, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisPtrTy, 1, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE}, +// CHECK-SAME: {SDTCisVT, 0, 0, 0, MVT::i1}, // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { @@ -90,5 +94,5 @@ def my_node_3 : SDNode< // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// CHECK-NEXT: /*NumOpcodes=*/3, MyTargetSDNodeDescs, -// CHECK-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// CHECK-NEXT: /*NumOpcodes=*/3, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// CHECK-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-1.td b/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-1.td index 8b86f93b1f785..3a5a70c0a2550 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-1.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-1.td @@ -16,8 +16,12 @@ def my_node_b : SDNode<"MyTargetISD::NODE", SDTypeProfile<1, 0, [SDTCisVT<0, f32 // CHECK-NEXT: "MyTargetISD::NODE\0" // CHECK-NEXT: ; -// CHECK: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// CHECK-NEXT: /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { +// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { @@ -25,5 +29,5 @@ def my_node_b : SDNode<"MyTargetISD::NODE", SDTypeProfile<1, 0, [SDTCisVT<0, f32 // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// CHECK-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, -// CHECK-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// CHECK-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// CHECK-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); diff --git 
a/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-2.td b/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-2.td index 29429e9baa300..916508426b66b 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-2.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/ambiguous-constraints-2.td @@ -26,8 +26,12 @@ def my_node_2b : SDNode<"MyTargetISD::NODE_2", SDTypeProfile<1, 0, [SDTCisVT<0, // CHECK-NEXT: "MyTargetISD::NODE_2\0" // CHECK-NEXT: ; -// CHECK: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// CHECK-NEXT: /* 0 */ {SDTCisVT, 0, 0, MVT::i32}, +// CHECK: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// CHECK-NEXT: /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { +// CHECK-NEXT: /* 0 */ {SDTCisVT, 0, 0, 0, MVT::i32}, // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { @@ -36,5 +40,5 @@ def my_node_2b : SDNode<"MyTargetISD::NODE_2", SDTypeProfile<1, 0, [SDTCisVT<0, // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// CHECK-NEXT: /*NumOpcodes=*/2, MyTargetSDNodeDescs, -// CHECK-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// CHECK-NEXT: /*NumOpcodes=*/2, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// CHECK-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/hw-mode.td b/llvm/test/TableGen/SDNodeInfoEmitter/hw-mode.td new file mode 100644 index 0000000000000..f6e1182f09b8f --- /dev/null +++ b/llvm/test/TableGen/SDNodeInfoEmitter/hw-mode.td @@ -0,0 +1,61 @@ +// RUN: llvm-tblgen -gen-sd-node-info -I %p/../../../include %s | FileCheck %s + +include "llvm/Target/Target.td" + +def MyTarget : Target; + +def M1 : HwMode<[]>; +def M2 : HwMode<[]>; +def M3 : HwMode<[]>; + +def VT1 : ValueTypeByHwMode<[M1], [i1]>; +def VT2 : ValueTypeByHwMode<[M2], [i2]>; +def VT3 : ValueTypeByHwMode<[M1, M2, M3, DefaultMode], [i1, i2, i4, i8]>; + +def my_node_1 : SDNode< + "MyTargetISD::NODE_1", + SDTypeProfile<0, 5, [ + SDTCVecEltisVT<0, VT3>, + SDTCisVT<1, i1>, + SDTCVecEltisVT<2, i2>, + SDTCisVT<3, VT1>, + SDTCVecEltisVT<4, VT2>, + ]> +>; + +def my_node_2 : SDNode< + "MyTargetISD::NODE_2", + SDTypeProfile<1, 2, [ + SDTCVecEltisVT<0, VT3>, + SDTCisVT<1, i1>, + SDTCVecEltisVT<2, i2>, + ]> +>; + +def my_node_3 : SDNode< + "MyTargetISD::NODE_3", + SDTypeProfile<1, 0, [ + SDTCisVT<0, VT3>, + ]> +>; + +// CHECK: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// CHECK-NEXT: /* 0 */ {0, MVT::i8}, {1, MVT::i1}, {2, MVT::i2}, {3, MVT::i4}, +// CHECK-NEXT: /* 4 */ {1, MVT::i1}, +// CHECK-NEXT: /* 5 */ {2, MVT::i2}, +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { +// CHECK-NEXT: /* 0 */ {SDTCisVT, 0, 0, 4, 0}, +// CHECK-NEXT: /* 1 */ {SDTCVecEltisVT, 4, 0, 1, 5}, +// CHECK-SAME: {SDTCisVT, 3, 0, 1, 4}, +// CHECK-SAME: {SDTCVecEltisVT, 2, 0, 0, MVT::i2}, +// CHECK-SAME: {SDTCisVT, 1, 0, 0, MVT::i1}, +// CHECK-SAME: {SDTCVecEltisVT, 0, 0, 4, 0}, +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { +// CHECK-NEXT: {0, 5, 0, 0, 0, 1, 1, 5}, // NODE_1 +// CHECK-NEXT: {1, 2, 0, 0, 0, 21, 3, 3}, // NODE_2 +// CHECK-NEXT: {1, 0, 0, 0, 0, 41, 0, 1}, // NODE_3 +// CHECK-NEXT: }; diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/namespace.td 
b/llvm/test/TableGen/SDNodeInfoEmitter/namespace.td index 217fb7c9fd475..578016469fccd 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/namespace.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/namespace.td @@ -24,15 +24,15 @@ def node_2 : SDNode<"MyCustomISD::NODE", SDTypeProfile<0, 1, [SDTCisVT<0, i2>]>> // EMPTY-NEXT: ; // EMPTY: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// EMPTY-NEXT: /* dummy */ {SDTCisVT, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// EMPTY-NEXT: /* dummy */ {SDTCisVT, 0, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} // EMPTY-NEXT: }; // EMPTY-EMPTY: // EMPTY-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { // EMPTY-NEXT: }; // EMPTY-EMPTY: // EMPTY-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// EMPTY-NEXT: /*NumOpcodes=*/0, MyTargetSDNodeDescs, -// EMPTY-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// EMPTY-NEXT: /*NumOpcodes=*/0, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// EMPTY-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); // COMMON: namespace llvm::[[NS]] { // COMMON-EMPTY: @@ -49,9 +49,13 @@ def node_2 : SDNode<"MyCustomISD::NODE", SDTypeProfile<0, 1, [SDTCisVT<0, i2>]>> // COMMON-NEXT: "[[NS]]::NODE\0" // COMMON-NEXT: ; +// COMMON: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// COMMON-NEXT: /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// COMMON-NEXT: }; + // COMMON: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// TARGET-NEXT: /* 0 */ {SDTCisVT, 0, 0, MVT::i1}, -// CUSTOM-NEXT: /* 0 */ {SDTCisVT, 0, 0, MVT::i2}, +// TARGET-NEXT: /* 0 */ {SDTCisVT, 0, 0, 0, MVT::i1}, +// CUSTOM-NEXT: /* 0 */ {SDTCisVT, 0, 0, 0, MVT::i2}, // COMMON-NEXT: }; // COMMON-EMPTY: // COMMON-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { @@ -60,5 +64,5 @@ def node_2 : SDNode<"MyCustomISD::NODE", SDTypeProfile<0, 1, [SDTCisVT<0, i2>]>> // COMMON-NEXT: }; // COMMON-EMPTY: // COMMON-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// COMMON-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, -// COMMON-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// COMMON-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// COMMON-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/no-nodes.td b/llvm/test/TableGen/SDNodeInfoEmitter/no-nodes.td index cc0f87755cdc2..3e3f4e3284803 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/no-nodes.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/no-nodes.td @@ -35,16 +35,20 @@ def MyTarget : Target; // CHECK-NEXT: static constexpr llvm::StringTable // CHECK-NEXT: MyTargetSDNodeNames = MyTargetSDNodeNamesStorage; // CHECK-EMPTY: +// CHECK-NEXT: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// CHECK-NEXT: /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: }; +// CHECK-EMPTY: // CHECK-NEXT: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// CHECK-NEXT: /*NumOpcodes=*/0, MyTargetSDNodeDescs, -// CHECK-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// CHECK-NEXT: /*NumOpcodes=*/0, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// CHECK-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); // CHECK-EMPTY: 
// CHECK-NEXT: } // namespace llvm // CHECK-EMPTY: diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/skipped-nodes.td b/llvm/test/TableGen/SDNodeInfoEmitter/skipped-nodes.td index abd6ad3bda3bc..f6c2d174f636b 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/skipped-nodes.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/skipped-nodes.td @@ -75,7 +75,7 @@ def node_5b : SDNode<"MyTargetISD::NODE_5", SDTypeProfile<0, 0, []>, [SDNPHasCha // CHECK-NEXT: ; // CHECK: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { @@ -83,8 +83,8 @@ def node_5b : SDNode<"MyTargetISD::NODE_5", SDTypeProfile<0, 0, []>, [SDNPHasCha // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// CHECK-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, -// CHECK-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// CHECK-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// CHECK-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); def compat_a : SDNode<"MyTargetISD::COMPAT", SDTypeProfile<1, -1, []>>; def compat_b : SDNode<"MyTargetISD::COMPAT", SDTypeProfile<1, -1, [SDTCisVT<0, untyped>]>>; diff --git a/llvm/test/TableGen/SDNodeInfoEmitter/trivial-node.td b/llvm/test/TableGen/SDNodeInfoEmitter/trivial-node.td index 4bdc70a8508f6..6874e389a41eb 100644 --- a/llvm/test/TableGen/SDNodeInfoEmitter/trivial-node.td +++ b/llvm/test/TableGen/SDNodeInfoEmitter/trivial-node.td @@ -21,8 +21,12 @@ def my_noop : SDNode<"MyTargetISD::NOOP", SDTypeProfile<0, 0, []>>; // CHECK-NEXT: "MyTargetISD::NOOP\0" // CHECK-NEXT: ; -// CHECK: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { -// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK: static const VTByHwModePair MyTargetVTByHwModeTable[] = { +// CHECK-NEXT: /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE} +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: static const SDTypeConstraint MyTargetSDTypeConstraints[] = { +// CHECK-NEXT: /* dummy */ {SDTCisVT, 0, 0, 0, MVT::INVALID_SIMPLE_VALUE_TYPE} // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeDesc MyTargetSDNodeDescs[] = { @@ -30,5 +34,5 @@ def my_noop : SDNode<"MyTargetISD::NOOP", SDTypeProfile<0, 0, []>>; // CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: static const SDNodeInfo MyTargetGenSDNodeInfo( -// CHECK-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, -// CHECK-NEXT: MyTargetSDNodeNames, MyTargetSDTypeConstraints); +// CHECK-NEXT: /*NumOpcodes=*/1, MyTargetSDNodeDescs, MyTargetSDNodeNames, +// CHECK-NEXT: MyTargetVTByHwModeTable, MyTargetSDTypeConstraints); diff --git a/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h b/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h index 761ef1fcf12fe..1cd18b7184a57 100644 --- a/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h +++ b/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h @@ -162,7 +162,8 @@ class SequenceToOffsetTable { /// emit - Print out the table as the body of an array initializer. /// Use the Print function to print elements. 
- void emit(raw_ostream &OS, void (*Print)(raw_ostream &, ElemT)) const { + void emit(raw_ostream &OS, + function_ref Print) const { assert(IsLaidOut && "Call layout() before emit()"); for (const auto &[Seq, Offset] : Seqs) { OS << " /* " << Offset << " */ "; diff --git a/llvm/utils/TableGen/Common/InfoByHwMode.h b/llvm/utils/TableGen/Common/InfoByHwMode.h index ce84960ef79a7..bd24fb84b085a 100644 --- a/llvm/utils/TableGen/Common/InfoByHwMode.h +++ b/llvm/utils/TableGen/Common/InfoByHwMode.h @@ -103,6 +103,8 @@ template struct InfoByHwMode { LLVM_ATTRIBUTE_ALWAYS_INLINE const_iterator end() const { return Map.end(); } LLVM_ATTRIBUTE_ALWAYS_INLINE + size_t size() const { return Map.size(); } + LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const { return Map.empty(); } LLVM_ATTRIBUTE_ALWAYS_INLINE diff --git a/llvm/utils/TableGen/SDNodeInfoEmitter.cpp b/llvm/utils/TableGen/SDNodeInfoEmitter.cpp index dd18d29e6c676..5dff3862fa308 100644 --- a/llvm/utils/TableGen/SDNodeInfoEmitter.cpp +++ b/llvm/utils/TableGen/SDNodeInfoEmitter.cpp @@ -195,15 +195,29 @@ static StringRef getTypeConstraintKindName(SDTypeConstraint::KindTy Kind) { #undef CASE } -static void emitTypeConstraint(raw_ostream &OS, SDTypeConstraint C) { +static void emitTypeConstraint( + raw_ostream &OS, SDTypeConstraint C, + const std::map &VTByHwModeTable) { unsigned OtherOpNo = 0; - MVT VT; + unsigned NumHwModes = 0; + unsigned VTByHwModeOffset = 0; + MVT::SimpleValueType VT = MVT::INVALID_SIMPLE_VALUE_TYPE; switch (C.ConstraintType) { case SDTypeConstraint::SDTCisVT: + // SequenceToOffsetTable::emit() prints a "dummy" (default-constructed) + // element if the table would otherwise be empty. VVT is empty in this case. + if (C.VVT.empty()) + break; + [[fallthrough]]; case SDTypeConstraint::SDTCVecEltisVT: - if (C.VVT.isSimple()) - VT = C.VVT.getSimple(); + if (C.VVT.isSimple()) { + VT = C.VVT.getSimple().SimpleTy; + } else { + NumHwModes = C.VVT.size(); + assert(NumHwModes && "Empty type set?"); + VTByHwModeOffset = VTByHwModeTable.at(C.VVT); + } break; case SDTypeConstraint::SDTCisPtrTy: case SDTypeConstraint::SDTCisInt: @@ -221,15 +235,22 @@ static void emitTypeConstraint(raw_ostream &OS, SDTypeConstraint C) { break; } - StringRef KindName = getTypeConstraintKindName(C.ConstraintType); - StringRef VTName = VT.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE - ? "MVT::INVALID_SIMPLE_VALUE_TYPE" - : getEnumName(VT.SimpleTy); - OS << formatv("{{{}, {}, {}, {}}", KindName, C.OperandNo, OtherOpNo, VTName); + OS << '{' << getTypeConstraintKindName(C.ConstraintType) << ", " + << C.OperandNo << ", " << OtherOpNo << ", " << NumHwModes << ", "; + if (NumHwModes) { + OS << VTByHwModeOffset; + } else { + OS << (VT == MVT::INVALID_SIMPLE_VALUE_TYPE + ? "MVT::INVALID_SIMPLE_VALUE_TYPE" + : getEnumName(VT)); + } + OS << '}'; } std::vector> SDNodeInfoEmitter::emitTypeConstraints(raw_ostream &OS) const { + std::map VTByHwModeTable; + using ConstraintsVecTy = SmallVector; SequenceToOffsetTable ConstraintTable( /*Terminator=*/std::nullopt); @@ -258,6 +279,16 @@ SDNodeInfoEmitter::emitTypeConstraints(raw_ostream &OS) const { if (Constraints.empty()) continue; + for (const SDTypeConstraint &C : Constraints) { + if (C.ConstraintType == SDTypeConstraint::SDTCisVT || + C.ConstraintType == SDTypeConstraint::SDTCVecEltisVT) { + if (!C.VVT.isSimple()) { + assert(!C.VVT.empty() && "Unexpected empty type set"); + VTByHwModeTable.try_emplace(C.VVT); + } + } + } + // SequenceToOffsetTable reuses the storage if a sequence matches another // sequence's *suffix*. 
It is more likely that we have a matching *prefix*, // so reverse the order to increase the likelihood of a match. @@ -266,9 +297,26 @@ SDNodeInfoEmitter::emitTypeConstraints(raw_ostream &OS) const { ConstraintTable.layout(); + OS << "static const VTByHwModePair " << Target.getName() + << "VTByHwModeTable[] = {\n"; + unsigned VTByHwModeOffset = 0; + for (auto &[VTByHwMode, Offset] : VTByHwModeTable) { + OS << " /* " << VTByHwModeOffset << " */ "; + for (auto [Mode, VT] : VTByHwMode) + OS << '{' << Mode << ", " << getEnumName(VT.SimpleTy) << "}, "; + OS << '\n'; + Offset = VTByHwModeOffset; + VTByHwModeOffset += VTByHwMode.size(); + } + // Avoid "zero size arrays are an extension" warning. + if (VTByHwModeTable.empty()) + OS << " /* dummy */ {0, MVT::INVALID_SIMPLE_VALUE_TYPE}\n"; + OS << "};\n\n"; + OS << "static const SDTypeConstraint " << Target.getName() << "SDTypeConstraints[] = {\n"; - ConstraintTable.emit(OS, emitTypeConstraint); + ConstraintTable.emit(OS, std::bind(emitTypeConstraint, std::placeholders::_1, + std::placeholders::_2, VTByHwModeTable)); OS << "};\n\n"; for (const auto &[EnumName, Nodes] : NodesByName) { @@ -338,8 +386,8 @@ void SDNodeInfoEmitter::emitDescs(raw_ostream &OS) const { OS << "};\n\n"; OS << formatv("static const SDNodeInfo {0}GenSDNodeInfo(\n" - " /*NumOpcodes=*/{1}, {0}SDNodeDescs,\n" - " {0}SDNodeNames, {0}SDTypeConstraints);\n", + " /*NumOpcodes=*/{1}, {0}SDNodeDescs, {0}SDNodeNames,\n" + " {0}VTByHwModeTable, {0}SDTypeConstraints);\n", TargetName, NodesByName.size()); } From e5b9e80a54bb75d4b0fee7197d78f909b1f4ba93 Mon Sep 17 00:00:00 2001 From: "Yaxun (Sam) Liu" Date: Sun, 16 Nov 2025 10:36:13 -0500 Subject: [PATCH 06/17] [ClangLinkerWrapper] Fix test linker-wrapper-hip-no-rdc.c https://github.com/llvm/llvm-project/pull/167918 broke buildbots: https://lab.llvm.org/buildbot/#/builders/64/builds/6531 https://lab.llvm.org/buildbot/#/builders/108/builds/19881 with error: # | clang: error: unable to execute command: posix_spawn failed: No such file or directory # | clang: error: ld.lld command failed with exit code 1 (use -v to see invocation) This is due to the test requiring lld but these buildbots do not build them. Fix the lit test by adding REQUIRES: lld --- clang/test/Driver/linker-wrapper-hip-no-rdc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/test/Driver/linker-wrapper-hip-no-rdc.c b/clang/test/Driver/linker-wrapper-hip-no-rdc.c index d6838896f7093..7545205f22ea0 100644 --- a/clang/test/Driver/linker-wrapper-hip-no-rdc.c +++ b/clang/test/Driver/linker-wrapper-hip-no-rdc.c @@ -1,5 +1,6 @@ // UNSUPPORTED: system-windows // REQUIRES: amdgpu-registered-target +// REQUIRES: lld // Test HIP non-RDC linker wrapper behavior with new offload driver. // The linker wrapper should output .hipfb files directly without using -r option. 
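As background for the fix above: lit only runs a test when every feature named in its REQUIRES lines is present in the configuration's available features, so on bots built without lld the test is reported as unsupported instead of failing. Below is a minimal sketch of such a gated test; the file contents, target triple, and CHECK pattern are illustrative assumptions and are not taken from linker-wrapper-hip-no-rdc.c.

// REQUIRES: lld
// RUN: %clang --target=x86_64-unknown-linux-gnu -fuse-ld=lld -### %s 2>&1 | FileCheck %s
// CHECK: ld.lld
int main(void) { return 0; }
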
From 47da0f1e91d06deae5d60229485f42242891a7f4 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Sun, 16 Nov 2025 15:54:04 +0000 Subject: [PATCH 07/17] [X86] Add experimental-new-constant-interpreter test coverage to BF16 intrinsics tests (#168274) --- clang/test/CodeGen/X86/avx512bf16-builtins.c | 5 +++++ clang/test/CodeGen/X86/avx512vlbf16-builtins.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/clang/test/CodeGen/X86/avx512bf16-builtins.c b/clang/test/CodeGen/X86/avx512bf16-builtins.c index 52c20aa15a568..3f544d387f7aa 100644 --- a/clang/test/CodeGen/X86/avx512bf16-builtins.c +++ b/clang/test/CodeGen/X86/avx512bf16-builtins.c @@ -2,6 +2,11 @@ // RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror | FileCheck %s +// +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s #include diff --git a/clang/test/CodeGen/X86/avx512vlbf16-builtins.c b/clang/test/CodeGen/X86/avx512vlbf16-builtins.c index 5e37b4d502ad1..d59b254520774 100644 --- a/clang/test/CodeGen/X86/avx512vlbf16-builtins.c +++ b/clang/test/CodeGen/X86/avx512vlbf16-builtins.c @@ -2,6 +2,11 @@ // RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s +// +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512bf16 -target-feature 
+avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512bf16 -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s #include From e8cc0c3eb82789c9d2a22e094a941be180e9ab4b Mon Sep 17 00:00:00 2001 From: Anton Kesy Date: Sun, 16 Nov 2025 16:01:06 +0000 Subject: [PATCH 08/17] [clang-format]: Fix JSON casing (#168156) This commit aligns the user clang-format output to always show JSON, not Json. --- clang/docs/ClangFormat.rst | 2 +- clang/tools/clang-format/ClangFormat.cpp | 4 ++-- clang/tools/clang-format/git-clang-format | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clang/docs/ClangFormat.rst b/clang/docs/ClangFormat.rst index 92af06e5083d6..26490f9c15bb8 100644 --- a/clang/docs/ClangFormat.rst +++ b/clang/docs/ClangFormat.rst @@ -50,7 +50,7 @@ to format C/C++/Java/JavaScript/JSON/Objective-C/Protobuf/C# code. CSharp: .cs Java: .java JavaScript: .js .mjs .cjs .ts - Json: .json .ipynb + JSON: .json .ipynb Objective-C: .m .mm Proto: .proto .protodevel TableGen: .td diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp index 5f6502f7f18a8..12ac8a31f24ab 100644 --- a/clang/tools/clang-format/ClangFormat.cpp +++ b/clang/tools/clang-format/ClangFormat.cpp @@ -88,7 +88,7 @@ static cl::opt AssumeFileName( " CSharp: .cs\n" " Java: .java\n" " JavaScript: .js .mjs .cjs .ts\n" - " Json: .json .ipynb\n" + " JSON: .json .ipynb\n" " Objective-C: .m .mm\n" " Proto: .proto .protodevel\n" " TableGen: .td\n" @@ -489,7 +489,7 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) { auto Err = Replaces.add(tooling::Replacement(AssumedFileName, 0, 0, "x = ")); if (Err) - llvm::errs() << "Bad Json variable insertion\n"; + llvm::errs() << "Bad JSON variable insertion\n"; } auto ChangedCode = tooling::applyAllReplacements(Code->getBuffer(), Replaces); diff --git a/clang/tools/clang-format/git-clang-format b/clang/tools/clang-format/git-clang-format index fe2dd283d403e..dcc0a5347f5d2 100755 --- a/clang/tools/clang-format/git-clang-format +++ b/clang/tools/clang-format/git-clang-format @@ -115,7 +115,7 @@ def main(): "ts", # TypeScript "cs", # C Sharp "json", - "ipynb", # Json + "ipynb", # JSON "sv", "svh", "v", From 2394eb118045bd47c1c75f9cab42d701221846a0 Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Sun, 16 Nov 2025 08:08:39 -0800 Subject: [PATCH 09/17] [TargetParser] Avoid repeated hash lookups (NFC) (#168216) --- llvm/lib/TargetParser/PPCTargetParser.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/llvm/lib/TargetParser/PPCTargetParser.cpp b/llvm/lib/TargetParser/PPCTargetParser.cpp index f74d670df4306..106d07107ac8c 100644 --- a/llvm/lib/TargetParser/PPCTargetParser.cpp +++ b/llvm/lib/TargetParser/PPCTargetParser.cpp @@ -138,8 +138,11 @@ std::optional> getPPCDefaultTargetFeatures(const Triple &T, // The target feature `quadword-atomics` is only supported for 64-bit // POWER8 and above. 
- if (Features.find("quadword-atomics") != Features.end() && !T.isArch64Bit()) - Features["quadword-atomics"] = false; + if (!T.isArch64Bit()) { + auto It = Features.find("quadword-atomics"); + if (It != Features.end()) + It->second = false; + } return Features; } } // namespace PPC From 688b190745655c30a39a25f2e274fd62f0e9cce4 Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Sun, 16 Nov 2025 08:08:52 -0800 Subject: [PATCH 10/17] [ADT] Simplify DenseMap::grow (NFC) (#168238) This patch simplifies DenseMap::grow by reimplementing it in terms of DenseMapBase::moveFrom. Since moveFrom iterates over the bucket range, we don't need: if (!OldBuckets) The old bucket array is released by the destructor on Tmp. This patch removes moveFromOldBuckets as it's no longer used with this patch. moveFromImpl is "inlined" into moveFrom. --- llvm/include/llvm/ADT/DenseMap.h | 53 ++++++++++++-------------------- 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index 094dc5730a8d9..7926159c4c09c 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -413,11 +413,13 @@ class DenseMapBase : public DebugEpochBase { return NextPowerOf2(NumEntries * 4 / 3 + 1); } - void moveFromImpl(iterator_range OldBuckets) { + // Move key/value from Other to *this. + // Other is left in a valid but empty state. + void moveFrom(DerivedT &Other) { // Insert all the old elements. const KeyT EmptyKey = KeyInfoT::getEmptyKey(); const KeyT TombstoneKey = KeyInfoT::getTombstoneKey(); - for (BucketT &B : OldBuckets) { + for (BucketT &B : Other.buckets()) { if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) && !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) { // Insert the key/value into the new table. @@ -434,17 +436,6 @@ class DenseMapBase : public DebugEpochBase { } B.getFirst().~KeyT(); } - } - - void moveFromOldBuckets(iterator_range OldBuckets) { - initEmpty(); - moveFromImpl(OldBuckets); - } - - // Move key/value from Other to *this. - // Other is left in a valid but empty state. - void moveFrom(DerivedT &Other) { - moveFromImpl(Other.buckets()); Other.derived().kill(); } @@ -738,6 +729,11 @@ class DenseMap : public DenseMapBase, unsigned NumTombstones; unsigned NumBuckets; + struct ExactBucketCount {}; + explicit DenseMap(unsigned NumBuckets, ExactBucketCount) { + initWithExactBucketCount(NumBuckets); + } + public: /// Create a DenseMap with an optional \p NumElementsToReserve to guarantee /// that this number of elements can be inserted in the map without grow(). @@ -822,9 +818,8 @@ class DenseMap : public DenseMapBase, return true; } - void init(unsigned InitNumEntries) { - auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); - if (allocateBuckets(InitBuckets)) { + void initWithExactBucketCount(unsigned NewNumBuckets) { + if (allocateBuckets(NewNumBuckets)) { this->BaseT::initEmpty(); } else { NumEntries = 0; @@ -832,6 +827,11 @@ class DenseMap : public DenseMapBase, } } + void init(unsigned InitNumEntries) { + auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); + initWithExactBucketCount(InitBuckets); + } + // Put the zombie instance in a known good state after a move. 
void kill() { deallocateBuckets(); @@ -840,23 +840,10 @@ class DenseMap : public DenseMapBase, } void grow(unsigned AtLeast) { - unsigned OldNumBuckets = NumBuckets; - BucketT *OldBuckets = Buckets; - - allocateBuckets(std::max( - 64, static_cast(NextPowerOf2(AtLeast - 1)))); - assert(Buckets); - if (!OldBuckets) { - this->BaseT::initEmpty(); - return; - } - - this->moveFromOldBuckets( - llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets)); - - // Free the old table. - deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets, - alignof(BucketT)); + AtLeast = std::max(64, NextPowerOf2(AtLeast - 1)); + DenseMap Tmp(AtLeast, ExactBucketCount{}); + Tmp.moveFrom(*this); + swapImpl(Tmp); } // Plan how to shrink the bucket table. Return: From 7262c592c124a5bd8a5b534af5ed0390511116d9 Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Sun, 16 Nov 2025 08:08:59 -0800 Subject: [PATCH 11/17] [CAS] Remove a redundant cast (NFC) (#168240) D.Offset.get() already returns uint64_t. Identified with readability-redundant-casting. --- llvm/lib/CAS/OnDiskGraphDB.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/llvm/lib/CAS/OnDiskGraphDB.cpp b/llvm/lib/CAS/OnDiskGraphDB.cpp index 245b6fb832549..2d76ff11064e9 100644 --- a/llvm/lib/CAS/OnDiskGraphDB.cpp +++ b/llvm/lib/CAS/OnDiskGraphDB.cpp @@ -938,8 +938,7 @@ Error OnDiskGraphDB::validate(bool Deep, HashingFuncT Hasher) const { // Check offset is a postive value, and large enough to hold the // header for the data record. if (D.Offset.get() <= 0 || - (uint64_t)D.Offset.get() + sizeof(DataRecordHandle::Header) >= - DataPool.size()) + D.Offset.get() + sizeof(DataRecordHandle::Header) >= DataPool.size()) return formatError("datapool record out of bound"); break; case TrieRecord::StorageKind::Standalone: From ea0ecd63d4aa6ebe0fd11fe48bb707ba024d5a49 Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Sun, 16 Nov 2025 08:09:07 -0800 Subject: [PATCH 12/17] [llvm] Proofread *.rst (#168254) This patch is limited to hyphenation to ease the review process. --- llvm/docs/AMDGPUUsage.rst | 6 +++--- llvm/docs/CodeGenerator.rst | 4 ++-- llvm/docs/CommandLine.rst | 2 +- llvm/docs/LangRef.rst | 4 ++-- llvm/docs/SPIRVUsage.rst | 2 +- llvm/docs/XRayFDRFormat.rst | 10 +++++----- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst index 7267f6bb88a58..bd2884481ee95 100644 --- a/llvm/docs/AMDGPUUsage.rst +++ b/llvm/docs/AMDGPUUsage.rst @@ -18986,8 +18986,8 @@ On entry to a function: objects and to convert this address to a flat address by adding the flat scratch aperture base address. - The swizzled SP value is always 4 bytes aligned for the ``r600`` - architecture and 16 byte aligned for the ``amdgcn`` architecture. + The swizzled SP value is always 4-byte aligned for the ``r600`` + architecture and 16-byte aligned for the ``amdgcn`` architecture. .. note:: @@ -19278,7 +19278,7 @@ describes how the AMDGPU implements function calls: The CFI will reflect the changed calculation needed to compute the CFA from SP. -7. 4 byte spill slots are used in the stack frame. One slot is allocated for an +7. 4-byte spill slots are used in the stack frame. One slot is allocated for an emergency spill slot. Buffer instructions are used for stack accesses and not the ``flat_scratch`` instruction. 
diff --git a/llvm/docs/CodeGenerator.rst b/llvm/docs/CodeGenerator.rst index a74f16d7e9477..a960ea13df60c 100644 --- a/llvm/docs/CodeGenerator.rst +++ b/llvm/docs/CodeGenerator.rst @@ -269,7 +269,7 @@ Each register in the processor description has an associated indicate whether one register overlaps with another). In addition to the per-register description, the ``TargetRegisterInfo`` class -exposes a set of processor specific register classes (instances of the +exposes a set of processor-specific register classes (instances of the ``TargetRegisterClass`` class). Each register class contains sets of registers that have the same properties (for example, they are all 32-bit integer registers). Each SSA virtual register created by the instruction selector has @@ -1295,7 +1295,7 @@ Physical registers, in LLVM, are grouped in *Register Classes*. Elements in the same register class are functionally equivalent, and can be interchangeably used. Each virtual register can only be mapped to physical registers of a particular class. For instance, in the X86 architecture, some virtuals can only -be allocated to 8 bit registers. A register class is described by +be allocated to 8-bit registers. A register class is described by ``TargetRegisterClass`` objects. To discover if a virtual register is compatible with a given physical, this code can be used: diff --git a/llvm/docs/CommandLine.rst b/llvm/docs/CommandLine.rst index 00d098745f55b..54a8b0d019399 100644 --- a/llvm/docs/CommandLine.rst +++ b/llvm/docs/CommandLine.rst @@ -1262,7 +1262,7 @@ specify boolean properties that modify the option. .. _cl::DefaultOption: * The **cl::DefaultOption** modifier is used to specify that the option is a - default that can be overridden by application specific parsers. For example, + default that can be overridden by application-specific parsers. For example, the ``-help`` alias, ``-h``, is registered this way, so it can be overridden by applications that need to use the ``-h`` option for another purpose, either as a regular option or an alias for another option. diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 83d68d5fe4bee..1a8886dd79c9c 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -691,7 +691,7 @@ correctly in a target-specific way. An example of pointers with non-address bits are the AMDGPU buffer descriptors which are 160 bits: a 128-bit fat pointer and a 32-bit offset. -Similarly, CHERI capabilities contain a 32 or 64 bit address as well as the +Similarly, CHERI capabilities contain a 32- or 64-bit address as well as the same number of metadata bits, but unlike the AMDGPU buffer descriptors they have external state in addition to non-address bits. @@ -19600,7 +19600,7 @@ Syntax: Overview: """"""""" -The '``llvm.canonicalize.*``' intrinsic returns the platform specific canonical +The '``llvm.canonicalize.*``' intrinsic returns the platform-specific canonical encoding of a floating-point number. This canonicalization is useful for implementing certain numeric primitives such as frexp. 
The canonical encoding is defined by IEEE-754-2008 to be: diff --git a/llvm/docs/SPIRVUsage.rst b/llvm/docs/SPIRVUsage.rst index 5ee3d83bd7aac..aedb6643cf581 100644 --- a/llvm/docs/SPIRVUsage.rst +++ b/llvm/docs/SPIRVUsage.rst @@ -216,7 +216,7 @@ Below is a list of supported SPIR-V extensions, sorted alphabetically by their e * - ``SPV_KHR_float_controls`` - Provides new execution modes to control floating-point computations by overriding an implementation’s default behavior for rounding modes, denormals, signed zero, and infinities. * - ``SPV_KHR_integer_dot_product`` - - Adds instructions for dot product operations on integer vectors with optional accumulation. Integer vectors includes 4-component vector of 8 bit integers and 4-component vectors of 8 bit integers packed into 32-bit integers. + - Adds instructions for dot product operations on integer vectors with optional accumulation. Integer vectors includes 4-component vector of 8-bit integers and 4-component vectors of 8-bit integers packed into 32-bit integers. * - ``SPV_KHR_linkonce_odr`` - Allows to use the LinkOnceODR linkage type that lets a function or global variable to be merged with other functions or global variables of the same name when linkage occurs. * - ``SPV_KHR_no_integer_wrap_decoration`` diff --git a/llvm/docs/XRayFDRFormat.rst b/llvm/docs/XRayFDRFormat.rst index adb9d1e9a85ca..b21b55496d753 100644 --- a/llvm/docs/XRayFDRFormat.rst +++ b/llvm/docs/XRayFDRFormat.rst @@ -147,14 +147,14 @@ reconstruct a call stack of instrumented function and their durations. +---------------+--------------+-----------------------------------------------+ On little-endian machines, the bitfields are ordered from least significant bit -bit to most significant bit. A reader can read an 8 bit value and apply the mask +bit to most significant bit. A reader can read an 8-bit value and apply the mask ``0x01`` for the discriminant. Similarly, they can read 32 bits and unsigned shift right by ``0x04`` to obtain the function_id field. On big-endian machine, the bitfields are written in order from most significant -bit to least significant bit. A reader would read an 8 bit value and unsigned +bit to least significant bit. A reader would read an 8-bit value and unsigned shift right by 7 bits for the discriminant. The function_id field could be -obtained by reading a 32 bit value and applying the mask ``0x0FFFFFFF``. +obtained by reading a 32-bit value and applying the mask ``0x0FFFFFFF``. Function action types are as follows. @@ -288,11 +288,11 @@ Its data segment is as follows. TSCWrap Records --------------- -Since each function record uses a 32 bit value to represent the number of ticks +Since each function record uses a 32-bit value to represent the number of ticks of the timestamp counter since the last reference, it is possible for this value to overflow, particularly for sparsely instrumented binaries. -When this delta would not fit into a 32 bit representation, a reference absolute +When this delta would not fit into a 32-bit representation, a reference absolute timestamp counter record is written in the form of a TSCWrap record. Its data segment is as follows. 
From e1e696d2eb0f4467133275ff6a2661f389dd3dbe Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Sun, 16 Nov 2025 08:09:21 -0800 Subject: [PATCH 13/17] [Scalar] Avoid repeated hash lookups (NFC) (#168217) --- llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp index a62804d3ef201..e5399bdd767e2 100644 --- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp @@ -1275,8 +1275,7 @@ bool StraightLineStrengthReduce::runOnFunction(Function &F) { // Build the dependency graph and sort candidate instructions from dependency // roots to leaves for (auto &C : Candidates) { - if (DependencyGraph.find(C.Ins) == DependencyGraph.end()) - DependencyGraph[C.Ins] = {}; + DependencyGraph.try_emplace(C.Ins); addDependency(C, C.Basis); } sortCandidateInstructions(); From 180b59c37ab2082a2e68a2b75283e7fa4527552a Mon Sep 17 00:00:00 2001 From: Kazu Hirata Date: Sun, 16 Nov 2025 08:12:23 -0800 Subject: [PATCH 14/17] [clang] Proofread *.rst (#168215) This patch is limited to single-word replacements to fix spelling and/or grammar to ease the review process. Punctuation and markdown fixes are specifically excluded. --- clang/docs/AutomaticReferenceCounting.rst | 2 +- clang/docs/Block-ABI-Apple.rst | 2 +- clang/docs/BoundsSafety.rst | 4 ++-- clang/docs/BoundsSafetyAdoptionGuide.rst | 2 +- clang/docs/CXXTypeAwareAllocators.rst | 2 +- clang/docs/ClangTools.rst | 2 +- clang/docs/ClangTransformerTutorial.rst | 2 +- clang/docs/DataFlowSanitizerDesign.rst | 4 ++-- clang/docs/InternalsManual.rst | 2 +- clang/docs/JSONCompilationDatabase.rst | 2 +- clang/docs/LibASTImporter.rst | 2 +- clang/docs/MisExpect.rst | 2 +- clang/docs/PCHInternals.rst | 2 +- clang/docs/PointerAuthentication.rst | 8 ++++---- clang/docs/RAVFrontendAction.rst | 2 +- clang/docs/RealtimeSanitizer.rst | 2 +- clang/docs/SafeStack.rst | 2 +- clang/docs/UsersManual.rst | 2 +- 18 files changed, 23 insertions(+), 23 deletions(-) diff --git a/clang/docs/AutomaticReferenceCounting.rst b/clang/docs/AutomaticReferenceCounting.rst index 80bbd25121148..4a97cac7b2615 100644 --- a/clang/docs/AutomaticReferenceCounting.rst +++ b/clang/docs/AutomaticReferenceCounting.rst @@ -2318,7 +2318,7 @@ aligned for an object of type ``id``. The other qualifiers may be used on explicitly under-aligned memory. The runtime tracks ``__weak`` objects which holds non-null values. It is -undefined behavior to direct modify a ``__weak`` object which is being tracked +undefined behavior to directly modify a ``__weak`` object which is being tracked by the runtime except through an :ref:`objc_storeWeak `, :ref:`objc_destroyWeak `, or diff --git a/clang/docs/Block-ABI-Apple.rst b/clang/docs/Block-ABI-Apple.rst index f46f2f991ad7f..c577e43b171b5 100644 --- a/clang/docs/Block-ABI-Apple.rst +++ b/clang/docs/Block-ABI-Apple.rst @@ -34,7 +34,7 @@ attempt to use this ABI on systems prior to SnowLeopard is undefined. High Level ========== -The ABI of ``Blocks`` consist of their layout and the runtime functions required +The ABI of ``Blocks`` consists of their layout and the runtime functions required by the compiler. 
A ``Block`` of type ``R (^)(P...)`` consists of a structure of the following form: diff --git a/clang/docs/BoundsSafety.rst b/clang/docs/BoundsSafety.rst index b0f77c38b28af..e3cb78eecbea4 100644 --- a/clang/docs/BoundsSafety.rst +++ b/clang/docs/BoundsSafety.rst @@ -912,7 +912,7 @@ unsafe library by calling ``get_buf()`` which returns ``void ``__bidi_indexable`` gets the lower bound same as the pointer value. * A type conversion may involve both a bitcast and a bounds annotation cast. For - example, casting from ``int *__bidi_indexable`` to ``char *__single`` involve + example, casting from ``int *__bidi_indexable`` to ``char *__single`` involves a bitcast (``int *`` to ``char *``) and a bounds annotation cast (``__bidi_indexable`` to ``__single``). In this case, the compiler performs the bitcast and then converts the bounds annotation. This means, ``int @@ -994,7 +994,7 @@ other types of safety violations such as type confusion. For instance, ``-fbounds-safety`` heavily relies on run-time checks to keep the bounds safety and the soundness of the type system. This may incur significant code size -overhead in unoptimized builds and leaving some of the adoption mistakes to be +overhead in unoptimized builds and leave some of the adoption mistakes to be caught only at run time. This is not a fundamental limitation, however, because incrementally adding necessary static analysis will allow us to catch issues early on and remove unnecessary bounds checks in unoptimized builds. diff --git a/clang/docs/BoundsSafetyAdoptionGuide.rst b/clang/docs/BoundsSafetyAdoptionGuide.rst index 947a236080586..09deae37abbf7 100644 --- a/clang/docs/BoundsSafetyAdoptionGuide.rst +++ b/clang/docs/BoundsSafetyAdoptionGuide.rst @@ -45,7 +45,7 @@ that automatically carries the bounds information. Address compiler diagnostics ============================ -Once you pass ``-fbounds-safety`` to compiler a C file, you will see some new +Once you pass ``-fbounds-safety`` to compile a C file, you will see some new compiler warnings and errors, which guide adoption of ``-fbounds-safety``. Consider the following example: diff --git a/clang/docs/CXXTypeAwareAllocators.rst b/clang/docs/CXXTypeAwareAllocators.rst index 4cf5f7817ac3e..c65d20a5c7d47 100644 --- a/clang/docs/CXXTypeAwareAllocators.rst +++ b/clang/docs/CXXTypeAwareAllocators.rst @@ -90,7 +90,7 @@ Operator selection then proceeds according to the usual rules for choosing the best/most constrained match. Any declaration of a type aware operator new or operator delete must include a -matching complimentary operator defined in the same scope. +matching complementary operator defined in the same scope. Notes ===== diff --git a/clang/docs/ClangTools.rst b/clang/docs/ClangTools.rst index b53c125f5b42e..b480b88428359 100644 --- a/clang/docs/ClangTools.rst +++ b/clang/docs/ClangTools.rst @@ -72,7 +72,7 @@ instructions on how to setup and use `clang-check`. ---------------- Clang-format is both a :doc:`library ` and a :doc:`stand-alone tool -` with the goal of automatically reformatting C++ sources files +` with the goal of automatically reformatting C++ source files according to configurable style guides. To do so, clang-format uses Clang's ``Lexer`` to transform an input file into a token stream and then changes all the whitespace around those tokens. 
The goal is for clang-format to serve both diff --git a/clang/docs/ClangTransformerTutorial.rst b/clang/docs/ClangTransformerTutorial.rst index e9b701203300a..f3f936a1144e3 100644 --- a/clang/docs/ClangTransformerTutorial.rst +++ b/clang/docs/ClangTransformerTutorial.rst @@ -78,7 +78,7 @@ can express this a Transformer rewrite rule: arguments: the pattern, the edit, and (optionally) an explanatory note. In our example, the pattern (``functionDecl(...)``) identifies the declaration of the function ``MkX``. Since we're just diagnosing the problem, but not suggesting a -fix, our edit is an no-op. But, it contains an *anchor* for the diagnostic +fix, our edit is a no-op. But, it contains an *anchor* for the diagnostic message: ``node("fun")`` says to associate the message with the source range of the AST node bound to "fun"; in this case, the ill-named function declaration. Finally, we use ``cat`` to build a message that explains the change. Regarding the diff --git a/clang/docs/DataFlowSanitizerDesign.rst b/clang/docs/DataFlowSanitizerDesign.rst index 4f60391d9f5e0..ef6e6d5821df6 100644 --- a/clang/docs/DataFlowSanitizerDesign.rst +++ b/clang/docs/DataFlowSanitizerDesign.rst @@ -160,8 +160,8 @@ instructions, glibc memcpy and memmove. When ``-dfsan-track-origins`` is 2, a new chain is also appended at loads. Other instructions do not create new chains, but simply propagate origin trace -IDs. If an instruction has more than one operands with non-zero labels, the origin -treace ID of the last operand with non-zero label is propagated to the result of +IDs. If an instruction has more than one operand with non-zero labels, the origin +trace ID of the last operand with non-zero label is propagated to the result of the instruction. Memory layout and label management diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst index a849d05eb7ae9..42004bcac56b2 100644 --- a/clang/docs/InternalsManual.rst +++ b/clang/docs/InternalsManual.rst @@ -2351,7 +2351,7 @@ different "from" contexts; in this case, they have to share the associated errors of the "to" context. When an error happens, that propagates through the call stack, through all the -dependant nodes. However, in case of dependency cycles, this is not enough, +dependent nodes. However, in case of dependency cycles, this is not enough, because we strive to mark the erroneous nodes so clients can act upon. In those cases, we have to keep track of the errors for those nodes which are intermediate nodes of a cycle. diff --git a/clang/docs/JSONCompilationDatabase.rst b/clang/docs/JSONCompilationDatabase.rst index 936ba11b087bd..1a1c59f6d7c0a 100644 --- a/clang/docs/JSONCompilationDatabase.rst +++ b/clang/docs/JSONCompilationDatabase.rst @@ -48,7 +48,7 @@ techniques. Clang's tooling interface supports reading compilation databases; see the :doc:`LibTooling documentation `. libclang and its -python bindings also support this (since clang 3.2); see +Python bindings also support this (since clang 3.2); see `CXCompilationDatabase.h `_. Format diff --git a/clang/docs/LibASTImporter.rst b/clang/docs/LibASTImporter.rst index e438de6624fd7..f0fbcacc1a752 100644 --- a/clang/docs/LibASTImporter.rst +++ b/clang/docs/LibASTImporter.rst @@ -502,7 +502,7 @@ The ``-ast-merge `` command-line switch can be used to merge from the This file represents the source context. When this switch is present then each top-level AST node of the source context is being merged into the destination context. 
If the merge was successful then ``ASTConsumer::HandleTopLevelDecl`` is called for the Decl. -This results that we can execute the original front-end action on the extended AST. +This means that we can execute the original front-end action on the extended AST. Example for C ^^^^^^^^^^^^^ diff --git a/clang/docs/MisExpect.rst b/clang/docs/MisExpect.rst index 0db73d5f292ea..3c67d58545218 100644 --- a/clang/docs/MisExpect.rst +++ b/clang/docs/MisExpect.rst @@ -19,7 +19,7 @@ MisExpect diagnostics are intended to help developers identify and address these situations, by comparing the branch weights added by the ``llvm.expect`` intrinsic to those collected through profiling. Whenever these values are mismatched, a diagnostic is surfaced to the user. Details on how the checks -operate in the LLVM backed can be found in LLVM's documentation. +operate in the LLVM backend can be found in LLVM's documentation. By default MisExpect checking is quite strict, because the use of the ``llvm.expect`` intrinsic is designed for specialized cases, where the outcome diff --git a/clang/docs/PCHInternals.rst b/clang/docs/PCHInternals.rst index 079fba16711dc..8e76fb98abfd1 100644 --- a/clang/docs/PCHInternals.rst +++ b/clang/docs/PCHInternals.rst @@ -504,7 +504,7 @@ Name lookup based. This changes the lookup algorithm for the various tables, such as the :ref:`identifier table `: the search starts at the most-recent precompiled header. If no entry is found, lookup then proceeds - to the identifier table in the precompiled header it depends on, and so one. + to the identifier table in the precompiled header it depends on, and so on. Once a lookup succeeds, that result is considered definitive, overriding any results from earlier precompiled headers. diff --git a/clang/docs/PointerAuthentication.rst b/clang/docs/PointerAuthentication.rst index 7e65f4b1b4915..bf2520b32a3a4 100644 --- a/clang/docs/PointerAuthentication.rst +++ b/clang/docs/PointerAuthentication.rst @@ -684,7 +684,7 @@ a null pointer that the language implementation would. The code sequence produced for this operation must not be directly attackable. However, if the discriminator values are not constant integers, their computations may still be attackable. In the future, Clang should be enhanced -to guaranteed non-attackability if these expressions are +to guarantee non-attackability if these expressions are :ref:`safely-derived`. ``ptrauth_auth_function`` @@ -1572,7 +1572,7 @@ type, they contain an ``isa`` pointer signed as described :ref:`below`. The invocation pointer in a block is signed with the ``IA`` key using address -diversity and a constant dicriminator of 0. Using a uniform discriminator is +diversity and a constant discriminator of 0. Using a uniform discriminator is seen as a weakness to be potentially improved, but this is tricky due to the subtype polymorphism directly permitted for blocks. @@ -1651,7 +1651,7 @@ declaration, which can cause type errors if the address of the ivar is taken: } @end -To fix such an mismatch the schema macro from ``: +To fix such a mismatch the schema macro from ``: .. code-block:: ObjC @@ -1660,7 +1660,7 @@ To fix such an mismatch the schema macro from ``: void f(SEL __ptrauth_objc_sel*); or less safely, and introducing the possibility of an -:ref:`signing or authentication oracle`, an unauthencaticated +:ref:`signing or authentication oracle`, an unauthenticated temporary may be used as intermediate storage. 
Alternative implementations diff --git a/clang/docs/RAVFrontendAction.rst b/clang/docs/RAVFrontendAction.rst index 2e387b4b339d6..8db141ff7630a 100644 --- a/clang/docs/RAVFrontendAction.rst +++ b/clang/docs/RAVFrontendAction.rst @@ -101,7 +101,7 @@ Accessing the SourceManager and ASTContext ========================================== Some of the information about the AST, like source locations and global -identifier information, are not stored in the AST nodes themselves, but +identifier information, is not stored in the AST nodes themselves, but in the ASTContext and its associated source manager. To retrieve them we need to hand the ASTContext into our RecursiveASTVisitor implementation. diff --git a/clang/docs/RealtimeSanitizer.rst b/clang/docs/RealtimeSanitizer.rst index b842094445f5d..4ce976a3fea29 100644 --- a/clang/docs/RealtimeSanitizer.rst +++ b/clang/docs/RealtimeSanitizer.rst @@ -187,7 +187,7 @@ A **partial** list of flags RealtimeSanitizer respects: * - ``abort_on_error`` - OS dependent - boolean - - If true, the tool calls ``abort()`` instead of ``_exit()`` after printing the error report. On some OSes (MacOS, for example) this is beneficial because a better stack trace is emitted on crash. + - If true, the tool calls ``abort()`` instead of ``_exit()`` after printing the error report. On some OSes (macOS, for example) this is beneficial because a better stack trace is emitted on crash. * - ``symbolize`` - ``true`` - boolean diff --git a/clang/docs/SafeStack.rst b/clang/docs/SafeStack.rst index c585315dff882..e347ae18b3506 100644 --- a/clang/docs/SafeStack.rst +++ b/clang/docs/SafeStack.rst @@ -73,7 +73,7 @@ are always accessed in a safe way by separating them in a dedicated safe stack region. The safe stack is automatically protected against stack-based buffer overflows, since it is disjoint from the unsafe stack in memory, and it itself is always accessed in a safe way. In the current implementation, the safe stack -is protected against arbitrary memory write vulnerabilities though +is protected against arbitrary memory write vulnerabilities through randomization and information hiding: the safe stack is allocated at a random address and the instrumentation ensures that no pointers to the safe stack are ever stored outside of the safe stack itself (see limitations below). diff --git a/clang/docs/UsersManual.rst b/clang/docs/UsersManual.rst index fb22ad3c90af4..d267eec9425b3 100644 --- a/clang/docs/UsersManual.rst +++ b/clang/docs/UsersManual.rst @@ -1090,7 +1090,7 @@ Usually, config file options are placed before command-line options, regardless of the actual operation to be performed. The exception is being made for the options prefixed with the ``$`` character. These will be used only when the linker is being invoked, and added after all of the command-line specified linker -inputs. Here is some example of ``$``-prefixed options: +inputs. 
Here is an example of ``$``-prefixed options: :: From 8a055f8067f4c3a121acaa25d4f20918e1dc0b81 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Sun, 16 Nov 2025 17:00:24 +0000 Subject: [PATCH 15/17] [DAG] Add baseline test coverage for #161036 (#168278) Baseline tests from #161651 that were reverted in #167854 Still missing test coverage for the ffmpeg regression failures --- .../umin-sub-to-usubo-select-combine.ll | 158 +++++++++++++++++ .../X86/umin-sub-to-usubo-select-combine.ll | 166 ++++++++++++++++++ 2 files changed, 324 insertions(+) create mode 100644 llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll create mode 100644 llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll diff --git a/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll new file mode 100644 index 0000000000000..d5f516fb3aa27 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll @@ -0,0 +1,158 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=aarch64 | FileCheck %s + +; GitHub issue #161036 + +; Positive test : umin(sub(a,b),a) with scalar types should be folded +define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub x8, x0, x1 +; CHECK-NEXT: cmp x8, x0 +; CHECK-NEXT: csel x0, x8, x0, lo +; CHECK-NEXT: ret + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : umin(a,sub(a,b)) with scalar types should be folded +define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64_commute: +; CHECK: // %bb.0: +; CHECK-NEXT: sub x8, x0, x1 +; CHECK-NEXT: cmp x0, x8 +; CHECK-NEXT: csel x0, x0, x8, lo +; CHECK-NEXT: ret + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub) + ret i64 %cond +} + +; Positive test : multi-use is OK since the sub instruction still runs once +define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i64_multi_use: +; CHECK: // %bb.0: +; CHECK-NEXT: sub x8, x0, x1 +; CHECK-NEXT: cmp x8, x0 +; CHECK-NEXT: str x8, [x2] +; CHECK-NEXT: csel x0, x8, x0, lo +; CHECK-NEXT: ret + %sub = sub i64 %a, %b + store i64 %sub, ptr addrspace(1) %ptr + %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w8, w0, w1 +; CHECK-NEXT: cmp w8, w0 +; CHECK-NEXT: csel w0, w8, w0, lo +; CHECK-NEXT: ret + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32_commute: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w8, w0, w1 +; CHECK-NEXT: cmp w0, w8 +; CHECK-NEXT: csel w0, w0, w8, lo +; CHECK-NEXT: ret + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i32_multi_use: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w8, w0, w1 +; CHECK-NEXT: cmp w8, w0 +; CHECK-NEXT: str w8, [x2] +; CHECK-NEXT: csel w0, w8, 
w0, lo +; CHECK-NEXT: ret + %sub = sub i32 %a, %b + store i32 %sub, ptr addrspace(1) %ptr + %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Negative test : i16 +define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w8, w0, w1 +; CHECK-NEXT: and w9, w0, #0xffff +; CHECK-NEXT: and w8, w8, #0xffff +; CHECK-NEXT: cmp w8, w9 +; CHECK-NEXT: csel w0, w8, w9, lo +; CHECK-NEXT: ret + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + +; Negative test : i16 +define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16_commute: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w8, w0, w1 +; CHECK-NEXT: and w9, w0, #0xffff +; CHECK-NEXT: and w8, w8, #0xffff +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: csel w0, w9, w8, lo +; CHECK-NEXT: ret + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub) + ret i16 %cond +} + +; Negative test : i16 +define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i16_multi_use: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w8, w0, w1 +; CHECK-NEXT: and w9, w0, #0xffff +; CHECK-NEXT: and w10, w8, #0xffff +; CHECK-NEXT: strh w8, [x2] +; CHECK-NEXT: cmp w10, w9 +; CHECK-NEXT: csel w0, w10, w9, lo +; CHECK-NEXT: ret + %sub = sub i16 %a, %b + store i16 %sub, ptr addrspace(1) %ptr + %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + +; Negative test, vector types : umin(sub(a,b),a) but with vectors +define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: underflow_compare_dontfold_vectors: +; CHECK: // %bb.0: +; CHECK-NEXT: sub v1.16b, v0.16b, v1.16b +; CHECK-NEXT: umin v0.16b, v1.16b, v0.16b +; CHECK-NEXT: ret + %sub = sub <16 x i8> %a, %b + %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a) + ret <16 x i8> %cond +} + +; Negative test, pattern mismatch : umin(add(a,b),a) +define i64 @umin_add(i64 %a, i64 %b) { +; CHECK-LABEL: umin_add: +; CHECK: // %bb.0: +; CHECK-NEXT: add x8, x0, x1 +; CHECK-NEXT: cmp x8, x0 +; CHECK-NEXT: csel x0, x8, x0, lo +; CHECK-NEXT: ret + %add = add i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a) + ret i64 %cond +} diff --git a/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll new file mode 100644 index 0000000000000..6739be52d47f6 --- /dev/null +++ b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll @@ -0,0 +1,166 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=x86_64 | FileCheck %s + +; GitHub issue #161036 + +; Positive test : umin(sub(a,b),a) with scalar types should be folded +define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64: +; CHECK: # %bb.0: +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: subq %rsi, %rax +; CHECK-NEXT: cmpq %rdi, %rax +; CHECK-NEXT: cmovaeq %rdi, %rax +; CHECK-NEXT: retq + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : umin(a,sub(a,b)) with scalar types should be folded +define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) { +; CHECK-LABEL: underflow_compare_fold_i64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: subq %rsi, %rax +; 
CHECK-NEXT: cmpq %rax, %rdi +; CHECK-NEXT: cmovbq %rdi, %rax +; CHECK-NEXT: retq + %sub = sub i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub) + ret i64 %cond +} + +; Positive test : multi-use is OK since the sub instruction still runs once +define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i64_multi_use: +; CHECK: # %bb.0: +; CHECK-NEXT: movq %rdi, %rax +; CHECK-NEXT: subq %rsi, %rax +; CHECK-NEXT: movq %rax, (%rdx) +; CHECK-NEXT: cmpq %rdi, %rax +; CHECK-NEXT: cmovaeq %rdi, %rax +; CHECK-NEXT: retq + %sub = sub i64 %a, %b + store i64 %sub, ptr addrspace(1) %ptr + %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a) + ret i64 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: cmpl %edi, %eax +; CHECK-NEXT: cmovael %edi, %eax +; CHECK-NEXT: retq + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) { +; CHECK-LABEL: underflow_compare_fold_i32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: cmpl %eax, %edi +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: retq + %sub = sub i32 %a, %b + %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub) + ret i32 %cond +} + +; Positive test : i32 +define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i32_multi_use: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: movl %eax, (%rdx) +; CHECK-NEXT: cmpl %edi, %eax +; CHECK-NEXT: cmovael %edi, %eax +; CHECK-NEXT: retq + %sub = sub i32 %a, %b + store i32 %sub, ptr addrspace(1) %ptr + %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a) + ret i32 %cond +} + +; Positive test : i16 +define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: cmpw %di, %ax +; CHECK-NEXT: cmovael %edi, %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax +; CHECK-NEXT: retq + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + +; Positive test : i16 +define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) { +; CHECK-LABEL: underflow_compare_fold_i16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: cmpw %ax, %di +; CHECK-NEXT: cmovbl %edi, %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax +; CHECK-NEXT: retq + %sub = sub i16 %a, %b + %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub) + ret i16 %cond +} + +; Positive test : i16 +define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) { +; CHECK-LABEL: underflow_compare_fold_i16_multi_use: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: subl %esi, %eax +; CHECK-NEXT: movw %ax, (%rdx) +; CHECK-NEXT: cmpw %di, %ax +; CHECK-NEXT: cmovael %edi, %eax +; CHECK-NEXT: # kill: def $ax killed $ax killed $eax +; CHECK-NEXT: retq + %sub = sub i16 %a, %b + store i16 %sub, ptr addrspace(1) %ptr + %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a) + ret i16 %cond +} + + +; Negative test, vector types : 
umin(sub(a,b),a) but with vectors +define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) { +; CHECK-LABEL: underflow_compare_dontfold_vectors: +; CHECK: # %bb.0: +; CHECK-NEXT: movdqa %xmm0, %xmm2 +; CHECK-NEXT: psubb %xmm1, %xmm2 +; CHECK-NEXT: pminub %xmm2, %xmm0 +; CHECK-NEXT: retq + %sub = sub <16 x i8> %a, %b + %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a) + ret <16 x i8> %cond +} + +; Negative test, pattern mismatch : umin(add(a,b),a) +define i64 @umin_add(i64 %a, i64 %b) { +; CHECK-LABEL: umin_add: +; CHECK: # %bb.0: +; CHECK-NEXT: leaq (%rsi,%rdi), %rax +; CHECK-NEXT: cmpq %rdi, %rax +; CHECK-NEXT: cmovaeq %rdi, %rax +; CHECK-NEXT: retq + %add = add i64 %a, %b + %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a) + ret i64 %cond +} From 6f3f1088df0d48c3f56f6eaec695868f9e239318 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Sun, 16 Nov 2025 17:30:19 +0000 Subject: [PATCH 16/17] [X86] LowerMUL - remove vXi8 UNPCK(BUILD_VECTOR,UNDEF) special case handling (#168277) getUnpackl/h + shuffle combining can now handle this for us generically --- llvm/lib/Target/X86/X86ISelLowering.cpp | 22 ++--------------- llvm/test/CodeGen/X86/pmul.ll | 6 ++--- .../CodeGen/X86/srem-seteq-vec-nonsplat.ll | 8 +++---- llvm/test/CodeGen/X86/vector-fshr-128.ll | 8 +++---- llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll | 4 ++-- llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll | 4 ++-- llvm/test/CodeGen/X86/vector-mul.ll | 12 +++++----- llvm/test/CodeGen/X86/vector-shift-shl-128.ll | 8 +++---- .../CodeGen/X86/vector-shift-shl-sub128.ll | 24 +++++++++---------- 9 files changed, 39 insertions(+), 57 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 593c7627a6575..621f1868d3311 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -29647,26 +29647,8 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget, SDValue Undef = DAG.getUNDEF(VT); SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef)); SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef)); - - SDValue BLo, BHi; - if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) { - // If the RHS is a constant, manually unpackl/unpackh. - SmallVector LoOps, HiOps; - for (unsigned i = 0; i != NumElts; i += 16) { - for (unsigned j = 0; j != 8; ++j) { - LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl, - MVT::i16)); - HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl, - MVT::i16)); - } - } - - BLo = DAG.getBuildVector(ExVT, dl, LoOps); - BHi = DAG.getBuildVector(ExVT, dl, HiOps); - } else { - BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef)); - BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef)); - } + SDValue BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef)); + SDValue BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef)); // Multiply, mask the lower 8bits of the lo/hi results and pack. 
SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo); diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll index 189c5aa9fee20..a1808e4efbbf7 100644 --- a/llvm/test/CodeGen/X86/pmul.ll +++ b/llvm/test/CodeGen/X86/pmul.ll @@ -10,7 +10,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind { ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,u,117,u,117,u,117,u,117,u,117,u,117,u,117,u] ; SSE2-NEXT: pmullw %xmm2, %xmm1 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm3, %xmm1 @@ -378,7 +378,7 @@ define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind { ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117] +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,u,117,u,117,u,117,u,117,u,117,u,117,u,117,u] ; SSE2-NEXT: pmullw %xmm3, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm4, %xmm2 @@ -729,7 +729,7 @@ define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind { ; SSE2: # %bb.0: # %entry ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117] +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,u,117,u,117,u,117,u,117,u,117,u,117,u,117,u] ; SSE2-NEXT: pmullw %xmm4, %xmm6 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm5, %xmm6 diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll index ec94d003f10ea..82e840b9f1342 100644 --- a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll +++ b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll @@ -2213,12 +2213,12 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-SSE2-NEXT: movq %rdi, %rax ; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm5 ; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5 # [9,0,41,183,1,1,161,221] +; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5 # [9,u,0,u,41,u,183,u,1,u,1,u,161,u,221,u] ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] ; CHECK-SSE2-NEXT: pand %xmm4, %xmm5 ; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm6 ; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6 # [171,103,183,171,61,1,127,183] +; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6 # [171,u,103,u,183,u,171,u,61,u,1,u,127,u,183,u] ; CHECK-SSE2-NEXT: pand %xmm4, %xmm6 ; CHECK-SSE2-NEXT: packuswb %xmm5, %xmm6 ; CHECK-SSE2-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6 @@ -2242,10 +2242,10 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) { ; CHECK-SSE2-NEXT: por %xmm7, %xmm5 ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1 ; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [223,223,205,183,161,1,171,239] +; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [223,u,223,u,205,u,183,u,161,u,1,u,171,u,239,u] ; 
CHECK-SSE2-NEXT: pand %xmm4, %xmm1 ; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [197,205,27,241,1,1,1,163] +; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [197,u,205,u,27,u,241,u,1,u,1,u,1,u,163,u] ; CHECK-SSE2-NEXT: pand %xmm4, %xmm0 ; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm0 ; CHECK-SSE2-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll index a5d6900f77f97..30205259b92f6 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -1989,11 +1989,11 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,1,2,4,8,16,32,64] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [128,u,1,u,2,u,4,u,8,u,16,u,32,u,64,u] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm3, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,64,32,16,8,4,2,1] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [128,u,64,u,32,u,16,u,8,u,4,u,2,u,1,u] ; SSE2-NEXT: pand %xmm3, %xmm0 ; SSE2-NEXT: packuswb %xmm2, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 @@ -2149,11 +2149,11 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; X86-SSE2-NEXT: paddb %xmm0, %xmm0 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [128,1,2,4,8,16,32,64] +; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 # [128,u,1,u,2,u,4,u,8,u,16,u,32,u,64,u] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; X86-SSE2-NEXT: pand %xmm3, %xmm2 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [128,64,32,16,8,4,2,1] +; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [128,u,64,u,32,u,16,u,8,u,4,u,2,u,1,u] ; X86-SSE2-NEXT: pand %xmm3, %xmm0 ; X86-SSE2-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll index 9b7d66def8b5b..3117865184ecc 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -691,11 +691,11 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: psubb %xmm2, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [14,13,12,11,10,9,9,7] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [14,u,13,u,12,u,11,u,10,u,9,u,9,u,7,u] ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm3, %xmm2 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [7,8,9,10,11,12,13,14] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # 
[7,u,8,u,9,u,10,u,11,u,12,u,13,u,14,u] ; SSE2-NEXT: pand %xmm3, %xmm1 ; SSE2-NEXT: packuswb %xmm2, %xmm1 ; SSE2-NEXT: psubb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index ef255e598e4a1..cbc2b968eec7f 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -787,13 +787,13 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15] ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [64,64,32,32,32,128,128,64] ; SSE2-NEXT: psrlw $8, %xmm3 -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [14,13,12,11,10,9,9,7] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [14,u,13,u,12,u,11,u,10,u,9,u,9,u,7,u] ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm4, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] ; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [64,256,128,32,32,32,64,64] ; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [7,8,9,10,11,12,13,14] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [7,u,8,u,9,u,10,u,11,u,12,u,13,u,14,u] ; SSE2-NEXT: pand %xmm4, %xmm2 ; SSE2-NEXT: packuswb %xmm3, %xmm2 ; SSE2-NEXT: psubb %xmm2, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll index 6d6f1c28ca282..552b927f88451 100644 --- a/llvm/test/CodeGen/X86/vector-mul.ll +++ b/llvm/test/CodeGen/X86/vector-mul.ll @@ -250,7 +250,7 @@ define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounw ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,u,2,u,4,u,8,u,1,u,2,u,4,u,8,u] ; SSE2-NEXT: pmullw %xmm2, %xmm1 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm3, %xmm1 @@ -1058,11 +1058,11 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [3,9,17,33,65,129,2,3] +; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [3,u,9,u,17,u,33,u,65,u,129,u,2,u,3,u] ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] ; X86-SSE2-NEXT: pand %xmm2, %xmm1 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2,3,9,17,33,65,129,2] +; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [2,u,3,u,9,u,17,u,33,u,65,u,129,u,2,u] ; X86-SSE2-NEXT: pand %xmm2, %xmm0 ; X86-SSE2-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE2-NEXT: retl @@ -1081,11 +1081,11 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> ; X64-SSE2: # %bb.0: ; X64-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X64-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; X64-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), 
%xmm1 # [3,9,17,33,65,129,2,3] +; X64-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [3,u,9,u,17,u,33,u,65,u,129,u,2,u,3,u] ; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] ; X64-SSE2-NEXT: pand %xmm2, %xmm1 ; X64-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X64-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,3,9,17,33,65,129,2] +; X64-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [2,u,3,u,9,u,17,u,33,u,65,u,129,u,2,u] ; X64-SSE2-NEXT: pand %xmm2, %xmm0 ; X64-SSE2-NEXT: packuswb %xmm1, %xmm0 ; X64-SSE2-NEXT: retq @@ -1832,7 +1832,7 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,u,1,u,3,u,7,u,15,u,31,u,63,u,127,u] ; SSE2-NEXT: pmullw %xmm2, %xmm1 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm3, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 37b96b8f3f927..efe80b4eb95e4 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -1151,11 +1151,11 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [128,64,32,16,8,4,2,1] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [128,u,64,u,32,u,16,u,8,u,4,u,2,u,1,u] ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm2, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,16,32,64,128] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u,4,u,8,u,16,u,32,u,64,u,128,u] ; SSE2-NEXT: pand %xmm2, %xmm0 ; SSE2-NEXT: packuswb %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -1232,11 +1232,11 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind { ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [128,64,32,16,8,4,2,1] +; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 # [128,u,64,u,32,u,16,u,8,u,4,u,2,u,1,u] ; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] ; X86-SSE-NEXT: pand %xmm2, %xmm1 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8,16,32,64,128] +; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,u,2,u,4,u,8,u,16,u,32,u,64,u,128,u] ; X86-SSE-NEXT: pand %xmm2, %xmm0 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll index ec7db86e5e05e..07e6c36db1308 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll @@ -1429,7 +1429,7 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind { ; SSE2-LABEL: constant_shift_v8i8: ; SSE2: # %bb.0: ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,16,32,64,128] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u,4,u,8,u,16,u,32,u,64,u,128,u] ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: packuswb %xmm1, %xmm0 @@ -1438,7 +1438,7 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind { ; SSE41-LABEL: constant_shift_v8i8: ; SSE41: # %bb.0: ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,16,32,64,128] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u,4,u,8,u,16,u,32,u,64,u,128,u] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: packuswb %xmm1, %xmm0 @@ -1447,7 +1447,7 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind { ; AVX1-LABEL: constant_shift_v8i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,4,8,16,32,64,128] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,u,2,u,4,u,8,u,16,u,32,u,64,u,128,u] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -1506,7 +1506,7 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v8i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8,16,32,64,128] +; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,u,2,u,4,u,8,u,16,u,32,u,64,u,128,u] ; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 @@ -1519,7 +1519,7 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind { ; SSE2-LABEL: constant_shift_v4i8: ; SSE2: # %bb.0: ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,u,u,u,u] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u,4,u,8,u,u,u,u,u,u,u,u,u] ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: packuswb %xmm1, %xmm0 @@ -1528,7 +1528,7 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind { ; SSE41-LABEL: constant_shift_v4i8: ; SSE41: # %bb.0: ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,2,4,8,u,u,u,u] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1,u,2,u,4,u,8,u,u,u,u,u,u,u,u,u] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: packuswb %xmm1, %xmm0 @@ -1537,7 +1537,7 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind { ; AVX1-LABEL: constant_shift_v4i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,4,8,u,u,u,u] +; AVX1-NEXT: 
vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,u,2,u,4,u,8,u,u,u,u,u,u,u,u,u] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -1595,7 +1595,7 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v4i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,4,8,u,u,u,u] +; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,u,2,u,4,u,8,u,u,u,u,u,u,u,u,u] ; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 @@ -1608,7 +1608,7 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i8: ; SSE2: # %bb.0: ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,u,u,u,u,u,u] +; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,u,8,u,u,u,u,u,u,u,u,u,u,u,u,u] ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: packuswb %xmm1, %xmm0 @@ -1617,7 +1617,7 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind { ; SSE41-LABEL: constant_shift_v2i8: ; SSE41: # %bb.0: ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,8,u,u,u,u,u,u] +; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,u,8,u,u,u,u,u,u,u,u,u,u,u,u,u] ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: packuswb %xmm1, %xmm0 @@ -1626,7 +1626,7 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind { ; AVX1-LABEL: constant_shift_v2i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,8,u,u,u,u,u,u] +; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,u,8,u,u,u,u,u,u,u,u,u,u,u,u,u] ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -1684,7 +1684,7 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v2i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,8,u,u,u,u,u,u] +; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,u,8,u,u,u,u,u,u,u,u,u,u,u,u,u] ; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 From 97a60aa37a048155fec0c560fc51ed52dbd84e44 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov Date: Sun, 16 Nov 2025 20:46:44 +0300 Subject: [PATCH 17/17] [CodeGen] Turn MCRegUnit into an enum class (NFC) (#167943) This changes `MCRegUnit` type from `unsigned` to `enum class : unsigned` and inserts necessary casts. The added `MCRegUnitToIndex` functor is used with `SparseSet`, `SparseMultiSet` and `IndexedMap` in a few places. `MCRegUnit` is opaque to users, so it didn't seem worth making it a full-fledged class like `Register`. 
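To make the mechanics concrete, here is a small standalone sketch of the pattern (standard library only, not the actual LLVM code): `MCRegUnit` and `MCRegUnitToIndex` carry the names introduced by this patch, while `RegUnitLiveness` and the values in `main` are invented stand-ins for LLVM containers such as `BitVector` and `SparseSet`.

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Register units get a distinct type: no implicit conversion to or from unsigned.
    enum class MCRegUnit : unsigned;

    // Key functor mapping a unit back to a dense unsigned index, for containers
    // keyed by an index (the patch uses it with SparseSet, SparseMultiSet and
    // IndexedMap).
    struct MCRegUnitToIndex {
      unsigned operator()(MCRegUnit Unit) const {
        return static_cast<unsigned>(Unit);
      }
    };

    // Toy stand-in for a container indexed by register unit (think of a BitVector
    // of live units). Call sites that used to pass a plain unsigned now need an
    // explicit cast.
    class RegUnitLiveness {
      std::vector<bool> Live;

    public:
      explicit RegUnitLiveness(unsigned NumRegUnits) : Live(NumRegUnits) {}

      void set(MCRegUnit Unit) {
        assert(static_cast<unsigned>(Unit) < Live.size() && "Unit out of bounds");
        Live[MCRegUnitToIndex()(Unit)] = true;
      }

      bool test(MCRegUnit Unit) const {
        assert(static_cast<unsigned>(Unit) < Live.size() && "Unit out of bounds");
        return Live[MCRegUnitToIndex()(Unit)];
      }
    };

    int main() {
      RegUnitLiveness Units(/*NumRegUnits=*/8);
      // Forging a unit from a raw number also requires an explicit cast now.
      Units.set(static_cast<MCRegUnit>(3));
      std::printf("unit 3 live: %d\n", Units.test(static_cast<MCRegUnit>(3)) ? 1 : 0);
      // Units.set(3); // no longer compiles: unsigned does not convert to MCRegUnit
    }

With the opaque enum, both indexing a container with a unit and producing a unit from a raw index require an explicit `static_cast`; that is where the bulk of the mechanical edits in the diff below come from.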
Static type checking has detected one issue in `PrologEpilogInserter.cpp`, where a `BitVector` created for `MCRegister` is indexed by both `MCRegister` and `MCRegUnit`. The number of casts could be reduced by using `IndexedMap` in more places and/or adding a `BitVector` adaptor, but the number of casts *per file* is still small and `IndexedMap` has limitations, so it didn't seem worth the effort. Pull Request: https://github.com/llvm/llvm-project/pull/167943 --- llvm/include/llvm/CodeGen/LiveIntervalUnion.h | 12 ++++----- llvm/include/llvm/CodeGen/LiveIntervals.h | 15 ++++----- llvm/include/llvm/CodeGen/LiveRegMatrix.h | 4 ++- llvm/include/llvm/CodeGen/LiveRegUnits.h | 12 ++++----- .../llvm/CodeGen/MachineTraceMetrics.h | 4 +-- llvm/include/llvm/CodeGen/RDFRegisters.h | 11 ++++---- .../llvm/CodeGen/ReachingDefAnalysis.h | 10 +++---- llvm/include/llvm/CodeGen/Register.h | 9 +++++-- llvm/include/llvm/CodeGen/RegisterClassInfo.h | 2 +- llvm/include/llvm/CodeGen/RegisterPressure.h | 6 ++--- llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h | 6 +++-- llvm/include/llvm/MC/MCRegister.h | 10 ++++++- llvm/include/llvm/MC/MCRegisterInfo.h | 11 +++++--- llvm/lib/CodeGen/EarlyIfConversion.cpp | 6 ++--- llvm/lib/CodeGen/InterferenceCache.cpp | 6 ++--- llvm/lib/CodeGen/LiveIntervals.cpp | 10 ++++--- llvm/lib/CodeGen/LiveRegMatrix.cpp | 4 +-- llvm/lib/CodeGen/LiveRegUnits.cpp | 4 +-- llvm/lib/CodeGen/MachineCopyPropagation.cpp | 4 +-- llvm/lib/CodeGen/MachineInstrBundle.cpp | 4 +-- llvm/lib/CodeGen/MachineLICM.cpp | 27 ++++++++++--------- llvm/lib/CodeGen/PrologEpilogInserter.cpp | 3 ++- llvm/lib/CodeGen/RDFRegisters.cpp | 22 +++++++-------- llvm/lib/CodeGen/ReachingDefAnalysis.cpp | 21 ++++++++------- llvm/lib/CodeGen/RegAllocFast.cpp | 12 +++++---- llvm/lib/CodeGen/RegAllocGreedy.cpp | 5 ++-- llvm/lib/CodeGen/RegisterClassInfo.cpp | 2 +- llvm/lib/CodeGen/TargetRegisterInfo.cpp | 6 ++--- llvm/lib/Target/X86/X86FixupBWInsts.cpp | 3 ++- 29 files changed, 144 insertions(+), 107 deletions(-) diff --git a/llvm/include/llvm/CodeGen/LiveIntervalUnion.h b/llvm/include/llvm/CodeGen/LiveIntervalUnion.h index cc0f2a45bb182..240fa114cf179 100644 --- a/llvm/include/llvm/CodeGen/LiveIntervalUnion.h +++ b/llvm/include/llvm/CodeGen/LiveIntervalUnion.h @@ -191,14 +191,14 @@ class LiveIntervalUnion { void clear(); - LiveIntervalUnion& operator[](unsigned idx) { - assert(idx < Size && "idx out of bounds"); - return LIUs[idx]; + LiveIntervalUnion &operator[](MCRegUnit Unit) { + assert(static_cast<unsigned>(Unit) < Size && "Unit out of bounds"); + return LIUs[static_cast<unsigned>(Unit)]; } - const LiveIntervalUnion& operator[](unsigned Idx) const { - assert(Idx < Size && "Idx out of bounds"); - return LIUs[Idx]; + const LiveIntervalUnion &operator[](MCRegUnit Unit) const { + assert(static_cast<unsigned>(Unit) < Size && "Unit out of bounds"); + return LIUs[static_cast<unsigned>(Unit)]; } }; }; diff --git a/llvm/include/llvm/CodeGen/LiveIntervals.h b/llvm/include/llvm/CodeGen/LiveIntervals.h index 32027766e7093..b618e0b778ae8 100644 --- a/llvm/include/llvm/CodeGen/LiveIntervals.h +++ b/llvm/include/llvm/CodeGen/LiveIntervals.h @@ -413,11 +413,12 @@ class LiveIntervals { /// Return the live range for register unit \p Unit. It will be computed if /// it doesn't exist. LiveRange &getRegUnit(MCRegUnit Unit) { - LiveRange *LR = RegUnitRanges[Unit]; + LiveRange *LR = RegUnitRanges[static_cast<unsigned>(Unit)]; if (!LR) { // Compute missing ranges on demand. // Use segment set to speed-up initial computation of the live range.
- RegUnitRanges[Unit] = LR = new LiveRange(UseSegmentSetForPhysRegs); + RegUnitRanges[static_cast(Unit)] = LR = + new LiveRange(UseSegmentSetForPhysRegs); computeRegUnitRange(*LR, Unit); } return *LR; @@ -425,17 +426,19 @@ class LiveIntervals { /// Return the live range for register unit \p Unit if it has already been /// computed, or nullptr if it hasn't been computed yet. - LiveRange *getCachedRegUnit(MCRegUnit Unit) { return RegUnitRanges[Unit]; } + LiveRange *getCachedRegUnit(MCRegUnit Unit) { + return RegUnitRanges[static_cast(Unit)]; + } const LiveRange *getCachedRegUnit(MCRegUnit Unit) const { - return RegUnitRanges[Unit]; + return RegUnitRanges[static_cast(Unit)]; } /// Remove computed live range for register unit \p Unit. Subsequent uses /// should rely on on-demand recomputation. void removeRegUnit(MCRegUnit Unit) { - delete RegUnitRanges[Unit]; - RegUnitRanges[Unit] = nullptr; + delete RegUnitRanges[static_cast(Unit)]; + RegUnitRanges[static_cast(Unit)] = nullptr; } /// Remove associated live ranges for the register units associated with \p diff --git a/llvm/include/llvm/CodeGen/LiveRegMatrix.h b/llvm/include/llvm/CodeGen/LiveRegMatrix.h index 0bc243271bb73..35add577d071a 100644 --- a/llvm/include/llvm/CodeGen/LiveRegMatrix.h +++ b/llvm/include/llvm/CodeGen/LiveRegMatrix.h @@ -165,7 +165,9 @@ class LiveRegMatrix { /// Directly access the live interval unions per regunit. /// This returns an array indexed by the regunit number. - LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; } + LiveIntervalUnion *getLiveUnions() { + return &Matrix[static_cast(0)]; + } Register getOneVReg(unsigned PhysReg) const; }; diff --git a/llvm/include/llvm/CodeGen/LiveRegUnits.h b/llvm/include/llvm/CodeGen/LiveRegUnits.h index 37c31cc6f4ac5..0ff5273929671 100644 --- a/llvm/include/llvm/CodeGen/LiveRegUnits.h +++ b/llvm/include/llvm/CodeGen/LiveRegUnits.h @@ -86,23 +86,23 @@ class LiveRegUnits { /// Adds register units covered by physical register \p Reg. void addReg(MCRegister Reg) { for (MCRegUnit Unit : TRI->regunits(Reg)) - Units.set(Unit); + Units.set(static_cast(Unit)); } /// Adds register units covered by physical register \p Reg that are /// part of the lanemask \p Mask. void addRegMasked(MCRegister Reg, LaneBitmask Mask) { - for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) { - LaneBitmask UnitMask = (*Unit).second; + for (MCRegUnitMaskIterator I(Reg, TRI); I.isValid(); ++I) { + auto [Unit, UnitMask] = *I; if ((UnitMask & Mask).any()) - Units.set((*Unit).first); + Units.set(static_cast(Unit)); } } /// Removes all register units covered by physical register \p Reg. void removeReg(MCRegister Reg) { for (MCRegUnit Unit : TRI->regunits(Reg)) - Units.reset(Unit); + Units.reset(static_cast(Unit)); } /// Removes register units not preserved by the regmask \p RegMask. @@ -116,7 +116,7 @@ class LiveRegUnits { /// Returns true if no part of physical register \p Reg is live. 
bool available(MCRegister Reg) const { for (MCRegUnit Unit : TRI->regunits(Reg)) { - if (Units.test(Unit)) + if (Units.test(static_cast(Unit))) return false; } return true; diff --git a/llvm/include/llvm/CodeGen/MachineTraceMetrics.h b/llvm/include/llvm/CodeGen/MachineTraceMetrics.h index d1be0ee3dfff9..b29984cd95a4b 100644 --- a/llvm/include/llvm/CodeGen/MachineTraceMetrics.h +++ b/llvm/include/llvm/CodeGen/MachineTraceMetrics.h @@ -78,12 +78,12 @@ struct LiveRegUnit { const MachineInstr *MI = nullptr; unsigned Op = 0; - unsigned getSparseSetIndex() const { return RegUnit; } + unsigned getSparseSetIndex() const { return static_cast(RegUnit); } explicit LiveRegUnit(MCRegUnit RU) : RegUnit(RU) {} }; -using LiveRegUnitSet = SparseSet; +using LiveRegUnitSet = SparseSet; /// Strategies for selecting traces. enum class MachineTraceStrategy { diff --git a/llvm/include/llvm/CodeGen/RDFRegisters.h b/llvm/include/llvm/CodeGen/RDFRegisters.h index 6583efc00cf96..48e1e3487f11f 100644 --- a/llvm/include/llvm/CodeGen/RDFRegisters.h +++ b/llvm/include/llvm/CodeGen/RDFRegisters.h @@ -10,6 +10,7 @@ #define LLVM_CODEGEN_RDFREGISTERS_H #include "llvm/ADT/BitVector.h" +#include "llvm/ADT/IndexedMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/iterator_range.h" #include "llvm/CodeGen/TargetRegisterInfo.h" @@ -111,7 +112,7 @@ struct RegisterRef { constexpr MCRegUnit asMCRegUnit() const { assert(isUnit()); - return Id & ~UnitFlag; + return static_cast(Id & ~UnitFlag); } constexpr unsigned asMaskIdx() const { @@ -160,7 +161,7 @@ struct PhysicalRegisterInfo { // Returns the set of aliased physical registers. std::set getAliasSet(RegisterRef RR) const; - RegisterRef getRefForUnit(uint32_t U) const { + RegisterRef getRefForUnit(MCRegUnit U) const { return RegisterRef(UnitInfos[U].Reg, UnitInfos[U].Mask); } @@ -170,7 +171,7 @@ struct PhysicalRegisterInfo { std::set getUnits(RegisterRef RR) const; - const BitVector &getUnitAliases(uint32_t U) const { + const BitVector &getUnitAliases(MCRegUnit U) const { return AliasInfos[U].Regs; } @@ -201,9 +202,9 @@ struct PhysicalRegisterInfo { const TargetRegisterInfo &TRI; IndexedSet RegMasks; std::vector RegInfos; - std::vector UnitInfos; + IndexedMap UnitInfos; std::vector MaskInfos; - std::vector AliasInfos; + IndexedMap AliasInfos; }; struct RegisterRefEqualTo { diff --git a/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h b/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h index 2893e5ce6647e..863c3b39229b9 100644 --- a/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h +++ b/llvm/include/llvm/CodeGen/ReachingDefAnalysis.h @@ -78,17 +78,17 @@ class MBBReachingDefsInfo { } void append(unsigned MBBNumber, MCRegUnit Unit, int Def) { - AllReachingDefs[MBBNumber][Unit].push_back(Def); + AllReachingDefs[MBBNumber][static_cast(Unit)].push_back(Def); } void prepend(unsigned MBBNumber, MCRegUnit Unit, int Def) { - auto &Defs = AllReachingDefs[MBBNumber][Unit]; + auto &Defs = AllReachingDefs[MBBNumber][static_cast(Unit)]; Defs.insert(Defs.begin(), Def); } void replaceFront(unsigned MBBNumber, MCRegUnit Unit, int Def) { - assert(!AllReachingDefs[MBBNumber][Unit].empty()); - *AllReachingDefs[MBBNumber][Unit].begin() = Def; + assert(!AllReachingDefs[MBBNumber][static_cast(Unit)].empty()); + *AllReachingDefs[MBBNumber][static_cast(Unit)].begin() = Def; } void clear() { AllReachingDefs.clear(); } @@ -97,7 +97,7 @@ class MBBReachingDefsInfo { if (AllReachingDefs[MBBNumber].empty()) // Block IDs are not necessarily dense. 
return ArrayRef(); - return AllReachingDefs[MBBNumber][Unit]; + return AllReachingDefs[MBBNumber][static_cast(Unit)]; } private: diff --git a/llvm/include/llvm/CodeGen/Register.h b/llvm/include/llvm/CodeGen/Register.h index 5e1e12942a019..f375af5808d1c 100644 --- a/llvm/include/llvm/CodeGen/Register.h +++ b/llvm/include/llvm/CodeGen/Register.h @@ -182,20 +182,25 @@ class VirtRegOrUnit { unsigned VRegOrUnit; public: - constexpr explicit VirtRegOrUnit(MCRegUnit Unit) : VRegOrUnit(Unit) { + constexpr explicit VirtRegOrUnit(MCRegUnit Unit) + : VRegOrUnit(static_cast(Unit)) { assert(!Register::isVirtualRegister(VRegOrUnit)); } + constexpr explicit VirtRegOrUnit(Register Reg) : VRegOrUnit(Reg.id()) { assert(Reg.isVirtual()); } + // Catches implicit conversions to Register. + template explicit VirtRegOrUnit(T) = delete; + constexpr bool isVirtualReg() const { return Register::isVirtualRegister(VRegOrUnit); } constexpr MCRegUnit asMCRegUnit() const { assert(!isVirtualReg() && "Not a register unit"); - return VRegOrUnit; + return static_cast(VRegOrUnit); } constexpr Register asVirtualReg() const { diff --git a/llvm/include/llvm/CodeGen/RegisterClassInfo.h b/llvm/include/llvm/CodeGen/RegisterClassInfo.h index 078ae80915fed..124c7aff8c76d 100644 --- a/llvm/include/llvm/CodeGen/RegisterClassInfo.h +++ b/llvm/include/llvm/CodeGen/RegisterClassInfo.h @@ -123,7 +123,7 @@ class RegisterClassInfo { MCRegister getLastCalleeSavedAlias(MCRegister PhysReg) const { MCRegister CSR; for (MCRegUnit Unit : TRI->regunits(PhysReg)) { - CSR = CalleeSavedAliases[Unit]; + CSR = CalleeSavedAliases[static_cast(Unit)]; if (CSR) break; } diff --git a/llvm/include/llvm/CodeGen/RegisterPressure.h b/llvm/include/llvm/CodeGen/RegisterPressure.h index 20a7e4fa2e9de..7485be6dcb351 100644 --- a/llvm/include/llvm/CodeGen/RegisterPressure.h +++ b/llvm/include/llvm/CodeGen/RegisterPressure.h @@ -282,14 +282,14 @@ class LiveRegSet { unsigned getSparseIndexFromVirtRegOrUnit(VirtRegOrUnit VRegOrUnit) const { if (VRegOrUnit.isVirtualReg()) return VRegOrUnit.asVirtualReg().virtRegIndex() + NumRegUnits; - assert(VRegOrUnit.asMCRegUnit() < NumRegUnits); - return VRegOrUnit.asMCRegUnit(); + assert(static_cast(VRegOrUnit.asMCRegUnit()) < NumRegUnits); + return static_cast(VRegOrUnit.asMCRegUnit()); } VirtRegOrUnit getVirtRegOrUnitFromSparseIndex(unsigned SparseIndex) const { if (SparseIndex >= NumRegUnits) return VirtRegOrUnit(Register::index2VirtReg(SparseIndex - NumRegUnits)); - return VirtRegOrUnit(SparseIndex); + return VirtRegOrUnit(static_cast(SparseIndex)); } public: diff --git a/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h b/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h index 059a3444c609c..8b3907629c00b 100644 --- a/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h +++ b/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h @@ -82,14 +82,16 @@ namespace llvm { PhysRegSUOper(SUnit *su, int op, MCRegUnit R) : SU(su), OpIdx(op), RegUnit(R) {} - unsigned getSparseSetIndex() const { return RegUnit; } + unsigned getSparseSetIndex() const { + return static_cast(RegUnit); + } }; /// Use a SparseMultiSet to track physical registers. Storage is only /// allocated once for the pass. It can be cleared in constant time and reused /// without any frees. using RegUnit2SUnitsMap = - SparseMultiSet; + SparseMultiSet; /// Track local uses of virtual registers. 
These uses are gathered by the DAG /// builder and may be consulted by the scheduler to avoid iterating an entire diff --git a/llvm/include/llvm/MC/MCRegister.h b/llvm/include/llvm/MC/MCRegister.h index 388cb5958f32e..c6cde36478c1d 100644 --- a/llvm/include/llvm/MC/MCRegister.h +++ b/llvm/include/llvm/MC/MCRegister.h @@ -27,7 +27,15 @@ using MCPhysReg = uint16_t; /// A target with a complicated sub-register structure will typically have many /// fewer register units than actual registers. MCRI::getNumRegUnits() returns /// the number of register units in the target. -using MCRegUnit = unsigned; +enum class MCRegUnit : unsigned; + +struct MCRegUnitToIndex { + using argument_type = MCRegUnit; + + unsigned operator()(MCRegUnit Unit) const { + return static_cast(Unit); + } +}; /// Wrapper class representing physical registers. Should be passed by value. class MCRegister { diff --git a/llvm/include/llvm/MC/MCRegisterInfo.h b/llvm/include/llvm/MC/MCRegisterInfo.h index f1caa077a6d7b..6e36e580358e7 100644 --- a/llvm/include/llvm/MC/MCRegisterInfo.h +++ b/llvm/include/llvm/MC/MCRegisterInfo.h @@ -724,9 +724,10 @@ class MCRegUnitRootIterator { MCRegUnitRootIterator() = default; MCRegUnitRootIterator(MCRegUnit RegUnit, const MCRegisterInfo *MCRI) { - assert(RegUnit < MCRI->getNumRegUnits() && "Invalid register unit"); - Reg0 = MCRI->RegUnitRoots[RegUnit][0]; - Reg1 = MCRI->RegUnitRoots[RegUnit][1]; + assert(static_cast(RegUnit) < MCRI->getNumRegUnits() && + "Invalid register unit"); + Reg0 = MCRI->RegUnitRoots[static_cast(RegUnit)][0]; + Reg1 = MCRI->RegUnitRoots[static_cast(RegUnit)][1]; } /// Dereference to get the current root register. @@ -803,7 +804,9 @@ MCRegisterInfo::sub_and_superregs_inclusive(MCRegister Reg) const { } inline iota_range MCRegisterInfo::regunits() const { - return seq(getNumRegUnits()); + return enum_seq(static_cast(0), + static_cast(getNumRegUnits()), + force_iteration_on_noniterable_enum); } inline iterator_range diff --git a/llvm/lib/CodeGen/EarlyIfConversion.cpp b/llvm/lib/CodeGen/EarlyIfConversion.cpp index 55caa6e8a8f95..28993c47c094d 100644 --- a/llvm/lib/CodeGen/EarlyIfConversion.cpp +++ b/llvm/lib/CodeGen/EarlyIfConversion.cpp @@ -134,7 +134,7 @@ class SSAIfConv { BitVector ClobberedRegUnits; // Scratch pad for findInsertionPoint. - SparseSet LiveRegUnits; + SparseSet LiveRegUnits; /// Insertion point in Head for speculatively executed instructions form TBB /// and FBB. @@ -271,7 +271,7 @@ bool SSAIfConv::InstrDependenciesAllowIfConv(MachineInstr *I) { // Remember clobbered regunits. if (MO.isDef() && Reg.isPhysical()) for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) - ClobberedRegUnits.set(Unit); + ClobberedRegUnits.set(static_cast(Unit)); if (!MO.readsReg() || !Reg.isVirtual()) continue; @@ -409,7 +409,7 @@ bool SSAIfConv::findInsertionPoint() { // Anything read by I is live before I. while (!Reads.empty()) for (MCRegUnit Unit : TRI->regunits(Reads.pop_back_val())) - if (ClobberedRegUnits.test(Unit)) + if (ClobberedRegUnits.test(static_cast(Unit))) LiveRegUnits.insert(Unit); // We can't insert before a terminator. 
diff --git a/llvm/lib/CodeGen/InterferenceCache.cpp b/llvm/lib/CodeGen/InterferenceCache.cpp index ebdf0506bb22f..466070b312b2d 100644 --- a/llvm/lib/CodeGen/InterferenceCache.cpp +++ b/llvm/lib/CodeGen/InterferenceCache.cpp @@ -93,7 +93,7 @@ void InterferenceCache::Entry::revalidate(LiveIntervalUnion *LIUArray, PrevPos = SlotIndex(); unsigned i = 0; for (MCRegUnit Unit : TRI->regunits(PhysReg)) - RegUnits[i++].VirtTag = LIUArray[Unit].getTag(); + RegUnits[i++].VirtTag = LIUArray[static_cast(Unit)].getTag(); } void InterferenceCache::Entry::reset(MCRegister physReg, @@ -110,7 +110,7 @@ void InterferenceCache::Entry::reset(MCRegister physReg, PrevPos = SlotIndex(); RegUnits.clear(); for (MCRegUnit Unit : TRI->regunits(PhysReg)) { - RegUnits.push_back(LIUArray[Unit]); + RegUnits.push_back(LIUArray[static_cast(Unit)]); RegUnits.back().Fixed = &LIS->getRegUnit(Unit); } } @@ -121,7 +121,7 @@ bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray, for (MCRegUnit Unit : TRI->regunits(PhysReg)) { if (i == e) return false; - if (LIUArray[Unit].changedSince(RegUnits[i].VirtTag)) + if (LIUArray[static_cast(Unit)].changedSince(RegUnits[i].VirtTag)) return false; ++i; } diff --git a/llvm/lib/CodeGen/LiveIntervals.cpp b/llvm/lib/CodeGen/LiveIntervals.cpp index b600e0411bc48..2e8756565c8f7 100644 --- a/llvm/lib/CodeGen/LiveIntervals.cpp +++ b/llvm/lib/CodeGen/LiveIntervals.cpp @@ -184,7 +184,8 @@ void LiveIntervals::print(raw_ostream &OS) const { // Dump the regunits. for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit) if (LiveRange *LR = RegUnitRanges[Unit]) - OS << printRegUnit(Unit, TRI) << ' ' << *LR << '\n'; + OS << printRegUnit(static_cast(Unit), TRI) << ' ' << *LR + << '\n'; // Dump the virtregs. for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { @@ -367,10 +368,11 @@ void LiveIntervals::computeLiveInRegUnits() { LLVM_DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB)); for (const auto &LI : MBB.liveins()) { for (MCRegUnit Unit : TRI->regunits(LI.PhysReg)) { - LiveRange *LR = RegUnitRanges[Unit]; + LiveRange *LR = RegUnitRanges[static_cast(Unit)]; if (!LR) { // Use segment set to speed-up initial computation of the live range. - LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs); + LR = RegUnitRanges[static_cast(Unit)] = + new LiveRange(UseSegmentSetForPhysRegs); NewRanges.push_back(Unit); } VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator()); @@ -384,7 +386,7 @@ void LiveIntervals::computeLiveInRegUnits() { // Compute the 'normal' part of the ranges. for (MCRegUnit Unit : NewRanges) - computeRegUnitRange(*RegUnitRanges[Unit], Unit); + computeRegUnitRange(*RegUnitRanges[static_cast(Unit)], Unit); } static void createSegmentsForValues(LiveRange &LR, diff --git a/llvm/lib/CodeGen/LiveRegMatrix.cpp b/llvm/lib/CodeGen/LiveRegMatrix.cpp index e3ee8dc325933..e7238008d2c69 100644 --- a/llvm/lib/CodeGen/LiveRegMatrix.cpp +++ b/llvm/lib/CodeGen/LiveRegMatrix.cpp @@ -76,7 +76,7 @@ void LiveRegMatrixWrapperLegacy::releaseMemory() { LRM.releaseMemory(); } void LiveRegMatrix::releaseMemory() { for (unsigned i = 0, e = Matrix.size(); i != e; ++i) { - Matrix[i].clear(); + Matrix[static_cast(i)].clear(); // No need to clear Queries here, since LiveIntervalUnion::Query doesn't // have anything important to clear and LiveRegMatrix's runOnFunction() // does a std::unique_ptr::reset anyways. 
@@ -185,7 +185,7 @@ bool LiveRegMatrix::checkRegUnitInterference(const LiveInterval &VirtReg, LiveIntervalUnion::Query &LiveRegMatrix::query(const LiveRange &LR, MCRegUnit RegUnit) { - LiveIntervalUnion::Query &Q = Queries[RegUnit]; + LiveIntervalUnion::Query &Q = Queries[static_cast(RegUnit)]; Q.init(UserTag, LR, Matrix[RegUnit]); return Q; } diff --git a/llvm/lib/CodeGen/LiveRegUnits.cpp b/llvm/lib/CodeGen/LiveRegUnits.cpp index 3e7052a9b6245..348ccd85f4c45 100644 --- a/llvm/lib/CodeGen/LiveRegUnits.cpp +++ b/llvm/lib/CodeGen/LiveRegUnits.cpp @@ -23,7 +23,7 @@ void LiveRegUnits::removeRegsNotPreserved(const uint32_t *RegMask) { for (MCRegUnit U : TRI->regunits()) { for (MCRegUnitRootIterator RootReg(U, TRI); RootReg.isValid(); ++RootReg) { if (MachineOperand::clobbersPhysReg(RegMask, *RootReg)) { - Units.reset(U); + Units.reset(static_cast(U)); break; } } @@ -34,7 +34,7 @@ void LiveRegUnits::addRegsInMask(const uint32_t *RegMask) { for (MCRegUnit U : TRI->regunits()) { for (MCRegUnitRootIterator RootReg(U, TRI); RootReg.isValid(); ++RootReg) { if (MachineOperand::clobbersPhysReg(RegMask, *RootReg)) { - Units.set(U); + Units.set(static_cast(U)); break; } } diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp index 5ec7c48d7ee64..2a344b40c30c1 100644 --- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp +++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp @@ -138,7 +138,7 @@ class CopyTracker { for (unsigned SafeReg = 0, E = TRI.getNumRegs(); SafeReg < E; ++SafeReg) if (!RegMaskOp.clobbersPhysReg(SafeReg)) for (MCRegUnit SafeUnit : TRI.regunits(SafeReg)) - PreservedRegUnits.set(SafeUnit); + PreservedRegUnits.set(static_cast(SafeUnit)); return PreservedRegUnits; } @@ -996,7 +996,7 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) { // this register mask. 
bool MIRefedinCopyInfo = false; for (MCRegUnit RegUnit : TRI->regunits(Reg)) { - if (!PreservedRegUnits.test(RegUnit)) + if (!PreservedRegUnits.test(static_cast(RegUnit))) Tracker.clobberRegUnit(RegUnit, *TRI, *TII, UseCopyInstr); else { if (MaybeDead == Tracker.findCopyForUnit(RegUnit, *TRI)) { diff --git a/llvm/lib/CodeGen/MachineInstrBundle.cpp b/llvm/lib/CodeGen/MachineInstrBundle.cpp index fa654f266c89a..3a212206ac4e6 100644 --- a/llvm/lib/CodeGen/MachineInstrBundle.cpp +++ b/llvm/lib/CodeGen/MachineInstrBundle.cpp @@ -108,7 +108,7 @@ static bool containsReg(SmallSetVector LocalDefsV, const TargetRegisterInfo *TRI) { if (Reg.isPhysical()) { for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) - if (!LocalDefsP[Unit]) + if (!LocalDefsP[static_cast(Unit)]) return false; return true; @@ -189,7 +189,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB, if (LocalDefs.insert(Reg)) { if (!MO.isDead() && Reg.isPhysical()) { for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) - LocalDefsP.set(Unit); + LocalDefsP.set(static_cast(Unit)); } } else { if (!MO.isDead()) { diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp index c169467384f8b..0a4b04d60aedc 100644 --- a/llvm/lib/CodeGen/MachineLICM.cpp +++ b/llvm/lib/CodeGen/MachineLICM.cpp @@ -492,7 +492,7 @@ static void applyBitsNotInRegMaskToRegUnitsMask(const TargetRegisterInfo &TRI, if (PhysReg && !((Word >> Bit) & 1)) { for (MCRegUnit Unit : TRI.regunits(PhysReg)) - RUsFromRegsNotInMask.set(Unit); + RUsFromRegsNotInMask.set(static_cast(Unit)); } } } @@ -541,7 +541,8 @@ void MachineLICMImpl::ProcessMI(MachineInstr *MI, BitVector &RUDefs, for (MCRegUnit Unit : TRI->regunits(Reg)) { // If it's using a non-loop-invariant register, then it's obviously // not safe to hoist. - if (RUDefs.test(Unit) || RUClobbers.test(Unit)) { + if (RUDefs.test(static_cast(Unit)) || + RUClobbers.test(static_cast(Unit))) { HasNonInvariantUse = true; break; } @@ -562,16 +563,16 @@ void MachineLICMImpl::ProcessMI(MachineInstr *MI, BitVector &RUDefs, // register, then this is not safe. Two defs is indicated by setting a // PhysRegClobbers bit. for (MCRegUnit Unit : TRI->regunits(Reg)) { - if (RUDefs.test(Unit)) { - RUClobbers.set(Unit); + if (RUDefs.test(static_cast(Unit))) { + RUClobbers.set(static_cast(Unit)); RuledOut = true; - } else if (RUClobbers.test(Unit)) { + } else if (RUClobbers.test(static_cast(Unit))) { // MI defined register is seen defined by another instruction in // the loop, it cannot be a LICM candidate. RuledOut = true; } - RUDefs.set(Unit); + RUDefs.set(static_cast(Unit)); } } @@ -612,7 +613,7 @@ void MachineLICMImpl::HoistRegionPostRA(MachineLoop *CurLoop) { // be LICM'ed. 
for (const auto &LI : BB->liveins()) { for (MCRegUnit Unit : TRI->regunits(LI.PhysReg)) - RUDefs.set(Unit); + RUDefs.set(static_cast(Unit)); } // Funclet entry blocks will clobber all registers @@ -626,10 +627,10 @@ void MachineLICMImpl::HoistRegionPostRA(MachineLoop *CurLoop) { const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); if (MCRegister Reg = TLI.getExceptionPointerRegister(PersonalityFn)) for (MCRegUnit Unit : TRI->regunits(Reg)) - RUClobbers.set(Unit); + RUClobbers.set(static_cast(Unit)); if (MCRegister Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) for (MCRegUnit Unit : TRI->regunits(Reg)) - RUClobbers.set(Unit); + RUClobbers.set(static_cast(Unit)); } SpeculationState = SpeculateUnknown; @@ -648,7 +649,7 @@ void MachineLICMImpl::HoistRegionPostRA(MachineLoop *CurLoop) { if (!Reg) continue; for (MCRegUnit Unit : TRI->regunits(Reg)) - TermRUs.set(Unit); + TermRUs.set(static_cast(Unit)); } } @@ -668,7 +669,8 @@ void MachineLICMImpl::HoistRegionPostRA(MachineLoop *CurLoop) { Register Def = Candidate.Def; bool Safe = true; for (MCRegUnit Unit : TRI->regunits(Def)) { - if (RUClobbers.test(Unit) || TermRUs.test(Unit)) { + if (RUClobbers.test(static_cast(Unit)) || + TermRUs.test(static_cast(Unit))) { Safe = false; break; } @@ -682,7 +684,8 @@ void MachineLICMImpl::HoistRegionPostRA(MachineLoop *CurLoop) { if (!MO.getReg()) continue; for (MCRegUnit Unit : TRI->regunits(MO.getReg())) { - if (RUDefs.test(Unit) || RUClobbers.test(Unit)) { + if (RUDefs.test(static_cast(Unit)) || + RUClobbers.test(static_cast(Unit))) { // If it's using a non-loop-invariant register, then it's obviously // not safe to hoist. Safe = false; diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp index 0be75e073dedd..41efe622417c8 100644 --- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp +++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp @@ -1320,8 +1320,9 @@ void PEIImpl::insertZeroCallUsedRegs(MachineFunction &MF) { continue; // This picks up sibling registers (e.q. %al -> %ah). + // FIXME: Mixing physical registers and register units is likely a bug. 
for (MCRegUnit Unit : TRI.regunits(Reg)) - RegsToZero.reset(Unit); + RegsToZero.reset(static_cast(Unit)); for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg)) RegsToZero.reset(SReg); diff --git a/llvm/lib/CodeGen/RDFRegisters.cpp b/llvm/lib/CodeGen/RDFRegisters.cpp index 07729ebec6e51..80fad722f592a 100644 --- a/llvm/lib/CodeGen/RDFRegisters.cpp +++ b/llvm/lib/CodeGen/RDFRegisters.cpp @@ -82,7 +82,7 @@ PhysicalRegisterInfo::PhysicalRegisterInfo(const TargetRegisterInfo &tri, if (!(MB[I / 32] & (1u << (I % 32)))) continue; for (MCRegUnit Unit : TRI.regunits(MCRegister::from(I))) - PU.set(Unit); + PU.set(static_cast(Unit)); } MaskInfos[M].Units = PU.flip(); } @@ -132,7 +132,7 @@ std::set PhysicalRegisterInfo::getUnits(RegisterRef RR) const { for (MCRegUnitMaskIterator UM(RR.asMCReg(), &TRI); UM.isValid(); ++UM) { auto [U, M] = *UM; if ((M & RR.Mask).any()) - Units.insert(U); + Units.insert(static_cast(U)); } return Units; } @@ -152,7 +152,7 @@ std::set PhysicalRegisterInfo::getUnits(RegisterRef RR) const { unsigned T = llvm::countr_zero(C); unsigned CR = 32 * I + T; // Clobbered reg for (MCRegUnit U : TRI.regunits(CR)) - Units.insert(U); + Units.insert(static_cast(U)); C &= ~(1u << T); } } @@ -269,7 +269,7 @@ void PhysicalRegisterInfo::print(raw_ostream &OS, RegisterRef A) const { void PhysicalRegisterInfo::print(raw_ostream &OS, const RegisterAggr &A) const { OS << '{'; for (unsigned U : A.units()) - OS << ' ' << printRegUnit(U, &TRI); + OS << ' ' << printRegUnit(static_cast(U), &TRI); OS << " }"; } @@ -280,7 +280,7 @@ bool RegisterAggr::hasAliasOf(RegisterRef RR) const { for (MCRegUnitMaskIterator U(RR.asMCReg(), &PRI.getTRI()); U.isValid(); ++U) { auto [Unit, LaneMask] = *U; if ((LaneMask & RR.Mask).any()) - if (Units.test(Unit)) + if (Units.test(static_cast(Unit))) return true; } return false; @@ -295,7 +295,7 @@ bool RegisterAggr::hasCoverOf(RegisterRef RR) const { for (MCRegUnitMaskIterator U(RR.asMCReg(), &PRI.getTRI()); U.isValid(); ++U) { auto [Unit, LaneMask] = *U; if ((LaneMask & RR.Mask).any()) - if (!Units.test(Unit)) + if (!Units.test(static_cast(Unit))) return false; } return true; @@ -310,7 +310,7 @@ RegisterAggr &RegisterAggr::insert(RegisterRef RR) { for (MCRegUnitMaskIterator U(RR.asMCReg(), &PRI.getTRI()); U.isValid(); ++U) { auto [Unit, LaneMask] = *U; if ((LaneMask & RR.Mask).any()) - Units.set(Unit); + Units.set(static_cast(Unit)); } return *this; } @@ -361,13 +361,13 @@ RegisterRef RegisterAggr::makeRegRef() const { // in this aggregate. // Get all the registers aliased to the first unit in the bit vector. - BitVector Regs = PRI.getUnitAliases(U); + BitVector Regs = PRI.getUnitAliases(static_cast(U)); U = Units.find_next(U); // For each other unit, intersect it with the set of all registers // aliased that unit. 
while (U >= 0) { - Regs &= PRI.getUnitAliases(U); + Regs &= PRI.getUnitAliases(static_cast(U)); U = Units.find_next(U); } @@ -382,7 +382,7 @@ RegisterRef RegisterAggr::makeRegRef() const { LaneBitmask M; for (MCRegUnitMaskIterator I(F, &PRI.getTRI()); I.isValid(); ++I) { auto [Unit, LaneMask] = *I; - if (Units.test(Unit)) + if (Units.test(static_cast(Unit))) M |= LaneMask; } return RegisterRef(F, M); @@ -391,7 +391,7 @@ RegisterRef RegisterAggr::makeRegRef() const { RegisterAggr::ref_iterator::ref_iterator(const RegisterAggr &RG, bool End) : Owner(&RG) { for (int U = RG.Units.find_first(); U >= 0; U = RG.Units.find_next(U)) { - RegisterRef R = RG.PRI.getRefForUnit(U); + RegisterRef R = RG.PRI.getRefForUnit(static_cast(U)); Masks[R.Id] |= R.Mask; } Pos = End ? Masks.end() : Masks.begin(); diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp index 61706e13b8e91..b12a5bc64ca0b 100644 --- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp +++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp @@ -134,8 +134,8 @@ void ReachingDefInfo::enterBasicBlock(MachineBasicBlock *MBB) { // Treat function live-ins as if they were defined just before the first // instruction. Usually, function arguments are set up immediately // before the call. - if (LiveRegs[Unit] != -1) { - LiveRegs[Unit] = -1; + if (LiveRegs[static_cast(Unit)] != -1) { + LiveRegs[static_cast(Unit)] = -1; MBBReachingDefs.append(MBBNumber, Unit, -1); } } @@ -162,7 +162,8 @@ void ReachingDefInfo::enterBasicBlock(MachineBasicBlock *MBB) { // Insert the most recent reaching definition we found. for (unsigned Unit = 0; Unit != NumRegUnits; ++Unit) if (LiveRegs[Unit] != ReachingDefDefaultVal) - MBBReachingDefs.append(MBBNumber, Unit, LiveRegs[Unit]); + MBBReachingDefs.append(MBBNumber, static_cast(Unit), + LiveRegs[Unit]); } void ReachingDefInfo::leaveBasicBlock(MachineBasicBlock *MBB) { @@ -205,8 +206,8 @@ void ReachingDefInfo::processDefs(MachineInstr *MI) { << *MI); // How many instructions since this reg unit was last written? - if (LiveRegs[Unit] != CurInstr) { - LiveRegs[Unit] = CurInstr; + if (LiveRegs[static_cast(Unit)] != CurInstr) { + LiveRegs[static_cast(Unit)] = CurInstr; MBBReachingDefs.append(MBBNumber, Unit, CurInstr); } } @@ -240,16 +241,17 @@ void ReachingDefInfo::reprocessBasicBlock(MachineBasicBlock *MBB) { if (Def == ReachingDefDefaultVal) continue; - auto Defs = MBBReachingDefs.defs(MBBNumber, Unit); + auto Defs = MBBReachingDefs.defs(MBBNumber, static_cast(Unit)); if (!Defs.empty() && Defs.front() < 0) { if (Defs.front() >= Def) continue; // Update existing reaching def from predecessor to a more recent one. - MBBReachingDefs.replaceFront(MBBNumber, Unit, Def); + MBBReachingDefs.replaceFront(MBBNumber, static_cast(Unit), + Def); } else { // Insert new reaching def from predecessor. - MBBReachingDefs.prepend(MBBNumber, Unit, Def); + MBBReachingDefs.prepend(MBBNumber, static_cast(Unit), Def); } // Update reaching def at end of BB. 
Keep in mind that these are @@ -370,7 +372,8 @@ void ReachingDefInfo::traverse() { MBBNumber != NumBlockIDs; ++MBBNumber) { for (unsigned Unit = 0; Unit != NumRegUnits; ++Unit) { int LastDef = ReachingDefDefaultVal; - for (int Def : MBBReachingDefs.defs(MBBNumber, Unit)) { + for (int Def : + MBBReachingDefs.defs(MBBNumber, static_cast(Unit))) { assert(Def > LastDef && "Defs must be sorted and unique"); LastDef = Def; } diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp index d01f0d1f9de33..33e1d916a7d7a 100644 --- a/llvm/lib/CodeGen/RegAllocFast.cpp +++ b/llvm/lib/CodeGen/RegAllocFast.cpp @@ -284,7 +284,7 @@ class RegAllocFastImpl { /// Mark a physreg as used in this instruction. void markRegUsedInInstr(MCPhysReg PhysReg) { for (MCRegUnit Unit : TRI->regunits(PhysReg)) - UsedInInstr[Unit] = InstrGen | 1; + UsedInInstr[static_cast(Unit)] = InstrGen | 1; } // Check if physreg is clobbered by instruction's regmask(s). @@ -299,7 +299,8 @@ class RegAllocFastImpl { if (LookAtPhysRegUses && isClobberedByRegMasks(PhysReg)) return true; for (MCRegUnit Unit : TRI->regunits(PhysReg)) - if (UsedInInstr[Unit] >= (InstrGen | !LookAtPhysRegUses)) + if (UsedInInstr[static_cast(Unit)] >= + (InstrGen | !LookAtPhysRegUses)) return true; return false; } @@ -308,15 +309,16 @@ class RegAllocFastImpl { /// This is only used by the special livethrough handling code. void markPhysRegUsedInInstr(MCRegister PhysReg) { for (MCRegUnit Unit : TRI->regunits(PhysReg)) { - assert(UsedInInstr[Unit] <= InstrGen && "non-phys use before phys use?"); - UsedInInstr[Unit] = InstrGen; + assert(UsedInInstr[static_cast(Unit)] <= InstrGen && + "non-phys use before phys use?"); + UsedInInstr[static_cast(Unit)] = InstrGen; } } /// Remove mark of physical register being used in the instruction. void unmarkRegUsedInInstr(MCRegister PhysReg) { for (MCRegUnit Unit : TRI->regunits(PhysReg)) - UsedInInstr[Unit] = 0; + UsedInInstr[static_cast(Unit)] = 0; } enum : unsigned { diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 1bc7607890328..a059cb55371a3 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -590,7 +590,8 @@ bool RegAllocEvictionAdvisor::canReassign(const LiveInterval &VirtReg, MCRegister FromReg) const { auto HasRegUnitInterference = [&](MCRegUnit Unit) { // Instantiate a "subquery", not to be confused with the Queries array. - LiveIntervalUnion::Query SubQ(VirtReg, Matrix->getLiveUnions()[Unit]); + LiveIntervalUnion::Query SubQ( + VirtReg, Matrix->getLiveUnions()[static_cast(Unit)]); return SubQ.checkInterference(); }; @@ -1681,7 +1682,7 @@ void RAGreedy::calcGapWeights(MCRegister PhysReg, // StartIdx and after StopIdx. // LiveIntervalUnion::SegmentIter IntI = - Matrix->getLiveUnions()[Unit].find(StartIdx); + Matrix->getLiveUnions()[static_cast(Unit)].find(StartIdx); for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) { // Skip the gaps before IntI. 
while (Uses[Gap+1].getBoundaryIndex() < IntI.start()) diff --git a/llvm/lib/CodeGen/RegisterClassInfo.cpp b/llvm/lib/CodeGen/RegisterClassInfo.cpp index 8ead83302c337..bbeb7adae825c 100644 --- a/llvm/lib/CodeGen/RegisterClassInfo.cpp +++ b/llvm/lib/CodeGen/RegisterClassInfo.cpp @@ -85,7 +85,7 @@ void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf, CalleeSavedAliases.assign(TRI->getNumRegUnits(), 0); for (const MCPhysReg *I = CSR; *I; ++I) { for (MCRegUnit U : TRI->regunits(*I)) - CalleeSavedAliases[U] = *I; + CalleeSavedAliases[static_cast(U)] = *I; LastCalleeSavedRegs.push_back(*I); } diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp index 975895809b9de..cffb3ed1b8779 100644 --- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp +++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp @@ -137,13 +137,13 @@ Printable llvm::printRegUnit(MCRegUnit Unit, const TargetRegisterInfo *TRI) { return Printable([Unit, TRI](raw_ostream &OS) { // Generic printout when TRI is missing. if (!TRI) { - OS << "Unit~" << Unit; + OS << "Unit~" << static_cast(Unit); return; } // Check for invalid register units. - if (Unit >= TRI->getNumRegUnits()) { - OS << "BadUnit~" << Unit; + if (static_cast(Unit) >= TRI->getNumRegUnits()) { + OS << "BadUnit~" << static_cast(Unit); return; } diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp index 6274cb4462192..6e0a0f6f1de93 100644 --- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp +++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp @@ -202,7 +202,8 @@ Register FixupBWInstPass::getSuperRegDestIfDead(MachineInstr *OrigMI) const { MCRegUnitIterator I = Range.begin(), E = Range.end(); for (MCRegUnit S : TRI->regunits(SuperDestReg)) { I = std::lower_bound(I, E, S); - if ((I == E || *I > S) && LiveUnits.getBitVector().test(S)) { + if ((I == E || *I > S) && + LiveUnits.getBitVector().test(static_cast(S))) { SuperIsLive = true; break; }