diff --git a/bolt/lib/Core/ParallelUtilities.cpp b/bolt/lib/Core/ParallelUtilities.cpp index 88d9444a6a2ba..5f5e96e0e7881 100644 --- a/bolt/lib/Core/ParallelUtilities.cpp +++ b/bolt/lib/Core/ParallelUtilities.cpp @@ -49,7 +49,7 @@ namespace ParallelUtilities { namespace { /// A single thread pool that is used to run parallel tasks -std::unique_ptr<ThreadPool> ThreadPoolPtr; +std::unique_ptr<DefaultThreadPool> ThreadPoolPtr; unsigned computeCostFor(const BinaryFunction &BF, const PredicateTy &SkipPredicate, @@ -106,7 +106,7 @@ ThreadPoolInterface &getThreadPool() { if (ThreadPoolPtr.get()) return *ThreadPoolPtr; - ThreadPoolPtr = std::make_unique<ThreadPool>( + ThreadPoolPtr = std::make_unique<DefaultThreadPool>( llvm::hardware_concurrency(opts::ThreadCount)); return *ThreadPoolPtr; } diff --git a/bolt/tools/merge-fdata/merge-fdata.cpp b/bolt/tools/merge-fdata/merge-fdata.cpp index c6dfd3cfdc56d..f2ac5ad4492ee 100644 --- a/bolt/tools/merge-fdata/merge-fdata.cpp +++ b/bolt/tools/merge-fdata/merge-fdata.cpp @@ -316,7 +316,7 @@ void mergeLegacyProfiles(const SmallVectorImpl &Filenames) { // least 4 tasks. 
ThreadPoolStrategy S = optimal_concurrency( std::max(Filenames.size() / 4, static_cast(1))); - ThreadPool Pool(S); + DefaultThreadPool Pool(S); DenseMap ParsedProfiles( Pool.getMaxConcurrency()); for (const auto &Filename : Filenames) diff --git a/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp b/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp index 22bdb5de22d87..21b581fa6df2e 100644 --- a/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp +++ b/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp @@ -238,7 +238,7 @@ Example usage for a project using a compile commands database: Error = false; llvm::sys::Mutex IndexMutex; // ExecutorConcurrency is a flag exposed by AllTUsExecution.h - llvm::ThreadPool Pool(llvm::hardware_concurrency(ExecutorConcurrency)); + llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(ExecutorConcurrency)); for (auto &Group : USRToBitcode) { Pool.async([&]() { std::vector> Infos; diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp index b2d0efecc2069..298b02e77cb0a 100644 --- a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp +++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp @@ -89,7 +89,7 @@ bool Merge(llvm::StringRef MergeDir, llvm::StringRef OutputFile) { // Load all symbol files in MergeDir. { - llvm::ThreadPool Pool; + llvm::DefaultThreadPool Pool; for (llvm::sys::fs::directory_iterator Dir(MergeDir, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { // Parse YAML files in parallel. 
diff --git a/clang/lib/Tooling/AllTUsExecution.cpp b/clang/lib/Tooling/AllTUsExecution.cpp index f327d01399414..9cad8680447be 100644 --- a/clang/lib/Tooling/AllTUsExecution.cpp +++ b/clang/lib/Tooling/AllTUsExecution.cpp @@ -115,7 +115,7 @@ llvm::Error AllTUsToolExecutor::execute( auto &Action = Actions.front(); { - llvm::ThreadPool Pool(llvm::hardware_concurrency(ThreadCount)); + llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(ThreadCount)); for (std::string File : Files) { Pool.async( [&](std::string Path) { diff --git a/clang/tools/clang-scan-deps/ClangScanDeps.cpp b/clang/tools/clang-scan-deps/ClangScanDeps.cpp index 9811d2a875335..d042fecc3dbe6 100644 --- a/clang/tools/clang-scan-deps/ClangScanDeps.cpp +++ b/clang/tools/clang-scan-deps/ClangScanDeps.cpp @@ -869,7 +869,7 @@ int clang_scan_deps_main(int argc, char **argv, const llvm::ToolContext &) { DependencyScanningService Service(ScanMode, Format, OptimizeArgs, EagerLoadModules); - llvm::ThreadPool Pool(llvm::hardware_concurrency(NumThreads)); + llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(NumThreads)); std::vector> WorkerTools; for (unsigned I = 0; I < Pool.getMaxConcurrency(); ++I) WorkerTools.push_back(std::make_unique(Service)); diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp index 65b598d1d7c42..9b0a32c136e8b 100644 --- a/lld/MachO/Writer.cpp +++ b/lld/MachO/Writer.cpp @@ -66,7 +66,7 @@ class Writer { template void run(); - ThreadPool threadPool; + DefaultThreadPool threadPool; std::unique_ptr &buffer; uint64_t addr = 0; uint64_t fileOff = 0; diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp index 1b25527abf981..9d62b2a908f77 100644 --- a/lldb/source/Core/Debugger.cpp +++ b/lldb/source/Core/Debugger.cpp @@ -104,7 +104,7 @@ static std::recursive_mutex *g_debugger_list_mutex_ptr = nullptr; // NOTE: intentional leak to avoid issues with C++ destructor chain static Debugger::DebuggerList *g_debugger_list_ptr = nullptr; // NOTE: intentional leak to avoid 
issues with C++ destructor chain -static llvm::ThreadPool *g_thread_pool = nullptr; +static llvm::DefaultThreadPool *g_thread_pool = nullptr; static constexpr OptionEnumValueElement g_show_disassembly_enum_values[] = { { @@ -609,7 +609,7 @@ void Debugger::Initialize(LoadPluginCallbackType load_plugin_callback) { "Debugger::Initialize called more than once!"); g_debugger_list_mutex_ptr = new std::recursive_mutex(); g_debugger_list_ptr = new DebuggerList(); - g_thread_pool = new llvm::ThreadPool(llvm::optimal_concurrency()); + g_thread_pool = new llvm::DefaultThreadPool(llvm::optimal_concurrency()); g_load_plugin_callback = load_plugin_callback; } diff --git a/llvm/docs/ORCv2.rst b/llvm/docs/ORCv2.rst index add05e05a80e5..910ef5b9f3d02 100644 --- a/llvm/docs/ORCv2.rst +++ b/llvm/docs/ORCv2.rst @@ -738,7 +738,7 @@ or creating any Modules attached to it. E.g. ThreadSafeContext TSCtx(std::make_unique()); - ThreadPool TP(NumThreads); + DefaultThreadPool TP(NumThreads); JITStack J; for (auto &ModulePath : ModulePaths) { diff --git a/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp b/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp index fdd376d82da5d..0d97d379d2279 100644 --- a/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp +++ b/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp @@ -136,7 +136,7 @@ class SpeculativeJIT { std::unique_ptr ES; DataLayout DL; MangleAndInterner Mangle{*ES, DL}; - ThreadPool CompileThreads{llvm::hardware_concurrency(NumThreads)}; + DefaultThreadPool CompileThreads{llvm::hardware_concurrency(NumThreads)}; JITDylib &MainJD; diff --git a/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h b/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h index 923976b182d1e..76d16e63df281 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h @@ -254,7 +254,7 @@ class LLJIT { DataLayout DL; Triple TT; - std::unique_ptr<ThreadPool> CompileThreads; + std::unique_ptr<DefaultThreadPool> CompileThreads; std::unique_ptr ObjLinkingLayer; 
std::unique_ptr ObjTransformLayer; diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h index 93f02729f047a..014b7a09d7c8b 100644 --- a/llvm/include/llvm/Support/ThreadPool.h +++ b/llvm/include/llvm/Support/ThreadPool.h @@ -212,8 +212,7 @@ class StdThreadPool : public ThreadPoolInterface { /// Maximum number of threads to potentially grow this pool to. const unsigned MaxThreadCount; }; - -#endif // LLVM_ENABLE_THREADS Disabled +#endif // LLVM_ENABLE_THREADS /// A non-threaded implementation. class SingleThreadExecutor : public ThreadPoolInterface { @@ -253,9 +252,9 @@ class SingleThreadExecutor : public ThreadPoolInterface { }; #if LLVM_ENABLE_THREADS -using ThreadPool = StdThreadPool; +using DefaultThreadPool = StdThreadPool; #else -using ThreadPool = SingleThreadExecutor; +using DefaultThreadPool = SingleThreadExecutor; #endif /// A group of tasks to be run on a thread pool. Thread pool tasks in different diff --git a/llvm/lib/CodeGen/ParallelCG.cpp b/llvm/lib/CodeGen/ParallelCG.cpp index 43b23368ead27..ceb64b2badab5 100644 --- a/llvm/lib/CodeGen/ParallelCG.cpp +++ b/llvm/lib/CodeGen/ParallelCG.cpp @@ -52,7 +52,7 @@ void llvm::splitCodeGen( // Create ThreadPool in nested scope so that threads will be joined // on destruction. 
{ - ThreadPool CodegenThreadPool(hardware_concurrency(OSs.size())); + DefaultThreadPool CodegenThreadPool(hardware_concurrency(OSs.size())); int ThreadCount = 0; SplitModule( diff --git a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp index 4f5a4e2ffc702..9b581a6c9ab77 100644 --- a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp +++ b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp @@ -2935,7 +2935,7 @@ Error DWARFLinker::link() { } EmitLambda(); } else { - ThreadPool Pool(hardware_concurrency(2)); + DefaultThreadPool Pool(hardware_concurrency(2)); Pool.async(AnalyzeAll); Pool.async(CloneAll); Pool.wait(); diff --git a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp index a052969e74c0f..49b08997eb9c1 100644 --- a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp +++ b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp @@ -192,7 +192,7 @@ Error DWARFLinkerImpl::link() { Context->InputDWARFFile.unload(); } } else { - ThreadPool Pool(llvm::parallel::strategy); + DefaultThreadPool Pool(llvm::parallel::strategy); for (std::unique_ptr &Context : ObjectContexts) Pool.async([&]() { // Link object file. diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp index 3a28cd412de92..ff6b560d11726 100644 --- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp +++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp @@ -601,7 +601,7 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) { // Now parse all DIEs in case we have cross compile unit references in a // thread pool. 
- ThreadPool pool(hardware_concurrency(NumThreads)); + DefaultThreadPool pool(hardware_concurrency(NumThreads)); for (const auto &CU : DICtx.compile_units()) pool.async([&CU]() { CU->getUnitDIE(false /*CUDieOnly*/); }); pool.wait(); diff --git a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp index 833dcb9d5bf2e..79adda5b7bc03 100644 --- a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp +++ b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp @@ -972,8 +972,8 @@ LLJIT::LLJIT(LLJITBuilderState &S, Error &Err) if (S.NumCompileThreads > 0) { InitHelperTransformLayer->setCloneToNewContextOnEmit(true); - CompileThreads = - std::make_unique(hardware_concurrency(S.NumCompileThreads)); + CompileThreads = std::make_unique( + hardware_concurrency(S.NumCompileThreads)); ES->setDispatchTask([this](std::unique_ptr T) { // FIXME: We should be able to use move-capture here, but ThreadPool's // AsyncTaskTys are std::functions rather than unique_functions diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp index 34a49c8588b2f..9c93ec70da776 100644 --- a/llvm/lib/LTO/LTO.cpp +++ b/llvm/lib/LTO/LTO.cpp @@ -1409,7 +1409,7 @@ class lto::ThinBackendProc { namespace { class InProcessThinBackend : public ThinBackendProc { - ThreadPool BackendThreadPool; + DefaultThreadPool BackendThreadPool; AddStreamFn AddStream; FileCache Cache; std::set CfiFunctionDefs; diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp index 6cfe67779b1a7..71e8849dc3cc9 100644 --- a/llvm/lib/LTO/LTOBackend.cpp +++ b/llvm/lib/LTO/LTOBackend.cpp @@ -431,7 +431,7 @@ static void splitCodeGen(const Config &C, TargetMachine *TM, AddStreamFn AddStream, unsigned ParallelCodeGenParallelismLevel, Module &Mod, const ModuleSummaryIndex &CombinedIndex) { - ThreadPool CodegenThreadPool( + DefaultThreadPool CodegenThreadPool( heavyweight_hardware_concurrency(ParallelCodeGenParallelismLevel)); unsigned ThreadCount = 0; const Target *T = &TM->getTarget(); diff --git 
a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp index 8fd181846f0c4..8f517eb50dc76 100644 --- a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp +++ b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp @@ -980,7 +980,7 @@ void ThinLTOCodeGenerator::run() { if (CodeGenOnly) { // Perform only parallel codegen and return. - ThreadPool Pool; + DefaultThreadPool Pool; int count = 0; for (auto &Mod : Modules) { Pool.async([&](int count) { @@ -1126,7 +1126,7 @@ void ThinLTOCodeGenerator::run() { // Parallel optimizer + codegen { - ThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount)); + DefaultThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount)); for (auto IndexCount : ModulesOrdering) { auto &Mod = Modules[IndexCount]; Pool.async([&](int count) { diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp index cb6ba61179941..f4254b50d26c9 100644 --- a/llvm/lib/Support/BalancedPartitioning.cpp +++ b/llvm/lib/Support/BalancedPartitioning.cpp @@ -82,7 +82,7 @@ void BalancedPartitioning::run(std::vector &Nodes) const { Nodes.size(), Config.SplitDepth, Config.IterationsPerSplit)); std::optional TP; #if LLVM_ENABLE_THREADS - ThreadPool TheThreadPool; + DefaultThreadPool TheThreadPool; if (Config.TaskSplitDepth > 1) TP.emplace(TheThreadPool); #endif diff --git a/llvm/tools/dsymutil/dsymutil.cpp b/llvm/tools/dsymutil/dsymutil.cpp index b0e988c6f8e4b..25e281c415e75 100644 --- a/llvm/tools/dsymutil/dsymutil.cpp +++ b/llvm/tools/dsymutil/dsymutil.cpp @@ -734,7 +734,7 @@ int dsymutil_main(int argc, char **argv, const llvm::ToolContext &) { S.ThreadsRequested = DebugMapPtrsOrErr->size(); S.Limit = true; } - ThreadPool Threads(S); + DefaultThreadPool Threads(S); // If there is more than one link to execute, we need to generate // temporary files. 
diff --git a/llvm/tools/llvm-cov/CodeCoverage.cpp b/llvm/tools/llvm-cov/CodeCoverage.cpp index 049e89d1a2300..1e5bfbe5c3aad 100644 --- a/llvm/tools/llvm-cov/CodeCoverage.cpp +++ b/llvm/tools/llvm-cov/CodeCoverage.cpp @@ -1217,7 +1217,7 @@ int CodeCoverageTool::doShow(int argc, const char **argv, ShowFilenames); } else { // In -output-dir mode, it's safe to use multiple threads to print files. - ThreadPool Pool(S); + DefaultThreadPool Pool(S); for (const std::string &SourceFile : SourceFiles) Pool.async(&CodeCoverageTool::writeSourceFileView, this, SourceFile, Coverage.get(), Printer.get(), ShowFilenames); diff --git a/llvm/tools/llvm-cov/CoverageExporterJson.cpp b/llvm/tools/llvm-cov/CoverageExporterJson.cpp index a424bbe06e0ec..9a8c7c94f0612 100644 --- a/llvm/tools/llvm-cov/CoverageExporterJson.cpp +++ b/llvm/tools/llvm-cov/CoverageExporterJson.cpp @@ -277,7 +277,7 @@ json::Array renderFiles(const coverage::CoverageMapping &Coverage, S = heavyweight_hardware_concurrency(SourceFiles.size()); S.Limit = true; } - ThreadPool Pool(S); + DefaultThreadPool Pool(S); json::Array FileArray; std::mutex FileArrayMutex; diff --git a/llvm/tools/llvm-cov/CoverageReport.cpp b/llvm/tools/llvm-cov/CoverageReport.cpp index 8cc073e4def8f..49a35f2a943e6 100644 --- a/llvm/tools/llvm-cov/CoverageReport.cpp +++ b/llvm/tools/llvm-cov/CoverageReport.cpp @@ -465,7 +465,7 @@ std::vector CoverageReport::prepareFileReports( S = heavyweight_hardware_concurrency(Files.size()); S.Limit = true; } - ThreadPool Pool(S); + DefaultThreadPool Pool(S); std::vector FileReports; FileReports.reserve(Files.size()); @@ -580,7 +580,7 @@ Expected DirectoryCoverageReport::prepareDirectoryReports( PoolS = heavyweight_hardware_concurrency(Files.size()); PoolS.Limit = true; } - ThreadPool Pool(PoolS); + DefaultThreadPool Pool(PoolS); TPool = &Pool; LCPStack = {RootLCP}; diff --git a/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp b/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp index 9d347dbd68f39..44d656148a4e2 
100644 --- a/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp +++ b/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp @@ -127,7 +127,7 @@ int llvm_debuginfod_main(int argc, char **argv, const llvm::ToolContext &) { for (const std::string &Path : ScanPaths) Paths.push_back(Path); - ThreadPool Pool(hardware_concurrency(MaxConcurrency)); + DefaultThreadPool Pool(hardware_concurrency(MaxConcurrency)); DebuginfodLog Log; DebuginfodCollection Collection(Paths, Log, Pool, MinInterval); DebuginfodServer Server(Log, Collection); diff --git a/llvm/tools/llvm-profdata/llvm-profdata.cpp b/llvm/tools/llvm-profdata/llvm-profdata.cpp index 577a8825fcaa7..8400b0769944c 100644 --- a/llvm/tools/llvm-profdata/llvm-profdata.cpp +++ b/llvm/tools/llvm-profdata/llvm-profdata.cpp @@ -898,7 +898,7 @@ static void mergeInstrProfile(const WeightedFileVector &Inputs, loadInput(Input, Remapper, Correlator.get(), ProfiledBinary, Contexts[0].get()); } else { - ThreadPool Pool(hardware_concurrency(NumThreads)); + DefaultThreadPool Pool(hardware_concurrency(NumThreads)); // Load the inputs in parallel (N/NumThreads serial steps). 
unsigned Ctx = 0; diff --git a/llvm/tools/llvm-reduce/deltas/Delta.cpp b/llvm/tools/llvm-reduce/deltas/Delta.cpp index 569117e70d6b4..4b84921618e1c 100644 --- a/llvm/tools/llvm-reduce/deltas/Delta.cpp +++ b/llvm/tools/llvm-reduce/deltas/Delta.cpp @@ -222,7 +222,7 @@ void llvm::runDeltaPass(TestRunner &Test, ReductionFunc ExtractChunksFromModule, std::unique_ptr ChunkThreadPoolPtr; if (NumJobs > 1) ChunkThreadPoolPtr = - std::make_unique<ThreadPool>(hardware_concurrency(NumJobs)); + std::make_unique<DefaultThreadPool>(hardware_concurrency(NumJobs)); bool FoundAtLeastOneNewUninterestingChunkWithCurrentGranularity; do { diff --git a/llvm/unittests/ADT/LazyAtomicPointerTest.cpp b/llvm/unittests/ADT/LazyAtomicPointerTest.cpp index efead0bdf0a31..3558225654268 100644 --- a/llvm/unittests/ADT/LazyAtomicPointerTest.cpp +++ b/llvm/unittests/ADT/LazyAtomicPointerTest.cpp @@ -18,7 +18,7 @@ namespace { TEST(LazyAtomicPointer, loadOrGenerate) { int Value = 0; LazyAtomicPointer Ptr; - ThreadPool Threads; + DefaultThreadPool Threads; for (unsigned I = 0; I < 4; ++I) Threads.async([&]() { Ptr.loadOrGenerate([&]() { @@ -38,7 +38,7 @@ TEST(LazyAtomicPointer, loadOrGenerate) { TEST(LazyAtomicPointer, BusyState) { int Value = 0; LazyAtomicPointer Ptr; - ThreadPool Threads; + DefaultThreadPool Threads; std::mutex BusyLock, EndLock; std::condition_variable Busy, End; diff --git a/llvm/unittests/Debuginfod/HTTPServerTests.cpp b/llvm/unittests/Debuginfod/HTTPServerTests.cpp index b0af2f850576d..cd1d5f2d9fc70 100644 --- a/llvm/unittests/Debuginfod/HTTPServerTests.cpp +++ b/llvm/unittests/Debuginfod/HTTPServerTests.cpp @@ -92,7 +92,7 @@ TEST_F(HTTPClientServerTest, Hello) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port); HTTPRequest 
Request(Url); @@ -116,7 +116,7 @@ TEST_F(HTTPClientServerTest, LambdaHandlerHello) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port); HTTPRequest Request(Url); @@ -135,7 +135,7 @@ TEST_F(HTTPClientServerTest, StreamingHello) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port); HTTPRequest Request(Url); @@ -167,7 +167,7 @@ TEST_F(HTTPClientServerTest, StreamingFileResponse) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port); HTTPRequest Request(Url); @@ -203,7 +203,7 @@ TEST_F(HTTPClientServerTest, StreamingMissingFileResponse) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port); HTTPRequest Request(Url); @@ -220,7 +220,7 @@ TEST_F(HTTPClientServerTest, ClientTimeout) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool 
Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port); HTTPClient Client; @@ -257,7 +257,7 @@ TEST_F(HTTPClientServerTest, PathMatching) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port) + "/abc/1/2"; HTTPRequest Request(Url); @@ -289,7 +289,7 @@ TEST_F(HTTPClientServerTest, FirstPathMatched) { Expected PortOrErr = Server.bind(); EXPECT_THAT_EXPECTED(PortOrErr, Succeeded()); unsigned Port = *PortOrErr; - ThreadPool Pool(hardware_concurrency(1)); + DefaultThreadPool Pool(hardware_concurrency(1)); Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); }); std::string Url = "http://localhost:" + utostr(Port) + "/abc/1/2"; HTTPRequest Request(Url); diff --git a/llvm/unittests/Support/ParallelTest.cpp b/llvm/unittests/Support/ParallelTest.cpp index 53ef9fa25e826..91250f01a3c11 100644 --- a/llvm/unittests/Support/ParallelTest.cpp +++ b/llvm/unittests/Support/ParallelTest.cpp @@ -160,7 +160,7 @@ TEST(Parallel, ParallelNestedTaskGroup) { }); }; - ThreadPool Pool; + DefaultThreadPool Pool; Pool.async(Fn); Pool.async(Fn); diff --git a/llvm/unittests/Support/ThreadPool.cpp b/llvm/unittests/Support/ThreadPool.cpp index 1da8e056019d8..d74c625d12295 100644 --- a/llvm/unittests/Support/ThreadPool.cpp +++ b/llvm/unittests/Support/ThreadPool.cpp @@ -140,7 +140,7 @@ TYPED_TEST(ThreadPoolTest, AsyncBarrier) { std::atomic_int checked_in{0}; - ThreadPool Pool; + DefaultThreadPool Pool; for (size_t i = 0; i < 5; ++i) { Pool.async([this, &checked_in] { this->waitForMainThread(); @@ -160,7 +160,7 @@ TYPED_TEST(ThreadPoolTest, AsyncBarrierArgs) { // Test that async works with a function requiring 
multiple parameters. std::atomic_int checked_in{0}; - ThreadPool Pool; + DefaultThreadPool Pool; for (size_t i = 0; i < 5; ++i) { Pool.async(TestFunc, std::ref(checked_in), i); } @@ -170,7 +170,7 @@ TYPED_TEST(ThreadPoolTest, AsyncBarrierArgs) { TYPED_TEST(ThreadPoolTest, Async) { CHECK_UNSUPPORTED(); - ThreadPool Pool; + DefaultThreadPool Pool; std::atomic_int i{0}; Pool.async([this, &i] { this->waitForMainThread(); @@ -185,7 +185,7 @@ TYPED_TEST(ThreadPoolTest, Async) { TYPED_TEST(ThreadPoolTest, GetFuture) { CHECK_UNSUPPORTED(); - ThreadPool Pool(hardware_concurrency(2)); + DefaultThreadPool Pool(hardware_concurrency(2)); std::atomic_int i{0}; Pool.async([this, &i] { this->waitForMainThread(); @@ -201,7 +201,7 @@ TYPED_TEST(ThreadPoolTest, GetFuture) { TYPED_TEST(ThreadPoolTest, GetFutureWithResult) { CHECK_UNSUPPORTED(); - ThreadPool Pool(hardware_concurrency(2)); + DefaultThreadPool Pool(hardware_concurrency(2)); auto F1 = Pool.async([] { return 1; }); auto F2 = Pool.async([] { return 2; }); @@ -213,7 +213,7 @@ TYPED_TEST(ThreadPoolTest, GetFutureWithResult) { TYPED_TEST(ThreadPoolTest, GetFutureWithResultAndArgs) { CHECK_UNSUPPORTED(); - ThreadPool Pool(hardware_concurrency(2)); + DefaultThreadPool Pool(hardware_concurrency(2)); auto Fn = [](int x) { return x; }; auto F1 = Pool.async(Fn, 1); auto F2 = Pool.async(Fn, 2); @@ -229,7 +229,7 @@ TYPED_TEST(ThreadPoolTest, PoolDestruction) { // Test that we are waiting on destruction std::atomic_int checked_in{0}; { - ThreadPool Pool; + DefaultThreadPool Pool; for (size_t i = 0; i < 5; ++i) { Pool.async([this, &checked_in] { this->waitForMainThread(); @@ -250,7 +250,7 @@ TYPED_TEST(ThreadPoolTest, Groups) { ThreadPoolStrategy S = hardware_concurrency(2); if (S.compute_thread_count() < 2) GTEST_SKIP(); - ThreadPool Pool(S); + DefaultThreadPool Pool(S); typename TestFixture::PhaseResetHelper Helper(this); ThreadPoolTaskGroup Group1(Pool); ThreadPoolTaskGroup Group2(Pool); @@ -288,7 +288,7 @@ TYPED_TEST(ThreadPoolTest, 
Groups) { // Check recursive tasks. TYPED_TEST(ThreadPoolTest, RecursiveGroups) { CHECK_UNSUPPORTED(); - ThreadPool Pool; + DefaultThreadPool Pool; ThreadPoolTaskGroup Group(Pool); std::atomic_int checked_in1{0}; @@ -323,7 +323,7 @@ TYPED_TEST(ThreadPoolTest, RecursiveWaitDeadlock) { ThreadPoolStrategy S = hardware_concurrency(2); if (S.compute_thread_count() < 2) GTEST_SKIP(); - ThreadPool Pool(S); + DefaultThreadPool Pool(S); typename TestFixture::PhaseResetHelper Helper(this); ThreadPoolTaskGroup Group(Pool); @@ -378,7 +378,7 @@ ThreadPoolTest::RunOnAllSockets(ThreadPoolStrategy S) { std::mutex AllThreadsLock; unsigned Active = 0; - ThreadPool Pool(S); + DefaultThreadPool Pool(S); for (size_t I = 0; I < S.compute_thread_count(); ++I) { Pool.async([&] { { diff --git a/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp index d9a85b435ebdb..b3d9430fc0f30 100644 --- a/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp +++ b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp @@ -77,7 +77,7 @@ TEST(ThreadSafeAllocatorTest, AllocWait) { // Get the allocation from the allocator first since this requires a lock. Alloc.applyLocked( [&](MockAllocator &Alloc) { C = &Alloc.getAllocCondition(); }); - ThreadPool Threads; + DefaultThreadPool Threads; // First allocation of 1 byte. 
Threads.async([&Alloc]() { char *P = (char *)Alloc.Allocate(1, alignof(char)); @@ -104,7 +104,7 @@ TEST(ThreadSafeAllocatorTest, AllocWait) { TEST(ThreadSafeAllocatorTest, AllocWithAlign) { ThreadSafeAllocator Alloc; - ThreadPool Threads; + DefaultThreadPool Threads; for (unsigned Index = 1; Index < 100; ++Index) Threads.async( @@ -123,7 +123,7 @@ TEST(ThreadSafeAllocatorTest, AllocWithAlign) { TEST(ThreadSafeAllocatorTest, SpecificBumpPtrAllocator) { ThreadSafeAllocator> Alloc; - ThreadPool Threads; + DefaultThreadPool Threads; for (unsigned Index = 1; Index < 100; ++Index) Threads.async( diff --git a/mlir/include/mlir/IR/MLIRContext.h b/mlir/include/mlir/IR/MLIRContext.h index 2ad35d8f78ee3..11e5329f43e68 100644 --- a/mlir/include/mlir/IR/MLIRContext.h +++ b/mlir/include/mlir/IR/MLIRContext.h @@ -50,7 +50,7 @@ class IRUnit; /// To control better thread spawning, an externally owned ThreadPool can be /// injected in the context. For example: /// -/// llvm::ThreadPool myThreadPool; +/// llvm::DefaultThreadPool myThreadPool; /// while (auto *request = nextCompilationRequests()) { /// MLIRContext ctx(registry, MLIRContext::Threading::DISABLED); /// ctx.setThreadPool(myThreadPool); diff --git a/mlir/lib/CAPI/IR/Support.cpp b/mlir/lib/CAPI/IR/Support.cpp index 81c9fc7719264..3311131fc2bc8 100644 --- a/mlir/lib/CAPI/IR/Support.cpp +++ b/mlir/lib/CAPI/IR/Support.cpp @@ -25,7 +25,7 @@ bool mlirStringRefEqual(MlirStringRef string, MlirStringRef other) { // LLVM ThreadPool API. 
//===----------------------------------------------------------------------===// MlirLlvmThreadPool mlirLlvmThreadPoolCreate() { - return wrap(new llvm::ThreadPool()); + return wrap(new llvm::DefaultThreadPool()); } void mlirLlvmThreadPoolDestroy(MlirLlvmThreadPool threadPool) { diff --git a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp index ec4a81c042c2c..9e6f8a7216995 100644 --- a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp +++ b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp @@ -72,7 +72,7 @@ class AsyncRuntime { } std::atomic numRefCountedObjects; - llvm::ThreadPool threadPool; + llvm::DefaultThreadPool threadPool; }; // -------------------------------------------------------------------------- // diff --git a/mlir/lib/IR/MLIRContext.cpp b/mlir/lib/IR/MLIRContext.cpp index 92568bd311e39..e1e6d14231d9f 100644 --- a/mlir/lib/IR/MLIRContext.cpp +++ b/mlir/lib/IR/MLIRContext.cpp @@ -274,7 +274,7 @@ class MLIRContextImpl { MLIRContextImpl(bool threadingIsEnabled) : threadingIsEnabled(threadingIsEnabled) { if (threadingIsEnabled) { - ownedThreadPool = std::make_unique(); + ownedThreadPool = std::make_unique(); threadPool = ownedThreadPool.get(); } } @@ -621,7 +621,7 @@ void MLIRContext::disableMultithreading(bool disable) { } else if (!impl->threadPool) { // The thread pool isn't externally provided. assert(!impl->ownedThreadPool); - impl->ownedThreadPool = std::make_unique(); + impl->ownedThreadPool = std::make_unique(); impl->threadPool = impl->ownedThreadPool.get(); } }