diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 07a9dec12f6f2..908dec42afabe 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2236,7 +2236,7 @@ static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
          !isa<CXXConstructorDecl, CXXDestructorDecl>(MD);
 }
 
-std::vector<const CXXRecordDecl *>
+SmallVector<const CXXRecordDecl *, 0>
 CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
   llvm::SetVector<const CXXRecordDecl *> MostBases;
 
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index f5fd94492540f..c5de947687ea6 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -1505,7 +1505,7 @@ class CodeGenModule : public CodeGenTypeCache {
   ///
   /// A most-base class of a class C is defined as a recursive base class of C,
   /// including C itself, that does not have any bases.
-  std::vector<const CXXRecordDecl *>
+  SmallVector<const CXXRecordDecl *, 0>
   getMostBaseClasses(const CXXRecordDecl *RD);
 
   /// Get the declaration of std::terminate for the platform.
diff --git a/llvm/include/llvm/ADT/SetVector.h b/llvm/include/llvm/ADT/SetVector.h
index 781ca367b97e4..ff083556cf169 100644
--- a/llvm/include/llvm/ADT/SetVector.h
+++ b/llvm/include/llvm/ADT/SetVector.h
@@ -23,10 +23,10 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <iterator>
-#include <vector>
 
 namespace llvm {
 
@@ -52,7 +52,7 @@ namespace llvm {
 /// when searching for elements instead of checking Set, due to it being better
 /// for performance. A value of 0 means that this mode of operation is not used,
 /// and is the default value.
-template <typename T, typename Vector = std::vector<T>,
+template <typename T, typename Vector = SmallVector<T, 0>,
           typename Set = DenseSet<T>, unsigned N = 0>
 class SetVector {
   // Much like in SmallPtrSet, this value should not be too high to prevent
diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index 2076ed48ea342..775bb95fdda7b 100644
--- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -93,7 +93,7 @@ extern cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
 // instruction in it takes an address of any basic block, because instruction
 // can only take an address of basic block located in the same function.
 static bool findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
-                         SetVector<ValueInfo> &RefEdges,
+                         SetVector<ValueInfo, std::vector<ValueInfo>> &RefEdges,
                          SmallPtrSet<const User *, 8> &Visited) {
   bool HasBlockAddress = false;
   SmallVector<const User *, 32> Worklist;
@@ -144,9 +144,12 @@ static bool isNonRenamableLocal(const GlobalValue &GV) {
 
 /// Determine whether this call has all constant integer arguments (excluding
 /// "this") and summarize it to VCalls or ConstVCalls as appropriate.
-static void addVCallToSet(DevirtCallSite Call, GlobalValue::GUID Guid,
-                          SetVector<FunctionSummary::VFuncId> &VCalls,
-                          SetVector<FunctionSummary::ConstVCall> &ConstVCalls) {
+static void addVCallToSet(
+    DevirtCallSite Call, GlobalValue::GUID Guid,
+    SetVector<FunctionSummary::VFuncId, std::vector<FunctionSummary::VFuncId>>
+        &VCalls,
+    SetVector<FunctionSummary::ConstVCall,
+              std::vector<FunctionSummary::ConstVCall>> &ConstVCalls) {
   std::vector<uint64_t> Args;
   // Start from the second argument to skip the "this" pointer.
   for (auto &Arg : drop_begin(Call.CB.args())) {
@@ -163,11 +166,18 @@ static void addVCallToSet(DevirtCallSite Call, GlobalValue::GUID Guid,
 /// If this intrinsic call requires that we add information to the function
 /// summary, do so via the non-constant reference arguments.
 static void addIntrinsicToSummary(
-    const CallInst *CI, SetVector<GlobalValue::GUID> &TypeTests,
-    SetVector<FunctionSummary::VFuncId> &TypeTestAssumeVCalls,
-    SetVector<FunctionSummary::VFuncId> &TypeCheckedLoadVCalls,
-    SetVector<FunctionSummary::ConstVCall> &TypeTestAssumeConstVCalls,
-    SetVector<FunctionSummary::ConstVCall> &TypeCheckedLoadConstVCalls,
+    const CallInst *CI,
+    SetVector<GlobalValue::GUID, std::vector<GlobalValue::GUID>> &TypeTests,
+    SetVector<FunctionSummary::VFuncId, std::vector<FunctionSummary::VFuncId>>
+        &TypeTestAssumeVCalls,
+    SetVector<FunctionSummary::VFuncId, std::vector<FunctionSummary::VFuncId>>
+        &TypeCheckedLoadVCalls,
+    SetVector<FunctionSummary::ConstVCall,
+              std::vector<FunctionSummary::ConstVCall>>
+        &TypeTestAssumeConstVCalls,
+    SetVector<FunctionSummary::ConstVCall,
+              std::vector<FunctionSummary::ConstVCall>>
+        &TypeCheckedLoadConstVCalls,
     DominatorTree &DT) {
   switch (CI->getCalledFunction()->getIntrinsicID()) {
   case Intrinsic::type_test:
@@ -269,12 +279,14 @@ static void computeFunctionSummary(
   MapVector<ValueInfo, CalleeInfo, DenseMap<ValueInfo, unsigned>,
             std::vector<std::pair<ValueInfo, CalleeInfo>>>
       CallGraphEdges;
-  SetVector<ValueInfo> RefEdges, LoadRefEdges, StoreRefEdges;
-  SetVector<GlobalValue::GUID> TypeTests;
-  SetVector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
-      TypeCheckedLoadVCalls;
-  SetVector<FunctionSummary::ConstVCall> TypeTestAssumeConstVCalls,
-      TypeCheckedLoadConstVCalls;
+  SetVector<ValueInfo, std::vector<ValueInfo>> RefEdges, LoadRefEdges,
+      StoreRefEdges;
+  SetVector<GlobalValue::GUID, std::vector<GlobalValue::GUID>> TypeTests;
+  SetVector<FunctionSummary::VFuncId, std::vector<FunctionSummary::VFuncId>>
+      TypeTestAssumeVCalls, TypeCheckedLoadVCalls;
+  SetVector<FunctionSummary::ConstVCall,
+            std::vector<FunctionSummary::ConstVCall>>
+      TypeTestAssumeConstVCalls, TypeCheckedLoadConstVCalls;
   ICallPromotionAnalysis ICallAnalysis;
   SmallPtrSet<const User *, 8> Visited;
 
@@ -505,7 +517,7 @@ static void computeFunctionSummary(
   std::vector<ValueInfo> Refs;
   if (IsThinLTO) {
     auto AddRefEdges = [&](const std::vector<const Instruction *> &Instrs,
-                           SetVector<ValueInfo> &Edges,
+                           SetVector<ValueInfo, std::vector<ValueInfo>> &Edges,
                            SmallPtrSet<const User *, 8> &Cache) {
       for (const auto *I : Instrs) {
         Cache.erase(I);
@@ -710,7 +722,7 @@ static void computeVariableSummary(ModuleSummaryIndex &Index,
                                    DenseSet<GlobalValue::GUID> &CantBePromoted,
                                    const Module &M,
                                    SmallVectorImpl<MDNode *> &Types) {
-  SetVector<ValueInfo> RefEdges;
+  SetVector<ValueInfo, std::vector<ValueInfo>> RefEdges;
   SmallPtrSet<const User *, 8> Visited;
   bool HasBlockAddress = findRefEdges(Index, &V, RefEdges, Visited);
   bool NonRenamableLocal = isNonRenamableLocal(V);
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp b/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
index 52679f8ba0b00..29863e06f174b 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp
@@ -155,7 +155,7 @@ opportunities(Function &F,
 
   // After all candidates have been added, it doesn't need to be a set
   // anymore.
-  std::vector<Value *> Candidates = ReferencedVals.takeVector();
+  auto Candidates = ReferencedVals.takeVector();
 
   // Remove ineligible candidates.
   llvm::erase_if(Candidates, [&, OpVal](Value *V) {
diff --git a/llvm/unittests/Support/ThreadPool.cpp b/llvm/unittests/Support/ThreadPool.cpp
index a0817dbe07375..cce20b6dd1dfb 100644
--- a/llvm/unittests/Support/ThreadPool.cpp
+++ b/llvm/unittests/Support/ThreadPool.cpp
@@ -99,7 +99,7 @@ class ThreadPoolTest : public testing::Test {
 
   void SetUp() override { CurrentPhase = 0; }
 
-  std::vector<llvm::BitVector> RunOnAllSockets(ThreadPoolStrategy S);
+  SmallVector<llvm::BitVector, 0> RunOnAllSockets(ThreadPoolStrategy S);
 
   std::condition_variable CurrentPhaseCondition;
   std::mutex CurrentPhaseMutex;
@@ -346,7 +346,7 @@ TEST_F(ThreadPoolTest, RecursiveWaitDeadlock) {
 // isn't implemented for Unix (need AffinityMask in Support/Unix/Program.inc).
 #ifdef _WIN32
 
-std::vector<llvm::BitVector>
+SmallVector<llvm::BitVector, 0>
 ThreadPoolTest::RunOnAllSockets(ThreadPoolStrategy S) {
   llvm::SetVector<llvm::BitVector> ThreadsUsed;
   std::mutex Lock;
@@ -387,7 +387,7 @@ TEST_F(ThreadPoolTest, AllThreads_UseAllRessources) {
   // therefore this test should not run.
   if (llvm::RunningWindows11OrGreater())
     GTEST_SKIP();
-  std::vector<llvm::BitVector> ThreadsUsed = RunOnAllSockets({});
+  auto ThreadsUsed = RunOnAllSockets({});
   ASSERT_EQ(llvm::get_cpus(), ThreadsUsed.size());
 }
 
@@ -398,8 +398,7 @@ TEST_F(ThreadPoolTest, AllThreads_OneThreadPerCore) {
   // therefore this test should not run.
   if (llvm::RunningWindows11OrGreater())
     GTEST_SKIP();
-  std::vector<llvm::BitVector> ThreadsUsed =
-      RunOnAllSockets(llvm::heavyweight_hardware_concurrency());
+  auto ThreadsUsed = RunOnAllSockets(llvm::heavyweight_hardware_concurrency());
   ASSERT_EQ(llvm::get_cpus(), ThreadsUsed.size());
 }
 
@@ -422,7 +421,7 @@ TEST_F(ThreadPoolTest, AffinityMask) {
 
   using namespace llvm::sys;
   if (getenv("LLVM_THREADPOOL_AFFINITYMASK")) {
-    std::vector<llvm::BitVector> ThreadsUsed = RunOnAllSockets({});
+    auto ThreadsUsed = RunOnAllSockets({});
     // Ensure the threads only ran on CPUs 0-3.
     // NOTE: Don't use ASSERT* here because this runs in a subprocess,
     // and will show up as un-executed in the parent.
diff --git a/mlir/include/mlir/Support/LLVM.h b/mlir/include/mlir/Support/LLVM.h
index fec0debc71669..235d84c5beff1 100644
--- a/mlir/include/mlir/Support/LLVM.h
+++ b/mlir/include/mlir/Support/LLVM.h
@@ -122,7 +122,7 @@ template <typename KeyT, typename ValueT,
 using DenseMap = llvm::DenseMap<KeyT, ValueT, KeyInfoT, BucketT>;
 template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
 using DenseSet = llvm::DenseSet<ValueT, ValueInfoT>;
-template <typename T, typename Vector = std::vector<T>,
+template <typename T, typename Vector = llvm::SmallVector<T, 0>,
           typename Set = DenseSet<T>, unsigned N = 0>
 using SetVector = llvm::SetVector<T, Vector, Set, N>;
 template <typename AllocatorTy = llvm::MallocAllocator>
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index 9a5821da6343d..95ec6ab2bd7a4 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -62,7 +62,7 @@ void mlir::getForwardSlice(Operation *op, SetVector<Operation *> *forwardSlice,
   // Reverse to get back the actual topological order.
   // std::reverse does not work out of the box on SetVector and I want an
   // in-place swap based thing (the real std::reverse, not the LLVM adapter).
-  std::vector<Operation *> v(forwardSlice->takeVector());
+  SmallVector<Operation *> v(forwardSlice->takeVector());
   forwardSlice->insert(v.rbegin(), v.rend());
 }
 
@@ -74,7 +74,7 @@ void mlir::getForwardSlice(Value root, SetVector<Operation *> *forwardSlice,
   // Reverse to get back the actual topological order.
   // std::reverse does not work out of the box on SetVector and I want an
   // in-place swap based thing (the real std::reverse, not the LLVM adapter).
-  std::vector<Operation *> v(forwardSlice->takeVector());
+  SmallVector<Operation *> v(forwardSlice->takeVector());
   forwardSlice->insert(v.rbegin(), v.rend());
 }
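
For context, a minimal usage sketch of what the new default means in practice. This is not part of the patch; it assumes the post-patch SetVector.h, and the variable names (SV, StdSV, Taken, TakenStd) are illustrative only. With SmallVector<T, 0> as the default Vector parameter, takeVector() now yields a SmallVector, and callers that still want std::vector storage (as ModuleSummaryAnalysis does above) spell the Vector parameter out explicitly.

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>

using namespace llvm;

int main() {
  // Default template arguments: the backing storage is now SmallVector<int, 0>.
  SetVector<int> SV;
  SV.insert(1);
  SV.insert(2);
  SmallVector<int, 0> Taken = SV.takeVector(); // moves out the SmallVector

  // Naming std::vector explicitly keeps the old takeVector() return type.
  SetVector<int, std::vector<int>> StdSV;
  StdSV.insert(3);
  std::vector<int> TakenStd = StdSV.takeVector();

  return (Taken.size() == 2 && TakenStd.size() == 1) ? 0 : 1;
}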