Skip to content

Commit

Permalink
Enable ThreadPool to support tasks that return values.
Browse files Browse the repository at this point in the history
Previously ThreadPool could only queue async "jobs", i.e. work
that was done for its side effects and not for its result.  It's
useful occasionally to queue async work that returns a value.
From an API perspective, this is very intuitive.  The previous
API just returned a shared_future<void>, so all we need to do is
make it return a shared_future<T>, where T is the type of value
that the operation returns.

Making this work required a little magic, but ultimately it's not
too bad.  Instead of keeping a shared queue<packaged_task<void()>>
we just keep a shared queue<unique_ptr<TaskBase>>, where TaskBase
is a class with a pure virtual execute() method, then have a
templated derived class that stores a packaged_task<T()>.  Everything
else works out pretty cleanly.

Differential Revision: https://reviews.llvm.org/D48115

llvm-svn: 334643
  • Loading branch information
Zachary Turner committed Jun 13, 2018
1 parent 9d6fabf commit 1b76a12
Show file tree
Hide file tree
Showing 3 changed files with 64 additions and 26 deletions.
50 changes: 43 additions & 7 deletions llvm/include/llvm/Support/ThreadPool.h
Expand Up @@ -20,6 +20,7 @@
#include <future>

#include <atomic>
#include <cassert>
#include <condition_variable>
#include <functional>
#include <memory>
Expand All @@ -35,10 +36,21 @@ namespace llvm {
/// The pool keeps a vector of threads alive, waiting on a condition variable
/// for some work to become available.
class ThreadPool {
public:
using TaskTy = std::function<void()>;
using PackagedTaskTy = std::packaged_task<void()>;
/// Type-erased interface for one queued unit of work.  Storing tasks through
/// this base lets a single queue hold packaged_tasks with arbitrary result
/// types.
struct TaskBase {
  // = default is preferred over an empty body for a virtual destructor;
  // it keeps the type trivially documented as "polymorphic base, no cleanup".
  virtual ~TaskBase() = default;
  /// Run the stored callable.  Called exactly once, on a worker thread.
  virtual void execute() = 0;
};

/// Concrete task wrapping a std::packaged_task that returns \p ReturnType.
/// The future is obtained from the packaged_task *before* type erasure, so
/// callers of async() can wait on the typed result.
template <typename ReturnType> struct TypedTask : public TaskBase {
  explicit TypedTask(std::packaged_task<ReturnType()> Task)
      : Task(std::move(Task)) {}

  // Invoking the packaged_task stores the result (or exception) into the
  // shared state observed by the future returned from async().
  void execute() override { Task(); }

  std::packaged_task<ReturnType()> Task;
};

public:
/// Construct a pool with the number of threads found by
/// hardware_concurrency().
ThreadPool();
Expand All @@ -52,7 +64,8 @@ class ThreadPool {
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename Function, typename... Args>
inline std::shared_future<void> async(Function &&F, Args &&... ArgList) {
inline std::shared_future<typename std::result_of<Function(Args...)>::type>
async(Function &&F, Args &&... ArgList) {
auto Task =
std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
return asyncImpl(std::move(Task));
Expand All @@ -61,7 +74,8 @@ class ThreadPool {
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename Function>
// Deduce the callable's result type so the caller gets a typed future
// (shared_future<void> previously; now shared_future<T> for any T).
inline std::shared_future<typename std::result_of<Function()>::type>
async(Function &&F) {
  // Perfect-forward the callable straight to the queueing implementation;
  // no std::bind is needed for the zero-argument overload.
  return asyncImpl(std::forward<Function>(F));
}

Expand All @@ -72,13 +86,35 @@ class ThreadPool {
private:
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
std::shared_future<void> asyncImpl(TaskTy F);
// Queue \p Task for execution by a worker thread and return a shared_future
// that resolves to the task's result.  The future is *non-blocking* on
// destruction (unlike std::async futures).
template <typename TaskTy>
std::shared_future<typename std::result_of<TaskTy()>::type>
asyncImpl(TaskTy &&Task) {
typedef decltype(Task()) ResultTy;

/// Wrap the Task in a packaged_task to return a future object.
// NOTE(review): Task is a forwarding reference but is consumed with
// std::move rather than std::forward<TaskTy>; fine for the rvalue callers
// above, but would steal from an lvalue argument — confirm intended.
std::packaged_task<ResultTy()> PackagedTask(std::move(Task));
// The typed future must be extracted before the task is type-erased below.
auto Future = PackagedTask.get_future();
std::unique_ptr<TaskBase> TB =
llvm::make_unique<TypedTask<ResultTy>>(std::move(PackagedTask));

{
// Lock the queue and push the new task
std::unique_lock<std::mutex> LockGuard(QueueLock);

// Don't allow enqueueing after disabling the pool
assert(EnableFlag && "Queuing a thread during ThreadPool destruction");

Tasks.push(std::move(TB));
}
// Wake one worker; done outside the lock so the woken thread can acquire
// QueueLock immediately.
QueueCondition.notify_one();
return Future.share();
}

/// Threads in flight
std::vector<llvm::thread> Threads;

/// Tasks waiting for execution in the pool.
std::queue<PackagedTaskTy> Tasks;
std::queue<std::unique_ptr<TaskBase>> Tasks;

/// Locking and signaling for accessing the Tasks queue.
std::mutex QueueLock;
Expand Down
21 changes: 2 additions & 19 deletions llvm/lib/Support/ThreadPool.cpp
Expand Up @@ -32,7 +32,7 @@ ThreadPool::ThreadPool(unsigned ThreadCount)
for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
Threads.emplace_back([&] {
while (true) {
PackagedTaskTy Task;
std::unique_ptr<TaskBase> Task;
{
std::unique_lock<std::mutex> LockGuard(QueueLock);
// Wait for tasks to be pushed in the queue
Expand All @@ -54,7 +54,7 @@ ThreadPool::ThreadPool(unsigned ThreadCount)
Tasks.pop();
}
// Run the task we just grabbed
Task();
Task->execute();

{
// Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
Expand All @@ -79,23 +79,6 @@ void ThreadPool::wait() {
[&] { return !ActiveThreads && Tasks.empty(); });
}

// (Pre-change implementation, removed by this commit.)  Queue a void task
// and return a shared_future<void> the caller may wait on.
std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
/// Wrap the Task in a packaged_task to return a future object.
PackagedTaskTy PackagedTask(std::move(Task));
// Extract the future before the packaged_task is moved into the queue.
auto Future = PackagedTask.get_future();
{
// Lock the queue and push the new task
std::unique_lock<std::mutex> LockGuard(QueueLock);

// Don't allow enqueueing after disabling the pool
assert(EnableFlag && "Queuing a thread during ThreadPool destruction");

Tasks.push(std::move(PackagedTask));
}
// Notify outside the critical section so the woken worker is not blocked
// re-acquiring QueueLock.
QueueCondition.notify_one();
return Future.share();
}

// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
{
Expand Down
19 changes: 19 additions & 0 deletions llvm/unittests/Support/ThreadPool.cpp
Expand Up @@ -147,6 +147,25 @@ TEST_F(ThreadPoolTest, GetFuture) {
ASSERT_EQ(2, i.load());
}

TEST_F(ThreadPoolTest, TaskWithResult) {
  CHECK_UNSUPPORTED();
  // A pool with exactly one worker serializes the two tasks with respect to
  // each other, so the second task must observe the first task's increment
  // and therefore return 2.
  ThreadPool Pool{1};
  std::atomic_int Counter{0};
  Pool.async([this, &Counter] {
    waitForMainThread();
    ++Counter;
  });
  std::shared_future<int> Future = Pool.async([&Counter] { return ++Counter; });
  // Neither task may have run yet: the first is gated on the main thread.
  ASSERT_EQ(0, Counter.load());
  setMainThreadReady();
  // get() blocks until the second task completes, forcing both tasks to run.
  int Result = Future.get();
  ASSERT_EQ(2, Counter.load());
  ASSERT_EQ(2, Result);
}

TEST_F(ThreadPoolTest, PoolDestruction) {
CHECK_UNSUPPORTED();
// Test that we are waiting on destruction
Expand Down

0 comments on commit 1b76a12

Please sign in to comment.