Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix CoreJob race condition #1027

Merged
merged 3 commits on Oct 28, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
27 changes: 14 additions & 13 deletions src/autowiring/CoreJob.cpp
Expand Up @@ -52,8 +52,10 @@ void CoreJob::OnPended(std::unique_lock<std::mutex>&& lk){
// Need to ask the thread pool to handle our events again:
m_curEventInTeardown = false;

if (m_curEvent)
delete static_cast<std::future<void>*>(m_curEvent);
std::future<void>* future = static_cast<std::future<void>*>(std::atomic_exchange<void*>(&m_curEvent, nullptr));
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fancy

if (future) {
delete future;
}

m_curEvent = new std::future<void>(
std::async(
Expand Down Expand Up @@ -97,7 +99,7 @@ bool CoreJob::OnStart(void) {

m_running = true;

std::unique_lock<std::mutex> lk;
std::unique_lock<std::mutex> lk(m_dispatchLock);
if(m_pHead)
// Simulate a pending event, because we need to set up our async:
OnPended(std::move(lk));
Expand All @@ -122,21 +124,20 @@ void CoreJob::OnStop(bool graceful) {
}

void CoreJob::DoAdditionalWait(void) {
if (m_curEvent) {
std::future<void>* ptr = static_cast<std::future<void>*>(m_curEvent);
ptr->wait();
delete ptr;
m_curEvent = nullptr;
std::future<void>* future = static_cast<std::future<void>*>(std::atomic_exchange<void*>(&m_curEvent, nullptr));

if (future) {
future->wait();
delete future;
}
}

bool CoreJob::DoAdditionalWait(std::chrono::nanoseconds timeout) {
if (!m_curEvent)
std::future<void>* future = static_cast<std::future<void>*>(std::atomic_exchange<void*>(&m_curEvent, nullptr));
if (!future)
return true;

std::future<void>* ptr = static_cast<std::future<void>*>(m_curEvent);
auto status = ptr->wait_for(NanosecondsForFutureWait(timeout));
delete ptr;
m_curEvent = nullptr;
const auto status = future->wait_for(NanosecondsForFutureWait(timeout));
delete future;
return status == std::future_status::ready;
}
2 changes: 1 addition & 1 deletion src/autowiring/CoreJob.h
Expand Up @@ -18,7 +18,7 @@ class CoreJob:
bool m_running = false;

// The current outstanding async in the thread pool, if one exists:
void* m_curEvent = nullptr;
std::atomic<void*> m_curEvent{ nullptr };

// Flag, indicating whether curEvent is in a teardown pathway. This
// flag is highly stateful.
Expand Down
14 changes: 7 additions & 7 deletions src/autowiring/DispatchQueue.h
Expand Up @@ -406,9 +406,10 @@ class DispatchQueue {
// Create the thunk first to reduce the amount of time we spend in lock:
auto thunk = new autowiring::DispatchThunk<_Fx>(std::forward<_Fx>(fx));

m_dispatchLock.lock();
std::unique_lock<std::mutex> lk(m_dispatchLock);

if (m_count >= m_dispatchCap) {
m_dispatchLock.unlock();
lk.unlock();
delete thunk;
return false;
}
Expand All @@ -420,16 +421,15 @@ class DispatchQueue {
if (m_pHead) {
m_pTail->m_pFlink = thunk;
m_pTail = thunk;
m_dispatchLock.unlock();
// Notification as needed:
OnPended(std::move(lk));
}
else {
m_pHead = m_pTail = thunk;
m_dispatchLock.unlock();
// Notification as needed:
OnPended(std::move(lk));
m_queueUpdated.notify_all();
}

// Notification as needed:
OnPended(std::unique_lock<std::mutex>{});
return true;
}
};
26 changes: 26 additions & 0 deletions src/autowiring/test/CoreJobTest.cpp
Expand Up @@ -2,6 +2,7 @@
#include "stdafx.h"
#include <autowiring/CoreJob.h>
#include THREAD_HEADER
#include ARRAY_HEADER

class CoreJobTest:
public testing::Test
Expand Down Expand Up @@ -179,3 +180,28 @@ TEST_F(CoreJobTest, RecursiveDeadlock) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
};
}

// Regression test for the CoreJob pend race: several threads pend work
// simultaneously, and every pended lambda must run exactly once, serialized
// on the CoreJob's own dispatch thread.
TEST_F(CoreJobTest, PendFromMultipleThreads) {
  AutoCurrentContext ctxt;
  AutoRequired<CoreJob> cj;
  std::array<std::thread, 4> threads;
  constexpr size_t times = 256;  // pends issued per thread
  int counter = 0;

  for (size_t i = 0; i < threads.size(); i++) {
    threads[i] = std::thread([&] {
      // Block until the context is initiated so all threads pend concurrently:
      ctxt->DelayUntilInitiated();
      for (size_t j = 0; j < times; j++) {
        *cj += [&counter] {
          counter++; // Should be updated exclusively in the CoreJob's thread
        };
      }
    });
  }
  ctxt->Initiate();
  for (auto& thread : threads)
    thread.join();

  // Graceful shutdown: drain the dispatch queue before inspecting the counter.
  ctxt->SignalShutdown(true);
  ASSERT_EQ(times * threads.size(), static_cast<size_t>(counter))
    << "Not every pended lambda was executed exactly once";
}