-
Notifications
You must be signed in to change notification settings - Fork 15
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Scheduler hints and fixes for distributed coordination bugs #169
Changes from 21 commits
9fdfcef
a14a1bc
b6a0c2e
64dbd92
9d4570d
249daf2
6fca18b
ea11f7a
314226f
bb5af80
2cd38e0
db28492
8ce8375
627209b
e2b8f3a
d9cca41
a32b7a3
4b6eb32
983dfaa
ce8a870
41118f9
52b3877
d472329
e46fe94
dd7b6b8
ebc21eb
11473c8
0d2708e
4a76b0b
ef52533
5af97fd
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,10 +1,33 @@ | ||
#pragma once | ||
|
||
#include <faabric/util/logging.h> | ||
|
||
#include <atomic> | ||
#include <condition_variable> | ||
#include <mutex> | ||
#include <shared_mutex> | ||
|
||
#define DEFAULT_FLAG_WAIT_MS 10000 | ||
|
||
namespace faabric::util { | ||
typedef std::unique_lock<std::mutex> UniqueLock; | ||
typedef std::unique_lock<std::shared_mutex> FullLock; | ||
typedef std::shared_lock<std::shared_mutex> SharedLock; | ||
|
||
class FlagWaiter | ||
{ | ||
public: | ||
FlagWaiter(int timeoutMsIn = DEFAULT_FLAG_WAIT_MS); | ||
|
||
void waitOnFlag(); | ||
|
||
void setFlag(bool value); | ||
|
||
private: | ||
int timeoutMs; | ||
|
||
std::mutex flagMx; | ||
std::condition_variable cv; | ||
std::atomic<bool> flag; | ||
}; | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -7,6 +7,7 @@ | |
#include <vector> | ||
|
||
#include <faabric/util/logging.h> | ||
#include <faabric/util/macros.h> | ||
|
||
namespace faabric::util { | ||
|
||
|
@@ -27,14 +28,6 @@ enum SnapshotMergeOperation | |
Min | ||
}; | ||
|
||
struct SnapshotMergeRegion | ||
{ | ||
uint32_t offset = 0; | ||
size_t length = 0; | ||
SnapshotDataType dataType = SnapshotDataType::Raw; | ||
SnapshotMergeOperation operation = SnapshotMergeOperation::Overwrite; | ||
}; | ||
|
||
class SnapshotDiff | ||
{ | ||
public: | ||
|
@@ -44,6 +37,8 @@ class SnapshotDiff | |
size_t size = 0; | ||
const uint8_t* data = nullptr; | ||
|
||
bool noChange = false; | ||
|
||
SnapshotDiff() = default; | ||
|
||
SnapshotDiff(SnapshotDataType dataTypeIn, | ||
|
@@ -58,13 +53,19 @@ class SnapshotDiff | |
data = dataIn; | ||
size = sizeIn; | ||
} | ||
}; | ||
|
||
SnapshotDiff(uint32_t offsetIn, const uint8_t* dataIn, size_t sizeIn) | ||
{ | ||
offset = offsetIn; | ||
data = dataIn; | ||
size = sizeIn; | ||
} | ||
class SnapshotMergeRegion | ||
{ | ||
public: | ||
uint32_t offset = 0; | ||
size_t length = 0; | ||
SnapshotDataType dataType = SnapshotDataType::Raw; | ||
SnapshotMergeOperation operation = SnapshotMergeOperation::Overwrite; | ||
|
||
void addDiffs(std::vector<SnapshotDiff>& diffs, | ||
const uint8_t* original, | ||
const uint8_t* updated); | ||
}; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Moved this class down the file as it now depends on the |
||
|
||
class SnapshotData | ||
|
@@ -84,12 +85,19 @@ class SnapshotData | |
void addMergeRegion(uint32_t offset, | ||
size_t length, | ||
SnapshotDataType dataType, | ||
SnapshotMergeOperation operation); | ||
SnapshotMergeOperation operation, | ||
bool overwrite = false); | ||
|
||
private: | ||
// Note - we care about the order of this map, as we iterate through it in | ||
// order of offsets | ||
// Note - we care about the order of this map, as we iterate through it | ||
// in order of offsets | ||
std::map<uint32_t, SnapshotMergeRegion> mergeRegions; | ||
|
||
std::vector<SnapshotDiff> getCustomDiffs(const uint8_t* updated, | ||
size_t updatedSize); | ||
|
||
std::vector<SnapshotDiff> getStandardDiffs(const uint8_t* updated, | ||
size_t updatedSize); | ||
}; | ||
|
||
std::string snapshotDataTypeStr(SnapshotDataType dt); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,6 +2,7 @@ | |
#include <faabric/scheduler/Scheduler.h> | ||
#include <faabric/snapshot/SnapshotRegistry.h> | ||
#include <faabric/state/State.h> | ||
#include <faabric/transport/PointToPointBroker.h> | ||
#include <faabric/util/clock.h> | ||
#include <faabric/util/config.h> | ||
#include <faabric/util/environment.h> | ||
|
@@ -44,6 +45,11 @@ Executor::Executor(faabric::Message& msg) | |
// Set an ID for this Executor | ||
id = conf.endpointHost + "_" + std::to_string(faabric::util::generateGid()); | ||
SPDLOG_DEBUG("Starting executor {}", id); | ||
|
||
// Mark all thread pool threads as available | ||
for (int i = 0; i < threadPoolSize; i++) { | ||
availablePoolThreads.insert(i); | ||
} | ||
} | ||
|
||
Executor::~Executor() {} | ||
|
@@ -82,8 +88,6 @@ void Executor::finish() | |
// Reset variables | ||
boundMessage.Clear(); | ||
|
||
lastSnapshot = ""; | ||
|
||
claimed = false; | ||
|
||
threadPoolThreads.clear(); | ||
|
@@ -107,30 +111,18 @@ void Executor::executeTasks(std::vector<int> msgIdxs, | |
faabric::util::UniqueLock lock(threadsMutex); | ||
|
||
// Restore if necessary. If we're executing threads on the master host we | ||
// assume we don't need to restore, but for everything else we do. If we've | ||
// already restored from this snapshot, we don't do so again. | ||
// assume we don't need to restore, but for everything else we do. | ||
faabric::Message& firstMsg = req->mutable_messages()->at(0); | ||
std::string snapshotKey = firstMsg.snapshotkey(); | ||
std::string thisHost = faabric::util::getSystemConfig().endpointHost; | ||
|
||
bool isMaster = firstMsg.masterhost() == thisHost; | ||
bool isThreads = req->type() == faabric::BatchExecuteRequest::THREADS; | ||
bool isSnapshot = !snapshotKey.empty(); | ||
bool alreadyRestored = snapshotKey == lastSnapshot; | ||
|
||
if (isSnapshot && !alreadyRestored) { | ||
if ((!isMaster && isThreads) || !isThreads) { | ||
SPDLOG_DEBUG("Restoring {} from snapshot {}", funcStr, snapshotKey); | ||
lastSnapshot = snapshotKey; | ||
restore(firstMsg); | ||
} else { | ||
SPDLOG_DEBUG("Skipping snapshot restore on master {} [{}]", | ||
funcStr, | ||
snapshotKey); | ||
} | ||
} else if (isSnapshot) { | ||
SPDLOG_DEBUG( | ||
"Skipping already restored snapshot {} [{}]", funcStr, snapshotKey); | ||
if (isSnapshot && !isMaster) { | ||
SPDLOG_DEBUG("Restoring {} from snapshot {}", funcStr, snapshotKey); | ||
restore(firstMsg); | ||
} | ||
|
||
// Reset dirty page tracking if we're executing threads. | ||
|
@@ -150,23 +142,42 @@ void Executor::executeTasks(std::vector<int> msgIdxs, | |
// original function call will cause a reset | ||
bool skipReset = isMaster && isThreads; | ||
|
||
// Iterate through and invoke tasks | ||
// Iterate through and invoke tasks. By default, we allocate tasks | ||
// one-to-one with thread pool threads. Only once the pool is exhausted do | ||
// we start overallocating. | ||
for (int msgIdx : msgIdxs) { | ||
const faabric::Message& msg = req->messages().at(msgIdx); | ||
|
||
// If executing threads, we must always keep thread pool index zero | ||
// free, as this may be executing the function that spawned them | ||
int threadPoolIdx; | ||
if (isThreads) { | ||
assert(threadPoolSize > 1); | ||
threadPoolIdx = (msg.appidx() % (threadPoolSize - 1)) + 1; | ||
int threadPoolIdx = -1; | ||
if (availablePoolThreads.empty()) { | ||
// Here all threads are still executing, so we have to overload. | ||
// If any tasks are blocking we risk a deadlock, and can no longer | ||
// guarantee the application will finish. | ||
// In general if we're on the master host and this is a thread, we | ||
// should avoid the zeroth and first pool threads as they are likely | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I have read about this in the PR description, and this seems to me a risky game to be playing. How likely, i.e. when, would such an overload happen? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Overloads like this would only happen when there aren't enough resources available, so they're unlikely. When the system is overloaded it can either keep accepting functions and do its best to execute them, or start rejecting requests with an error. Faabric does the former and starts queueing, as this would then trigger the underlying cluster to scale out in a "real" deployment. This specific bit of code is only related to threading, i.e. when an application spawns more threads than there are cores in the system. Well written multi-threaded applications ought to request the level of parallelism available in the environment, at which point the system can specify an appropriate limit that will avoid this scenario (as is the case with OpenMP). This behaviour is covered in a few tests so although the code shouldn't be triggered in a real deployment, it is still tested. |
||
// to be the main thread and the zeroth in the communication group, | ||
// so will be blocking. | ||
if (isThreads && isMaster) { | ||
assert(threadPoolSize > 2); | ||
threadPoolIdx = (msg.appidx() % (threadPoolSize - 2)) + 2; | ||
} else { | ||
threadPoolIdx = msg.appidx() % threadPoolSize; | ||
} | ||
|
||
SPDLOG_WARN("Overloaded app index {} to thread {}", | ||
msg.appidx(), | ||
threadPoolIdx); | ||
} else { | ||
threadPoolIdx = msg.appidx() % threadPoolSize; | ||
// Take next from those that are available | ||
threadPoolIdx = *availablePoolThreads.begin(); | ||
availablePoolThreads.erase(threadPoolIdx); | ||
|
||
SPDLOG_TRACE("Assigned app index {} to thread {}", | ||
msg.appidx(), | ||
threadPoolIdx); | ||
} | ||
|
||
// Enqueue the task | ||
SPDLOG_TRACE( | ||
"Assigning app index {} to thread {}", msg.appidx(), threadPoolIdx); | ||
threadTaskQueues[threadPoolIdx].enqueue(ExecutorTask( | ||
msgIdx, req, batchCounter, needsSnapshotPush, skipReset)); | ||
|
||
|
@@ -183,6 +194,8 @@ void Executor::threadPoolThread(int threadPoolIdx) | |
SPDLOG_DEBUG("Thread pool thread {}:{} starting up", id, threadPoolIdx); | ||
|
||
auto& sch = faabric::scheduler::getScheduler(); | ||
faabric::transport::PointToPointBroker& broker = | ||
faabric::transport::getPointToPointBroker(); | ||
const auto& conf = faabric::util::getSystemConfig(); | ||
|
||
bool selfShutdown = false; | ||
|
@@ -286,6 +299,12 @@ void Executor::threadPoolThread(int threadPoolIdx) | |
releaseClaim(); | ||
} | ||
|
||
// Return this thread index to the pool available for scheduling | ||
{ | ||
faabric::util::UniqueLock lock(threadsMutex); | ||
availablePoolThreads.insert(threadPoolIdx); | ||
} | ||
|
||
// Vacate the slot occupied by this task. This must be done after | ||
// releasing the claim on this executor, otherwise the scheduler may try | ||
// to schedule another function and be unable to reuse this executor. | ||
|
@@ -333,8 +352,9 @@ void Executor::threadPoolThread(int threadPoolIdx) | |
} | ||
|
||
// We have to clean up TLS here as this should be the last use of the | ||
// scheduler from this thread | ||
// scheduler and point-to-point broker from this thread | ||
sch.resetThreadLocalCache(); | ||
broker.resetThreadLocalCache(); | ||
} | ||
|
||
bool Executor::tryClaim() | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
This used to be called a "registry", hence the `reg` name, but it was a bit confusing.