Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Scheduler hints and fixes for distributed coordination bugs #169

Merged
merged 31 commits into from
Nov 10, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
9fdfcef
Started fixes for locks
Shillaker Nov 1, 2021
a14a1bc
Clearing up groups
Shillaker Nov 1, 2021
b6a0c2e
Experiments with waiters/ maps
Shillaker Nov 1, 2021
64dbd92
Add test for flag waiter
Shillaker Nov 2, 2021
9d4570d
Avoid scheduling multiple threads on same thread as 0th group
Shillaker Nov 2, 2021
249daf2
Overloading thread pool
Shillaker Nov 2, 2021
6fca18b
Moved unrelated changes to separate PR
Shillaker Nov 2, 2021
ea11f7a
Add locks.cpp
Shillaker Nov 2, 2021
314226f
Only lock on group when it exists
Shillaker Nov 2, 2021
bb5af80
Add custom merges
Shillaker Nov 4, 2021
2cd38e0
Overhaul of snapshot diffing
Shillaker Nov 4, 2021
db28492
Move spurious logging statement
Shillaker Nov 4, 2021
8ce8375
More snapshotting fixes
Shillaker Nov 5, 2021
627209b
Rearrange scheduler logic
Shillaker Nov 5, 2021
e2b8f3a
Fix indexing in scheduler loop
Shillaker Nov 5, 2021
d9cca41
Avoid pushing snapshots to master host
Shillaker Nov 5, 2021
a32b7a3
Remove last snapshot stuff
Shillaker Nov 5, 2021
4b6eb32
Add scheduler hints
Shillaker Nov 8, 2021
983dfaa
Fix bug in host ordering
Shillaker Nov 8, 2021
ce8a870
Sort out reusing ptp messages
Shillaker Nov 8, 2021
41118f9
Naming
Shillaker Nov 8, 2021
52b3877
Fixing up tests
Shillaker Nov 9, 2021
d472329
Continuing test fixes
Shillaker Nov 9, 2021
e46fe94
Remove ignore regions
Shillaker Nov 9, 2021
dd7b6b8
Add distributed locking test
Shillaker Nov 9, 2021
ebc21eb
Formatting
Shillaker Nov 9, 2021
11473c8
Fix dist tests and remove unnecessary unit test
Shillaker Nov 10, 2021
0d2708e
Clearer error message when insufficient pool threads
Shillaker Nov 10, 2021
4a76b0b
Bump cores in failing tests
Shillaker Nov 10, 2021
ef52533
Factor out scheduler decision making
Shillaker Nov 10, 2021
5af97fd
Fix locking bug
Shillaker Nov 10, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 16 additions & 9 deletions include/faabric/scheduler/Scheduler.h
Original file line number Diff line number Diff line change
Expand Up @@ -81,13 +81,12 @@ class Executor
uint32_t threadPoolSize = 0;

private:
std::string lastSnapshot;

std::atomic<bool> claimed = false;

std::mutex threadsMutex;
std::vector<std::shared_ptr<std::thread>> threadPoolThreads;
std::vector<std::shared_ptr<std::thread>> deadThreads;
std::set<int> availablePoolThreads;

std::vector<faabric::util::Queue<ExecutorTask>> threadTaskQueues;

Expand All @@ -105,6 +104,10 @@ class Scheduler
std::shared_ptr<faabric::BatchExecuteRequest> req,
bool forceLocal = false);

faabric::util::SchedulingDecision callFunctions(
std::shared_ptr<faabric::BatchExecuteRequest> req,
faabric::util::SchedulingDecision& hint);

void reset();

void resetThreadLocalCache();
Expand Down Expand Up @@ -204,6 +207,8 @@ class Scheduler
std::promise<std::unique_ptr<faabric::Message>>>
localResults;

std::unordered_map<std::string, std::set<std::string>> pushedSnapshotsMap;

std::mutex localResultsMutex;

// ---- Clients ----
Expand All @@ -226,20 +231,22 @@ class Scheduler

std::unordered_map<std::string, std::set<std::string>> registeredHosts;

faabric::util::SchedulingDecision makeSchedulingDecision(
std::shared_ptr<faabric::BatchExecuteRequest> req,
bool forceLocal);

faabric::util::SchedulingDecision doCallFunctions(
std::shared_ptr<faabric::BatchExecuteRequest> req,
faabric::util::SchedulingDecision& decision,
faabric::util::FullLock& lock);

std::shared_ptr<Executor> claimExecutor(
faabric::Message& msg,
faabric::util::FullLock& schedulerLock);

std::vector<std::string> getUnregisteredHosts(const std::string& funcStr,
bool noCache = false);

int scheduleFunctionsOnHost(
const std::string& host,
std::shared_ptr<faabric::BatchExecuteRequest> req,
faabric::util::SchedulingDecision& decision,
int offset,
faabric::util::SnapshotData* snapshot);

// ---- Accounting and debugging ----
std::vector<faabric::Message> recordedMessagesAll;
std::vector<faabric::Message> recordedMessagesLocal;
Expand Down
21 changes: 13 additions & 8 deletions include/faabric/transport/PointToPointBroker.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

#include <faabric/transport/PointToPointClient.h>
#include <faabric/util/config.h>
#include <faabric/util/locks.h>
#include <faabric/util/scheduling.h>

#include <condition_variable>
Expand All @@ -26,10 +27,16 @@ class PointToPointGroup
public:
static std::shared_ptr<PointToPointGroup> getGroup(int groupId);

static std::shared_ptr<PointToPointGroup> getOrAwaitGroup(int groupId);

static bool groupExists(int groupId);

static void addGroup(int appId, int groupId, int groupSize);

static void addGroupIfNotExists(int appId, int groupId, int groupSize);

static void clearGroup(int groupId);

static void clear();

PointToPointGroup(int appId, int groupIdIn, int groupSizeIn);
Expand Down Expand Up @@ -77,10 +84,6 @@ class PointToPointGroup
std::queue<int> lockWaiters;

void notifyLocked(int groupIdx);

void masterLock(int groupIdx, bool recursive);

void masterUnlock(int groupIdx, bool recursive);
};

class PointToPointBroker
Expand Down Expand Up @@ -108,21 +111,23 @@ class PointToPointBroker

std::vector<uint8_t> recvMessage(int groupId, int sendIdx, int recvIdx);

void clearGroup(int groupId);

void clear();

void resetThreadLocalCache();

private:
faabric::util::SystemConfig& conf;

std::shared_mutex brokerMutex;

std::unordered_map<int, std::set<int>> groupIdIdxsMap;
std::unordered_map<std::string, std::string> mappings;

std::unordered_map<int, bool> groupMappingsFlags;
std::unordered_map<int, std::mutex> groupMappingMutexes;
std::unordered_map<int, std::condition_variable> groupMappingCvs;
std::unordered_map<int, faabric::util::FlagWaiter> groupFlags;

faabric::util::SystemConfig& conf;
faabric::util::FlagWaiter& getGroupFlag(int groupId);
};

PointToPointBroker& getPointToPointBroker();
Expand Down
2 changes: 1 addition & 1 deletion include/faabric/transport/PointToPointServer.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ class PointToPointServer final : public MessageEndpointServer
PointToPointServer();

private:
PointToPointBroker& reg;
PointToPointBroker& broker;
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This used to be called a "registry" hence the reg name, but it was a bit confusing.


void doAsyncRecv(int header,
const uint8_t* buffer,
Expand Down
23 changes: 23 additions & 0 deletions include/faabric/util/locks.h
Original file line number Diff line number Diff line change
@@ -1,10 +1,33 @@
#pragma once

#include <faabric/util/logging.h>

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <shared_mutex>

#define DEFAULT_FLAG_WAIT_MS 10000

namespace faabric::util {
typedef std::unique_lock<std::mutex> UniqueLock;
typedef std::unique_lock<std::shared_mutex> FullLock;
typedef std::shared_lock<std::shared_mutex> SharedLock;

// Synchronisation helper: lets threads block until another thread raises a
// flag. Declaration only - bodies live elsewhere (locks.cpp per this PR).
class FlagWaiter
{
public:
// timeoutMsIn: how long waiters should wait for the flag, in milliseconds
// (defaults to DEFAULT_FLAG_WAIT_MS).
FlagWaiter(int timeoutMsIn = DEFAULT_FLAG_WAIT_MS);

// Blocks the caller until the flag is set. Presumably waits on cv for up
// to timeoutMs - TODO confirm timeout/failure behaviour in the
// implementation.
void waitOnFlag();

// Sets the flag to the given value; presumably notifies threads blocked
// in waitOnFlag() via cv - confirm against the implementation.
void setFlag(bool value);

private:
int timeoutMs;

std::mutex flagMx;
std::condition_variable cv;
std::atomic<bool> flag;
};
}
37 changes: 19 additions & 18 deletions include/faabric/util/snapshot.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
#include <vector>

#include <faabric/util/logging.h>
#include <faabric/util/macros.h>

namespace faabric::util {

Expand All @@ -19,22 +20,13 @@ enum SnapshotDataType
enum SnapshotMergeOperation
{
Overwrite,
Ignore,
Sum,
Product,
Subtract,
Max,
Min
};

struct SnapshotMergeRegion
{
uint32_t offset = 0;
size_t length = 0;
SnapshotDataType dataType = SnapshotDataType::Raw;
SnapshotMergeOperation operation = SnapshotMergeOperation::Overwrite;
};

class SnapshotDiff
{
public:
Expand All @@ -44,6 +36,8 @@ class SnapshotDiff
size_t size = 0;
const uint8_t* data = nullptr;

bool noChange = false;

SnapshotDiff() = default;

SnapshotDiff(SnapshotDataType dataTypeIn,
Expand All @@ -58,13 +52,19 @@ class SnapshotDiff
data = dataIn;
size = sizeIn;
}
};

SnapshotDiff(uint32_t offsetIn, const uint8_t* dataIn, size_t sizeIn)
{
offset = offsetIn;
data = dataIn;
size = sizeIn;
}
// Describes a region of snapshot memory with an associated data type and
// merge operation, used when diffing an updated snapshot against the
// original. Declared after SnapshotDiff as addDiffs() depends on it (per
// the PR author's comment on this diff).
class SnapshotMergeRegion
{
public:
// Byte offset of the region within the snapshot.
uint32_t offset = 0;
// Length of the region in bytes.
size_t length = 0;
SnapshotDataType dataType = SnapshotDataType::Raw;
SnapshotMergeOperation operation = SnapshotMergeOperation::Overwrite;

// Appends to diffs the changes between original and updated data falling
// within this region; presumably applies this region's merge operation
// when computing each diff - implementation not visible here.
void addDiffs(std::vector<SnapshotDiff>& diffs,
const uint8_t* original,
const uint8_t* updated);
};
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Moved this class down the file as it now depends on the SnapshotDiff class which is declared above.


class SnapshotData
Expand All @@ -84,11 +84,12 @@ class SnapshotData
void addMergeRegion(uint32_t offset,
size_t length,
SnapshotDataType dataType,
SnapshotMergeOperation operation);
SnapshotMergeOperation operation,
bool overwrite = false);

private:
// Note - we care about the order of this map, as we iterate through it in
// order of offsets
// Note - we care about the order of this map, as we iterate through it
// in order of offsets
std::map<uint32_t, SnapshotMergeRegion> mergeRegions;
};

Expand Down
Loading