shared_mutex.cpp

#include <userver/engine/shared_mutex.hpp>

#include <userver/engine/task/cancel.hpp>
#include <userver/utils/scope_guard.hpp>

USERVER_NAMESPACE_BEGIN

namespace engine {

namespace {
// A reader takes one unit of the semaphore; a writer takes all of them at
// once, so holding kWriterLock units is exclusive by construction.
constexpr auto kWriterLock = std::numeric_limits<Semaphore::Counter>::max();
}  // namespace

SharedMutex::SharedMutex() : semaphore_(kWriterLock), waiting_writers_count_(0) {}
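
// How the single semaphore encodes both lock modes: a hedged sketch of the
// idea, not code from this translation unit (the interleaving is assumed):
//
//   Semaphore sem(kWriterLock);  // capacity == max Counter value
//   sem.lock_shared();           // reader A takes 1 unit
//   sem.lock_shared();           // reader B takes 1 more unit
//   // A writer must take all kWriterLock units at once, so it blocks until
//   // both readers call unlock_shared(); while it holds them, no reader can
//   // take even one unit, which is exactly writer exclusivity.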

void SharedMutex::lock() {
    // A default-constructed Deadline is unreachable, so this cannot time out.
    const auto ok = try_lock_until(Deadline{});
    UASSERT(ok);
}

void SharedMutex::unlock() {
    // The writer counted itself in try_lock_until() and kept the count while
    // holding the lock; drop it after releasing the semaphore so readers are
    // only woken once the lock is actually free.
    utils::ScopeGuard stop_wait([this] { DecWaitingWriters(); });
    semaphore_.unlock_shared_count(kWriterLock);
}

void SharedMutex::DecWaitingWriters() {
    /*
     * If we're the last writer, notify readers.
     * If we're not the last one, do nothing: readers are still blocked and
     * the next writer will eventually lock the semaphore.
     */
    const auto writers_left = waiting_writers_count_.fetch_sub(1, std::memory_order_relaxed);
    UASSERT_MSG(writers_left > 0, "unlock without lock");
    if (writers_left == 1) {
        // Take the mutex so the notification cannot slip in between a
        // reader's counter check and the start of its wait.
        const engine::TaskCancellationBlocker blocker;
        std::lock_guard<Mutex> lock(waiting_writers_count_mutex_);
        waiting_writers_count_cv_.NotifyAll();
    }
}

bool SharedMutex::try_lock_until(Deadline deadline) {
    waiting_writers_count_.fetch_add(1, std::memory_order_relaxed);
    utils::ScopeGuard stop_wait([this] { DecWaitingWriters(); });

    if (semaphore_.try_lock_shared_until_count(deadline, kWriterLock)) {
        // Keep the increment while the write lock is held; the matching
        // DecWaitingWriters() happens in unlock().
        stop_wait.Release();
        return true;
    }
    return false;
}

bool SharedMutex::try_lock() { return try_lock_until(Deadline::Passed()); }
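
// The resulting writer protocol, as a hedged timeline (illustrative only):
//
//   try_lock_until():  waiting_writers_count_: 0 -> 1  (new readers back off)
//                      semaphore_: take all kWriterLock units => exclusive
//                      stop_wait.Release()  (count kept while lock is held)
//   unlock():          semaphore_: release all kWriterLock units
//                      DecWaitingWriters(): 1 -> 0, NotifyAll() wakes readers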

void SharedMutex::lock_shared() {
    WaitForNoWaitingWriters(Deadline{});

    /*
     * There is a deliberate TOCTOU race between "wait for no writers" and
     * "OK, now let's lock": it's just a cheap way to avoid writer starvation.
     * If one or two readers sneak in just before a writer, out of turn,
     * we simply don't care.
     */
    semaphore_.lock_shared();
}
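
// The benign interleaving tolerated above, as a hedged sketch:
//
//   reader: WaitForNoWaitingWriters() sees zero waiting writers and returns
//   writer: waiting_writers_count_: 0 -> 1, starts taking all semaphore units
//   reader: semaphore_.lock_shared() wins the race anyway and takes 1 unit
//
// The writer just waits for that one reader to finish, while any later
// readers see the non-zero counter and queue up behind the writer.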

void SharedMutex::unlock_shared() { semaphore_.unlock_shared(); }

bool SharedMutex::try_lock_shared() {
    if (HasWaitingWriter()) return false;
    return semaphore_.try_lock_shared();
}

bool SharedMutex::try_lock_shared_until(Deadline deadline) {
    if (!WaitForNoWaitingWriters(deadline)) return false;

    /* Same deliberate race, see the comment in lock_shared(). */
    return semaphore_.try_lock_shared_until(deadline);
}

bool SharedMutex::HasWaitingWriter() const noexcept { return waiting_writers_count_.load() > 0; }

bool SharedMutex::WaitForNoWaitingWriters(Deadline deadline) {
    /* Fast path. */
    if (waiting_writers_count_ == 0) return true;

    std::unique_lock<Mutex> lock(waiting_writers_count_mutex_);
    return waiting_writers_count_cv_.WaitUntil(lock, deadline, [this] { return waiting_writers_count_ == 0; });
}

}  // namespace engine

USERVER_NAMESPACE_END
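
// A minimal usage sketch: assumed caller code, not part of this file.
// SharedMutex provides lock()/unlock() and lock_shared()/unlock_shared(), so
// it composes with the standard lock adapters:
//
//   #include <mutex>         // std::unique_lock
//   #include <shared_mutex>  // std::shared_lock
//
//   engine::SharedMutex mutex;
//   int shared_state = 0;
//
//   // Writer side: exclusive access.
//   {
//       std::unique_lock lock(mutex);  // mutex.lock() / mutex.unlock()
//       ++shared_state;
//   }
//
//   // Reader side: any number of concurrent readers.
//   {
//       std::shared_lock lock(mutex);  // lock_shared() / unlock_shared()
//       [[maybe_unused]] const auto snapshot = shared_state;
//   }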