forked from squid-cache/squid
/
ReadWriteLock.cc
157 lines (138 loc) · 3.82 KB
/
ReadWriteLock.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
/*
* Copyright (C) 1996-2018 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
* Please see the COPYING and CONTRIBUTORS files for details.
*/
/* DEBUG: section 54 Interprocess Communication */
#include "squid.h"
#include "ipc/ReadWriteLock.h"
#include "Store.h"
/// Ensures the given flag is set, and asserts that it already was.
/// Used to detect (and loudly report) broken lock state while leaving the
/// flag set so that other processes keep away from the broken entry.
void Ipc::AssertFlagIsSet(std::atomic_flag &flag)
{
    // If the flag was false, then we set it to true and assert. A true flag
    // may help keep other processes away from this broken entry.
    // Otherwise, we just set an already set flag, which is probably a no-op.
    // Keep the test_and_set() call outside assert() so the flag gets set even
    // in builds where assert() expands to nothing (e.g., standard NDEBUG).
    const bool wasSet = flag.test_and_set(std::memory_order_relaxed);
    assert(wasSet);
}
/// Grab a shared (read) lock unless an exclusive non-appending writer is
/// active. Never blocks.
/// \returns whether the shared lock was acquired
bool
Ipc::ReadWriteLock::lockShared()
{
    ++readLevel; // announce ourselves; this locks "new" writers out
    if (writeLevel && !appending) { // an exclusive, non-sharing writer is active
        --readLevel; // retreat; we never became a reader
        return false;
    }
    ++readers;
    return true;
}
/// Grab the exclusive (write) lock if nobody holds or is acquiring any lock.
/// Never blocks.
/// \returns whether the exclusive lock was acquired
bool
Ipc::ReadWriteLock::lockExclusive()
{
    const bool firstWriter = !writeLevel++; // the increment locks "new" readers out
    if (firstWriter && !readLevel) { // no old readers and nobody is becoming one
        writing = true;
        return true;
    }
    --writeLevel; // not first, or readers are present; back off
    return false;
}
/// Grab both a shared lock and the header-updating flag, or neither.
/// Never blocks.
/// \returns whether both were acquired
bool
Ipc::ReadWriteLock::lockHeaders()
{
    if (!lockShared())
        return false;
    if (!updating.test_and_set(std::memory_order_acquire))
        return true; // we got here first
    // the updating lock was already set by somebody else
    unlockShared();
    return false;
}
void
Ipc::ReadWriteLock::unlockShared()
{
    // Release a shared (read) lock obtained via lockShared().
    assert(readers > 0);
    --readers;
    --readLevel; // may let "new" writers in
}
void
Ipc::ReadWriteLock::unlockExclusive()
{
    // Release the exclusive (write) lock obtained via lockExclusive().
    assert(writing);
    appending = false; // end any appending mode before surrendering the lock
    writing = false;
    --writeLevel; // done last: may let new readers and writers in
}
void
Ipc::ReadWriteLock::unlockHeaders()
{
    // Release the header-updating flag and the shared lock, both obtained
    // via lockHeaders().
    AssertFlagIsSet(updating);
    updating.clear(std::memory_order_release); // publishes header changes
    unlockShared();
}
void
Ipc::ReadWriteLock::switchExclusiveToShared()
{
    // Convert our exclusive lock into a shared one without letting another
    // writer slip in between the two states.
    assert(writing);
    ++readLevel; // must be done before we release exclusive control
    ++readers;
    unlockExclusive();
}
void
Ipc::ReadWriteLock::startAppending()
{
    // The exclusive lock holder enters appending mode: lockShared() starts
    // succeeding again even though writeLevel is still raised.
    assert(writing);
    appending = true;
}
/// Fold this lock's current state into the given statistics accumulator.
void
Ipc::ReadWriteLock::updateStats(ReadWriteLockStats &stats) const
{
    ++stats.count; // every lock is tallied, whatever its state
    if (readers) {
        stats.readers += readers;
        ++stats.readable;
    } else if (writing) {
        ++stats.writers;
        if (appending)
            ++stats.appenders;
        ++stats.writeable;
    } else {
        ++stats.idle; // neither read- nor write-locked
    }
}
/* Ipc::ReadWriteLockStats */
Ipc::ReadWriteLockStats::ReadWriteLockStats()
{
    // Zero all counters at once. NOTE(review): memset over *this assumes the
    // struct is trivially copyable (plain integer counters, no virtuals) --
    // verify against the declaration in ipc/ReadWriteLock.h.
    memset(this, 0, sizeof(*this));
}
/// Report collected lock-usage statistics to the given cache-manager entry.
void
Ipc::ReadWriteLockStats::dump(StoreEntry &e) const
{
    storeAppendPrintf(&e, "Available locks: %9d\n", count);

    if (!count)
        return; // nothing else to report; avoids division by zero below

    storeAppendPrintf(&e, "Reading: %9d %6.2f%%\n", readable, (100.0 * readable / count));
    storeAppendPrintf(&e, "Writing: %9d %6.2f%%\n", writeable, (100.0 * writeable / count));
    storeAppendPrintf(&e, "Idle: %9d %6.2f%%\n", idle, (100.0 * idle / count));

    if (!readers && !writers)
        return; // no per-user breakdown when nothing is locked

    const int locked = readers + writers;
    storeAppendPrintf(&e, "Readers: %9d %6.2f%%\n", readers, (100.0 * readers / locked));
    const double appPerc = writers ? (100.0 * appenders / writers) : 0.0;
    storeAppendPrintf(&e, "Writers: %9d %6.2f%% including Appenders: %9d %6.2f%%\n",
                      writers, (100.0 * writers / locked),
                      appenders, appPerc);
}
/// Print a compact lock-state summary: reader count plus W/A state letters.
std::ostream &
Ipc::operator <<(std::ostream &os, const Ipc::ReadWriteLock &lock)
{
    os << lock.readers << 'R';
    if (lock.writing)
        os << "W";
    if (lock.appending)
        os << "A";
    // impossible to report lock.updating without setting/clearing that flag
    return os;
}