-
Notifications
You must be signed in to change notification settings - Fork 5.9k
Expand file tree
/
Copy pathPicoSpinLock.h
More file actions
167 lines (151 loc) · 5.41 KB
/
PicoSpinLock.h
File metadata and controls
167 lines (151 loc) · 5.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* N.B. You most likely do _not_ want to use PicoSpinLock or any other
* kind of spinlock. Consider MicroLock instead.
*
* In short, spinlocks in preemptive multi-tasking operating systems
* have serious problems and fast mutexes like std::mutex are almost
* certainly the better choice, because letting the OS scheduler put a
* thread to sleep is better for system responsiveness and throughput
* than wasting a timeslice repeatedly querying a lock held by a
* thread that's blocked, and you can't prevent userspace
 * programs from blocking.
*
* Spinlocks in an operating system kernel make much more sense than
* they do in userspace.
*/
#pragma once
#include <array>
#include <atomic>
#include <cinttypes>
#include <cstdlib>
#include <mutex>
#include <type_traits>
#include <folly/Portability.h>
#include <folly/lang/SafeAssert.h>
#include <folly/synchronization/AtomicRef.h>
#include <folly/synchronization/AtomicUtil.h>
#include <folly/synchronization/SanitizeThread.h>
#include <folly/synchronization/detail/Sleeper.h>
namespace folly {
/*
* Spin lock on a single bit in an integral type. You can use this
* with 16, 32, or 64-bit integral types.
*
* This is useful if you want a small lock and already have an int
* with a bit in it that you aren't using. But note that it can't be
* as small as MicroSpinLock (1 byte), if you don't already have a
* convenient int with an unused bit lying around to put it on.
*
* To construct these, either use init() or zero initialize. We don't
* have a real constructor because we want this to be a POD type so we
* can put it into packed structs.
*/
template <class IntType, int Bit = sizeof(IntType) * 8 - 1>
struct PicoSpinLock {
  // Internally we deal with the unsigned version of the type.
  using UIntType = std::make_unsigned_t<IntType>;

  static_assert(
      std::is_integral<IntType>::value, "PicoSpinLock needs an integral type");
  static_assert(
      sizeof(IntType) == 2 || sizeof(IntType) == 4 || sizeof(IntType) == 8,
      "PicoSpinLock can't work on integers smaller than 2 bytes");

 public:
  // Mask selecting the single bit (position `Bit`, by default the sign/top
  // bit) that serves as the lock flag; all other bits are user data.
  static constexpr UIntType kLockBitMask_ = UIntType(1) << Bit;
  // The shared word: data bits plus the lock bit. Public so the type stays
  // POD (zero-initializable, usable in packed structs); `mutable` so the
  // const-qualified lock()/unlock()/try_lock() can modify it. Alignment is
  // whatever atomic_ref requires for atomic access to UIntType.
  alignas(atomic_ref<UIntType>::required_alignment) mutable UIntType lock_;

  /*
   * You must call this function before using this class, if you
   * default constructed it. If you zero-initialized it you can
   * assume the PicoSpinLock is in a valid unlocked state with
   * getData() == 0.
   *
   * (This doesn't use a constructor because we want to be a POD.)
   */
  void init(IntType initialValue = 0) {
    // The initial data must not collide with the bit reserved for the lock.
    FOLLY_SAFE_CHECK(!(initialValue & kLockBitMask_));
    auto ref = make_atomic_ref(lock_);
    auto val = UIntType(initialValue);
    // Release store so the initial value is published to other threads that
    // subsequently acquire the lock.
    ref.store(val, std::memory_order_release);
  }

  /*
   * Returns the value of the integer we're using for our lock, except
   * with the bit we are using as a lock cleared, regardless of
   * whether the lock is held.
   *
   * It is 'safe' to call this without holding the lock. (As in: you
   * get the same guarantees for simultaneous accesses to an integer
   * as you normally get.)
   */
  IntType getData() const {
    auto ref = make_atomic_ref(lock_);
    // Relaxed: no ordering is promised here, only atomicity of the read.
    auto val = ref.load(std::memory_order_relaxed);
    // Strip the lock bit so callers see only the data bits.
    return val & ~kLockBitMask_;
  }

  /*
   * Set the value of the other bits in our integer.
   *
   * Don't use this when you aren't holding the lock, unless it can be
   * guaranteed that no other threads may be trying to use this.
   */
  void setData(IntType w) {
    // New data must leave the lock bit alone.
    FOLLY_SAFE_CHECK(!(w & kLockBitMask_));
    auto ref = make_atomic_ref(lock_);
    // NOTE: this is a NON-atomic read-modify-write (separate relaxed load
    // and store). It preserves whatever lock-bit value the load observed;
    // a concurrent lock()/unlock() landing between the load and the store
    // below would be clobbered — hence the hold-the-lock requirement in
    // the comment above.
    auto val = ref.load(std::memory_order_relaxed);
    val = (val & kLockBitMask_) | w;
    ref.store(val, std::memory_order_relaxed);
  }

  /*
   * Try to get the lock without blocking: returns whether or not we
   * got it.
   */
  bool try_lock() const {
    auto ret = try_lock_internal();
    // try_lock_internal is not TSAN-aware; report the attempted acquisition
    // (and whether it succeeded, via `ret`) to the sanitizer here.
    annotate_rwlock_try_acquired(
        this, annotate_rwlock_level::wrlock, ret, __FILE__, __LINE__);
    return ret;
  }

  /*
   * Block until we can acquire the lock. Uses Sleeper to wait.
   */
  void lock() const {
    // Sleeper implements the inter-retry wait/backoff policy, so a
    // contended spin doesn't burn a full timeslice busy-waiting.
    detail::Sleeper sleeper;
    while (!try_lock_internal()) {
      sleeper.wait();
    }
    // Only annotate once the lock is actually held.
    annotate_rwlock_acquired(
        this, annotate_rwlock_level::wrlock, __FILE__, __LINE__);
  }

  /*
   * Release the lock, without changing the value of the rest of the
   * integer.
   */
  void unlock() const {
    auto ref = make_atomic_ref(lock_);
    // Annotate before the actual release below, i.e. while this thread still
    // logically holds the lock.
    annotate_rwlock_released(
        this, annotate_rwlock_level::wrlock, __FILE__, __LINE__);
    // Clear only the lock bit, with release ordering so writes made inside
    // the critical section are published to the next acquirer.
    auto previous = atomic_fetch_reset(ref, Bit, std::memory_order_release);
    // The bit must have been set: unlocking an unheld lock is a bug.
    FOLLY_SAFE_DCHECK(previous);
  }

 private:
  // called by lock/try_lock - this is not TSAN aware
  bool try_lock_internal() const {
    auto ref = make_atomic_ref(lock_);
    // Atomically set the lock bit with acquire ordering, so the critical
    // section's accesses cannot be reordered before the acquisition.
    auto previous = atomic_fetch_set(ref, Bit, std::memory_order_acquire);
    // We got the lock iff the bit was previously clear.
    return !previous;
  }
};
} // namespace folly