
Commit de8f5e4

Author: Peter Zijlstra

lockdep: Introduce wait-type checks
Extend lockdep to validate lock wait-type context.

The current wait-types are:

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */
	LD_WAIT_CONFIG,		/* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

where lockdep validates that the current lock (the one being acquired) fits
in the current wait-context (as generated by the held stack).

This ensures that there is no attempt to acquire mutexes while holding
spinlocks, to acquire spinlocks while holding raw_spinlocks, and so on. In
other words, it's a fancier might_sleep().

Obviously RCU makes the entire ordeal more complex than a simple single
value test, because RCU can be acquired in (pretty much) any context, and
while it presents a context to nested locks, that is not the same as the
context it was acquired in.

Therefore it's necessary to split the wait_type into two values, one
representing the acquire (outer) and one representing the nested context
(inner). For most 'normal' locks these two are the same.

[ To make static initialization easier we have the rule that:
  .outer == INV means .outer == .inner; because INV == 0. ]

It further means that it's required to find the minimal .inner of the held
stack to compare against the .outer of the new lock, because while 'normal'
RCU presents a CONFIG type to nested locks, if it is taken while already
holding a SPIN type it obviously doesn't relax the rules.

Below is example output generated by the trivial test code:

  raw_spin_lock(&foo);
  spin_lock(&bar);
  spin_unlock(&bar);
  raw_spin_unlock(&foo);

  [ BUG: Invalid wait context ]
  -----------------------------
  swapper/0/1 is trying to lock:
  ffffc90000013f20 (&bar){....}-{3:3}, at: kernel_init+0xdb/0x187
  other info that might help us debug this:
  1 lock held by swapper/0/1:
   #0: ffffc90000013ee0 (&foo){+.+.}-{2:2}, at: kernel_init+0xd1/0x187

The way to read it is to look at the new -{n:m} part in the lock
description: -{3:3} for the attempted lock, and try to match that up to the
held locks, which in this case is the one with -{2:2}. This tells us that
the lock being acquired requires a more relaxed environment than the one
presented by the lock stack.

Currently only the normal locks and RCU are converted; the rest of the
lockdep users default to .inner = INV, which is ignored. More conversions
can be done when desired.

The check for spinlock_t nesting is not enabled by default. It's a separate
config option for now because there are known problems which are currently
being addressed. The config option allows these problems to be identified
and the solutions to be verified. The config switch will be removed and the
checks permanently enabled once the vast majority of issues has been
addressed.

[ bigeasy: Move LD_WAIT_FREE,… out of CONFIG_LOCKDEP to avoid compile
	   failure with CONFIG_DEBUG_SPINLOCK + !CONFIG_LOCKDEP ]
[ tglx: Add the config option ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200321113242.427089655@linutronix.de
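To make the nesting rule concrete, here is a small standalone sketch of the check described above. It is an illustration only, not lockdep's actual implementation: the helper wait_context_ok() and the flat held_inner[] array are invented for this example, the real check additionally handles IRQ context boundaries, and the enum assumes CONFIG_PROVE_RAW_LOCK_NESTING=y so that LD_WAIT_CONFIG is distinct from LD_WAIT_SPIN.

/*
 * Standalone illustration of the wait-type nesting rule; NOT lockdep's
 * real check (no IRQ context handling, no real held-lock stack).
 */
#include <stdbool.h>

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */
	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */
	LD_WAIT_CONFIG,		/* spinlock_t etc.. */
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */
	LD_WAIT_MAX,		/* must be last */
};

/*
 * May a lock with wait types (outer, inner) be acquired while the locks
 * whose .inner types are listed in held_inner[] are held?  Rule from the
 * changelog: compare the new lock's .outer against the minimal .inner of
 * the held stack.
 */
static bool wait_context_ok(const short *held_inner, int nheld,
			    short outer, short inner)
{
	short curr_inner = LD_WAIT_MAX;	/* nothing held: anything goes */
	int i;

	/* .outer == INV means .outer == .inner, because INV == 0 */
	if (outer == LD_WAIT_INV)
		outer = inner;

	for (i = 0; i < nheld; i++) {
		if (held_inner[i] != LD_WAIT_INV && held_inner[i] < curr_inner)
			curr_inner = held_inner[i];
	}

	return outer <= curr_inner;
}

For the trivial test above, the held stack contains only the raw_spinlock_t (.inner == LD_WAIT_SPIN), so curr_inner ends up as LD_WAIT_SPIN; the spinlock_t being acquired has .outer == .inner == LD_WAIT_CONFIG, and CONFIG <= SPIN does not hold, hence the report.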
1 parent a5c6234 commit de8f5e4

15 files changed: +307 additions, -47 deletions

include/linux/irqflags.h

Lines changed: 7 additions & 1 deletion
@@ -37,7 +37,12 @@
 # define trace_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define trace_hardirq_enter()			\
 do {						\
-	current->hardirq_context++;		\
+	if (!current->hardirq_context++)	\
+		current->hardirq_threaded = 0;	\
+} while (0)
+# define trace_hardirq_threaded()		\
+do {						\
+	current->hardirq_threaded = 1;		\
 } while (0)
 # define trace_hardirq_exit()			\
 do {						\
@@ -59,6 +64,7 @@ do {						\
 # define trace_hardirqs_enabled(p)	0
 # define trace_softirqs_enabled(p)	0
 # define trace_hardirq_enter()		do { } while (0)
+# define trace_hardirq_threaded()	do { } while (0)
 # define trace_hardirq_exit()		do { } while (0)
 # define lockdep_softirq_enter()	do { } while (0)
 # define lockdep_softirq_exit()		do { } while (0)

include/linux/lockdep.h

Lines changed: 60 additions & 11 deletions
@@ -21,6 +21,22 @@ extern int lock_stat;

 #include <linux/types.h>

+enum lockdep_wait_type {
+	LD_WAIT_INV = 0,	/* not checked, catch all */
+
+	LD_WAIT_FREE,		/* wait free, rcu etc.. */
+	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+	LD_WAIT_CONFIG,		/* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
+#else
+	LD_WAIT_CONFIG = LD_WAIT_SPIN,
+#endif
+	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */
+
+	LD_WAIT_MAX,		/* must be last */
+};
+
 #ifdef CONFIG_LOCKDEP

 #include <linux/linkage.h>
@@ -111,6 +127,9 @@ struct lock_class {
 	int				name_version;
 	const char			*name;

+	short				wait_type_inner;
+	short				wait_type_outer;
+
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];
 	unsigned long			contending_point[LOCKSTAT_POINTS];
@@ -158,6 +177,8 @@ struct lockdep_map {
 	struct lock_class_key		*key;
 	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
 	const char			*name;
+	short				wait_type_outer; /* can be taken in this context */
+	short				wait_type_inner; /* presents this context */
 #ifdef CONFIG_LOCK_STAT
 	int				cpu;
 	unsigned long			ip;
@@ -299,27 +320,51 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
  * to lockdep:
  */

-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
-			     struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+	struct lock_class_key *key, int subclass, short inner, short outer);
+
+static inline void
+lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
+		      struct lock_class_key *key, int subclass, short inner)
+{
+	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+			     struct lock_class_key *key, int subclass)
+{
+	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}

 /*
  * Reinitialize a lock key - for cases where there is special locking or
  * special initialization of locks so that the validator gets the scope
  * of dependencies wrong: they are either too broad (they need a class-split)
  * or they are too narrow (they suffer from a false class-split):
  */
-#define lockdep_set_class(lock, key) \
-		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
-		lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
-		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
-		lockdep_init_map(&(lock)->dep_map, #lock, \
-				 (lock)->dep_map.key, sub)
+#define lockdep_set_class(lock, key)				\
+	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
+			       (lock)->dep_map.wait_type_inner,	\
+			       (lock)->dep_map.wait_type_outer)
+
+#define lockdep_set_class_and_name(lock, key, name)		\
+	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
+			       (lock)->dep_map.wait_type_inner,	\
+			       (lock)->dep_map.wait_type_outer)
+
+#define lockdep_set_class_and_subclass(lock, key, sub)		\
+	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
+			       (lock)->dep_map.wait_type_inner,	\
+			       (lock)->dep_map.wait_type_outer)
+
+#define lockdep_set_subclass(lock, sub)					\
+	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+			       (lock)->dep_map.wait_type_inner,		\
+			       (lock)->dep_map.wait_type_outer)

 #define lockdep_set_novalidate_class(lock) \
 	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+
 /*
  * Compare locking classes
  */
@@ -432,6 +477,10 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
+# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
+		do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_wait(lock, name, key, sub, inner) \
+		do { (void)(name); (void)(key); } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 		do { (void)(name); (void)(key); } while (0)
 # define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
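As a hypothetical illustration of the new interface in this header (the names my_object, my_key and my_object_init are invented and not part of this commit), a subsystem that embeds a lockdep_map and is only ever used in sleepable context could annotate it like this:

#include <linux/lockdep.h>

struct my_object {
	struct lockdep_map	dep_map;
	/* ... the object's real state ... */
};

static struct lock_class_key my_key;

static void my_object_init(struct my_object *obj)
{
	/*
	 * inner == LD_WAIT_SLEEP: this construct presents a sleeping
	 * context to whatever is acquired under it.  lockdep_init_map_wait()
	 * leaves outer at LD_WAIT_INV, which means outer == inner.
	 */
	lockdep_init_map_wait(&obj->dep_map, "my_object", &my_key, 0,
			      LD_WAIT_SLEEP);
}

Unconverted callers keep using lockdep_init_map(), which now funnels into the same path with both types set to LD_WAIT_INV and is therefore ignored by the new check.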

include/linux/mutex.h

Lines changed: 5 additions & 2 deletions
@@ -109,8 +109,11 @@ do { \
 } while (0)

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
-		, .dep_map = { .name = #lockname }
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
+		, .dep_map = {					\
+			.name = #lockname,			\
+			.wait_type_inner = LD_WAIT_SLEEP,	\
+		}
 #else
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
 #endif

include/linux/rwlock_types.h

Lines changed: 5 additions & 1 deletion
@@ -22,7 +22,11 @@ typedef struct {
 #define RWLOCK_MAGIC		0xdeaf1eed

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+# define RW_DEP_MAP_INIT(lockname)				\
+	.dep_map = {						\
+		.name = #lockname,				\
+		.wait_type_inner = LD_WAIT_CONFIG,		\
+	}
 #else
 # define RW_DEP_MAP_INIT(lockname)
 #endif

include/linux/rwsem.h

Lines changed: 5 additions & 1 deletion
@@ -65,7 +65,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 /* Common initializer macros and functions */

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_DEP_MAP_INIT(lockname)			\
+	, .dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_SLEEP,	\
+	}
 #else
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif

include/linux/sched.h

Lines changed: 1 addition & 0 deletions
@@ -970,6 +970,7 @@ struct task_struct {

 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int			irq_events;
+	unsigned int			hardirq_threaded;
 	unsigned long			hardirq_enable_ip;
 	unsigned long			hardirq_disable_ip;
 	unsigned int			hardirq_enable_event;

include/linux/spinlock.h

Lines changed: 25 additions & 10 deletions
@@ -93,12 +93,13 @@

 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-				   struct lock_class_key *key);
-# define raw_spin_lock_init(lock)			\
-do {							\
-	static struct lock_class_key __key;		\
-							\
-	__raw_spin_lock_init((lock), #lock, &__key);	\
+				   struct lock_class_key *key, short inner);
+
+# define raw_spin_lock_init(lock)					\
+do {									\
+	static struct lock_class_key __key;				\
+									\
+	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
 } while (0)

 #else
@@ -327,12 +328,26 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 	return &lock->rlock;
 }

-#define spin_lock_init(_lock)				\
-do {							\
-	spinlock_check(_lock);				\
-	raw_spin_lock_init(&(_lock)->rlock);		\
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+# define spin_lock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__raw_spin_lock_init(spinlock_check(lock),		\
+			     #lock, &__key, LD_WAIT_CONFIG);	\
+} while (0)
+
+#else
+
+# define spin_lock_init(_lock)			\
+do {						\
+	spinlock_check(_lock);			\
+	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
 } while (0)

+#endif
+
 static __always_inline void spin_lock(spinlock_t *lock)
 {
 	raw_spin_lock(&lock->rlock);

include/linux/spinlock_types.h

Lines changed: 20 additions & 4 deletions
@@ -33,8 +33,18 @@ typedef struct raw_spinlock {
 #define SPINLOCK_OWNER_INIT	((void *)-1L)

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+# define RAW_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_SPIN,	\
+	}
+# define SPIN_DEP_MAP_INIT(lockname)			\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_CONFIG,	\
+	}
 #else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
 #endif

@@ -51,7 +61,7 @@ typedef struct raw_spinlock {
 	{					\
 	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 	SPIN_DEBUG_INIT(lockname)		\
-	SPIN_DEP_MAP_INIT(lockname) }
+	RAW_SPIN_DEP_MAP_INIT(lockname) }

 #define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
 	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
@@ -72,11 +82,17 @@ typedef struct spinlock {
 	};
 } spinlock_t;

+#define ___SPIN_LOCK_INITIALIZER(lockname)	\
+	{					\
+	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+	SPIN_DEBUG_INIT(lockname)		\
+	SPIN_DEP_MAP_INIT(lockname) }
+
 #define __SPIN_LOCK_INITIALIZER(lockname) \
-	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+	{ { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } }

 #define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+	(spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)

 #define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
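A rough sketch of what the split initializers mean for statically defined locks (the lock names and the function below are invented for illustration; the wait types are only recorded under CONFIG_DEBUG_LOCK_ALLOC, and the splat only fires with CONFIG_PROVE_RAW_LOCK_NESTING=y):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_raw_lock);	/* dep_map.wait_type_inner == LD_WAIT_SPIN */
static DEFINE_SPINLOCK(my_lock);		/* dep_map.wait_type_inner == LD_WAIT_CONFIG */

static void my_nesting_test(void)
{
	raw_spin_lock(&my_raw_lock);
	spin_lock(&my_lock);	/* CONFIG type inside a SPIN context: invalid wait context */
	spin_unlock(&my_lock);
	raw_spin_unlock(&my_raw_lock);
}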

kernel/irq/handle.c

Lines changed: 7 additions & 0 deletions
@@ -145,6 +145,13 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
 	for_each_action_of_desc(desc, action) {
 		irqreturn_t res;

+		/*
+		 * If this IRQ would be threaded under force_irqthreads, mark it so.
+		 */
+		if (irq_settings_can_thread(desc) &&
+		    !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
+			trace_hardirq_threaded();
+
 		trace_irq_handler_entry(irq, action);
 		res = action->handler(irq, action->dev_id);
 		trace_irq_handler_exit(irq, action, res);
