dept: Tie to Lockdep and IRQ tracing
Admittedly, the way Dept is tied in here looks ugly. But it is
inevitable as long as Dept relies on Lockdep, and the integration
should be refined gradually.

   1. Dept basically relies on Lockdep to track typical locks and
      IRQ state.

   2. Dept cannot recognize IRQ state changes made through the
      raw_local_irq_*() APIs, which caused false alarms. So make Dept
      track those too (a sketch follows the list below).

   3. Lockdep doesn't track the outermost {hard,soft}irq entrances,
      but Dept makes use of them. So make Dept track those too.
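
To illustrate item 2, here is a minimal sketch (hypothetical code,
not part of this patch) of the kind of section that used to change
the real IRQ state behind Dept's back:

	#include <linux/irqflags.h>

	/*
	 * Hypothetical example, for illustration only.  Before this
	 * commit, the raw_local_irq_*() calls below toggled the real
	 * IRQ state without informing Dept, which could then emit
	 * false alarms about waits inside the critical section.  Now
	 * raw_local_irq_save() also calls dept_hardirqs_off(), and
	 * raw_local_irq_restore() calls dept_hardirqs_on() when it
	 * actually re-enables IRQs.
	 */
	static void sketch_raw_irq_section(void)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* Dept: IRQs now off */
		/* ... work that must run with IRQs disabled ... */
		raw_local_irq_restore(flags);	/* Dept: IRQs on again */
	}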

Signed-off-by: Byungchul Park <max.byungchul.park@gmail.com>
maxbyungchulpark authored and intel-lab-lkp committed Jan 28, 2023
1 parent d2758f6 commit aed5169
Showing 14 changed files with 140 additions and 29 deletions.
23 changes: 20 additions & 3 deletions include/linux/irqflags.h
@@ -13,6 +13,8 @@
#define _LINUX_TRACE_IRQFLAGS_H

#include <linux/typecheck.h>
#include <linux/kernel.h>
#include <linux/dept.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>

@@ -60,8 +62,10 @@ extern void trace_hardirqs_off(void);
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
# define lockdep_hardirq_enter() \
do { \
if (__this_cpu_inc_return(hardirq_context) == 1)\
if (__this_cpu_inc_return(hardirq_context) == 1) { \
current->hardirq_threaded = 0; \
dept_hardirq_enter(); \
} \
} while (0)
# define lockdep_hardirq_threaded() \
do { \
@@ -136,6 +140,8 @@ do { \
# define lockdep_softirq_enter() \
do { \
current->softirq_context++; \
if (current->softirq_context == 1) \
dept_softirq_enter(); \
} while (0)
# define lockdep_softirq_exit() \
do { \
@@ -170,17 +176,28 @@ extern void warn_bogus_irq_restore(void);
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
#define raw_local_irq_disable() arch_local_irq_disable()
#define raw_local_irq_enable() arch_local_irq_enable()
#define raw_local_irq_disable() \
do { \
arch_local_irq_disable(); \
dept_hardirqs_off(_THIS_IP_); \
} while (0)
#define raw_local_irq_enable() \
do { \
dept_hardirqs_on(_THIS_IP_); \
arch_local_irq_enable(); \
} while (0)
#define raw_local_irq_save(flags) \
do { \
typecheck(unsigned long, flags); \
flags = arch_local_irq_save(); \
dept_hardirqs_off(_THIS_IP_); \
} while (0)
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
raw_check_bogus_irq_restore(); \
if (!arch_irqs_disabled_flags(flags)) \
dept_hardirqs_on(_THIS_IP_); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
1 change: 1 addition & 0 deletions include/linux/local_lock_internal.h
@@ -21,6 +21,7 @@ typedef struct {
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.lock_type = LD_LOCK_PERCPU, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}, \
.owner = NULL,

102 changes: 81 additions & 21 deletions include/linux/lockdep.h
@@ -12,6 +12,7 @@

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <linux/dept_ldt.h>
#include <asm/percpu.h>

struct task_struct;
@@ -39,6 +40,8 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
*/
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
to->class_cache[i] = NULL;

dept_map_copy(&to->dmap, &from->dmap);
}

/*
@@ -441,7 +444,8 @@ enum xhlock_context_t {
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
{ .name = (_name), .key = (void *)(_key), \
.dmap = DEPT_MAP_INITIALIZER(_name, _key) }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}
@@ -523,33 +527,89 @@ extern bool read_lock_is_recursive(void);
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i) lock_release(l, i)

#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire(l, s, t, i) \
do { \
ldt_lock(&(l)->dmap, s, t, NULL, i); \
lock_acquire_exclusive(l, s, t, NULL, i); \
} while (0)
#define spin_acquire_nest(l, s, t, n, i) \
do { \
ldt_lock(&(l)->dmap, s, t, n, i); \
lock_acquire_exclusive(l, s, t, n, i); \
} while (0)
#define spin_release(l, i) \
do { \
ldt_unlock(&(l)->dmap, i); \
lock_release(l, i); \
} while (0)
#define rwlock_acquire(l, s, t, i) \
do { \
ldt_wlock(&(l)->dmap, s, t, NULL, i); \
lock_acquire_exclusive(l, s, t, NULL, i); \
} while (0)
#define rwlock_acquire_read(l, s, t, i) \
do { \
ldt_rlock(&(l)->dmap, s, t, NULL, i, !read_lock_is_recursive());\
if (read_lock_is_recursive()) \
lock_acquire_shared_recursive(l, s, t, NULL, i); \
else \
lock_acquire_shared(l, s, t, NULL, i); \
} while (0)

#define rwlock_release(l, i) lock_release(l, i)

#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i) lock_release(l, i)

#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i) lock_release(l, i)

#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i) lock_release(l, i)
#define rwlock_release(l, i) \
do { \
ldt_unlock(&(l)->dmap, i); \
lock_release(l, i); \
} while (0)
#define seqcount_acquire(l, s, t, i) \
do { \
ldt_wlock(&(l)->dmap, s, t, NULL, i); \
lock_acquire_exclusive(l, s, t, NULL, i); \
} while (0)
#define seqcount_acquire_read(l, s, t, i) \
do { \
ldt_rlock(&(l)->dmap, s, t, NULL, i, false); \
lock_acquire_shared_recursive(l, s, t, NULL, i); \
} while (0)
#define seqcount_release(l, i) \
do { \
ldt_unlock(&(l)->dmap, i); \
lock_release(l, i); \
} while (0)
#define mutex_acquire(l, s, t, i) \
do { \
ldt_lock(&(l)->dmap, s, t, NULL, i); \
lock_acquire_exclusive(l, s, t, NULL, i); \
} while (0)
#define mutex_acquire_nest(l, s, t, n, i) \
do { \
ldt_lock(&(l)->dmap, s, t, n, i); \
lock_acquire_exclusive(l, s, t, n, i); \
} while (0)
#define mutex_release(l, i) \
do { \
ldt_unlock(&(l)->dmap, i); \
lock_release(l, i); \
} while (0)
#define rwsem_acquire(l, s, t, i) \
do { \
ldt_lock(&(l)->dmap, s, t, NULL, i); \
lock_acquire_exclusive(l, s, t, NULL, i); \
} while (0)
#define rwsem_acquire_nest(l, s, t, n, i) \
do { \
ldt_lock(&(l)->dmap, s, t, n, i); \
lock_acquire_exclusive(l, s, t, n, i); \
} while (0)
#define rwsem_acquire_read(l, s, t, i) \
do { \
ldt_lock(&(l)->dmap, s, t, NULL, i); \
lock_acquire_shared(l, s, t, NULL, i); \
} while (0)
#define rwsem_release(l, i) \
do { \
ldt_unlock(&(l)->dmap, i); \
lock_release(l, i); \
} while (0)

#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
3 changes: 3 additions & 0 deletions include/linux/lockdep_types.h
@@ -11,6 +11,7 @@
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>
#include <linux/dept.h>

#define MAX_LOCKDEP_SUBCLASSES 8UL

@@ -76,6 +77,7 @@ struct lock_class_key {
struct hlist_node hash_entry;
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
struct dept_key dkey;
};

extern struct lock_class_key __lockdep_no_validate__;
@@ -185,6 +187,7 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
struct dept_map dmap;
};

struct pin_cookie { unsigned int val; };
1 change: 1 addition & 0 deletions include/linux/mutex.h
@@ -25,6 +25,7 @@
, .dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
2 changes: 1 addition & 1 deletion include/linux/percpu-rwsem.h
@@ -21,7 +21,7 @@ struct percpu_rw_semaphore {
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname },
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname, .dmap = DEPT_MAP_INITIALIZER(lockname, NULL) },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif
1 change: 1 addition & 0 deletions include/linux/rtmutex.h
@@ -81,6 +81,7 @@ do { \
.dep_map = { \
.name = #mutexname, \
.wait_type_inner = LD_WAIT_SLEEP, \
.dmap = DEPT_MAP_INITIALIZER(mutexname, NULL),\
}
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
1 change: 1 addition & 0 deletions include/linux/rwlock_types.h
@@ -10,6 +10,7 @@
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL), \
}
#else
# define RW_DEP_MAP_INIT(lockname)
1 change: 1 addition & 0 deletions include/linux/rwsem.h
@@ -21,6 +21,7 @@
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
2 changes: 1 addition & 1 deletion include/linux/seqlock.h
@@ -81,7 +81,7 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname) \
.dep_map = { .name = #lockname }
.dep_map = { .name = #lockname, .dmap = DEPT_MAP_INITIALIZER(lockname, NULL) }

/**
* seqcount_init() - runtime initializer for seqcount_t
3 changes: 3 additions & 0 deletions include/linux/spinlock_types_raw.h
@@ -31,18 +31,21 @@ typedef struct raw_spinlock {
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SPIN, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
# define SPIN_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}

# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
.lock_type = LD_LOCK_PERCPU, \
.dmap = DEPT_MAP_INITIALIZER(lockname, NULL),\
}
#else
# define RAW_SPIN_DEP_MAP_INIT(lockname)
2 changes: 1 addition & 1 deletion include/linux/srcu.h
@@ -35,7 +35,7 @@ int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name, .dmap = DEPT_MAP_INITIALIZER(srcu_name, NULL) },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *ssp);
4 changes: 2 additions & 2 deletions kernel/dependency/dept.c
@@ -244,10 +244,10 @@ static inline bool dept_working(void)
* Even k == NULL is considered as a valid key because it would use
* &->map_key as the key in that case.
*/
struct dept_key __dept_no_validate__;
extern struct lock_class_key __lockdep_no_validate__;
static inline bool valid_key(struct dept_key *k)
{
return &__dept_no_validate__ != k;
return &__lockdep_no_validate__.dkey != k;
}

/*
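
A side note on the kernel/dependency/dept.c hunk above: the separate
__dept_no_validate__ key is dropped, and Dept now reuses the dept_key
(dkey) embedded in Lockdep's __lockdep_no_validate__ lock_class_key,
so a single key opts a lock class out of both checkers.  A hedged
sketch of what that enables, assuming the usual Lockdep opt-out path
carries the key through to Dept (this diff only shows valid_key()):

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sketch_lock);	/* hypothetical lock */

	static void sketch_opt_out(void)
	{
		/*
		 * Tags sketch_lock's class with __lockdep_no_validate__;
		 * with dkey embedded in that key, Dept's valid_key()
		 * would skip this lock as well.
		 */
		lockdep_set_novalidate_class(&sketch_lock);
	}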
