Skip to content
Permalink
Browse files
dump_stack: move cpu lock to printk.c
dump_stack() implements its own cpu-reentrant spinning lock to
best-effort serialize stack traces in the printk log. However,
there are other functions (such as show_regs()) that can also
benefit from this serialization.

Move the cpu-reentrant spinning lock (cpu lock) into new helper
functions printk_cpu_lock_irqsave()/printk_cpu_unlock_irqrestore()
so that it is available for others as well. For !CONFIG_SMP the
cpu lock is a NOP.

Note that having multiple cpu locks in the system can easily
lead to deadlock. Code needing a cpu lock should use the
printk cpu lock, since the printk cpu lock could be acquired
from any code and any context.

Signed-off-by: John Ogness <john.ogness@linutronix.de>
  • Loading branch information
jogness authored and intel-lab-lkp committed Jun 7, 2021
1 parent 7f09e89 commit 136bcc2980e636b2ae156ca63fbe95c713e44c1b
Show file tree
Hide file tree
Showing 3 changed files with 92 additions and 37 deletions.
@@ -287,6 +287,19 @@ static inline void printk_safe_flush_on_panic(void)
}
#endif

#if defined(CONFIG_SMP)
/*
 * printk cpu-reentrant spinning lock: best-effort serialization of
 * multi-line output (such as stack dumps) in the printk log.
 *
 * @lock_flag receives the lock state that must be passed back to the
 * matching printk_cpu_unlock_irqrestore() call; @irq_flags receives the
 * saved irq state. Safe to call from any context and state.
 */
extern void printk_cpu_lock_irqsave(bool *lock_flag, unsigned long *irq_flags);
extern void printk_cpu_unlock_irqrestore(bool lock_flag, unsigned long irq_flags);
#else
/*
 * For !CONFIG_SMP the cpu lock is a NOP: there is no other cpu to
 * serialize against. Note the stubs leave *lock_flag and *irq_flags
 * untouched; the NOP unlock ignores both, so no caller observes them.
 */
static inline void printk_cpu_lock_irqsave(bool *lock_flag, unsigned long *irq_flags)
{
}

static inline void printk_cpu_unlock_irqrestore(bool lock_flag, unsigned long irq_flags)
{
}
#endif

extern int kptr_restrict;

/**
@@ -3532,3 +3532,78 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);

#endif

#ifdef CONFIG_SMP
/* CPU currently holding the printk cpu lock, or -1 when the lock is free. */
static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);

/*
 * printk_cpu_lock_irqsave: Acquire the printk cpu-reentrant spinning lock
 * and disable interrupts.
 * @lock_flag: A buffer to store lock state (true when this call took
 *             ownership, false for a nested acquisition).
 * @irq_flags: A buffer to store irq state.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately. If the lock is held by another
 * processor, this function spins until the calling processor becomes the
 * owner. This function returns with interrupts disabled.
 *
 * It is safe to call this function from any context and state.
 */
void printk_cpu_lock_irqsave(bool *lock_flag, unsigned long *irq_flags)
{
	int owner;
	int this_cpu;

	for (;;) {
		/* Interrupts must be off before attempting to take ownership. */
		local_irq_save(*irq_flags);

		this_cpu = smp_processor_id();

		owner = atomic_cmpxchg(&printk_cpulock_owner, -1, this_cpu);
		if (owner == -1) {
			/* Lock was free: this CPU is now the owner. */
			*lock_flag = true;
			break;
		}

		if (owner == this_cpu) {
			/* Nested acquisition: this CPU already owns the lock. */
			*lock_flag = false;
			break;
		}

		/* Contended: back off with interrupts enabled while waiting. */
		local_irq_restore(*irq_flags);

		/*
		 * Wait for the lock to release before retrying the cmpxchg()
		 * in order to mitigate the thundering herd problem.
		 */
		do
			cpu_relax();
		while (atomic_read(&printk_cpulock_owner) != -1);
	}
}
EXPORT_SYMBOL(printk_cpu_lock_irqsave);

/*
 * printk_cpu_unlock_irqrestore: Release the printk cpu-reentrant spinning
 * lock and restore interrupts.
 * @lock_flag: The current lock state (from printk_cpu_lock_irqsave()).
 * @irq_flags: The current irq state.
 *
 * Release the lock. The calling processor must be the owner of the lock.
 *
 * It is safe to call this function from any context and state.
 */
void printk_cpu_unlock_irqrestore(bool lock_flag, unsigned long irq_flags)
{
	/* A nested acquisition did not take ownership: nothing to release. */
	if (!lock_flag)
		return;

	atomic_set(&printk_cpulock_owner, -1);

	local_irq_restore(irq_flags);
}
EXPORT_SYMBOL(printk_cpu_unlock_irqrestore);
#endif /* CONFIG_SMP */
@@ -93,52 +93,19 @@ static void __dump_stack(const char *log_lvl)
*
* Architectures can override this implementation by implementing their own.
*/
/*
 * BUG in the pasted span: the diff was rendered without +/- markers, so the
 * removed pre-patch SMP implementation (private dump_lock cmpxchg spinning)
 * and the new post-patch body were merged, yielding two conflicting
 * definitions of dump_stack_lvl() and an #else branch referencing variables
 * declared only in the first. This is the intended merged result: a single
 * definition that delegates serialization to the printk cpu lock (which is
 * itself a NOP for !CONFIG_SMP), so no #ifdef CONFIG_SMP split is needed.
 */
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
	unsigned long irq_flags;
	bool lock_flag;

	/*
	 * Permit this cpu to perform nested stack dumps while serializing
	 * against other CPUs.
	 */
	printk_cpu_lock_irqsave(&lock_flag, &irq_flags);
	__dump_stack(log_lvl);
	printk_cpu_unlock_irqrestore(lock_flag, irq_flags);
}
EXPORT_SYMBOL(dump_stack_lvl);

asmlinkage __visible void dump_stack(void)

0 comments on commit 136bcc2

Please sign in to comment.