random: clear fast pool, crng, and batches in cpuhp bring up
commit 3191dd5 upstream.

For the irq randomness fast pool, rather than having to use expensive
atomics, which were visibly the most expensive thing in the entire irq
handler, simply take care of the extreme edge case of resetting count to
zero in the cpuhp online handler, just after workqueues have been
reenabled. This simplifies the code a bit, lets us use vanilla variables
rather than atomics, and should improve performance.
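
To make this concrete, here is a toy user-space sketch of the pattern the
driver moves to (my illustration, not part of the patch, with the
threshold logic simplified): a plain per-cpu counter whose top bit doubles
as the MIX_INFLIGHT "worker already queued" flag. The 1U << 31 value
mirrors the MIX_INFLIGHT definition in drivers/char/random.c.

    #include <stdio.h>

    #define MIX_INFLIGHT (1U << 31)

    static unsigned int count;      /* stands in for fast_pool->count */

    static void simulate_irq(void)
    {
            /*
             * A plain, non-atomic increment: safe in the real driver
             * because each fast pool is per-cpu and only touched with
             * irqs disabled on that CPU.
             */
            unsigned int new_count = ++count;

            if (new_count >= 64 && !(new_count & MIX_INFLIGHT)) {
                    count |= MIX_INFLIGHT;  /* mix worker now queued */
                    printf("queue mixing work (count=%u)\n", new_count);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 70; i++)
                    simulate_irq();

            /*
             * What random_online_cpu() does at bring-up: a single plain
             * store clears both the accumulated count and a possibly
             * stale MIX_INFLIGHT flag.
             */
            count = 0;
            return 0;
    }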

As well, very early on when the CPU comes up, while interrupts are still
disabled, we clear out the per-cpu crng and its batches, so that it
always starts with fresh randomness.
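
The invalidation trick is generation counting: each per-cpu crng records
which generation of the base crng keyed it, and every request compares
that against the global generation, reseeding on mismatch. ULONG_MAX is a
value the global counter never reaches, so writing it guarantees the next
request reseeds; likewise, a batch position of UINT_MAX marks the batch as
exhausted so it is refilled before use. A toy sketch of the idea (my
simplification, not the driver's actual API):

    #include <stdio.h>
    #include <limits.h>

    static unsigned long base_generation;   /* bumped on every reseed */

    struct percpu_crng {
            unsigned long generation;
    };

    static void get_randomness(struct percpu_crng *crng)
    {
            if (crng->generation != base_generation) {
                    /* stale or deliberately invalidated: rekey first */
                    printf("reseeding per-cpu state\n");
                    crng->generation = base_generation;
            }
            printf("serving bytes at generation %lu\n", crng->generation);
    }

    int main(void)
    {
            struct percpu_crng crng = { .generation = 0 };

            get_randomness(&crng);       /* in sync, no reseed */
            crng.generation = ULONG_MAX; /* what random_prepare_cpu() does */
            get_randomness(&crng);       /* mismatch forces a reseed */
            return 0;
    }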

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Sultan Alsawaf <sultan@kerneltoast.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
zx2c4 authored and gregkh committed May 30, 2022
1 parent 6e1cb84 commit 5064550
Showing 4 changed files with 65 additions and 15 deletions.
62 changes: 47 additions & 15 deletions drivers/char/random.c
@@ -698,6 +698,25 @@ u32 get_random_u32(void)
 }
 EXPORT_SYMBOL(get_random_u32);
 
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int random_prepare_cpu(unsigned int cpu)
+{
+	/*
+	 * When the cpu comes back online, immediately invalidate both
+	 * the per-cpu crng and all batches, so that we serve fresh
+	 * randomness.
+	 */
+	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+	return 0;
+}
+#endif
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.
@@ -1183,7 +1202,7 @@ struct fast_pool {
 	};
 	struct work_struct mix;
 	unsigned long last;
-	atomic_t count;
+	unsigned int count;
 	u16 reg_idx;
 };
 
@@ -1219,6 +1238,29 @@ static void fast_mix(u32 pool[4])
 
 static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
 
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+int random_online_cpu(unsigned int cpu)
+{
+	/*
+	 * During CPU shutdown and before CPU onlining, add_interrupt_
+	 * randomness() may schedule mix_interrupt_randomness(), and
+	 * set the MIX_INFLIGHT flag. However, because the worker can
+	 * be scheduled on a different CPU during this period, that
+	 * flag will never be cleared. For that reason, we zero out
+	 * the flag here, which runs just after workqueues are onlined
+	 * for the CPU again. This also has the effect of setting the
+	 * irq randomness count to zero so that new accumulated irqs
+	 * are fresh.
+	 */
+	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+	return 0;
+}
+#endif
+
 static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	u32 *ptr = (u32 *)regs;
@@ -1243,15 +1285,6 @@ static void mix_interrupt_randomness(struct work_struct *work)
 	local_irq_disable();
 	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
 		local_irq_enable();
-		/*
-		 * If we are unlucky enough to have been moved to another CPU,
-		 * during CPU hotplug while the CPU was shutdown then we set
-		 * our count to zero atomically so that when the CPU comes
-		 * back online, it can enqueue work again. The _release here
-		 * pairs with the atomic_inc_return_acquire in
-		 * add_interrupt_randomness().
-		 */
-		atomic_set_release(&fast_pool->count, 0);
 		return;
 	}
 
@@ -1260,7 +1293,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
 	 * consistent view, before we reenable irqs again.
 	 */
 	memcpy(pool, fast_pool->pool32, sizeof(pool));
-	atomic_set(&fast_pool->count, 0);
+	fast_pool->count = 0;
 	fast_pool->last = jiffies;
 	local_irq_enable();
 
@@ -1296,14 +1329,13 @@ void add_interrupt_randomness(int irq)
 	}
 
 	fast_mix(fast_pool->pool32);
-	/* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
-	new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);
+	new_count = ++fast_pool->count;
 
 	if (unlikely(crng_init == 0)) {
 		if (new_count >= 64 &&
 		    crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32),
 					 true, true) > 0) {
-			atomic_set(&fast_pool->count, 0);
+			fast_pool->count = 0;
 			fast_pool->last = now;
 			if (spin_trylock(&input_pool.lock)) {
 				_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
@@ -1321,7 +1353,7 @@
 
 	if (unlikely(!fast_pool->mix.func))
 		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
-	atomic_or(MIX_INFLIGHT, &fast_pool->count);
+	fast_pool->count |= MIX_INFLIGHT;
 	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
 }
 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
2 changes: 2 additions & 0 deletions include/linux/cpuhotplug.h
@@ -61,6 +61,7 @@ enum cpuhp_state {
 	CPUHP_LUSTRE_CFS_DEAD,
 	CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
 	CPUHP_PADATA_DEAD,
+	CPUHP_RANDOM_PREPARE,
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_POWER_NUMA_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,
@@ -187,6 +188,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
 	CPUHP_AP_WATCHDOG_ONLINE,
 	CPUHP_AP_WORKQUEUE_ONLINE,
+	CPUHP_AP_RANDOM_ONLINE,
 	CPUHP_AP_RCUTREE_ONLINE,
 	CPUHP_AP_BASE_CACHEINFO_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
5 changes: 5 additions & 0 deletions include/linux/random.h
@@ -156,4 +156,9 @@ static inline bool __init arch_get_random_long_early(unsigned long *v)
 }
 #endif
 
+#ifdef CONFIG_SMP
+extern int random_prepare_cpu(unsigned int cpu);
+extern int random_online_cpu(unsigned int cpu);
+#endif
+
 #endif /* _LINUX_RANDOM_H */
11 changes: 11 additions & 0 deletions kernel/cpu.c
@@ -34,6 +34,7 @@
 #include <linux/scs.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/cpuset.h>
+#include <linux/random.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1581,6 +1582,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.startup.single = perf_event_init_cpu,
 		.teardown.single = perf_event_exit_cpu,
 	},
+	[CPUHP_RANDOM_PREPARE] = {
+		.name = "random:prepare",
+		.startup.single = random_prepare_cpu,
+		.teardown.single = NULL,
+	},
 	[CPUHP_WORKQUEUE_PREP] = {
 		.name = "workqueue:prepare",
 		.startup.single = workqueue_prepare_cpu,
@@ -1697,6 +1703,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.startup.single = workqueue_online_cpu,
 		.teardown.single = workqueue_offline_cpu,
 	},
+	[CPUHP_AP_RANDOM_ONLINE] = {
+		.name = "random:online",
+		.startup.single = random_online_cpu,
+		.teardown.single = NULL,
+	},
 	[CPUHP_AP_RCUTREE_ONLINE] = {
 		.name = "RCU/tree:online",
 		.startup.single = rcutree_online_cpu,
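
The placement of the two new entries is the whole ordering contract: cpuhp
runs startup callbacks in ascending enum cpuhp_state order during
bring-up, which is why random:prepare executes before workqueue:prepare
and random:online after workqueue:online. A toy sketch of that guarantee
(my illustration, not kernel code):

    #include <stdio.h>

    enum step {
            RANDOM_PREPARE,         /* CPUHP_RANDOM_PREPARE */
            WORKQUEUE_PREP,         /* CPUHP_WORKQUEUE_PREP */
            WORKQUEUE_ONLINE,       /* CPUHP_AP_WORKQUEUE_ONLINE */
            RANDOM_ONLINE,          /* CPUHP_AP_RANDOM_ONLINE */
            NR_STEPS
    };

    static const char *names[NR_STEPS] = {
            [RANDOM_PREPARE]   = "random:prepare",
            [WORKQUEUE_PREP]   = "workqueue:prepare",
            [WORKQUEUE_ONLINE] = "workqueue:online",
            [RANDOM_ONLINE]    = "random:online",
    };

    int main(void)
    {
            /* CPU bring-up walks the table in ascending enum order. */
            for (int s = 0; s < NR_STEPS; s++)
                    printf("startup: %s\n", names[s]);
            return 0;
    }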
