random: remove batched entropy locking
commit 77760fd upstream.

Rather than use spinlocks to protect batched entropy, we can instead
disable interrupts locally, since we're dealing with per-cpu data, and
manage resets with a basic generation counter. At the same time, we
can't quite do this on PREEMPT_RT, where we still want spinlocks-as-mutexes
semantics. So we use a local_lock_t, which provides the right
behavior for each. Because this is a per-cpu lock, that generation
counter is still doing the necessary CPU-to-CPU communication.

This should improve performance a bit. It will also fix the linked splat
that Jonathan received with PROVE_RAW_LOCK_NESTING=y enabled.

Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Suggested-by: Andy Lutomirski <luto@kernel.org>
Reported-by: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
Tested-by: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
Link: https://lore.kernel.org/lkml/YfMa0QgsjCVdRAvJ@latitude/
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
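
For illustration, here is a minimal user-space sketch of the generation-counter scheme described above; it is an analogue, not the kernel code. Each batch records the generation it was filled under, readers compare that against a global atomic counter, and invalidation is a single increment rather than a per-CPU locked walk. The helper names (refill_batch, get_u64_batched, invalidate_batches) and the C11-atomics/rand() plumbing are assumptions of this sketch; the kernel itself uses local_lock_irqsave() and extract_crng() as shown in the diff below.

/*
 * Minimal user-space analogue of the batched-entropy scheme (assumption:
 * this is illustrative code, not the kernel implementation). refill_batch()
 * stands in for extract_crng(); a real per-CPU batch would additionally be
 * protected by local_lock_irqsave() as in the patch.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_WORDS 8

/* Bumped to lazily invalidate every outstanding batch. */
static atomic_int batch_generation;

struct batch {
	uint64_t entropy_u64[BATCH_WORDS];
	unsigned int position;
	int generation;	/* generation the batch was filled under */
};

/* Hypothetical stand-in for extract_crng(): fill the batch with fresh words. */
static void refill_batch(uint64_t *buf, size_t words)
{
	for (size_t i = 0; i < words; i++)
		buf[i] = ((uint64_t)rand() << 32) | (uint64_t)rand();
}

static uint64_t get_u64_batched(struct batch *b)
{
	int next_gen = atomic_load(&batch_generation);

	/* Refill when the batch is exhausted or its generation is stale. */
	if (b->position % BATCH_WORDS == 0 || next_gen != b->generation) {
		refill_batch(b->entropy_u64, BATCH_WORDS);
		b->position = 0;
		b->generation = next_gen;
	}
	return b->entropy_u64[b->position++];
}

/* Analogue of invalidate_batched_entropy(): O(1), no per-CPU walk or locks. */
static void invalidate_batches(void)
{
	atomic_fetch_add(&batch_generation, 1);
}

int main(void)
{
	static struct batch b;	/* one batch here; per-CPU in the kernel */

	printf("%016llx\n", (unsigned long long)get_u64_batched(&b));
	invalidate_batches();	/* forces a refill on the next call */
	printf("%016llx\n", (unsigned long long)get_u64_batched(&b));
	return 0;
}

Because the real batch is per-CPU data accessed with interrupts disabled (or under a local_lock on PREEMPT_RT), no cross-CPU locking is needed on the fast path; the generation counter alone carries the invalidation signal between CPUs.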
zx2c4 authored and gregkh committed May 30, 2022
1 parent a53df44 commit 048d57f
Showing 1 changed file with 28 additions and 27 deletions.
drivers/char/random.c (55 changes: 28 additions & 27 deletions)
@@ -1731,13 +1731,16 @@ static int __init random_sysctls_init(void)
 device_initcall(random_sysctls_init);
 #endif /* CONFIG_SYSCTL */
 
+static atomic_t batch_generation = ATOMIC_INIT(0);
+
 struct batched_entropy {
 	union {
 		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
+	local_lock_t lock;
 	unsigned int position;
-	spinlock_t batch_lock;
+	int generation;
 };
 
 /*
@@ -1749,7 +1752,7 @@ struct batched_entropy {
  * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
-	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock)
 };
 
 u64 get_random_u64(void)
@@ -1758,67 +1761,65 @@ u64 get_random_u64(void)
 	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
+	int next_gen;
 
 	warn_unseeded_randomness(&previous);
 
+	local_lock_irqsave(&batched_entropy_u64.lock, flags);
 	batch = raw_cpu_ptr(&batched_entropy_u64);
-	spin_lock_irqsave(&batch->batch_lock, flags);
-	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+
+	next_gen = atomic_read(&batch_generation);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0 ||
+	    next_gen != batch->generation) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
+		batch->generation = next_gen;
 	}
+
 	ret = batch->entropy_u64[batch->position++];
-	spin_unlock_irqrestore(&batch->batch_lock, flags);
+	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
-	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock)
 };
 
 u32 get_random_u32(void)
 {
 	u32 ret;
 	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
+	int next_gen;
 
 	warn_unseeded_randomness(&previous);
 
+	local_lock_irqsave(&batched_entropy_u32.lock, flags);
 	batch = raw_cpu_ptr(&batched_entropy_u32);
-	spin_lock_irqsave(&batch->batch_lock, flags);
-	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+
+	next_gen = atomic_read(&batch_generation);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0 ||
+	    next_gen != batch->generation) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
+		batch->generation = next_gen;
 	}
+
 	ret = batch->entropy_u32[batch->position++];
-	spin_unlock_irqrestore(&batch->batch_lock, flags);
+	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
 /* It's important to invalidate all potential batched entropy that might
  * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
+ * bumping the generation counter.
+ */
 static void invalidate_batched_entropy(void)
 {
-	int cpu;
-	unsigned long flags;
-
-	for_each_possible_cpu(cpu) {
-		struct batched_entropy *batched_entropy;
-
-		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
-		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
-		batched_entropy->position = 0;
-		spin_unlock(&batched_entropy->batch_lock);
-
-		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
-		spin_lock(&batched_entropy->batch_lock);
-		batched_entropy->position = 0;
-		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
-	}
+	atomic_inc(&batch_generation);
 }
 
 /**