Commit f4fe3e0c authored by Theodore Ts'o's avatar Theodore Ts'o Committed by Ben Hutchings

random: use lockless techniques in the interrupt path

commit 902c098a upstream.

The real-time Linux folks don't like add_interrupt_randomness() taking
a spinlock since it is called in the low-level interrupt routine.
This also allows us to reduce the overhead in the fast path of the
random driver, which is the interrupt collection path.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 70696784
...@@ -418,9 +418,9 @@ struct entropy_store { ...@@ -418,9 +418,9 @@ struct entropy_store {
/* read-write data: */ /* read-write data: */
spinlock_t lock; spinlock_t lock;
unsigned add_ptr; unsigned add_ptr;
unsigned input_rotate;
int entropy_count; int entropy_count;
int entropy_total; int entropy_total;
int input_rotate;
unsigned int initialized:1; unsigned int initialized:1;
__u8 last_data[EXTRACT_SIZE]; __u8 last_data[EXTRACT_SIZE];
}; };
...@@ -468,26 +468,24 @@ static __u32 const twist_table[8] = { ...@@ -468,26 +468,24 @@ static __u32 const twist_table[8] = {
* it's cheap to do so and helps slightly in the expected case where * it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits. * the entropy is concentrated in the low-order bits.
*/ */
static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, static void __mix_pool_bytes(struct entropy_store *r, const void *in,
int nbytes, __u8 out[64]) int nbytes, __u8 out[64])
{ {
unsigned long i, j, tap1, tap2, tap3, tap4, tap5; unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
int input_rotate; int input_rotate;
int wordmask = r->poolinfo->poolwords - 1; int wordmask = r->poolinfo->poolwords - 1;
const char *bytes = in; const char *bytes = in;
__u32 w; __u32 w;
unsigned long flags;
/* Taps are constant, so we can load them without holding r->lock. */
tap1 = r->poolinfo->tap1; tap1 = r->poolinfo->tap1;
tap2 = r->poolinfo->tap2; tap2 = r->poolinfo->tap2;
tap3 = r->poolinfo->tap3; tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4; tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5; tap5 = r->poolinfo->tap5;
spin_lock_irqsave(&r->lock, flags); smp_rmb();
input_rotate = r->input_rotate; input_rotate = ACCESS_ONCE(r->input_rotate);
i = r->add_ptr; i = ACCESS_ONCE(r->add_ptr);
/* mix one byte at a time to simplify size handling and churn faster */ /* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) { while (nbytes--) {
...@@ -514,19 +512,23 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, ...@@ -514,19 +512,23 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
input_rotate += i ? 7 : 14; input_rotate += i ? 7 : 14;
} }
r->input_rotate = input_rotate; ACCESS_ONCE(r->input_rotate) = input_rotate;
r->add_ptr = i; ACCESS_ONCE(r->add_ptr) = i;
smp_wmb();
if (out) if (out)
for (j = 0; j < 16; j++) for (j = 0; j < 16; j++)
((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
spin_unlock_irqrestore(&r->lock, flags);
} }
static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) static void mix_pool_bytes(struct entropy_store *r, const void *in,
int nbytes, __u8 out[64])
{ {
mix_pool_bytes_extract(r, in, bytes, NULL); unsigned long flags;
spin_lock_irqsave(&r->lock, flags);
__mix_pool_bytes(r, in, nbytes, out);
spin_unlock_irqrestore(&r->lock, flags);
} }
struct fast_pool { struct fast_pool {
...@@ -564,23 +566,22 @@ static void fast_mix(struct fast_pool *f, const void *in, int nbytes) ...@@ -564,23 +566,22 @@ static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
*/ */
static void credit_entropy_bits(struct entropy_store *r, int nbits) static void credit_entropy_bits(struct entropy_store *r, int nbits)
{ {
unsigned long flags; int entropy_count, orig;
int entropy_count;
if (!nbits) if (!nbits)
return; return;
spin_lock_irqsave(&r->lock, flags);
DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
entropy_count = r->entropy_count; retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
entropy_count += nbits; entropy_count += nbits;
if (entropy_count < 0) { if (entropy_count < 0) {
DEBUG_ENT("negative entropy/overflow\n"); DEBUG_ENT("negative entropy/overflow\n");
entropy_count = 0; entropy_count = 0;
} else if (entropy_count > r->poolinfo->POOLBITS) } else if (entropy_count > r->poolinfo->POOLBITS)
entropy_count = r->poolinfo->POOLBITS; entropy_count = r->poolinfo->POOLBITS;
r->entropy_count = entropy_count; if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
if (!r->initialized && nbits > 0) { if (!r->initialized && nbits > 0) {
r->entropy_total += nbits; r->entropy_total += nbits;
...@@ -593,7 +594,6 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) ...@@ -593,7 +594,6 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
wake_up_interruptible(&random_read_wait); wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN); kill_fasync(&fasync, SIGIO, POLL_IN);
} }
spin_unlock_irqrestore(&r->lock, flags);
} }
/********************************************************************* /*********************************************************************
...@@ -680,7 +680,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) ...@@ -680,7 +680,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.cycles = get_cycles(); sample.cycles = get_cycles();
sample.num = num; sample.num = num;
mix_pool_bytes(&input_pool, &sample, sizeof(sample)); mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
/* /*
* Calculate number of bits of randomness we probably added. * Calculate number of bits of randomness we probably added.
...@@ -764,7 +764,7 @@ void add_interrupt_randomness(int irq, int irq_flags) ...@@ -764,7 +764,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_pool->last = now; fast_pool->last = now;
r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
/* /*
* If we don't have a valid cycle counter, and we see * If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for * back-to-back timer interrupts, then skip giving credit for
...@@ -829,7 +829,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) ...@@ -829,7 +829,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
bytes = extract_entropy(r->pull, tmp, bytes, bytes = extract_entropy(r->pull, tmp, bytes,
random_read_wakeup_thresh / 8, rsvd); random_read_wakeup_thresh / 8, rsvd);
mix_pool_bytes(r, tmp, bytes); mix_pool_bytes(r, tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8); credit_entropy_bits(r, bytes*8);
} }
} }
...@@ -890,9 +890,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out) ...@@ -890,9 +890,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
int i; int i;
__u32 hash[5], workspace[SHA_WORKSPACE_WORDS]; __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64]; __u8 extract[64];
unsigned long flags;
/* Generate a hash across the pool, 16 words (512 bits) at a time */ /* Generate a hash across the pool, 16 words (512 bits) at a time */
sha_init(hash); sha_init(hash);
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16) for (i = 0; i < r->poolinfo->poolwords; i += 16)
sha_transform(hash, (__u8 *)(r->pool + i), workspace); sha_transform(hash, (__u8 *)(r->pool + i), workspace);
...@@ -905,7 +907,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) ...@@ -905,7 +907,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* brute-forcing the feedback as hard as brute-forcing the * brute-forcing the feedback as hard as brute-forcing the
* hash. * hash.
*/ */
mix_pool_bytes_extract(r, hash, sizeof(hash), extract); __mix_pool_bytes(r, hash, sizeof(hash), extract);
spin_unlock_irqrestore(&r->lock, flags);
/* /*
* To avoid duplicates, we atomically extract a portion of the * To avoid duplicates, we atomically extract a portion of the
...@@ -928,11 +931,10 @@ static void extract_buf(struct entropy_store *r, __u8 *out) ...@@ -928,11 +931,10 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
} }
static ssize_t extract_entropy(struct entropy_store *r, void *buf, static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int reserved) size_t nbytes, int min, int reserved)
{ {
ssize_t ret = 0, i; ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE]; __u8 tmp[EXTRACT_SIZE];
unsigned long flags;
xfer_secondary_pool(r, nbytes); xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved); nbytes = account(r, nbytes, min, reserved);
...@@ -941,6 +943,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, ...@@ -941,6 +943,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
extract_buf(r, tmp); extract_buf(r, tmp);
if (fips_enabled) { if (fips_enabled) {
unsigned long flags;
spin_lock_irqsave(&r->lock, flags); spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n"); panic("Hardware RNG duplicated output!\n");
...@@ -1034,22 +1038,18 @@ EXPORT_SYMBOL(get_random_bytes); ...@@ -1034,22 +1038,18 @@ EXPORT_SYMBOL(get_random_bytes);
static void init_std_data(struct entropy_store *r) static void init_std_data(struct entropy_store *r)
{ {
int i; int i;
ktime_t now; ktime_t now = ktime_get_real();
unsigned long flags; unsigned long rv;
spin_lock_irqsave(&r->lock, flags);
r->entropy_count = 0; r->entropy_count = 0;
r->entropy_total = 0; r->entropy_total = 0;
spin_unlock_irqrestore(&r->lock, flags); mix_pool_bytes(r, &now, sizeof(now), NULL);
for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
now = ktime_get_real(); if (!arch_get_random_long(&rv))
mix_pool_bytes(r, &now, sizeof(now));
for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
if (!arch_get_random_long(&flags))
break; break;
mix_pool_bytes(r, &flags, sizeof(flags)); mix_pool_bytes(r, &rv, sizeof(rv), NULL);
} }
mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
} }
static int rand_initialize(void) static int rand_initialize(void)
...@@ -1186,7 +1186,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) ...@@ -1186,7 +1186,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes; count -= bytes;
p += bytes; p += bytes;
mix_pool_bytes(r, buf, bytes); mix_pool_bytes(r, buf, bytes, NULL);
cond_resched(); cond_resched();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment