Commit fbf6dda7 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] random: SMP locking

From: Oliver Xymoron <oxymoron@waste.org>

This patch adds locking for SMP. Apparently Willy never managed to
revive his laptop with his version so I revived mine.

The batch pool is copied as a block to avoid long lock hold times
while mixing it into the primary pool. 

Two locks are added:

global batch_lock
   batch_entropy_store can be called from any context, and typically from
   interrupts -> spin_lock_irqsave

   batch_entropy_process is called via schedule_delayed_work and
   runs in process context -> spin_lock_irq

entropy_store.lock
   the mixing process is too expensive to be called from an interrupt
   context and the basic worker function extract_entropy can sleep, so
   all this stuff can be under a normal spin_lock
parent b0c15cba
...@@ -486,6 +486,7 @@ struct entropy_store { ...@@ -486,6 +486,7 @@ struct entropy_store {
int extract_count; int extract_count;
struct poolinfo poolinfo; struct poolinfo poolinfo;
__u32 *pool; __u32 *pool;
spinlock_t lock;
}; };
/* /*
...@@ -502,7 +503,7 @@ static int create_entropy_store(int size, struct entropy_store **ret_bucket) ...@@ -502,7 +503,7 @@ static int create_entropy_store(int size, struct entropy_store **ret_bucket)
poolwords = (size + 3) / 4; /* Convert bytes->words */ poolwords = (size + 3) / 4; /* Convert bytes->words */
/* The pool size must be a multiple of 16 32-bit words */ /* The pool size must be a multiple of 16 32-bit words */
poolwords = ((poolwords + 15) / 16) * 16; poolwords = ((poolwords + 15) / 16) * 16;
for (p = poolinfo_table; p->poolwords; p++) { for (p = poolinfo_table; p->poolwords; p++) {
if (poolwords == p->poolwords) if (poolwords == p->poolwords)
...@@ -524,6 +525,7 @@ static int create_entropy_store(int size, struct entropy_store **ret_bucket) ...@@ -524,6 +525,7 @@ static int create_entropy_store(int size, struct entropy_store **ret_bucket)
return -ENOMEM; return -ENOMEM;
} }
memset(r->pool, 0, POOLBYTES); memset(r->pool, 0, POOLBYTES);
r->lock = SPIN_LOCK_UNLOCKED;
*ret_bucket = r; *ret_bucket = r;
return 0; return 0;
} }
...@@ -565,6 +567,9 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in, ...@@ -565,6 +567,9 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
int new_rotate; int new_rotate;
int wordmask = r->poolinfo.poolwords - 1; int wordmask = r->poolinfo.poolwords - 1;
__u32 w; __u32 w;
unsigned long flags;
spin_lock_irqsave(&r->lock, flags);
while (nwords--) { while (nwords--) {
w = rotate_left(r->input_rotate, *in++); w = rotate_left(r->input_rotate, *in++);
...@@ -589,6 +594,8 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in, ...@@ -589,6 +594,8 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
w ^= r->pool[i]; w ^= r->pool[i];
r->pool[i] = (w >> 3) ^ twist_table[w & 7]; r->pool[i] = (w >> 3) ^ twist_table[w & 7];
} }
spin_unlock_irqrestore(&r->lock, flags);
} }
/* /*
...@@ -596,6 +603,10 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in, ...@@ -596,6 +603,10 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
*/ */
static void credit_entropy_store(struct entropy_store *r, int nbits) static void credit_entropy_store(struct entropy_store *r, int nbits)
{ {
unsigned long flags;
spin_lock_irqsave(&r->lock, flags);
if (r->entropy_count + nbits < 0) { if (r->entropy_count + nbits < 0) {
DEBUG_ENT("negative entropy/overflow (%d+%d)\n", DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
r->entropy_count, nbits); r->entropy_count, nbits);
...@@ -610,6 +621,8 @@ static void credit_entropy_store(struct entropy_store *r, int nbits) ...@@ -610,6 +621,8 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
r == random_state ? "primary" : "unknown", r == random_state ? "primary" : "unknown",
nbits, r->entropy_count); nbits, r->entropy_count);
} }
spin_unlock_irqrestore(&r->lock, flags);
} }
/********************************************************************** /**********************************************************************
...@@ -620,27 +633,33 @@ static void credit_entropy_store(struct entropy_store *r, int nbits) ...@@ -620,27 +633,33 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
* *
**********************************************************************/ **********************************************************************/
static __u32 *batch_entropy_pool; struct sample {
static int *batch_entropy_credit; __u32 data[2];
static int batch_max; int credit;
};
static struct sample *batch_entropy_pool, *batch_entropy_copy;
static int batch_head, batch_tail; static int batch_head, batch_tail;
static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;
static int batch_max;
static void batch_entropy_process(void *private_); static void batch_entropy_process(void *private_);
static DECLARE_WORK(batch_work, batch_entropy_process, NULL); static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
/* note: the size must be a power of 2 */ /* note: the size must be a power of 2 */
static int __init batch_entropy_init(int size, struct entropy_store *r) static int __init batch_entropy_init(int size, struct entropy_store *r)
{ {
batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL); batch_entropy_pool = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
if (!batch_entropy_pool) if (!batch_entropy_pool)
return -1; return -1;
batch_entropy_credit =kmalloc(size*sizeof(int), GFP_KERNEL); batch_entropy_copy = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
if (!batch_entropy_credit) { if (!batch_entropy_copy) {
kfree(batch_entropy_pool); kfree(batch_entropy_pool);
return -1; return -1;
} }
batch_head = batch_tail = 0; batch_head = batch_tail = 0;
batch_max = size;
batch_work.data = r; batch_work.data = r;
batch_max = size;
return 0; return 0;
} }
...@@ -652,27 +671,33 @@ static int __init batch_entropy_init(int size, struct entropy_store *r) ...@@ -652,27 +671,33 @@ static int __init batch_entropy_init(int size, struct entropy_store *r)
*/ */
void batch_entropy_store(u32 a, u32 b, int num) void batch_entropy_store(u32 a, u32 b, int num)
{ {
int new; int new;
unsigned long flags;
if (!batch_max) if (!batch_max)
return; return;
batch_entropy_pool[2*batch_head] = a;
batch_entropy_pool[(2*batch_head) + 1] = b;
batch_entropy_credit[batch_head] = num;
new = (batch_head+1) & (batch_max-1); spin_lock_irqsave(&batch_lock, flags);
if ((unsigned)(new - batch_tail) >= (unsigned)(batch_max / 2)) {
batch_entropy_pool[batch_head].data[0] = a;
batch_entropy_pool[batch_head].data[1] = b;
batch_entropy_pool[batch_head].credit = num;
if (((batch_head - batch_tail) & (batch_max-1)) >= (batch_max / 2)) {
/* /*
* Schedule it for the next timer tick: * Schedule it for the next timer tick:
*/ */
schedule_delayed_work(&batch_work, 1); schedule_delayed_work(&batch_work, 1);
batch_head = new; }
} else if (new == batch_tail) {
new = (batch_head+1) & (batch_max-1);
if (new == batch_tail) {
DEBUG_ENT("batch entropy buffer full\n"); DEBUG_ENT("batch entropy buffer full\n");
} else { } else {
batch_head = new; batch_head = new;
} }
spin_unlock_irqrestore(&batch_lock, flags);
} }
/* /*
...@@ -684,20 +709,34 @@ static void batch_entropy_process(void *private_) ...@@ -684,20 +709,34 @@ static void batch_entropy_process(void *private_)
{ {
struct entropy_store *r = (struct entropy_store *) private_, *p; struct entropy_store *r = (struct entropy_store *) private_, *p;
int max_entropy = r->poolinfo.POOLBITS; int max_entropy = r->poolinfo.POOLBITS;
unsigned head, tail;
if (!batch_max) /* Mixing into the pool is expensive, so copy over the batch
return; * data and release the batch lock. The pool is at least half
* full, so don't worry too much about copying only the used
* part.
*/
spin_lock_irq(&batch_lock);
memcpy(batch_entropy_copy, batch_entropy_pool,
batch_max*sizeof(struct sample));
head = batch_head;
tail = batch_tail;
batch_tail = batch_head;
spin_unlock_irq(&batch_lock);
p = r; p = r;
while (batch_head != batch_tail) { while (head != tail) {
if (r->entropy_count >= max_entropy) { if (r->entropy_count >= max_entropy) {
r = (r == sec_random_state) ? random_state : r = (r == sec_random_state) ? random_state :
sec_random_state; sec_random_state;
max_entropy = r->poolinfo.POOLBITS; max_entropy = r->poolinfo.POOLBITS;
} }
add_entropy_words(r, batch_entropy_pool + 2*batch_tail, 2); add_entropy_words(r, batch_entropy_copy[tail].data, 2);
credit_entropy_store(r, batch_entropy_credit[batch_tail]); credit_entropy_store(r, batch_entropy_copy[tail].credit);
batch_tail = (batch_tail+1) & (batch_max-1); tail = (tail+1) & (batch_max-1);
} }
if (p->entropy_count >= random_read_wakeup_thresh) if (p->entropy_count >= random_read_wakeup_thresh)
wake_up_interruptible(&random_read_wait); wake_up_interruptible(&random_read_wait);
...@@ -1276,6 +1315,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf, ...@@ -1276,6 +1315,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf,
ssize_t ret, i; ssize_t ret, i;
__u32 tmp[TMP_BUF_SIZE]; __u32 tmp[TMP_BUF_SIZE];
__u32 x; __u32 x;
unsigned long cpuflags;
add_timer_randomness(&extract_timer_state, nbytes); add_timer_randomness(&extract_timer_state, nbytes);
...@@ -1286,6 +1326,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf, ...@@ -1286,6 +1326,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf,
if (flags & EXTRACT_ENTROPY_SECONDARY) if (flags & EXTRACT_ENTROPY_SECONDARY)
xfer_secondary_pool(r, nbytes, tmp); xfer_secondary_pool(r, nbytes, tmp);
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, cpuflags);
DEBUG_ENT("%s has %d bits, want %d bits\n", DEBUG_ENT("%s has %d bits, want %d bits\n",
r == sec_random_state ? "secondary" : r == sec_random_state ? "secondary" :
r == random_state ? "primary" : "unknown", r == random_state ? "primary" : "unknown",
...@@ -1300,6 +1343,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf, ...@@ -1300,6 +1343,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf,
wake_up_interruptible(&random_write_wait); wake_up_interruptible(&random_write_wait);
r->extract_count += nbytes; r->extract_count += nbytes;
spin_unlock_irqrestore(&r->lock, cpuflags);
ret = 0; ret = 0;
while (nbytes) { while (nbytes) {
...@@ -1595,8 +1640,9 @@ static int ...@@ -1595,8 +1640,9 @@ static int
random_ioctl(struct inode * inode, struct file * file, random_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{ {
int *p, size, ent_count; int *p, *tmp, size, ent_count;
int retval; int retval;
unsigned long flags;
switch (cmd) { switch (cmd) {
case RNDGETENTCNT: case RNDGETENTCNT:
...@@ -1621,17 +1667,36 @@ random_ioctl(struct inode * inode, struct file * file, ...@@ -1621,17 +1667,36 @@ random_ioctl(struct inode * inode, struct file * file,
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
p = (int *) arg; p = (int *) arg;
ent_count = random_state->entropy_count; if (get_user(size, p) ||
if (put_user(ent_count, p++) ||
get_user(size, p) ||
put_user(random_state->poolinfo.poolwords, p++)) put_user(random_state->poolinfo.poolwords, p++))
return -EFAULT; return -EFAULT;
if (size < 0) if (size < 0)
return -EINVAL; return -EFAULT;
if (size > random_state->poolinfo.poolwords) if (size > random_state->poolinfo.poolwords)
size = random_state->poolinfo.poolwords; size = random_state->poolinfo.poolwords;
if (copy_to_user(p, random_state->pool, size * sizeof(__u32)))
/* prepare to atomically snapshot pool */
tmp = kmalloc(size * sizeof(__u32), GFP_KERNEL);
if (!tmp)
return -EFAULT;
spin_lock_irqsave(&random_state->lock, flags);
ent_count = random_state->entropy_count;
memcpy(tmp, random_state->pool, size * sizeof(__u32));
spin_unlock_irqrestore(&random_state->lock, flags);
if (!copy_to_user(p, tmp, size * sizeof(__u32))) {
kfree(tmp);
return -EFAULT; return -EFAULT;
}
kfree(tmp);
if(put_user(ent_count, p++))
return -EFAULT;
return 0; return 0;
case RNDADDENTROPY: case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment