Commit 205a525c authored by Herbert Xu's avatar Herbert Xu

random: Add callback API for random pool readiness

The get_blocking_random_bytes API is broken because the wait can
be arbitrarily long (potentially forever) so there is no safe way
of calling it from within the kernel.

This patch replaces it with a callback API instead.  The callback
is invoked potentially from interrupt context so the user needs
to schedule their own work thread if necessary.

In addition to being added, callbacks can also be removed; without a
removal API, this would give user-space a way to allocate kernel memory
without bound (by repeatedly opening algif_rng descriptors and then
closing them).
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 05dee9c7
...@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); ...@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
static struct fasync_struct *fasync; static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);
/********************************************************************** /**********************************************************************
* *
* OS independent entropy store. Here are the functions which handle * OS independent entropy store. Here are the functions which handle
...@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f) ...@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f)
f->count++; f->count++;
} }
/*
 * Invoke and unlink every registered readiness callback.  Called when the
 * nonblocking pool becomes initialized (from credit_entropy_bits()).
 *
 * NOTE: callbacks run with random_ready_list_lock held and interrupts
 * disabled, potentially from interrupt context -- callback implementations
 * must not sleep and should defer heavy work to their own work thread.
 */
static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		/*
		 * list_del_init() (not list_del()) so that a concurrent
		 * del_random_ready_callback() sees an empty entry and does
		 * not drop the module reference a second time.
		 */
		list_del_init(&rdy->list);
		rdy->func(rdy);
		/* Drop the reference taken by add_random_ready_callback(). */
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
/* /*
* Credit (or debit) the entropy store with n bits of entropy. * Credit (or debit) the entropy store with n bits of entropy.
* Use credit_entropy_bits_safe() if the value comes from userspace * Use credit_entropy_bits_safe() if the value comes from userspace
...@@ -660,6 +679,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) ...@@ -660,6 +679,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
r->entropy_total = 0; r->entropy_total = 0;
if (r == &nonblocking_pool) { if (r == &nonblocking_pool) {
prandom_reseed_late(); prandom_reseed_late();
process_random_ready_list();
wake_up_all(&urandom_init_wait); wake_up_all(&urandom_init_wait);
pr_notice("random: %s pool is initialized\n", r->name); pr_notice("random: %s pool is initialized\n", r->name);
} }
...@@ -1256,6 +1276,64 @@ void get_blocking_random_bytes(void *buf, int nbytes) ...@@ -1256,6 +1276,64 @@ void get_blocking_random_bytes(void *buf, int nbytes)
} }
EXPORT_SYMBOL(get_blocking_random_bytes); EXPORT_SYMBOL(get_blocking_random_bytes);
/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * A reference on rdy->owner is taken and held until the callback fires
 * (dropped in process_random_ready_list()) or until it is removed via
 * del_random_ready_callback().
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	/* Fast path: no locking needed once the pool is initialised. */
	if (likely(nonblocking_pool.initialized))
		return err;

	/* Pin the owning module so rdy->func stays valid while queued. */
	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	/*
	 * Re-check under the lock: the pool may have been initialised
	 * between the fast-path test above and acquiring the lock.
	 */
	if (nonblocking_pool.initialized)
		goto out;

	/*
	 * Success: the queued entry keeps the module reference; clearing
	 * owner makes the module_put() below a no-op.
	 */
	owner = NULL;

	list_add(&rdy->list, &random_ready_list);

	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	/* Releases the reference only on the -EALREADY race path. */
	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
/*
 * Delete a previously registered readiness callback function.
 *
 * If the callback is still queued (i.e. has not fired yet), the module
 * reference taken when it was added is released here.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *mod = NULL;
	unsigned long flags;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		mod = rdy->owner;
		list_del_init(&rdy->list);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(mod);
}
EXPORT_SYMBOL(del_random_ready_callback);
/* /*
* This function will use the architecture-specific hardware random * This function will use the architecture-specific hardware random
* number generator if it is available. The arch-specific hw RNG will * number generator if it is available. The arch-specific hw RNG will
......
...@@ -6,8 +6,15 @@ ...@@ -6,8 +6,15 @@
#ifndef _LINUX_RANDOM_H #ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H #define _LINUX_RANDOM_H
#include <linux/list.h>
#include <uapi/linux/random.h> #include <uapi/linux/random.h>
/*
 * Handle for random-pool readiness notification.  Callers embed this in
 * their own state; func is invoked (potentially from interrupt context)
 * once the nonblocking pool is initialised.
 */
struct random_ready_callback {
	struct list_head list;	/* linkage on the internal ready list */
	void (*func)(struct random_ready_callback *rdy);	/* invoked on readiness */
	struct module *owner;	/* module pinned while the callback is queued */
};
extern void add_device_randomness(const void *, unsigned int); extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value); unsigned int value);
...@@ -15,6 +22,8 @@ extern void add_interrupt_randomness(int irq, int irq_flags); ...@@ -15,6 +22,8 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes); extern void get_random_bytes(void *buf, int nbytes);
extern void get_blocking_random_bytes(void *buf, int nbytes); extern void get_blocking_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes); extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]); void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void); extern int random_int_secret_init(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment