Commit 3f2dc279 authored by Linus Torvalds

Merge branch 'entropy'

Merge active entropy generation updates.

This is admittedly partly "for discussion".  We need to have a way
forward for the boot time deadlocks where user space ends up waiting for
more entropy, but no entropy is forthcoming because the system is
entirely idle just waiting for something to happen.

While this was triggered by what is arguably a user space bug with
GDM/gnome-session asking for secure randomness during early boot, when
they didn't even need any such truly secure thing, the issue ends up
being that our "getrandom()" interface is prone to that kind of
confusion, because people don't think very hard about whether they want
to block for sufficient amounts of entropy.
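
As a purely illustrative user-space sketch (not part of this change), the
difference is between getrandom() with flags == 0, which blocks until the
CRNG is initialized, and GRND_NONBLOCK, which fails with EAGAIN instead of
hanging an idle early boot:

#include <errno.h>
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[16];

	/* Non-blocking: returns -1 with errno == EAGAIN if the pool is not seeded yet */
	if (getrandom(buf, sizeof(buf), GRND_NONBLOCK) < 0 && errno == EAGAIN)
		fprintf(stderr, "pool not ready, continuing without blocking\n");

	/* Default flags: this is the call that can block an idle early boot */
	if (getrandom(buf, sizeof(buf), 0) < 0)
		perror("getrandom");
	return 0;
}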

The approach herein is to not just passively wait for entropy
to happen, but to start actively collecting it if it is missing.  This
is not necessarily always possible, but if the architecture has a CPU
cycle counter, there is a fair amount of noise in the exact timings of
reasonably complex loads.

We may end up tweaking the load and the entropy estimates, but this
should be at least a reasonable starting point.
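
As a rough user-space approximation of the idea (using clock_gettime()
as a stand-in for the kernel's random_get_entropy(), and making no claim
about how much entropy this actually yields), the timing noise looks
something like this:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for a cycle counter: a monotonic timestamp in nanoseconds */
static uint64_t cycles(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t prev = cycles();
	int i;

	/*
	 * The low bits of successive deltas jitter with interrupts, cache
	 * misses and scheduling; that jitter is what the kernel loop below
	 * mixes into the input pool.
	 */
	for (i = 0; i < 8; i++) {
		uint64_t now = cycles();

		printf("delta low byte: %u\n", (unsigned int)((now - prev) & 0xff));
		prev = now;
	}
	return 0;
}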

As part of this, we also revert the revert of the ext4 IO pattern
improvement that ended up triggering the reported lack of external
entropy.

* getrandom() active entropy waiting:
  Revert "Revert "ext4: make __ext4_get_inode_loc plug""
  random: try to actively add entropy rather than passively wait for it
parents a3c0e7b1 02f03c42
@@ -1732,6 +1732,56 @@ void get_random_bytes(void *buf, int nbytes)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
+
+/*
+ * Each time the timer fires, we expect that we got an unpredictable
+ * jump in the cycle counter. Even if the timer is running on another
+ * CPU, the timer activity will be touching the stack of the CPU that is
+ * generating entropy..
+ *
+ * Note that we don't re-arm the timer in the timer itself - we are
+ * happy to be scheduled away, since that just makes the load more
+ * complex, but we do not want the timer to keep ticking unless the
+ * entropy loop is running.
+ *
+ * So the re-arming always happens in the entropy loop itself.
+ */
+static void entropy_timer(struct timer_list *t)
+{
+	credit_entropy_bits(&input_pool, 1);
+}
+
+/*
+ * If we have an actual cycle counter, see if we can
+ * generate enough entropy with timing noise
+ */
+static void try_to_generate_entropy(void)
+{
+	struct {
+		unsigned long now;
+		struct timer_list timer;
+	} stack;
+
+	stack.now = random_get_entropy();
+
+	/* Slow counter - or none. Don't even bother */
+	if (stack.now == random_get_entropy())
+		return;
+
+	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+	while (!crng_ready()) {
+		if (!timer_pending(&stack.timer))
+			mod_timer(&stack.timer, jiffies+1);
+		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+		schedule();
+		stack.now = random_get_entropy();
+	}
+
+	del_timer_sync(&stack.timer);
+	destroy_timer_on_stack(&stack.timer);
+	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+}
+
 /*
  * Wait for the urandom pool to be seeded and thus guaranteed to supply
  * cryptographically secure random numbers. This applies to: the /dev/urandom
@@ -1746,7 +1796,17 @@ int wait_for_random_bytes(void)
 {
 	if (likely(crng_ready()))
 		return 0;
-	return wait_event_interruptible(crng_init_wait, crng_ready());
+
+	do {
+		int ret;
+		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+		if (ret)
+			return ret > 0 ? 0 : ret;
+
+		try_to_generate_entropy();
+	} while (!crng_ready());
+
+	return 0;
 }
 EXPORT_SYMBOL(wait_for_random_bytes);
...
@@ -4551,6 +4551,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	struct buffer_head *bh;
 	struct super_block *sb = inode->i_sb;
 	ext4_fsblk_t block;
+	struct blk_plug plug;
 	int inodes_per_block, inode_offset;
 
 	iloc->bh = NULL;
@@ -4639,6 +4640,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 		 * If we need to do any I/O, try to pre-readahead extra
 		 * blocks from the inode table.
 		 */
+		blk_start_plug(&plug);
 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
 			ext4_fsblk_t b, end, table;
 			unsigned num;
@@ -4669,6 +4671,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
+		blk_finish_plug(&plug);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			EXT4_ERROR_INODE_BLOCK(inode, block,
...