Commit 0ed90597 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'random-5.17-rc1-for-linus' of...

Merge branch 'random-5.17-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull random number generator fixes from Jason Donenfeld:

 - Some Kconfig changes resulted in BIG_KEYS being unselectable, which
   Justin sent a patch to fix.

 - Geert pointed out that moving to BLAKE2s bloated vmlinux on little
   machines, like m68k, so we now compensate for this.

 - Numerous style and house cleaning fixes, meant to have a cleaner base
   for future changes.

* 'random-5.17-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  random: simplify arithmetic function flow in account()
  random: selectively clang-format where it makes sense
  random: access input_pool_data directly rather than through pointer
  random: cleanup fractional entropy shift constants
  random: prepend remaining pool constants with POOL_
  random: de-duplicate INPUT_POOL constants
  random: remove unused OUTPUT_POOL constants
  random: rather than entropy_store abstraction, use global
  random: remove unused extract_entropy() reserved argument
  random: remove incomplete last_data logic
  random: cleanup integer types
  random: cleanup poolinfo abstraction
  random: fix typo in comments
  lib/crypto: sha1: re-roll loops to reduce code size
  lib/crypto: blake2s: move hmac construction into wireguard
  lib/crypto: add prompts back to crypto libraries
parents 39b419ea a254a0e4
...@@ -1928,5 +1928,3 @@ source "crypto/asymmetric_keys/Kconfig" ...@@ -1928,5 +1928,3 @@ source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig" source "certs/Kconfig"
endif # if CRYPTO endif # if CRYPTO
source "lib/crypto/Kconfig"
...@@ -101,7 +101,7 @@ ...@@ -101,7 +101,7 @@
* =============================== * ===============================
* *
* There are four exported interfaces; two for use within the kernel, * There are four exported interfaces; two for use within the kernel,
* and two or use from userspace. * and two for use from userspace.
* *
* Exported interfaces ---- userspace output * Exported interfaces ---- userspace output
* ----------------------------------------- * -----------------------------------------
...@@ -124,7 +124,7 @@ ...@@ -124,7 +124,7 @@
* *
* The primary kernel interface is * The primary kernel interface is
* *
* void get_random_bytes(void *buf, int nbytes); * void get_random_bytes(void *buf, int nbytes);
* *
* This interface will return the requested number of random bytes, * This interface will return the requested number of random bytes,
* and place it in the requested buffer. This is equivalent to a * and place it in the requested buffer. This is equivalent to a
...@@ -132,10 +132,10 @@ ...@@ -132,10 +132,10 @@
* *
* For less critical applications, there are the functions: * For less critical applications, there are the functions:
* *
* u32 get_random_u32() * u32 get_random_u32()
* u64 get_random_u64() * u64 get_random_u64()
* unsigned int get_random_int() * unsigned int get_random_int()
* unsigned long get_random_long() * unsigned long get_random_long()
* *
* These are produced by a cryptographic RNG seeded from get_random_bytes, * These are produced by a cryptographic RNG seeded from get_random_bytes,
* and so do not deplete the entropy pool as much. These are recommended * and so do not deplete the entropy pool as much. These are recommended
...@@ -197,10 +197,10 @@ ...@@ -197,10 +197,10 @@
* from the devices are: * from the devices are:
* *
* void add_device_randomness(const void *buf, unsigned int size); * void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code, * void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value); * unsigned int value);
* void add_interrupt_randomness(int irq); * void add_interrupt_randomness(int irq);
* void add_disk_randomness(struct gendisk *disk); * void add_disk_randomness(struct gendisk *disk);
* void add_hwgenerator_randomness(const char *buffer, size_t count, * void add_hwgenerator_randomness(const char *buffer, size_t count,
* size_t entropy); * size_t entropy);
* void add_bootloader_randomness(const void *buf, unsigned int size); * void add_bootloader_randomness(const void *buf, unsigned int size);
...@@ -296,8 +296,8 @@ ...@@ -296,8 +296,8 @@
* /dev/random and /dev/urandom created already, they can be created * /dev/random and /dev/urandom created already, they can be created
* by using the commands: * by using the commands:
* *
* mknod /dev/random c 1 8 * mknod /dev/random c 1 8
* mknod /dev/urandom c 1 9 * mknod /dev/urandom c 1 9
* *
* Acknowledgements: * Acknowledgements:
* ================= * =================
...@@ -337,7 +337,6 @@ ...@@ -337,7 +337,6 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/fips.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/irq.h> #include <linux/irq.h>
...@@ -359,31 +358,12 @@ ...@@ -359,31 +358,12 @@
/* #define ADD_INTERRUPT_BENCH */ /* #define ADD_INTERRUPT_BENCH */
/*
* Configuration information
*/
#define INPUT_POOL_SHIFT 12
#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT 10
#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
#define EXTRACT_SIZE (BLAKE2S_HASH_SIZE / 2)
/*
* To allow fractional bits to be tracked, the entropy_count field is
* denominated in units of 1/8th bits.
*
* 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
* credit_entropy_bits() needs to be 64 bits wide.
*/
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
/* /*
* If the entropy count falls under this number of bits, then we * If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write * should wake up processes which are selecting or polling on write
* access to /dev/random. * access to /dev/random.
*/ */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; static int random_write_wakeup_bits = 28 * (1 << 5);
/* /*
* Originally, we used a primitive polynomial of degree .poolwords * Originally, we used a primitive polynomial of degree .poolwords
...@@ -430,14 +410,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; ...@@ -430,14 +410,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* polynomial which improves the resulting TGFSR polynomial to be * polynomial which improves the resulting TGFSR polynomial to be
* irreducible, which we have made here. * irreducible, which we have made here.
*/ */
static const struct poolinfo { enum poolinfo {
int poolbitshift, poolwords, poolbytes, poolfracbits; POOL_WORDS = 128,
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5) POOL_WORDMASK = POOL_WORDS - 1,
int tap1, tap2, tap3, tap4, tap5; POOL_BYTES = POOL_WORDS * sizeof(u32),
} poolinfo_table[] = { POOL_BITS = POOL_BYTES * 8,
/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */ POOL_BITSHIFT = ilog2(POOL_BITS),
/* To allow fractional bits to be tracked, the entropy_count field is
* denominated in units of 1/8th bits. */
POOL_ENTROPY_SHIFT = 3,
#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
{ S(128), 104, 76, 51, 25, 1 }, POOL_TAP1 = 104,
POOL_TAP2 = 76,
POOL_TAP3 = 51,
POOL_TAP4 = 25,
POOL_TAP5 = 1,
EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2
}; };
/* /*
...@@ -450,9 +443,9 @@ static DEFINE_SPINLOCK(random_ready_list_lock); ...@@ -450,9 +443,9 @@ static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list); static LIST_HEAD(random_ready_list);
struct crng_state { struct crng_state {
__u32 state[16]; u32 state[16];
unsigned long init_time; unsigned long init_time;
spinlock_t lock; spinlock_t lock;
}; };
static struct crng_state primary_crng = { static struct crng_state primary_crng = {
...@@ -476,10 +469,10 @@ static bool crng_need_final_init = false; ...@@ -476,10 +469,10 @@ static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1)) #define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0; static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0; static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE) #define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]); static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng, static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA_BLOCK_SIZE], int used); u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void); static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes); static void _get_random_bytes(void *buf, int nbytes);
...@@ -500,38 +493,23 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); ...@@ -500,38 +493,23 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
* *
**********************************************************************/ **********************************************************************/
struct entropy_store; static u32 input_pool_data[POOL_WORDS] __latent_entropy;
struct entropy_store {
/* read-only data: */
const struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
/* read-write data: */ static struct {
spinlock_t lock; spinlock_t lock;
unsigned short add_ptr; u16 add_ptr;
unsigned short input_rotate; u16 input_rotate;
int entropy_count; int entropy_count;
unsigned int last_data_init:1; } input_pool = {
__u8 last_data[EXTRACT_SIZE]; .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
}; };
static ssize_t extract_entropy(struct entropy_store *r, void *buf, static ssize_t extract_entropy(void *buf, size_t nbytes, int min);
size_t nbytes, int min, int rsvd); static ssize_t _extract_entropy(void *buf, size_t nbytes);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int fips);
static void crng_reseed(struct crng_state *crng, struct entropy_store *r); static void crng_reseed(struct crng_state *crng, bool use_input_pool);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};
static __u32 const twist_table[8] = { static const u32 twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
...@@ -545,39 +523,31 @@ static __u32 const twist_table[8] = { ...@@ -545,39 +523,31 @@ static __u32 const twist_table[8] = {
* it's cheap to do so and helps slightly in the expected case where * it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits. * the entropy is concentrated in the low-order bits.
*/ */
static void _mix_pool_bytes(struct entropy_store *r, const void *in, static void _mix_pool_bytes(const void *in, int nbytes)
int nbytes)
{ {
unsigned long i, tap1, tap2, tap3, tap4, tap5; unsigned long i;
int input_rotate; int input_rotate;
int wordmask = r->poolinfo->poolwords - 1; const u8 *bytes = in;
const unsigned char *bytes = in; u32 w;
__u32 w;
tap1 = r->poolinfo->tap1; input_rotate = input_pool.input_rotate;
tap2 = r->poolinfo->tap2; i = input_pool.add_ptr;
tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
input_rotate = r->input_rotate;
i = r->add_ptr;
/* mix one byte at a time to simplify size handling and churn faster */ /* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) { while (nbytes--) {
w = rol32(*bytes++, input_rotate); w = rol32(*bytes++, input_rotate);
i = (i - 1) & wordmask; i = (i - 1) & POOL_WORDMASK;
/* XOR in the various taps */ /* XOR in the various taps */
w ^= r->pool[i]; w ^= input_pool_data[i];
w ^= r->pool[(i + tap1) & wordmask]; w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK];
w ^= r->pool[(i + tap2) & wordmask]; w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK];
w ^= r->pool[(i + tap3) & wordmask]; w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK];
w ^= r->pool[(i + tap4) & wordmask]; w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK];
w ^= r->pool[(i + tap5) & wordmask]; w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK];
/* Mix the result back in with a twist */ /* Mix the result back in with a twist */
r->pool[i] = (w >> 3) ^ twist_table[w & 7]; input_pool_data[i] = (w >> 3) ^ twist_table[w & 7];
/* /*
* Normally, we add 7 bits of rotation to the pool. * Normally, we add 7 bits of rotation to the pool.
...@@ -588,33 +558,31 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in, ...@@ -588,33 +558,31 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
input_rotate = (input_rotate + (i ? 7 : 14)) & 31; input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
} }
r->input_rotate = input_rotate; input_pool.input_rotate = input_rotate;
r->add_ptr = i; input_pool.add_ptr = i;
} }
static void __mix_pool_bytes(struct entropy_store *r, const void *in, static void __mix_pool_bytes(const void *in, int nbytes)
int nbytes)
{ {
trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
_mix_pool_bytes(r, in, nbytes); _mix_pool_bytes(in, nbytes);
} }
static void mix_pool_bytes(struct entropy_store *r, const void *in, static void mix_pool_bytes(const void *in, int nbytes)
int nbytes)
{ {
unsigned long flags; unsigned long flags;
trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); trace_mix_pool_bytes(nbytes, _RET_IP_);
spin_lock_irqsave(&r->lock, flags); spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(r, in, nbytes); _mix_pool_bytes(in, nbytes);
spin_unlock_irqrestore(&r->lock, flags); spin_unlock_irqrestore(&input_pool.lock, flags);
} }
struct fast_pool { struct fast_pool {
__u32 pool[4]; u32 pool[4];
unsigned long last; unsigned long last;
unsigned short reg_idx; u16 reg_idx;
unsigned char count; u8 count;
}; };
/* /*
...@@ -624,8 +592,8 @@ struct fast_pool { ...@@ -624,8 +592,8 @@ struct fast_pool {
*/ */
static void fast_mix(struct fast_pool *f) static void fast_mix(struct fast_pool *f)
{ {
__u32 a = f->pool[0], b = f->pool[1]; u32 a = f->pool[0], b = f->pool[1];
__u32 c = f->pool[2], d = f->pool[3]; u32 c = f->pool[2], d = f->pool[3];
a += b; c += d; a += b; c += d;
b = rol32(b, 6); d = rol32(d, 27); b = rol32(b, 6); d = rol32(d, 27);
...@@ -669,17 +637,19 @@ static void process_random_ready_list(void) ...@@ -669,17 +637,19 @@ static void process_random_ready_list(void)
* Use credit_entropy_bits_safe() if the value comes from userspace * Use credit_entropy_bits_safe() if the value comes from userspace
* or otherwise should be checked for extreme values. * or otherwise should be checked for extreme values.
*/ */
static void credit_entropy_bits(struct entropy_store *r, int nbits) static void credit_entropy_bits(int nbits)
{ {
int entropy_count, orig; int entropy_count, entropy_bits, orig;
const int pool_size = r->poolinfo->poolfracbits; int nfrac = nbits << POOL_ENTROPY_SHIFT;
int nfrac = nbits << ENTROPY_SHIFT;
/* Ensure that the multiplication can avoid being 64 bits wide. */
BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31);
if (!nbits) if (!nbits)
return; return;
retry: retry:
entropy_count = orig = READ_ONCE(r->entropy_count); entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (nfrac < 0) { if (nfrac < 0) {
/* Debit */ /* Debit */
entropy_count += nfrac; entropy_count += nfrac;
...@@ -706,50 +676,43 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) ...@@ -706,50 +676,43 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
* turns no matter how large nbits is. * turns no matter how large nbits is.
*/ */
int pnfrac = nfrac; int pnfrac = nfrac;
const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;
/* The +2 corresponds to the /4 in the denominator */ /* The +2 corresponds to the /4 in the denominator */
do { do {
unsigned int anfrac = min(pnfrac, pool_size/2); unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2);
unsigned int add = unsigned int add =
((pool_size - entropy_count)*anfrac*3) >> s; ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;
entropy_count += add; entropy_count += add;
pnfrac -= anfrac; pnfrac -= anfrac;
} while (unlikely(entropy_count < pool_size-2 && pnfrac)); } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac));
} }
if (WARN_ON(entropy_count < 0)) { if (WARN_ON(entropy_count < 0)) {
pr_warn("negative entropy/overflow: pool %s count %d\n", pr_warn("negative entropy/overflow: count %d\n", entropy_count);
r->name, entropy_count);
entropy_count = 0; entropy_count = 0;
} else if (entropy_count > pool_size) } else if (entropy_count > POOL_FRACBITS)
entropy_count = pool_size; entropy_count = POOL_FRACBITS;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry; goto retry;
trace_credit_entropy_bits(r->name, nbits, trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
if (crng_init < 2 && entropy_bits >= 128) entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
crng_reseed(&primary_crng, r); if (crng_init < 2 && entropy_bits >= 128)
} crng_reseed(&primary_crng, true);
} }
static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) static int credit_entropy_bits_safe(int nbits)
{ {
const int nbits_max = r->poolinfo->poolwords * 32;
if (nbits < 0) if (nbits < 0)
return -EINVAL; return -EINVAL;
/* Cap the value to avoid overflows */ /* Cap the value to avoid overflows */
nbits = min(nbits, nbits_max); nbits = min(nbits, POOL_BITS);
credit_entropy_bits(r, nbits); credit_entropy_bits(nbits);
return 0; return 0;
} }
...@@ -759,7 +722,7 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) ...@@ -759,7 +722,7 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
* *
*********************************************************************/ *********************************************************************/
#define CRNG_RESEED_INTERVAL (300*HZ) #define CRNG_RESEED_INTERVAL (300 * HZ)
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
...@@ -783,9 +746,9 @@ early_param("random.trust_cpu", parse_trust_cpu); ...@@ -783,9 +746,9 @@ early_param("random.trust_cpu", parse_trust_cpu);
static bool crng_init_try_arch(struct crng_state *crng) static bool crng_init_try_arch(struct crng_state *crng)
{ {
int i; int i;
bool arch_init = true; bool arch_init = true;
unsigned long rv; unsigned long rv;
for (i = 4; i < 16; i++) { for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) && if (!arch_get_random_seed_long(&rv) &&
...@@ -801,9 +764,9 @@ static bool crng_init_try_arch(struct crng_state *crng) ...@@ -801,9 +764,9 @@ static bool crng_init_try_arch(struct crng_state *crng)
static bool __init crng_init_try_arch_early(struct crng_state *crng) static bool __init crng_init_try_arch_early(struct crng_state *crng)
{ {
int i; int i;
bool arch_init = true; bool arch_init = true;
unsigned long rv; unsigned long rv;
for (i = 4; i < 16; i++) { for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long_early(&rv) && if (!arch_get_random_seed_long_early(&rv) &&
...@@ -820,14 +783,14 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng) ...@@ -820,14 +783,14 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
static void crng_initialize_secondary(struct crng_state *crng) static void crng_initialize_secondary(struct crng_state *crng)
{ {
chacha_init_consts(crng->state); chacha_init_consts(crng->state);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12); _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
crng_init_try_arch(crng); crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
} }
static void __init crng_initialize_primary(struct crng_state *crng) static void __init crng_initialize_primary(struct crng_state *crng)
{ {
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); _extract_entropy(&crng->state[4], sizeof(u32) * 12);
if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) { if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
invalidate_batched_entropy(); invalidate_batched_entropy();
numa_crng_init(); numa_crng_init();
...@@ -873,7 +836,7 @@ static void do_numa_crng_init(struct work_struct *work) ...@@ -873,7 +836,7 @@ static void do_numa_crng_init(struct work_struct *work)
struct crng_state *crng; struct crng_state *crng;
struct crng_state **pool; struct crng_state **pool;
pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
for_each_online_node(i) { for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state), crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i); GFP_KERNEL | __GFP_NOFAIL, i);
...@@ -917,10 +880,10 @@ static struct crng_state *select_crng(void) ...@@ -917,10 +880,10 @@ static struct crng_state *select_crng(void)
* path. So we can't afford to dilly-dally. Returns the number of * path. So we can't afford to dilly-dally. Returns the number of
* bytes processed from cp. * bytes processed from cp.
*/ */
static size_t crng_fast_load(const char *cp, size_t len) static size_t crng_fast_load(const u8 *cp, size_t len)
{ {
unsigned long flags; unsigned long flags;
char *p; u8 *p;
size_t ret = 0; size_t ret = 0;
if (!spin_trylock_irqsave(&primary_crng.lock, flags)) if (!spin_trylock_irqsave(&primary_crng.lock, flags))
...@@ -929,7 +892,7 @@ static size_t crng_fast_load(const char *cp, size_t len) ...@@ -929,7 +892,7 @@ static size_t crng_fast_load(const char *cp, size_t len)
spin_unlock_irqrestore(&primary_crng.lock, flags); spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0; return 0;
} }
p = (unsigned char *) &primary_crng.state[4]; p = (u8 *)&primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp; p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--; ret++; cp++; crng_init_cnt++; len--; ret++;
...@@ -957,14 +920,14 @@ static size_t crng_fast_load(const char *cp, size_t len) ...@@ -957,14 +920,14 @@ static size_t crng_fast_load(const char *cp, size_t len)
* like a fixed DMI table (for example), which might very well be * like a fixed DMI table (for example), which might very well be
* unique to the machine, but is otherwise unvarying. * unique to the machine, but is otherwise unvarying.
*/ */
static int crng_slow_load(const char *cp, size_t len) static int crng_slow_load(const u8 *cp, size_t len)
{ {
unsigned long flags; unsigned long flags;
static unsigned char lfsr = 1; static u8 lfsr = 1;
unsigned char tmp; u8 tmp;
unsigned i, max = CHACHA_KEY_SIZE; unsigned int i, max = CHACHA_KEY_SIZE;
const char * src_buf = cp; const u8 *src_buf = cp;
char * dest_buf = (char *) &primary_crng.state[4]; u8 *dest_buf = (u8 *)&primary_crng.state[4];
if (!spin_trylock_irqsave(&primary_crng.lock, flags)) if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0; return 0;
...@@ -975,7 +938,7 @@ static int crng_slow_load(const char *cp, size_t len) ...@@ -975,7 +938,7 @@ static int crng_slow_load(const char *cp, size_t len)
if (len > max) if (len > max)
max = len; max = len;
for (i = 0; i < max ; i++) { for (i = 0; i < max; i++) {
tmp = lfsr; tmp = lfsr;
lfsr >>= 1; lfsr >>= 1;
if (tmp & 1) if (tmp & 1)
...@@ -988,17 +951,17 @@ static int crng_slow_load(const char *cp, size_t len) ...@@ -988,17 +951,17 @@ static int crng_slow_load(const char *cp, size_t len)
return 1; return 1;
} }
static void crng_reseed(struct crng_state *crng, struct entropy_store *r) static void crng_reseed(struct crng_state *crng, bool use_input_pool)
{ {
unsigned long flags; unsigned long flags;
int i, num; int i, num;
union { union {
__u8 block[CHACHA_BLOCK_SIZE]; u8 block[CHACHA_BLOCK_SIZE];
__u32 key[8]; u32 key[8];
} buf; } buf;
if (r) { if (use_input_pool) {
num = extract_entropy(r, &buf, 32, 16, 0); num = extract_entropy(&buf, 32, 16);
if (num == 0) if (num == 0)
return; return;
} else { } else {
...@@ -1008,11 +971,11 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) ...@@ -1008,11 +971,11 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
} }
spin_lock_irqsave(&crng->lock, flags); spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
unsigned long rv; unsigned long rv;
if (!arch_get_random_seed_long(&rv) && if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv)) !arch_get_random_long(&rv))
rv = random_get_entropy(); rv = random_get_entropy();
crng->state[i+4] ^= buf.key[i] ^ rv; crng->state[i + 4] ^= buf.key[i] ^ rv;
} }
memzero_explicit(&buf, sizeof(buf)); memzero_explicit(&buf, sizeof(buf));
WRITE_ONCE(crng->init_time, jiffies); WRITE_ONCE(crng->init_time, jiffies);
...@@ -1020,8 +983,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) ...@@ -1020,8 +983,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
crng_finalize_init(crng); crng_finalize_init(crng);
} }
static void _extract_crng(struct crng_state *crng, static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
__u8 out[CHACHA_BLOCK_SIZE])
{ {
unsigned long flags, init_time; unsigned long flags, init_time;
...@@ -1029,8 +991,7 @@ static void _extract_crng(struct crng_state *crng, ...@@ -1029,8 +991,7 @@ static void _extract_crng(struct crng_state *crng,
init_time = READ_ONCE(crng->init_time); init_time = READ_ONCE(crng->init_time);
if (time_after(READ_ONCE(crng_global_init_time), init_time) || if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
time_after(jiffies, init_time + CRNG_RESEED_INTERVAL)) time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
crng_reseed(crng, crng == &primary_crng ? crng_reseed(crng, crng == &primary_crng);
&input_pool : NULL);
} }
spin_lock_irqsave(&crng->lock, flags); spin_lock_irqsave(&crng->lock, flags);
chacha20_block(&crng->state[0], out); chacha20_block(&crng->state[0], out);
...@@ -1039,7 +1000,7 @@ static void _extract_crng(struct crng_state *crng, ...@@ -1039,7 +1000,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags); spin_unlock_irqrestore(&crng->lock, flags);
} }
static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE]) static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
{ {
_extract_crng(select_crng(), out); _extract_crng(select_crng(), out);
} }
...@@ -1049,26 +1010,26 @@ static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE]) ...@@ -1049,26 +1010,26 @@ static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection. * enough) to mutate the CRNG key to provide backtracking protection.
*/ */
static void _crng_backtrack_protect(struct crng_state *crng, static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA_BLOCK_SIZE], int used) u8 tmp[CHACHA_BLOCK_SIZE], int used)
{ {
unsigned long flags; unsigned long flags;
__u32 *s, *d; u32 *s, *d;
int i; int i;
used = round_up(used, sizeof(__u32)); used = round_up(used, sizeof(u32));
if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) { if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
extract_crng(tmp); extract_crng(tmp);
used = 0; used = 0;
} }
spin_lock_irqsave(&crng->lock, flags); spin_lock_irqsave(&crng->lock, flags);
s = (__u32 *) &tmp[used]; s = (u32 *)&tmp[used];
d = &crng->state[4]; d = &crng->state[4];
for (i=0; i < 8; i++) for (i = 0; i < 8; i++)
*d++ ^= *s++; *d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags); spin_unlock_irqrestore(&crng->lock, flags);
} }
static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used) static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
{ {
_crng_backtrack_protect(select_crng(), tmp, used); _crng_backtrack_protect(select_crng(), tmp, used);
} }
...@@ -1076,7 +1037,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used) ...@@ -1076,7 +1037,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes) static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{ {
ssize_t ret = 0, i = CHACHA_BLOCK_SIZE; ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256); int large_request = (nbytes > 256);
while (nbytes) { while (nbytes) {
...@@ -1108,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes) ...@@ -1108,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
return ret; return ret;
} }
/********************************************************************* /*********************************************************************
* *
* Entropy input management * Entropy input management
...@@ -1141,8 +1101,8 @@ void add_device_randomness(const void *buf, unsigned int size) ...@@ -1141,8 +1101,8 @@ void add_device_randomness(const void *buf, unsigned int size)
trace_add_device_randomness(size, _RET_IP_); trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags); spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&input_pool, buf, size); _mix_pool_bytes(buf, size);
_mix_pool_bytes(&input_pool, &time, sizeof(time)); _mix_pool_bytes(&time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags); spin_unlock_irqrestore(&input_pool.lock, flags);
} }
EXPORT_SYMBOL(add_device_randomness); EXPORT_SYMBOL(add_device_randomness);
...@@ -1161,19 +1121,17 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; ...@@ -1161,19 +1121,17 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
*/ */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num) static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{ {
struct entropy_store *r;
struct { struct {
long jiffies; long jiffies;
unsigned cycles; unsigned int cycles;
unsigned num; unsigned int num;
} sample; } sample;
long delta, delta2, delta3; long delta, delta2, delta3;
sample.jiffies = jiffies; sample.jiffies = jiffies;
sample.cycles = random_get_entropy(); sample.cycles = random_get_entropy();
sample.num = num; sample.num = num;
r = &input_pool; mix_pool_bytes(&sample, sizeof(sample));
mix_pool_bytes(r, &sample, sizeof(sample));
/* /*
* Calculate number of bits of randomness we probably added. * Calculate number of bits of randomness we probably added.
...@@ -1205,11 +1163,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) ...@@ -1205,11 +1163,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles, * Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits. * and limit entropy estimate to 12 bits.
*/ */
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
} }
void add_input_randomness(unsigned int type, unsigned int code, void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) unsigned int value)
{ {
static unsigned char last_value; static unsigned char last_value;
...@@ -1220,7 +1178,7 @@ void add_input_randomness(unsigned int type, unsigned int code, ...@@ -1220,7 +1178,7 @@ void add_input_randomness(unsigned int type, unsigned int code,
last_value = value; last_value = value;
add_timer_randomness(&input_timer_state, add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value); (type << 4) ^ code ^ (code >> 4) ^ value);
trace_add_input_randomness(ENTROPY_BITS(&input_pool)); trace_add_input_randomness(POOL_ENTROPY_BITS());
} }
EXPORT_SYMBOL_GPL(add_input_randomness); EXPORT_SYMBOL_GPL(add_input_randomness);
...@@ -1229,33 +1187,33 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness); ...@@ -1229,33 +1187,33 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
#ifdef ADD_INTERRUPT_BENCH #ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation; static unsigned long avg_cycles, avg_deviation;
#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ #define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1)) #define FIXED_1_2 (1 << (AVG_SHIFT - 1))
static void add_interrupt_bench(cycles_t start) static void add_interrupt_bench(cycles_t start)
{ {
long delta = random_get_entropy() - start; long delta = random_get_entropy() - start;
/* Use a weighted moving average */ /* Use a weighted moving average */
delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
avg_cycles += delta; avg_cycles += delta;
/* And average deviation */ /* And average deviation */
delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
avg_deviation += delta; avg_deviation += delta;
} }
#else #else
#define add_interrupt_bench(x) #define add_interrupt_bench(x)
#endif #endif
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{ {
__u32 *ptr = (__u32 *) regs; u32 *ptr = (u32 *)regs;
unsigned int idx; unsigned int idx;
if (regs == NULL) if (regs == NULL)
return 0; return 0;
idx = READ_ONCE(f->reg_idx); idx = READ_ONCE(f->reg_idx);
if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) if (idx >= sizeof(struct pt_regs) / sizeof(u32))
idx = 0; idx = 0;
ptr += idx++; ptr += idx++;
WRITE_ONCE(f->reg_idx, idx); WRITE_ONCE(f->reg_idx, idx);
...@@ -1264,13 +1222,12 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) ...@@ -1264,13 +1222,12 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
void add_interrupt_randomness(int irq) void add_interrupt_randomness(int irq)
{ {
struct entropy_store *r; struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); struct pt_regs *regs = get_irq_regs();
struct pt_regs *regs = get_irq_regs(); unsigned long now = jiffies;
unsigned long now = jiffies; cycles_t cycles = random_get_entropy();
cycles_t cycles = random_get_entropy(); u32 c_high, j_high;
__u32 c_high, j_high; u64 ip;
__u64 ip;
if (cycles == 0) if (cycles == 0)
cycles = get_reg(fast_pool, regs); cycles = get_reg(fast_pool, regs);
...@@ -1280,38 +1237,35 @@ void add_interrupt_randomness(int irq) ...@@ -1280,38 +1237,35 @@ void add_interrupt_randomness(int irq)
fast_pool->pool[1] ^= now ^ c_high; fast_pool->pool[1] ^= now ^ c_high;
ip = regs ? instruction_pointer(regs) : _RET_IP_; ip = regs ? instruction_pointer(regs) : _RET_IP_;
fast_pool->pool[2] ^= ip; fast_pool->pool[2] ^= ip;
fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : fast_pool->pool[3] ^=
get_reg(fast_pool, regs); (sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
fast_mix(fast_pool); fast_mix(fast_pool);
add_interrupt_bench(cycles); add_interrupt_bench(cycles);
if (unlikely(crng_init == 0)) { if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) && if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool, crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0; fast_pool->count = 0;
fast_pool->last = now; fast_pool->last = now;
} }
return; return;
} }
if ((fast_pool->count < 64) && if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
!time_after(now, fast_pool->last + HZ))
return; return;
r = &input_pool; if (!spin_trylock(&input_pool.lock))
if (!spin_trylock(&r->lock))
return; return;
fast_pool->last = now; fast_pool->last = now;
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
spin_unlock(&r->lock); spin_unlock(&input_pool.lock);
fast_pool->count = 0; fast_pool->count = 0;
/* award one bit for the contents of the fast pool */ /* award one bit for the contents of the fast pool */
credit_entropy_bits(r, 1); credit_entropy_bits(1);
} }
EXPORT_SYMBOL_GPL(add_interrupt_randomness); EXPORT_SYMBOL_GPL(add_interrupt_randomness);
...@@ -1322,7 +1276,7 @@ void add_disk_randomness(struct gendisk *disk) ...@@ -1322,7 +1276,7 @@ void add_disk_randomness(struct gendisk *disk)
return; return;
/* first major is 1, so we get >= 0x200 here */ /* first major is 1, so we get >= 0x200 here */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS());
} }
EXPORT_SYMBOL_GPL(add_disk_randomness); EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif #endif
...@@ -1337,43 +1291,36 @@ EXPORT_SYMBOL_GPL(add_disk_randomness); ...@@ -1337,43 +1291,36 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
* This function decides how many bytes to actually take from the * This function decides how many bytes to actually take from the
* given pool, and also debits the entropy count accordingly. * given pool, and also debits the entropy count accordingly.
*/ */
static size_t account(struct entropy_store *r, size_t nbytes, int min, static size_t account(size_t nbytes, int min)
int reserved)
{ {
int entropy_count, orig, have_bytes; int entropy_count, orig;
size_t ibytes, nfrac; size_t ibytes, nfrac;
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
/* Can we pull enough? */ /* Can we pull enough? */
retry: retry:
entropy_count = orig = READ_ONCE(r->entropy_count); entropy_count = orig = READ_ONCE(input_pool.entropy_count);
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
if ((have_bytes -= reserved) < 0)
have_bytes = 0;
ibytes = min_t(size_t, ibytes, have_bytes);
if (ibytes < min)
ibytes = 0;
if (WARN_ON(entropy_count < 0)) { if (WARN_ON(entropy_count < 0)) {
pr_warn("negative entropy count: pool %s count %d\n", pr_warn("negative entropy count: count %d\n", entropy_count);
r->name, entropy_count);
entropy_count = 0; entropy_count = 0;
} }
nfrac = ibytes << (ENTROPY_SHIFT + 3);
if ((size_t) entropy_count > nfrac) /* never pull more than available */
ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
if (ibytes < min)
ibytes = 0;
nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
if ((size_t)entropy_count > nfrac)
entropy_count -= nfrac; entropy_count -= nfrac;
else else
entropy_count = 0; entropy_count = 0;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry; goto retry;
trace_debit_entropy(r->name, 8 * ibytes); trace_debit_entropy(8 * ibytes);
if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) { if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait); wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT); kill_fasync(&fasync, SIGIO, POLL_OUT);
} }
...@@ -1386,7 +1333,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, ...@@ -1386,7 +1333,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
* *
* Note: we assume that .poolwords is a multiple of 16 words. * Note: we assume that .poolwords is a multiple of 16 words.
*/ */
static void extract_buf(struct entropy_store *r, __u8 *out) static void extract_buf(u8 *out)
{ {
struct blake2s_state state __aligned(__alignof__(unsigned long)); struct blake2s_state state __aligned(__alignof__(unsigned long));
u8 hash[BLAKE2S_HASH_SIZE]; u8 hash[BLAKE2S_HASH_SIZE];
...@@ -1408,9 +1355,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) ...@@ -1408,9 +1355,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
} }
/* Generate a hash across the pool */ /* Generate a hash across the pool */
spin_lock_irqsave(&r->lock, flags); spin_lock_irqsave(&input_pool.lock, flags);
blake2s_update(&state, (const u8 *)r->pool, blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES);
r->poolinfo->poolwords * sizeof(*r->pool));
blake2s_final(&state, hash); /* final zeros out state */ blake2s_final(&state, hash); /* final zeros out state */
/* /*
...@@ -1422,8 +1368,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) ...@@ -1422,8 +1368,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* brute-forcing the feedback as hard as brute-forcing the * brute-forcing the feedback as hard as brute-forcing the
* hash. * hash.
*/ */
__mix_pool_bytes(r, hash, sizeof(hash)); __mix_pool_bytes(hash, sizeof(hash));
spin_unlock_irqrestore(&r->lock, flags); spin_unlock_irqrestore(&input_pool.lock, flags);
/* Note that EXTRACT_SIZE is half of hash size here, because above /* Note that EXTRACT_SIZE is half of hash size here, because above
* we've dumped the full length back into mixer. By reducing the * we've dumped the full length back into mixer. By reducing the
...@@ -1433,23 +1379,13 @@ static void extract_buf(struct entropy_store *r, __u8 *out) ...@@ -1433,23 +1379,13 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
memzero_explicit(hash, sizeof(hash)); memzero_explicit(hash, sizeof(hash));
} }
static ssize_t _extract_entropy(struct entropy_store *r, void *buf, static ssize_t _extract_entropy(void *buf, size_t nbytes)
size_t nbytes, int fips)
{ {
ssize_t ret = 0, i; ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE]; u8 tmp[EXTRACT_SIZE];
unsigned long flags;
while (nbytes) { while (nbytes) {
extract_buf(r, tmp); extract_buf(tmp);
if (fips) {
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
memcpy(r->last_data, tmp, EXTRACT_SIZE);
spin_unlock_irqrestore(&r->lock, flags);
}
i = min_t(int, nbytes, EXTRACT_SIZE); i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i); memcpy(buf, tmp, i);
nbytes -= i; nbytes -= i;
...@@ -1468,42 +1404,19 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf, ...@@ -1468,42 +1404,19 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
* returns it in a buffer. * returns it in a buffer.
* *
* The min parameter specifies the minimum amount we can pull before * The min parameter specifies the minimum amount we can pull before
* failing to avoid races that defeat catastrophic reseeding while the * failing to avoid races that defeat catastrophic reseeding.
* reserved parameter indicates how much entropy we must leave in the
* pool after each pull to avoid starving other readers.
*/ */
static ssize_t extract_entropy(struct entropy_store *r, void *buf, static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
size_t nbytes, int min, int reserved)
{ {
__u8 tmp[EXTRACT_SIZE]; trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
unsigned long flags; nbytes = account(nbytes, min);
return _extract_entropy(buf, nbytes);
/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!r->last_data_init) {
r->last_data_init = 1;
spin_unlock_irqrestore(&r->lock, flags);
trace_extract_entropy(r->name, EXTRACT_SIZE,
ENTROPY_BITS(r), _RET_IP_);
extract_buf(r, tmp);
spin_lock_irqsave(&r->lock, flags);
memcpy(r->last_data, tmp, EXTRACT_SIZE);
}
spin_unlock_irqrestore(&r->lock, flags);
}
trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
nbytes = account(r, nbytes, min, reserved);
return _extract_entropy(r, buf, nbytes, fips_enabled);
} }
#define warn_unseeded_randomness(previous) \ #define warn_unseeded_randomness(previous) \
_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous)) _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
static void _warn_unseeded_randomness(const char *func_name, void *caller, static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
void **previous)
{ {
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
const bool print_once = false; const bool print_once = false;
...@@ -1511,8 +1424,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, ...@@ -1511,8 +1424,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
static bool print_once __read_mostly; static bool print_once __read_mostly;
#endif #endif
if (print_once || if (print_once || crng_ready() ||
crng_ready() ||
(previous && (caller == READ_ONCE(*previous)))) (previous && (caller == READ_ONCE(*previous))))
return; return;
WRITE_ONCE(*previous, caller); WRITE_ONCE(*previous, caller);
...@@ -1520,9 +1432,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, ...@@ -1520,9 +1432,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true; print_once = true;
#endif #endif
if (__ratelimit(&unseeded_warning)) if (__ratelimit(&unseeded_warning))
printk_deferred(KERN_NOTICE "random: %s called from %pS " printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
"with crng_init=%d\n", func_name, caller, func_name, caller, crng_init);
crng_init);
} }
/* /*
...@@ -1537,7 +1448,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, ...@@ -1537,7 +1448,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/ */
static void _get_random_bytes(void *buf, int nbytes) static void _get_random_bytes(void *buf, int nbytes)
{ {
__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_); trace_get_random_bytes(nbytes, _RET_IP_);
...@@ -1565,7 +1476,6 @@ void get_random_bytes(void *buf, int nbytes) ...@@ -1565,7 +1476,6 @@ void get_random_bytes(void *buf, int nbytes)
} }
EXPORT_SYMBOL(get_random_bytes); EXPORT_SYMBOL(get_random_bytes);
/* /*
* Each time the timer fires, we expect that we got an unpredictable * Each time the timer fires, we expect that we got an unpredictable
* jump in the cycle counter. Even if the timer is running on another * jump in the cycle counter. Even if the timer is running on another
...@@ -1581,7 +1491,7 @@ EXPORT_SYMBOL(get_random_bytes); ...@@ -1581,7 +1491,7 @@ EXPORT_SYMBOL(get_random_bytes);
*/ */
static void entropy_timer(struct timer_list *t) static void entropy_timer(struct timer_list *t)
{ {
credit_entropy_bits(&input_pool, 1); credit_entropy_bits(1);
} }
/* /*
...@@ -1604,15 +1514,15 @@ static void try_to_generate_entropy(void) ...@@ -1604,15 +1514,15 @@ static void try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0); timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready()) { while (!crng_ready()) {
if (!timer_pending(&stack.timer)) if (!timer_pending(&stack.timer))
mod_timer(&stack.timer, jiffies+1); mod_timer(&stack.timer, jiffies + 1);
mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); mix_pool_bytes(&stack.now, sizeof(stack.now));
schedule(); schedule();
stack.now = random_get_entropy(); stack.now = random_get_entropy();
} }
del_timer_sync(&stack.timer); del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer); destroy_timer_on_stack(&stack.timer);
mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); mix_pool_bytes(&stack.now, sizeof(stack.now));
} }
/* /*
...@@ -1731,7 +1641,7 @@ EXPORT_SYMBOL(del_random_ready_callback); ...@@ -1731,7 +1641,7 @@ EXPORT_SYMBOL(del_random_ready_callback);
int __must_check get_random_bytes_arch(void *buf, int nbytes) int __must_check get_random_bytes_arch(void *buf, int nbytes)
{ {
int left = nbytes; int left = nbytes;
char *p = buf; u8 *p = buf;
trace_get_random_bytes_arch(left, _RET_IP_); trace_get_random_bytes_arch(left, _RET_IP_);
while (left) { while (left) {
...@@ -1753,26 +1663,24 @@ EXPORT_SYMBOL(get_random_bytes_arch); ...@@ -1753,26 +1663,24 @@ EXPORT_SYMBOL(get_random_bytes_arch);
/* /*
* init_std_data - initialize pool with system data * init_std_data - initialize pool with system data
* *
* @r: pool to initialize
*
* This function clears the pool's entropy count and mixes some system * This function clears the pool's entropy count and mixes some system
* data into the pool to prepare it for use. The pool is not cleared * data into the pool to prepare it for use. The pool is not cleared
* as that can only decrease the entropy in the pool. * as that can only decrease the entropy in the pool.
*/ */
static void __init init_std_data(struct entropy_store *r) static void __init init_std_data(void)
{ {
int i; int i;
ktime_t now = ktime_get_real(); ktime_t now = ktime_get_real();
unsigned long rv; unsigned long rv;
mix_pool_bytes(r, &now, sizeof(now)); mix_pool_bytes(&now, sizeof(now));
for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_seed_long(&rv) && if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv)) !arch_get_random_long(&rv))
rv = random_get_entropy(); rv = random_get_entropy();
mix_pool_bytes(r, &rv, sizeof(rv)); mix_pool_bytes(&rv, sizeof(rv));
} }
mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); mix_pool_bytes(utsname(), sizeof(*(utsname())));
} }
/* /*
...@@ -1787,7 +1695,7 @@ static void __init init_std_data(struct entropy_store *r) ...@@ -1787,7 +1695,7 @@ static void __init init_std_data(struct entropy_store *r)
*/ */
int __init rand_initialize(void) int __init rand_initialize(void)
{ {
init_std_data(&input_pool); init_std_data();
if (crng_need_final_init) if (crng_need_final_init)
crng_finalize_init(&primary_crng); crng_finalize_init(&primary_crng);
crng_initialize_primary(&primary_crng); crng_initialize_primary(&primary_crng);
...@@ -1816,20 +1724,19 @@ void rand_initialize_disk(struct gendisk *disk) ...@@ -1816,20 +1724,19 @@ void rand_initialize_disk(struct gendisk *disk)
} }
#endif #endif
static ssize_t static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes, size_t nbytes, loff_t *ppos)
loff_t *ppos)
{ {
int ret; int ret;
nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3));
ret = extract_crng_user(buf, nbytes); ret = extract_crng_user(buf, nbytes);
trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool)); trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS());
return ret; return ret;
} }
static ssize_t static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) loff_t *ppos)
{ {
static int maxwarn = 10; static int maxwarn = 10;
...@@ -1843,8 +1750,8 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) ...@@ -1843,8 +1750,8 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos); return urandom_read_nowarn(file, buf, nbytes, ppos);
} }
static ssize_t static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) loff_t *ppos)
{ {
int ret; int ret;
...@@ -1854,8 +1761,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) ...@@ -1854,8 +1761,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos); return urandom_read_nowarn(file, buf, nbytes, ppos);
} }
static __poll_t static __poll_t random_poll(struct file *file, poll_table *wait)
random_poll(struct file *file, poll_table * wait)
{ {
__poll_t mask; __poll_t mask;
...@@ -1864,16 +1770,15 @@ random_poll(struct file *file, poll_table * wait) ...@@ -1864,16 +1770,15 @@ random_poll(struct file *file, poll_table * wait)
mask = 0; mask = 0;
if (crng_ready()) if (crng_ready())
mask |= EPOLLIN | EPOLLRDNORM; mask |= EPOLLIN | EPOLLRDNORM;
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) if (POOL_ENTROPY_BITS() < random_write_wakeup_bits)
mask |= EPOLLOUT | EPOLLWRNORM; mask |= EPOLLOUT | EPOLLWRNORM;
return mask; return mask;
} }
static int static int write_pool(const char __user *buffer, size_t count)
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{ {
size_t bytes; size_t bytes;
__u32 t, buf[16]; u32 t, buf[16];
const char __user *p = buffer; const char __user *p = buffer;
while (count > 0) { while (count > 0) {
...@@ -1883,7 +1788,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) ...@@ -1883,7 +1788,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
if (copy_from_user(&buf, p, bytes)) if (copy_from_user(&buf, p, bytes))
return -EFAULT; return -EFAULT;
for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { for (b = bytes; b > 0; b -= sizeof(u32), i++) {
if (!arch_get_random_int(&t)) if (!arch_get_random_int(&t))
break; break;
buf[i] ^= t; buf[i] ^= t;
...@@ -1892,7 +1797,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) ...@@ -1892,7 +1797,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes; count -= bytes;
p += bytes; p += bytes;
mix_pool_bytes(r, buf, bytes); mix_pool_bytes(buf, bytes);
cond_resched(); cond_resched();
} }
...@@ -1904,7 +1809,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer, ...@@ -1904,7 +1809,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
{ {
size_t ret; size_t ret;
ret = write_pool(&input_pool, buffer, count); ret = write_pool(buffer, count);
if (ret) if (ret)
return ret; return ret;
...@@ -1920,7 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ...@@ -1920,7 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) { switch (cmd) {
case RNDGETENTCNT: case RNDGETENTCNT:
/* inherently racy, no point locking */ /* inherently racy, no point locking */
ent_count = ENTROPY_BITS(&input_pool); ent_count = POOL_ENTROPY_BITS();
if (put_user(ent_count, p)) if (put_user(ent_count, p))
return -EFAULT; return -EFAULT;
return 0; return 0;
...@@ -1929,7 +1834,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ...@@ -1929,7 +1834,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM; return -EPERM;
if (get_user(ent_count, p)) if (get_user(ent_count, p))
return -EFAULT; return -EFAULT;
return credit_entropy_bits_safe(&input_pool, ent_count); return credit_entropy_bits_safe(ent_count);
case RNDADDENTROPY: case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
...@@ -1939,11 +1844,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ...@@ -1939,11 +1844,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL; return -EINVAL;
if (get_user(size, p++)) if (get_user(size, p++))
return -EFAULT; return -EFAULT;
retval = write_pool(&input_pool, (const char __user *)p, retval = write_pool((const char __user *)p, size);
size);
if (retval < 0) if (retval < 0)
return retval; return retval;
return credit_entropy_bits_safe(&input_pool, ent_count); return credit_entropy_bits_safe(ent_count);
case RNDZAPENTCNT: case RNDZAPENTCNT:
case RNDCLEARPOOL: case RNDCLEARPOOL:
/* /*
...@@ -1959,7 +1863,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ...@@ -1959,7 +1863,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM; return -EPERM;
if (crng_init < 2) if (crng_init < 2)
return -ENODATA; return -ENODATA;
crng_reseed(&primary_crng, &input_pool); crng_reseed(&primary_crng, true);
WRITE_ONCE(crng_global_init_time, jiffies - 1); WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0; return 0;
default: default:
...@@ -1973,9 +1877,9 @@ static int random_fasync(int fd, struct file *filp, int on) ...@@ -1973,9 +1877,9 @@ static int random_fasync(int fd, struct file *filp, int on)
} }
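The RNDADDENTROPY branch earlier in random_ioctl() now hands the user buffer straight to write_pool() and credits the claimed bits without an entropy_store pointer, but the userspace ABI is unchanged. A minimal sketch of a privileged feeder using that ioctl (the 64-byte buffer and the 0xAA fill are illustrative stand-ins, not real entropy):

/* Sketch: mix 64 bytes into the input pool and claim 512 bits of entropy.
 * Needs CAP_SYS_ADMIN; the kernel credits whatever entropy_count we claim. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>	/* struct rand_pool_info, RNDADDENTROPY */

int main(void)
{
	__u32 storage[2 + 16];	/* 8-byte header + 64 bytes of payload, aligned */
	struct rand_pool_info *info = (struct rand_pool_info *)storage;
	int fd = open("/dev/random", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(info->buf, 0xAA, 64);	/* placeholder data, not real entropy */
	info->buf_size = 64;		/* bytes handed to write_pool() */
	info->entropy_count = 64 * 8;	/* bits credited to the pool */
	if (ioctl(fd, RNDADDENTROPY, info) < 0)
		perror("RNDADDENTROPY");
	close(fd);
	return 0;
}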
const struct file_operations random_fops = { const struct file_operations random_fops = {
.read = random_read, .read = random_read,
.write = random_write, .write = random_write,
.poll = random_poll, .poll = random_poll,
.unlocked_ioctl = random_ioctl, .unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl, .compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync, .fasync = random_fasync,
...@@ -1983,7 +1887,7 @@ const struct file_operations random_fops = { ...@@ -1983,7 +1887,7 @@ const struct file_operations random_fops = {
}; };
const struct file_operations urandom_fops = { const struct file_operations urandom_fops = {
.read = urandom_read, .read = urandom_read,
.write = random_write, .write = random_write,
.unlocked_ioctl = random_ioctl, .unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl, .compat_ioctl = compat_ptr_ioctl,
...@@ -1991,19 +1895,19 @@ const struct file_operations urandom_fops = { ...@@ -1991,19 +1895,19 @@ const struct file_operations urandom_fops = {
.llseek = noop_llseek, .llseek = noop_llseek,
}; };
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
unsigned int, flags) flags)
{ {
int ret; int ret;
if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE)) if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
return -EINVAL; return -EINVAL;
/* /*
* Requesting insecure and blocking randomness at the same time makes * Requesting insecure and blocking randomness at the same time makes
* no sense. * no sense.
*/ */
if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM)) if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL; return -EINVAL;
if (count > INT_MAX) if (count > INT_MAX)
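The flag validation above is easiest to see from the other side of the syscall. A short userspace sketch, assuming the glibc getrandom(3) wrapper from <sys/random.h> (GRND_INSECURE may need a recent glibc, or the define from <linux/random.h>):

#include <errno.h>
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[16];

	/* Plain request: blocks only until the CRNG is first seeded. */
	if (getrandom(buf, sizeof(buf), 0) < 0)
		perror("getrandom");

	/* Insecure + blocking together is rejected, matching the check above. */
	if (getrandom(buf, sizeof(buf), GRND_INSECURE | GRND_RANDOM) < 0 &&
	    errno == EINVAL)
		fprintf(stderr, "GRND_INSECURE|GRND_RANDOM -> EINVAL, as expected\n");
	return 0;
}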
...@@ -2030,7 +1934,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, ...@@ -2030,7 +1934,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h> #include <linux/sysctl.h>
static int min_write_thresh; static int min_write_thresh;
static int max_write_thresh = INPUT_POOL_WORDS * 32; static int max_write_thresh = POOL_BITS;
static int random_min_urandom_seed = 60; static int random_min_urandom_seed = 60;
static char sysctl_bootid[16]; static char sysctl_bootid[16];
...@@ -2043,8 +1947,8 @@ static char sysctl_bootid[16]; ...@@ -2043,8 +1947,8 @@ static char sysctl_bootid[16];
* returned as an ASCII string in the standard UUID format; if via the * returned as an ASCII string in the standard UUID format; if via the
* sysctl system call, as 16 bytes of binary data. * sysctl system call, as 16 bytes of binary data.
*/ */
static int proc_do_uuid(struct ctl_table *table, int write, static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
void *buffer, size_t *lenp, loff_t *ppos) size_t *lenp, loff_t *ppos)
{ {
struct ctl_table fake_table; struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid; unsigned char buf[64], tmp_uuid[16], *uuid;
...@@ -2073,13 +1977,13 @@ static int proc_do_uuid(struct ctl_table *table, int write, ...@@ -2073,13 +1977,13 @@ static int proc_do_uuid(struct ctl_table *table, int write,
/* /*
* Return entropy available scaled to integral bits * Return entropy available scaled to integral bits
*/ */
static int proc_do_entropy(struct ctl_table *table, int write, static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
void *buffer, size_t *lenp, loff_t *ppos) size_t *lenp, loff_t *ppos)
{ {
struct ctl_table fake_table; struct ctl_table fake_table;
int entropy_count; int entropy_count;
entropy_count = *(int *)table->data >> ENTROPY_SHIFT; entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT;
fake_table.data = &entropy_count; fake_table.data = &entropy_count;
fake_table.maxlen = sizeof(entropy_count); fake_table.maxlen = sizeof(entropy_count);
...@@ -2087,7 +1991,7 @@ static int proc_do_entropy(struct ctl_table *table, int write, ...@@ -2087,7 +1991,7 @@ static int proc_do_entropy(struct ctl_table *table, int write,
return proc_dointvec(&fake_table, write, buffer, lenp, ppos); return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
} }
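For reference, the shift exists because entropy_count is stored in fractional units; assuming POOL_ENTROPY_SHIFT keeps the historical value of 3 (eighths of a bit, as the old ENTROPY_SHIFT did), the sysctl reports whole bits as:

\text{entropy\_bits} = \Big\lfloor \frac{\text{entropy\_count}}{2^{\text{POOL\_ENTROPY\_SHIFT}}} \Big\rfloor, \qquad \text{e.g. } \lfloor 4096 / 2^{3} \rfloor = 512.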
static int sysctl_poolsize = INPUT_POOL_WORDS * 32; static int sysctl_poolsize = POOL_BITS;
extern struct ctl_table random_table[]; extern struct ctl_table random_table[];
struct ctl_table random_table[] = { struct ctl_table random_table[] = {
{ {
...@@ -2151,7 +2055,7 @@ struct ctl_table random_table[] = { ...@@ -2151,7 +2055,7 @@ struct ctl_table random_table[] = {
#endif #endif
{ } { }
}; };
#endif /* CONFIG_SYSCTL */ #endif /* CONFIG_SYSCTL */
struct batched_entropy { struct batched_entropy {
union { union {
...@@ -2171,7 +2075,7 @@ struct batched_entropy { ...@@ -2171,7 +2075,7 @@ struct batched_entropy {
* point prior. * point prior.
*/ */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
}; };
u64 get_random_u64(void) u64 get_random_u64(void)
...@@ -2196,7 +2100,7 @@ u64 get_random_u64(void) ...@@ -2196,7 +2100,7 @@ u64 get_random_u64(void)
EXPORT_SYMBOL(get_random_u64); EXPORT_SYMBOL(get_random_u64);
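These per-CPU batches back get_random_u64() above and its u32 sibling below; callers just take the next word. A minimal in-kernel sketch (the 1..10 ms backoff and the function name are illustrative assumptions, not kernel code):

#include <linux/random.h>

/* Pick a small randomized retry delay; each call consumes one word from the
 * CPU-local batch, refilling it from the ChaCha generator only when empty. */
static unsigned int pick_backoff_ms(void)
{
	return 1 + (get_random_u32() % 10);	/* 1..10, minor modulo bias accepted */
}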
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
}; };
u32 get_random_u32(void) u32 get_random_u32(void)
{ {
...@@ -2228,7 +2132,7 @@ static void invalidate_batched_entropy(void) ...@@ -2228,7 +2132,7 @@ static void invalidate_batched_entropy(void)
int cpu; int cpu;
unsigned long flags; unsigned long flags;
for_each_possible_cpu (cpu) { for_each_possible_cpu(cpu) {
struct batched_entropy *batched_entropy; struct batched_entropy *batched_entropy;
batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
...@@ -2257,8 +2161,7 @@ static void invalidate_batched_entropy(void) ...@@ -2257,8 +2161,7 @@ static void invalidate_batched_entropy(void)
* Return: A page aligned address within [start, start + range). On error, * Return: A page aligned address within [start, start + range). On error,
* @start is returned. * @start is returned.
*/ */
unsigned long unsigned long randomize_page(unsigned long start, unsigned long range)
randomize_page(unsigned long start, unsigned long range)
{ {
if (!PAGE_ALIGNED(start)) { if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start; range -= PAGE_ALIGN(start) - start;
...@@ -2283,11 +2186,9 @@ randomize_page(unsigned long start, unsigned long range) ...@@ -2283,11 +2186,9 @@ randomize_page(unsigned long start, unsigned long range)
void add_hwgenerator_randomness(const char *buffer, size_t count, void add_hwgenerator_randomness(const char *buffer, size_t count,
size_t entropy) size_t entropy)
{ {
struct entropy_store *poolp = &input_pool;
if (unlikely(crng_init == 0)) { if (unlikely(crng_init == 0)) {
size_t ret = crng_fast_load(buffer, count); size_t ret = crng_fast_load(buffer, count);
mix_pool_bytes(poolp, buffer, ret); mix_pool_bytes(buffer, ret);
count -= ret; count -= ret;
buffer += ret; buffer += ret;
if (!count || crng_init == 0) if (!count || crng_init == 0)
...@@ -2300,9 +2201,9 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, ...@@ -2300,9 +2201,9 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
*/ */
wait_event_interruptible(random_write_wait, wait_event_interruptible(random_write_wait,
!system_wq || kthread_should_stop() || !system_wq || kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count); mix_pool_bytes(buffer, count);
credit_entropy_bits(poolp, entropy); credit_entropy_bits(entropy);
} }
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
......
...@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key( ...@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
static_identity->static_public, private_key); static_identity->static_public, private_key);
} }
static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
{
struct blake2s_state state;
u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
int i;
if (keylen > BLAKE2S_BLOCK_SIZE) {
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, key, keylen);
blake2s_final(&state, x_key);
} else
memcpy(x_key, key, keylen);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, in, inlen);
blake2s_final(&state, i_hash);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x5c ^ 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
blake2s_final(&state, i_hash);
memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
}
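The helper above is the textbook HMAC construction instantiated with BLAKE2s, now private to WireGuard instead of being exported as blake2s256_hmac() from lib/crypto. With ipad = 0x36 and opad = 0x5c repeated to a full block, and K' the key zero-padded (or pre-hashed when longer than a block):

\mathrm{HMAC}(K, m) = H\big((K' \oplus \mathrm{opad}) \parallel H((K' \oplus \mathrm{ipad}) \parallel m)\big)

The second loop XORs 0x5c ^ 0x36 so the same x_key buffer flips in place from its ipad form to its opad form before the outer hash.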
/* This is Hugo Krawczyk's HKDF: /* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf * - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869 * - https://tools.ietf.org/html/rfc5869
...@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, ...@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
((third_len || third_dst) && (!second_len || !second_dst)))); ((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */ /* Extract entropy from data into secret */
blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len) if (!first_dst || !first_len)
goto out; goto out;
/* Expand first key: key = secret, data = 0x1 */ /* Expand first key: key = secret, data = 0x1 */
output[0] = 1; output[0] = 1;
blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len); memcpy(first_dst, output, first_len);
if (!second_dst || !second_len) if (!second_dst || !second_len)
...@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, ...@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand second key: key = secret, data = first-key || 0x2 */ /* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2; output[BLAKE2S_HASH_SIZE] = 2;
blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len); memcpy(second_dst, output, second_len);
if (!third_dst || !third_len) if (!third_dst || !third_len)
...@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, ...@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand third key: key = secret, data = second-key || 0x3 */ /* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3; output[BLAKE2S_HASH_SIZE] = 3;
blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len); memcpy(third_dst, output, third_len);
out: out:
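kdf() above is HKDF (RFC 5869) with the BLAKE2s HMAC as the PRF: the first call is the extract step, and the numbered calls are the expand chain, each block feeding the next together with a one-byte counter. Schematically, with PRK the extracted `secret`:

\mathrm{PRK} = \mathrm{HMAC}(\text{chaining\_key}, \text{data}), \quad T_1 = \mathrm{HMAC}(\mathrm{PRK}, \mathtt{0x01}), \quad T_2 = \mathrm{HMAC}(\mathrm{PRK}, T_1 \parallel \mathtt{0x02}), \quad T_3 = \mathrm{HMAC}(\mathrm{PRK}, T_2 \parallel \mathtt{0x03})

In the code, `output` still holds T_1 when output[BLAKE2S_HASH_SIZE] is set to 2, which is how T_1 || 0x02 is formed without an extra buffer.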
......
...@@ -101,7 +101,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key, ...@@ -101,7 +101,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
blake2s_final(&state, out); blake2s_final(&state, out);
} }
void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
const size_t keylen);
#endif /* _CRYPTO_BLAKE2S_H */ #endif /* _CRYPTO_BLAKE2S_H */
...@@ -28,80 +28,71 @@ TRACE_EVENT(add_device_randomness, ...@@ -28,80 +28,71 @@ TRACE_EVENT(add_device_randomness,
); );
DECLARE_EVENT_CLASS(random__mix_pool_bytes, DECLARE_EVENT_CLASS(random__mix_pool_bytes,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP), TP_PROTO(int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP), TP_ARGS(bytes, IP),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bytes ) __field( int, bytes )
__field(unsigned long, IP ) __field(unsigned long, IP )
), ),
TP_fast_assign( TP_fast_assign(
__entry->pool_name = pool_name;
__entry->bytes = bytes; __entry->bytes = bytes;
__entry->IP = IP; __entry->IP = IP;
), ),
TP_printk("%s pool: bytes %d caller %pS", TP_printk("input pool: bytes %d caller %pS",
__entry->pool_name, __entry->bytes, (void *)__entry->IP) __entry->bytes, (void *)__entry->IP)
); );
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP), TP_PROTO(int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP) TP_ARGS(bytes, IP)
); );
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP), TP_PROTO(int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP) TP_ARGS(bytes, IP)
); );
TRACE_EVENT(credit_entropy_bits, TRACE_EVENT(credit_entropy_bits,
TP_PROTO(const char *pool_name, int bits, int entropy_count, TP_PROTO(int bits, int entropy_count, unsigned long IP),
unsigned long IP),
TP_ARGS(pool_name, bits, entropy_count, IP), TP_ARGS(bits, entropy_count, IP),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bits ) __field( int, bits )
__field( int, entropy_count ) __field( int, entropy_count )
__field(unsigned long, IP ) __field(unsigned long, IP )
), ),
TP_fast_assign( TP_fast_assign(
__entry->pool_name = pool_name;
__entry->bits = bits; __entry->bits = bits;
__entry->entropy_count = entropy_count; __entry->entropy_count = entropy_count;
__entry->IP = IP; __entry->IP = IP;
), ),
TP_printk("%s pool: bits %d entropy_count %d caller %pS", TP_printk("input pool: bits %d entropy_count %d caller %pS",
__entry->pool_name, __entry->bits, __entry->bits, __entry->entropy_count, (void *)__entry->IP)
__entry->entropy_count, (void *)__entry->IP)
); );
TRACE_EVENT(debit_entropy, TRACE_EVENT(debit_entropy,
TP_PROTO(const char *pool_name, int debit_bits), TP_PROTO(int debit_bits),
TP_ARGS(pool_name, debit_bits), TP_ARGS( debit_bits),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, debit_bits ) __field( int, debit_bits )
), ),
TP_fast_assign( TP_fast_assign(
__entry->pool_name = pool_name;
__entry->debit_bits = debit_bits; __entry->debit_bits = debit_bits;
), ),
TP_printk("%s: debit_bits %d", __entry->pool_name, TP_printk("input pool: debit_bits %d", __entry->debit_bits)
__entry->debit_bits)
); );
TRACE_EVENT(add_input_randomness, TRACE_EVENT(add_input_randomness,
...@@ -170,36 +161,31 @@ DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, ...@@ -170,36 +161,31 @@ DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
); );
DECLARE_EVENT_CLASS(random__extract_entropy, DECLARE_EVENT_CLASS(random__extract_entropy,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count, TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP), TP_ARGS(nbytes, entropy_count, IP),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, nbytes ) __field( int, nbytes )
__field( int, entropy_count ) __field( int, entropy_count )
__field(unsigned long, IP ) __field(unsigned long, IP )
), ),
TP_fast_assign( TP_fast_assign(
__entry->pool_name = pool_name;
__entry->nbytes = nbytes; __entry->nbytes = nbytes;
__entry->entropy_count = entropy_count; __entry->entropy_count = entropy_count;
__entry->IP = IP; __entry->IP = IP;
), ),
TP_printk("%s pool: nbytes %d entropy_count %d caller %pS", TP_printk("input pool: nbytes %d entropy_count %d caller %pS",
__entry->pool_name, __entry->nbytes, __entry->entropy_count, __entry->nbytes, __entry->entropy_count, (void *)__entry->IP)
(void *)__entry->IP)
); );
DEFINE_EVENT(random__extract_entropy, extract_entropy, DEFINE_EVENT(random__extract_entropy, extract_entropy,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count, TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP) TP_ARGS(nbytes, entropy_count, IP)
); );
TRACE_EVENT(urandom_read, TRACE_EVENT(urandom_read,
......
...@@ -122,6 +122,8 @@ config INDIRECT_IOMEM_FALLBACK ...@@ -122,6 +122,8 @@ config INDIRECT_IOMEM_FALLBACK
mmio accesses when the IO memory address is not a registered mmio accesses when the IO memory address is not a registered
emulated region. emulated region.
source "lib/crypto/Kconfig"
config CRC_CCITT config CRC_CCITT
tristate "CRC-CCITT functions" tristate "CRC-CCITT functions"
help help
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
menu "Crypto library routines"
config CRYPTO_LIB_AES config CRYPTO_LIB_AES
tristate tristate
...@@ -31,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA ...@@ -31,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC config CRYPTO_LIB_CHACHA_GENERIC
tristate tristate
select CRYPTO_ALGAPI select XOR_BLOCKS
help help
This symbol can be depended upon by arch implementations of the This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a ChaCha library interface that require the generic code as a
...@@ -40,7 +42,8 @@ config CRYPTO_LIB_CHACHA_GENERIC ...@@ -40,7 +42,8 @@ config CRYPTO_LIB_CHACHA_GENERIC
of CRYPTO_LIB_CHACHA. of CRYPTO_LIB_CHACHA.
config CRYPTO_LIB_CHACHA config CRYPTO_LIB_CHACHA
tristate tristate "ChaCha library interface"
depends on CRYPTO
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
help help
...@@ -65,7 +68,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC ...@@ -65,7 +68,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
of CRYPTO_LIB_CURVE25519. of CRYPTO_LIB_CURVE25519.
config CRYPTO_LIB_CURVE25519 config CRYPTO_LIB_CURVE25519
tristate tristate "Curve25519 scalar multiplication library"
depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
help help
...@@ -100,7 +103,7 @@ config CRYPTO_LIB_POLY1305_GENERIC ...@@ -100,7 +103,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
of CRYPTO_LIB_POLY1305. of CRYPTO_LIB_POLY1305.
config CRYPTO_LIB_POLY1305 config CRYPTO_LIB_POLY1305
tristate tristate "Poly1305 library interface"
depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
help help
...@@ -109,14 +112,18 @@ config CRYPTO_LIB_POLY1305 ...@@ -109,14 +112,18 @@ config CRYPTO_LIB_POLY1305
is available and enabled. is available and enabled.
config CRYPTO_LIB_CHACHA20POLY1305 config CRYPTO_LIB_CHACHA20POLY1305
tristate tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
depends on CRYPTO
select CRYPTO_LIB_CHACHA select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_POLY1305 select CRYPTO_LIB_POLY1305
select CRYPTO_ALGAPI
config CRYPTO_LIB_SHA256 config CRYPTO_LIB_SHA256
tristate tristate
config CRYPTO_LIB_SM4 config CRYPTO_LIB_SM4
tristate tristate
endmenu
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
* #include <stdio.h> * #include <stdio.h>
* *
* #include <openssl/evp.h> * #include <openssl/evp.h>
* #include <openssl/hmac.h>
* *
* #define BLAKE2S_TESTVEC_COUNT 256 * #define BLAKE2S_TESTVEC_COUNT 256
* *
...@@ -58,16 +57,6 @@ ...@@ -58,16 +57,6 @@
* } * }
* printf("};\n\n"); * printf("};\n\n");
* *
* printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
*
* HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL);
* print_vec(hash, BLAKE2S_OUTBYTES);
*
* HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL);
* print_vec(hash, BLAKE2S_OUTBYTES);
*
* printf("};\n");
*
* return 0; * return 0;
*} *}
*/ */
...@@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { ...@@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, }, 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
}; };
static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
{ 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70,
0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79,
0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, },
{ 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9,
0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f,
0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, },
};
bool __init blake2s_selftest(void) bool __init blake2s_selftest(void)
{ {
u8 key[BLAKE2S_KEY_SIZE]; u8 key[BLAKE2S_KEY_SIZE];
...@@ -607,16 +587,5 @@ bool __init blake2s_selftest(void) ...@@ -607,16 +587,5 @@ bool __init blake2s_selftest(void)
} }
} }
if (success) {
blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key));
success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE);
blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf));
success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE);
if (!success)
pr_err("blake2s256_hmac self-test: FAIL\n");
}
return success; return success;
} }
...@@ -30,43 +30,6 @@ void blake2s_final(struct blake2s_state *state, u8 *out) ...@@ -30,43 +30,6 @@ void blake2s_final(struct blake2s_state *state, u8 *out)
} }
EXPORT_SYMBOL(blake2s_final); EXPORT_SYMBOL(blake2s_final);
void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
const size_t keylen)
{
struct blake2s_state state;
u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
int i;
if (keylen > BLAKE2S_BLOCK_SIZE) {
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, key, keylen);
blake2s_final(&state, x_key);
} else
memcpy(x_key, key, keylen);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, in, inlen);
blake2s_final(&state, i_hash);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x5c ^ 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
blake2s_final(&state, i_hash);
memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
}
EXPORT_SYMBOL(blake2s256_hmac);
static int __init blake2s_mod_init(void) static int __init blake2s_mod_init(void)
{ {
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/string.h>
#include <crypto/sha1.h> #include <crypto/sha1.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
...@@ -55,7 +56,8 @@ ...@@ -55,7 +56,8 @@
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \ __u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \ E += TEMP + rol32(A,5) + (fn) + (constant); \
B = ror32(B, 2); } while (0) B = ror32(B, 2); \
TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
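The added line is what lets the 80 unrolled calls below collapse into four loops: the macro now rotates the five working variables itself, so every iteration passes A..E in the same order instead of permuting the arguments at each call site. A stand-alone sketch of that rotation (illustrative only, not the kernel macro):

/* After a round computes the new E value, shift the working variables so the
 * next iteration can again be written as ROUND(i, A, B, C, D, E). */
static inline void sha1_rotate_vars(unsigned int *A, unsigned int *B,
				    unsigned int *C, unsigned int *D,
				    unsigned int *E, unsigned int new_e)
{
	*E = *D;
	*D = *C;
	*C = *B;
	*B = *A;
	*A = new_e;
}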
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
...@@ -84,6 +86,7 @@ ...@@ -84,6 +86,7 @@
void sha1_transform(__u32 *digest, const char *data, __u32 *array) void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{ {
__u32 A, B, C, D, E; __u32 A, B, C, D, E;
unsigned int i = 0;
A = digest[0]; A = digest[0];
B = digest[1]; B = digest[1];
...@@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array) ...@@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array)
E = digest[4]; E = digest[4];
/* Round 1 - iterations 0-16 take their input from 'data' */ /* Round 1 - iterations 0-16 take their input from 'data' */
T_0_15( 0, A, B, C, D, E); for (; i < 16; ++i)
T_0_15( 1, E, A, B, C, D); T_0_15(i, A, B, C, D, E);
T_0_15( 2, D, E, A, B, C);
T_0_15( 3, C, D, E, A, B);
T_0_15( 4, B, C, D, E, A);
T_0_15( 5, A, B, C, D, E);
T_0_15( 6, E, A, B, C, D);
T_0_15( 7, D, E, A, B, C);
T_0_15( 8, C, D, E, A, B);
T_0_15( 9, B, C, D, E, A);
T_0_15(10, A, B, C, D, E);
T_0_15(11, E, A, B, C, D);
T_0_15(12, D, E, A, B, C);
T_0_15(13, C, D, E, A, B);
T_0_15(14, B, C, D, E, A);
T_0_15(15, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */ /* Round 1 - tail. Input from 512-bit mixing array */
T_16_19(16, E, A, B, C, D); for (; i < 20; ++i)
T_16_19(17, D, E, A, B, C); T_16_19(i, A, B, C, D, E);
T_16_19(18, C, D, E, A, B);
T_16_19(19, B, C, D, E, A);
/* Round 2 */ /* Round 2 */
T_20_39(20, A, B, C, D, E); for (; i < 40; ++i)
T_20_39(21, E, A, B, C, D); T_20_39(i, A, B, C, D, E);
T_20_39(22, D, E, A, B, C);
T_20_39(23, C, D, E, A, B);
T_20_39(24, B, C, D, E, A);
T_20_39(25, A, B, C, D, E);
T_20_39(26, E, A, B, C, D);
T_20_39(27, D, E, A, B, C);
T_20_39(28, C, D, E, A, B);
T_20_39(29, B, C, D, E, A);
T_20_39(30, A, B, C, D, E);
T_20_39(31, E, A, B, C, D);
T_20_39(32, D, E, A, B, C);
T_20_39(33, C, D, E, A, B);
T_20_39(34, B, C, D, E, A);
T_20_39(35, A, B, C, D, E);
T_20_39(36, E, A, B, C, D);
T_20_39(37, D, E, A, B, C);
T_20_39(38, C, D, E, A, B);
T_20_39(39, B, C, D, E, A);
/* Round 3 */ /* Round 3 */
T_40_59(40, A, B, C, D, E); for (; i < 60; ++i)
T_40_59(41, E, A, B, C, D); T_40_59(i, A, B, C, D, E);
T_40_59(42, D, E, A, B, C);
T_40_59(43, C, D, E, A, B);
T_40_59(44, B, C, D, E, A);
T_40_59(45, A, B, C, D, E);
T_40_59(46, E, A, B, C, D);
T_40_59(47, D, E, A, B, C);
T_40_59(48, C, D, E, A, B);
T_40_59(49, B, C, D, E, A);
T_40_59(50, A, B, C, D, E);
T_40_59(51, E, A, B, C, D);
T_40_59(52, D, E, A, B, C);
T_40_59(53, C, D, E, A, B);
T_40_59(54, B, C, D, E, A);
T_40_59(55, A, B, C, D, E);
T_40_59(56, E, A, B, C, D);
T_40_59(57, D, E, A, B, C);
T_40_59(58, C, D, E, A, B);
T_40_59(59, B, C, D, E, A);
/* Round 4 */ /* Round 4 */
T_60_79(60, A, B, C, D, E); for (; i < 80; ++i)
T_60_79(61, E, A, B, C, D); T_60_79(i, A, B, C, D, E);
T_60_79(62, D, E, A, B, C);
T_60_79(63, C, D, E, A, B);
T_60_79(64, B, C, D, E, A);
T_60_79(65, A, B, C, D, E);
T_60_79(66, E, A, B, C, D);
T_60_79(67, D, E, A, B, C);
T_60_79(68, C, D, E, A, B);
T_60_79(69, B, C, D, E, A);
T_60_79(70, A, B, C, D, E);
T_60_79(71, E, A, B, C, D);
T_60_79(72, D, E, A, B, C);
T_60_79(73, C, D, E, A, B);
T_60_79(74, B, C, D, E, A);
T_60_79(75, A, B, C, D, E);
T_60_79(76, E, A, B, C, D);
T_60_79(77, D, E, A, B, C);
T_60_79(78, C, D, E, A, B);
T_60_79(79, B, C, D, E, A);
digest[0] += A; digest[0] += A;
digest[1] += B; digest[1] += B;
......