Commit 248045b8 authored by Jason A. Donenfeld

random: selectively clang-format where it makes sense

This is an old driver that has seen a lot of different eras of kernel
coding style. In an effort to make it easier to code for, unify the
coding style around the current norm, by accepting some of -- but
certainly not all of -- the suggestions from clang-format. This should
remove ambiguity in coding style, especially with regard to spacing,
when code is being changed or amended. Consequently it also makes code
review easier on the eyes, following one uniform style rather than
several.
Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
parent 6c0eace6
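
For orientation before reading the diff: nearly every hunk applies one of a handful of spacing rules. The fragment below restates those rules as a self-contained C sketch; it is our illustration adapted from the hunks, and the DEMO_*/demo_* names are invented for it rather than taken from the tree.

#define DEMO_HZ 100				/* stand-in value; the kernel's HZ is config-dependent */
#define DEMO_RESEED_INTERVAL (300 * DEMO_HZ)	/* spaces around binary operators: was (300*HZ) */

static unsigned int demo_state[16];

/* Signatures go on one line where they fit: was "static void\ndemo_mix(...)" style. */
static void demo_mix(const unsigned char *key, unsigned long rv)
{
	/* '*' binds to the name, and no space follows a cast:
	 * was "const u8 * src" and "(u8 *) &state[4]". */
	unsigned char *dst = (unsigned char *)&demo_state[4];
	unsigned int i;

	/* Uniform loop clauses and spaced index arithmetic: was "i=0" and "state[i+4]". */
	for (i = 0; i < 8; i++) {
		dst[i] ^= key[i];
		demo_state[i + 4] ^= rv;
	}
}

Hunks that only rewrap function signatures or flatten needlessly split conditions follow the same idea: one line wherever the column limit allows.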
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -469,7 +469,7 @@ static bool crng_need_final_init = false;
 #define crng_ready() (likely(crng_init > 1))
 static int crng_init_cnt = 0;
 static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
+#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
 static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
 static void _crng_backtrack_protect(struct crng_state *crng,
 				    u8 tmp[CHACHA_BLOCK_SIZE], int used);
@@ -509,7 +509,7 @@ static ssize_t _extract_entropy(void *buf, size_t nbytes);
 static void crng_reseed(struct crng_state *crng, bool use_input_pool);
 
-static u32 const twist_table[8] = {
+static const u32 twist_table[8] = {
 	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -722,7 +722,7 @@ static int credit_entropy_bits_safe(int nbits)
  *
  *********************************************************************/
 
-#define CRNG_RESEED_INTERVAL (300*HZ)
+#define CRNG_RESEED_INTERVAL (300 * HZ)
 
 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
@@ -836,7 +836,7 @@ static void do_numa_crng_init(struct work_struct *work)
 	struct crng_state *crng;
 	struct crng_state **pool;
 
-	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
 	for_each_online_node(i) {
 		crng = kmalloc_node(sizeof(struct crng_state),
 				    GFP_KERNEL | __GFP_NOFAIL, i);
@@ -892,7 +892,7 @@ static size_t crng_fast_load(const u8 *cp, size_t len)
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
 		return 0;
 	}
-	p = (u8 *) &primary_crng.state[4];
+	p = (u8 *)&primary_crng.state[4];
 	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
 		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
 		cp++; crng_init_cnt++; len--; ret++;
@@ -926,8 +926,8 @@ static int crng_slow_load(const u8 *cp, size_t len)
 	static u8 lfsr = 1;
 	u8 tmp;
 	unsigned int i, max = CHACHA_KEY_SIZE;
-	const u8 * src_buf = cp;
-	u8 * dest_buf = (u8 *) &primary_crng.state[4];
+	const u8 *src_buf = cp;
+	u8 *dest_buf = (u8 *)&primary_crng.state[4];
 
 	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 		return 0;
@@ -938,7 +938,7 @@ static int crng_slow_load(const u8 *cp, size_t len)
 	if (len > max)
 		max = len;
 
-	for (i = 0; i < max ; i++) {
+	for (i = 0; i < max; i++) {
 		tmp = lfsr;
 		lfsr >>= 1;
 		if (tmp & 1)
@@ -975,7 +975,7 @@ static void crng_reseed(struct crng_state *crng, bool use_input_pool)
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
 			rv = random_get_entropy();
-		crng->state[i+4] ^= buf.key[i] ^ rv;
+		crng->state[i + 4] ^= buf.key[i] ^ rv;
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	WRITE_ONCE(crng->init_time, jiffies);
@@ -983,8 +983,7 @@ static void crng_reseed(struct crng_state *crng, bool use_input_pool)
 		crng_finalize_init(crng);
 }
 
-static void _extract_crng(struct crng_state *crng,
-			  u8 out[CHACHA_BLOCK_SIZE])
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
 {
 	unsigned long flags, init_time;
@@ -1023,9 +1022,9 @@ static void _crng_backtrack_protect(struct crng_state *crng,
 		used = 0;
 	}
 	spin_lock_irqsave(&crng->lock, flags);
-	s = (u32 *) &tmp[used];
+	s = (u32 *)&tmp[used];
 	d = &crng->state[4];
-	for (i=0; i < 8; i++)
+	for (i = 0; i < 8; i++)
 		*d++ ^= *s++;
 	spin_unlock_irqrestore(&crng->lock, flags);
 }
@@ -1070,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
 	return ret;
 }
 
-
 /*********************************************************************
  *
  * Entropy input management
@@ -1165,7 +1163,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	 * Round down by 1 bit on general principles,
 	 * and limit entropy estimate to 12 bits.
 	 */
-	credit_entropy_bits(min_t(int, fls(delta>>1), 11));
+	credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
 }
 
 void add_input_randomness(unsigned int type, unsigned int code,
@@ -1190,7 +1188,7 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
 static unsigned long avg_cycles, avg_deviation;
 
 #define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+#define FIXED_1_2 (1 << (AVG_SHIFT - 1))
 
 static void add_interrupt_bench(cycles_t start)
 {
@@ -1209,7 +1207,7 @@ static void add_interrupt_bench(cycles_t start)
 static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
-	u32 *ptr = (u32 *) regs;
+	u32 *ptr = (u32 *)regs;
 	unsigned int idx;
 
 	if (regs == NULL)
@@ -1239,8 +1237,8 @@ void add_interrupt_randomness(int irq)
 	fast_pool->pool[1] ^= now ^ c_high;
 	ip = regs ? instruction_pointer(regs) : _RET_IP_;
 	fast_pool->pool[2] ^= ip;
-	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
-		get_reg(fast_pool, regs);
+	fast_pool->pool[3] ^=
+		(sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
@@ -1254,8 +1252,7 @@ void add_interrupt_randomness(int irq)
 		return;
 	}
 
-	if ((fast_pool->count < 64) &&
-	    !time_after(now, fast_pool->last + HZ))
+	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
 		return;
 
 	if (!spin_trylock(&input_pool.lock))
@@ -1319,7 +1316,7 @@ static size_t account(size_t nbytes, int min)
 		entropy_count = 0;
 	}
 	nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
-	if ((size_t) entropy_count > nfrac)
+	if ((size_t)entropy_count > nfrac)
 		entropy_count -= nfrac;
 	else
 		entropy_count = 0;
@@ -1422,10 +1419,9 @@ static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
 }
 
 #define warn_unseeded_randomness(previous) \
-	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
 
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
-				      void **previous)
+static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
 {
 #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
 	const bool print_once = false;
@@ -1433,8 +1429,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 	static bool print_once __read_mostly;
 #endif
 
-	if (print_once ||
-	    crng_ready() ||
+	if (print_once || crng_ready() ||
 	    (previous && (caller == READ_ONCE(*previous))))
 		return;
 	WRITE_ONCE(*previous, caller);
@@ -1442,9 +1437,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 	print_once = true;
 #endif
 	if (__ratelimit(&unseeded_warning))
-		printk_deferred(KERN_NOTICE "random: %s called from %pS "
-				"with crng_init=%d\n", func_name, caller,
-				crng_init);
+		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+				func_name, caller, crng_init);
 }
 
 /*
@@ -1487,7 +1481,6 @@ void get_random_bytes(void *buf, int nbytes)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
-
 /*
  * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
@@ -1526,7 +1519,7 @@ static void try_to_generate_entropy(void)
 	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
 	while (!crng_ready()) {
 		if (!timer_pending(&stack.timer))
-			mod_timer(&stack.timer, jiffies+1);
+			mod_timer(&stack.timer, jiffies + 1);
 		mix_pool_bytes(&stack.now, sizeof(stack.now));
 		schedule();
 		stack.now = random_get_entropy();
@@ -1736,9 +1729,8 @@ void rand_initialize_disk(struct gendisk *disk)
 }
 #endif
 
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
-		    loff_t *ppos)
+static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
+				   size_t nbytes, loff_t *ppos)
 {
 	int ret;
@@ -1748,8 +1740,8 @@ urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
 	return ret;
 }
 
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+			    loff_t *ppos)
 {
 	static int maxwarn = 10;
@@ -1763,8 +1755,8 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 	return urandom_read_nowarn(file, buf, nbytes, ppos);
 }
 
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
			   loff_t *ppos)
 {
 	int ret;
@@ -1774,8 +1766,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 	return urandom_read_nowarn(file, buf, nbytes, ppos);
 }
 
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
+static __poll_t random_poll(struct file *file, poll_table *wait)
 {
 	__poll_t mask;
@@ -1789,8 +1780,7 @@ random_poll(struct file *file, poll_table * wait)
 	return mask;
 }
 
-static int
-write_pool(const char __user *buffer, size_t count)
+static int write_pool(const char __user *buffer, size_t count)
 {
 	size_t bytes;
 	u32 t, buf[16];
@@ -1910,19 +1900,19 @@ const struct file_operations urandom_fops = {
 	.llseek = noop_llseek,
 };
 
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
-		unsigned int, flags)
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+		flags)
 {
 	int ret;
 
-	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
 		return -EINVAL;
 
 	/*
 	 * Requesting insecure and blocking randomness at the same time makes
 	 * no sense.
 	 */
-	if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
 		return -EINVAL;
 	if (count > INT_MAX)
@@ -1962,8 +1952,8 @@ static char sysctl_bootid[16];
  * returned as an ASCII string in the standard UUID format; if via the
  * sysctl system call, as 16 bytes of binary data.
  */
-static int proc_do_uuid(struct ctl_table *table, int write,
-			void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
+			size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table fake_table;
 	unsigned char buf[64], tmp_uuid[16], *uuid;
@@ -1992,8 +1982,8 @@ static int proc_do_uuid(struct ctl_table *table, int write,
 /*
  * Return entropy available scaled to integral bits
  */
-static int proc_do_entropy(struct ctl_table *table, int write,
-			   void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
+			   size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table fake_table;
 	int entropy_count;
@@ -2147,7 +2137,7 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	for_each_possible_cpu (cpu) {
+	for_each_possible_cpu(cpu) {
 		struct batched_entropy *batched_entropy;
 		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
@@ -2176,8 +2166,7 @@ static void invalidate_batched_entropy(void)
  * Return: A page aligned address within [start, start + range). On error,
  * @start is returned.
 */
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
+unsigned long randomize_page(unsigned long start, unsigned long range)
 {
 	if (!PAGE_ALIGNED(start)) {
 		range -= PAGE_ALIGN(start) - start;
...