Commit 43759d4f authored by Theodore Ts'o's avatar Theodore Ts'o

random: use an improved fast_mix() function

Use more efficient fast_mix() function.  Thanks to George Spelvin for
doing the leg work to find a more efficient mixing function.
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
parent 840f9507
...@@ -267,6 +267,8 @@ ...@@ -267,6 +267,8 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/random.h> #include <trace/events/random.h>
/* #define ADD_INTERRUPT_BENCH */
/* /*
* Configuration information * Configuration information
*/ */
...@@ -558,25 +560,29 @@ struct fast_pool { ...@@ -558,25 +560,29 @@ struct fast_pool {
* collector. It's hardcoded for an 128 bit pool and assumes that any * collector. It's hardcoded for an 128 bit pool and assumes that any
* locks that might be needed are taken by the caller. * locks that might be needed are taken by the caller.
*/ */
static void fast_mix(struct fast_pool *f, __u32 input[4]) static void fast_mix(struct fast_pool *f)
{ {
__u32 w; __u32 a = f->pool[0], b = f->pool[1];
unsigned input_rotate = f->rotate; __u32 c = f->pool[2], d = f->pool[3];
w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3]; a += b; c += d;
f->pool[0] = (w >> 3) ^ twist_table[w & 7]; b = rol32(a, 6); d = rol32(c, 27);
input_rotate = (input_rotate + 14) & 31; d ^= a; b ^= c;
w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
f->pool[1] = (w >> 3) ^ twist_table[w & 7]; a += b; c += d;
input_rotate = (input_rotate + 7) & 31; b = rol32(a, 16); d = rol32(c, 14);
w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1]; d ^= a; b ^= c;
f->pool[2] = (w >> 3) ^ twist_table[w & 7];
input_rotate = (input_rotate + 7) & 31; a += b; c += d;
w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2]; b = rol32(a, 6); d = rol32(c, 27);
f->pool[3] = (w >> 3) ^ twist_table[w & 7]; d ^= a; b ^= c;
input_rotate = (input_rotate + 7) & 31;
a += b; c += d;
f->rotate = input_rotate; b = rol32(a, 16); d = rol32(c, 14);
d ^= a; b ^= c;
f->pool[0] = a; f->pool[1] = b;
f->pool[2] = c; f->pool[3] = d;
f->count++; f->count++;
} }
...@@ -829,6 +835,27 @@ EXPORT_SYMBOL_GPL(add_input_randomness); ...@@ -829,6 +835,27 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
static DEFINE_PER_CPU(struct fast_pool, irq_randomness); static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
#ifdef ADD_INTERRUPT_BENCH
/*
 * Optional instrumentation (enabled by defining ADD_INTERRUPT_BENCH)
 * that measures the cycle cost of the interrupt-randomness fast path.
 * Results are exported read-only via sysctl as
 * add_interrupt_avg_cycles / add_interrupt_avg_deviation.
 */
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))  /* 1/2 in the fixed-point scale, for rounding */

/*
 * Fold the elapsed cycle count since @start into exponentially weighted
 * moving averages of the mean and the absolute deviation.
 * avg_cycles and avg_deviation are kept scaled by 2^AVG_SHIFT.
 * NOTE(review): updates are not atomic — assumes callers are serialized
 * (e.g. per-CPU interrupt context); confirm at the call site.
 */
static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif
void add_interrupt_randomness(int irq, int irq_flags) void add_interrupt_randomness(int irq, int irq_flags)
{ {
struct entropy_store *r; struct entropy_store *r;
...@@ -836,22 +863,23 @@ void add_interrupt_randomness(int irq, int irq_flags) ...@@ -836,22 +863,23 @@ void add_interrupt_randomness(int irq, int irq_flags)
struct pt_regs *regs = get_irq_regs(); struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies; unsigned long now = jiffies;
cycles_t cycles = random_get_entropy(); cycles_t cycles = random_get_entropy();
__u32 input[4], c_high, j_high; __u32 c_high, j_high;
__u64 ip; __u64 ip;
unsigned long seed; unsigned long seed;
int credit = 0; int credit = 0;
c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
j_high = (sizeof(now) > 4) ? now >> 32 : 0; j_high = (sizeof(now) > 4) ? now >> 32 : 0;
input[0] = cycles ^ j_high ^ irq; fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
input[1] = now ^ c_high; fast_pool->pool[1] ^= now ^ c_high;
ip = regs ? instruction_pointer(regs) : _RET_IP_; ip = regs ? instruction_pointer(regs) : _RET_IP_;
input[2] = ip; fast_pool->pool[2] ^= ip;
input[3] = ip >> 32; fast_pool->pool[3] ^= ip >> 32;
fast_mix(fast_pool, input); fast_mix(fast_pool);
if ((irq_flags & __IRQF_TIMER) == 0) if ((irq_flags & __IRQF_TIMER) == 0)
fast_pool->notimer_count++; fast_pool->notimer_count++;
add_interrupt_bench(cycles);
if (cycles) { if (cycles) {
if ((fast_pool->count < 64) && if ((fast_pool->count < 64) &&
...@@ -1650,6 +1678,22 @@ struct ctl_table random_table[] = { ...@@ -1650,6 +1678,22 @@ struct ctl_table random_table[] = {
.mode = 0444, .mode = 0444,
.proc_handler = proc_do_uuid, .proc_handler = proc_do_uuid,
}, },
#ifdef ADD_INTERRUPT_BENCH
{
.procname = "add_interrupt_avg_cycles",
.data = &avg_cycles,
.maxlen = sizeof(avg_cycles),
.mode = 0444,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "add_interrupt_avg_deviation",
.data = &avg_deviation,
.maxlen = sizeof(avg_deviation),
.mode = 0444,
.proc_handler = proc_doulongvec_minmax,
},
#endif
{ } { }
}; };
#endif /* CONFIG_SYSCTL */ #endif /* CONFIG_SYSCTL */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment