Commit 775f4b297b780601e61787b766f306ed3e1d23eb

Authored by Theodore Ts'o
1 parent 74feec5dd8

random: make 'add_interrupt_randomness()' do something sane

We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.

This solves both problems by limiting the actual randomness addition
to just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool.  Also, we make sure the nonblocking pool used by urandom is
initialized before we start feeding the normal input pool.  This
assures that /dev/urandom is returning unpredictable data as soon as
possible.

(Based on an original patch by Linus, but significantly modified by
tytso.)

Tested-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
Reported-by: Zakir Durumeric <zakir@umich.edu>
Reported-by: J. Alex Halderman <jhalderm@umich.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org

Showing 4 changed files with 90 additions and 24 deletions Side-by-side Diff

drivers/char/random.c
... ... @@ -127,19 +127,15 @@
127 127 *
128 128 * void add_input_randomness(unsigned int type, unsigned int code,
129 129 * unsigned int value);
130   - * void add_interrupt_randomness(int irq);
  130 + * void add_interrupt_randomness(int irq, int irq_flags);
131 131 * void add_disk_randomness(struct gendisk *disk);
132 132 *
133 133 * add_input_randomness() uses the input layer interrupt timing, as well as
134 134 * the event type information from the hardware.
135 135 *
136   - * add_interrupt_randomness() uses the inter-interrupt timing as random
137   - * inputs to the entropy pool. Note that not all interrupts are good
138   - * sources of randomness! For example, the timer interrupts is not a
139   - * good choice, because the periodicity of the interrupts is too
140   - * regular, and hence predictable to an attacker. Network Interface
141   - * Controller interrupts are a better measure, since the timing of the
142   - * NIC interrupts are more unpredictable.
  136 + * add_interrupt_randomness() uses the interrupt timing as random
  137 + * inputs to the entropy pool. Using the cycle counters and the irq source
  138 + * as inputs, it feeds the randomness roughly once a second.
143 139 *
144 140 * add_disk_randomness() uses what amounts to the seek time of block
145 141 * layer request events, on a per-disk_devt basis, as input to the
... ... @@ -248,6 +244,7 @@
248 244 #include <linux/percpu.h>
249 245 #include <linux/cryptohash.h>
250 246 #include <linux/fips.h>
  247 +#include <linux/ptrace.h>
251 248  
252 249 #ifdef CONFIG_GENERIC_HARDIRQS
253 250 # include <linux/irq.h>
... ... @@ -256,6 +253,7 @@
256 253 #include <asm/processor.h>
257 254 #include <asm/uaccess.h>
258 255 #include <asm/irq.h>
  256 +#include <asm/irq_regs.h>
259 257 #include <asm/io.h>
260 258  
261 259 /*
262 260  
... ... @@ -421,7 +419,9 @@
421 419 spinlock_t lock;
422 420 unsigned add_ptr;
423 421 int entropy_count;
  422 + int entropy_total;
424 423 int input_rotate;
  424 + unsigned int initialized:1;
425 425 __u8 last_data[EXTRACT_SIZE];
426 426 };
427 427  
... ... @@ -454,6 +454,10 @@
454 454 .pool = nonblocking_pool_data
455 455 };
456 456  
  457 +static __u32 const twist_table[8] = {
  458 + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
  459 + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
  460 +
457 461 /*
458 462 * This function adds bytes into the entropy "pool". It does not
459 463 * update the entropy estimate. The caller should call
... ... @@ -467,9 +471,6 @@
467 471 static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
468 472 int nbytes, __u8 out[64])
469 473 {
470   - static __u32 const twist_table[8] = {
471   - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
472   - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
473 474 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
474 475 int input_rotate;
475 476 int wordmask = r->poolinfo->poolwords - 1;
476 477  
... ... @@ -528,7 +529,37 @@
528 529 mix_pool_bytes_extract(r, in, bytes, NULL);
529 530 }
530 531  
  532 +struct fast_pool {
  533 + __u32 pool[4];
  534 + unsigned long last;
  535 + unsigned short count;
  536 + unsigned char rotate;
  537 + unsigned char last_timer_intr;
  538 +};
  539 +
531 540 /*
  541 + * This is a fast mixing routine used by the interrupt randomness
  542 + * collector. It's hardcoded for an 128 bit pool and assumes that any
  543 + * locks that might be needed are taken by the caller.
  544 + */
  545 +static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
  546 +{
  547 + const char *bytes = in;
  548 + __u32 w;
  549 + unsigned i = f->count;
  550 + unsigned input_rotate = f->rotate;
  551 +
  552 + while (nbytes--) {
  553 + w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
  554 + f->pool[(i + 1) & 3];
  555 + f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
  556 + input_rotate += (i++ & 3) ? 7 : 14;
  557 + }
  558 + f->count = i;
  559 + f->rotate = input_rotate;
  560 +}
  561 +
  562 +/*
532 563 * Credit (or debit) the entropy store with n bits of entropy
533 564 */
534 565 static void credit_entropy_bits(struct entropy_store *r, int nbits)
... ... @@ -551,6 +582,12 @@
551 582 entropy_count = r->poolinfo->POOLBITS;
552 583 r->entropy_count = entropy_count;
553 584  
  585 + if (!r->initialized && nbits > 0) {
  586 + r->entropy_total += nbits;
  587 + if (r->entropy_total > 128)
  588 + r->initialized = 1;
  589 + }
  590 +
554 591 /* should we wake readers? */
555 592 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
556 593 wake_up_interruptible(&random_read_wait);
557 594  
558 595  
559 596  
560 597  
... ... @@ -700,17 +737,48 @@
700 737 }
701 738 EXPORT_SYMBOL_GPL(add_input_randomness);
702 739  
703   -void add_interrupt_randomness(int irq)
  740 +static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
  741 +
  742 +void add_interrupt_randomness(int irq, int irq_flags)
704 743 {
705   - struct timer_rand_state *state;
  744 + struct entropy_store *r;
  745 + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
  746 + struct pt_regs *regs = get_irq_regs();
  747 + unsigned long now = jiffies;
  748 + __u32 input[4], cycles = get_cycles();
706 749  
707   - state = get_timer_rand_state(irq);
  750 + input[0] = cycles ^ jiffies;
  751 + input[1] = irq;
  752 + if (regs) {
  753 + __u64 ip = instruction_pointer(regs);
  754 + input[2] = ip;
  755 + input[3] = ip >> 32;
  756 + }
708 757  
709   - if (state == NULL)
  758 + fast_mix(fast_pool, input, sizeof(input));
  759 +
  760 + if ((fast_pool->count & 1023) &&
  761 + !time_after(now, fast_pool->last + HZ))
710 762 return;
711 763  
712   - DEBUG_ENT("irq event %d\n", irq);
713   - add_timer_randomness(state, 0x100 + irq);
  764 + fast_pool->last = now;
  765 +
  766 + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
  767 + mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
  768 + /*
  769 + * If we don't have a valid cycle counter, and we see
  770 + * back-to-back timer interrupts, then skip giving credit for
  771 + * any entropy.
  772 + */
  773 + if (cycles == 0) {
  774 + if (irq_flags & __IRQF_TIMER) {
  775 + if (fast_pool->last_timer_intr)
  776 + return;
  777 + fast_pool->last_timer_intr = 1;
  778 + } else
  779 + fast_pool->last_timer_intr = 0;
  780 + }
  781 + credit_entropy_bits(r, 1);
714 782 }
715 783  
716 784 #ifdef CONFIG_BLOCK
... ... @@ -971,6 +1039,7 @@
971 1039  
972 1040 spin_lock_irqsave(&r->lock, flags);
973 1041 r->entropy_count = 0;
  1042 + r->entropy_total = 0;
974 1043 spin_unlock_irqrestore(&r->lock, flags);
975 1044  
976 1045 now = ktime_get_real();
drivers/mfd/ab3100-core.c
... ... @@ -409,8 +409,6 @@
409 409 u32 fatevent;
410 410 int err;
411 411  
412   - add_interrupt_randomness(irq);
413   -
414 412 err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
415 413 event_regs, 3);
416 414 if (err)
include/linux/random.h
... ... @@ -52,7 +52,7 @@
52 52  
53 53 extern void add_input_randomness(unsigned int type, unsigned int code,
54 54 unsigned int value);
55   -extern void add_interrupt_randomness(int irq);
  55 +extern void add_interrupt_randomness(int irq, int irq_flags);
56 56  
57 57 extern void get_random_bytes(void *buf, int nbytes);
58 58 void generate_random_uuid(unsigned char uuid_out[16]);
... ... @@ -133,7 +133,7 @@
133 133 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
134 134 {
135 135 irqreturn_t retval = IRQ_NONE;
136   - unsigned int random = 0, irq = desc->irq_data.irq;
  136 + unsigned int flags = 0, irq = desc->irq_data.irq;
137 137  
138 138 do {
139 139 irqreturn_t res;
... ... @@ -161,7 +161,7 @@
161 161  
162 162 /* Fall through to add to randomness */
163 163 case IRQ_HANDLED:
164   - random |= action->flags;
  164 + flags |= action->flags;
165 165 break;
166 166  
167 167 default:
... ... @@ -172,8 +172,7 @@
172 172 action = action->next;
173 173 } while (action);
174 174  
175   - if (random & IRQF_SAMPLE_RANDOM)
176   - add_interrupt_randomness(irq);
  175 + add_interrupt_randomness(irq, flags);
177 176  
178 177 if (!noirqdebug)
179 178 note_interrupt(irq, desc, retval);