summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorTheodore Ts'o <tytso@mit.edu>2012-07-02 07:52:16 -0400
committerPaul Gortmaker <paul.gortmaker@windriver.com>2012-08-17 15:36:05 -0400
commit17ef73f8c0be242d38efe9b28d9caaca9c3ef442 (patch)
tree1a74e6fc35adb0a3ee19a21593eb5e2ce859d635 /kernel
parenta5c606bc8eef5c3153477f1eb0c6fbf54972e40a (diff)
random: make 'add_interrupt_randomness()' do something sane
commit 775f4b297b780601e61787b766f306ed3e1d23eb upstream. We've been moving away from add_interrupt_randomness() for various reasons: it's too expensive to do on every interrupt, and flooding the CPU with interrupts could theoretically cause bogus floods of entropy from a somewhat externally controllable source. This solves both problems by limiting the actual randomness addition to just once a second or after 64 interrupts, whichever comes first. During that time, the interrupt cycle data is buffered up in a per-cpu pool. Also, we make sure the nonblocking pool used by urandom is initialized before we start feeding the normal input pool. This assures that /dev/urandom is returning unpredictable data as soon as possible. (Based on an original patch by Linus, but significantly modified by tytso.) Tested-by: Eric Wustrow <ewust@umich.edu> Reported-by: Eric Wustrow <ewust@umich.edu> Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu> Reported-by: Zakir Durumeric <zakir@umich.edu> Reported-by: J. Alex Halderman <jhalderm@umich.edu>. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> [PG: minor adjustment required since .34 doesn't have f9e4989eb8 which renames "status" to "random" in kernel/irq/handle.c ] Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/irq/handle.c7
1 file changed, 3 insertions, 4 deletions
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 76d5a671bfe1..86bcf104bdd5 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -368,7 +368,7 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
irqreturn_t ret, retval = IRQ_NONE;
- unsigned int status = 0;
+ unsigned int flags = 0;
if (!(action->flags & IRQF_DISABLED))
local_irq_enable_in_hardirq();
@@ -411,7 +411,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
/* Fall through to add to randomness */
case IRQ_HANDLED:
- status |= action->flags;
+ flags |= action->flags;
break;
default:
@@ -422,8 +422,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
action = action->next;
} while (action);
- if (status & IRQF_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
+ add_interrupt_randomness(irq, flags);
local_irq_disable();
return retval;