
Commit b169c13

zx2c4 authored and tytso committed
random: invalidate batched entropy after crng init
It's possible that get_random_{u32,u64} is used before the crng has initialized, in which case its output might not be cryptographically secure. For that problem directly, this patch set introduces the *_wait variety of functions, but even with those there is a subtle issue: what happens to batched entropy that was generated before initialization? Prior to this commit, it would stick around, supplying bad numbers. After this commit, we force the entropy to be re-extracted after each phase of the crng has initialized.

In order to avoid a race condition with the position counter, we introduce a simple rwlock for this invalidation. Since the lock is only needed during that awkward transition period, we stop using it once things are all set up, so it has no impact on performance afterwards.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org # v4.11+
1 parent 92e7542 commit b169c13
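The mechanism described in the commit message can be illustrated with a small standalone model: consumers take a read lock on a reset lock only while the crng is not yet fully initialized, and invalidation takes the write lock and lazily resets each batch's position so the next consumer re-extracts fresh data. This is a userspace sketch only, using pthreads in place of the kernel's rwlock and per-CPU primitives; names such as fill_batch(), BATCH_WORDS, and crng_ready are placeholders that do not appear in the commit.

/*
 * Minimal userspace model of the batched-entropy invalidation scheme.
 * Illustration only: pthreads stand in for the kernel's rwlock and
 * per-CPU machinery, and fill_batch() stands in for extract_crng().
 * Build with: cc model.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_WORDS 8

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool crng_ready;                 /* models crng_init >= 2 */

struct batch {
        uint64_t words[BATCH_WORDS];
        unsigned int position;          /* 0 means "refill before use" */
};

static struct batch batch;              /* the kernel keeps one per CPU */

/* Stand-in for extract_crng(): refill the batch from the (now good) RNG. */
static void fill_batch(struct batch *b)
{
        for (unsigned int i = 0; i < BATCH_WORDS; i++)
                b->words[i] = 0x1234567890abcdefULL ^ i;   /* dummy data */
}

static uint64_t get_random_u64_model(void)
{
        bool use_lock = !crng_ready;    /* lock only before full init */
        uint64_t ret;

        if (use_lock)
                pthread_rwlock_rdlock(&reset_lock);
        if (batch.position % BATCH_WORDS == 0) {
                fill_batch(&batch);
                batch.position = 0;
        }
        ret = batch.words[batch.position++];
        if (use_lock)
                pthread_rwlock_unlock(&reset_lock);
        return ret;
}

/* Models invalidate_batched_entropy(): lazily force a refill on next use. */
static void invalidate_batches(void)
{
        pthread_rwlock_wrlock(&reset_lock);
        batch.position = 0;
        pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
        uint64_t early = get_random_u64_model();   /* possibly weak value */

        /* crng finishes initializing: drop anything batched before now. */
        invalidate_batches();
        crng_ready = true;

        printf("early=%llx fresh=%llx\n",
               (unsigned long long)early,
               (unsigned long long)get_random_u64_model());
        return 0;
}

The write lock in the invalidation path is what prevents a racing consumer from reading or advancing the position counter mid-reset; once crng_ready is set, consumers skip the lock entirely, mirroring the commit's "no impact on performance" note.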

File tree
1 file changed (+37 −0 lines)


drivers/char/random.c

Lines changed: 37 additions & 0 deletions
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
         int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
                 cp++; crng_init_cnt++; len--;
         }
         if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+                invalidate_batched_entropy();
                 crng_init = 1;
                 wake_up_interruptible(&crng_init_wait);
                 pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
         memzero_explicit(&buf, sizeof(buf));
         crng->init_time = jiffies;
         if (crng == &primary_crng && crng_init < 2) {
+                invalidate_batched_entropy();
                 crng_init = 2;
                 process_random_ready_list();
                 wake_up_interruptible(&crng_init_wait);
@@ -2023,6 +2030,7 @@ struct batched_entropy {
         };
         unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
         u64 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
         batch = &get_cpu_var(batched_entropy_u64);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                 extract_crng((u8 *)batch->entropy_u64);
                 batch->position = 0;
         }
         ret = batch->entropy_u64[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u64);
         return ret;
 }
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
         u32 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
         if (arch_get_random_int(&ret))
                 return ret;
 
         batch = &get_cpu_var(batched_entropy_u32);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                 extract_crng((u8 *)batch->entropy_u32);
                 batch->position = 0;
         }
         ret = batch->entropy_u32[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u32);
         return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+        int cpu;
+        unsigned long flags;
+
+        write_lock_irqsave(&batched_entropy_reset_lock, flags);
+        for_each_possible_cpu (cpu) {
+                per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+                per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+        }
+        write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.
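The commit message also notes that the broader patch set adds *_wait variants for callers that can sleep until the crng is ready. As a rough sketch of how such a caller would pair that with the batched interface fixed here, assuming the wait_for_random_bytes() helper from elsewhere in the series (the function example_get_secure_token() below is purely illustrative, not part of this commit):

/*
 * Hypothetical kernel-side caller. wait_for_random_bytes() comes from
 * other patches in the same series and is assumed here; the function
 * name below is illustrative only. Must be called from a context that
 * can sleep.
 */
#include <linux/random.h>

static u32 example_get_secure_token(void)
{
        /* Sleep until the crng is fully initialized (crng_init == 2)... */
        if (wait_for_random_bytes())
                return 0;       /* interrupted before the crng was ready */

        /*
         * ...so the batched entropy consumed here was extracted after
         * init; any pre-init batch was invalidated by this commit.
         */
        return get_random_u32();
}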
