Skip to content

Commit 0791e8b

Browse files
committed
random: tie batched entropy generation to base_crng generation
Now that we have an explicit base_crng generation counter, we don't need a
separate one for batched entropy. Rather, we can just move the generation
forward every time we change crng_init state or update the base_crng key.

Cc: Theodore Ts'o <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Reviewed-by: Dominik Brodowski <[email protected]>
Signed-off-by: Jason A. Donenfeld <[email protected]>
1 parent 7191c62 commit 0791e8b

File tree

1 file changed

+8
-21
lines changed

1 file changed

+8
-21
lines changed

drivers/char/random.c

Lines changed: 8 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -430,8 +430,6 @@ static DEFINE_PER_CPU(struct crng, crngs) = {
430430

431431
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
432432

433-
static void invalidate_batched_entropy(void);
434-
435433
/*
436434
* crng_fast_load() can be called by code in the interrupt service
437435
* path. So we can't afford to dilly-dally. Returns the number of
@@ -454,7 +452,7 @@ static size_t crng_fast_load(const void *cp, size_t len)
454452
src++; crng_init_cnt++; len--; ret++;
455453
}
456454
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
457-
invalidate_batched_entropy();
455+
++base_crng.generation;
458456
crng_init = 1;
459457
}
460458
spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -531,7 +529,6 @@ static void crng_reseed(void)
531529
WRITE_ONCE(base_crng.generation, next_gen);
532530
WRITE_ONCE(base_crng.birth, jiffies);
533531
if (crng_init < 2) {
534-
invalidate_batched_entropy();
535532
crng_init = 2;
536533
finalize_init = true;
537534
}
@@ -1256,8 +1253,9 @@ int __init rand_initialize(void)
12561253
mix_pool_bytes(utsname(), sizeof(*(utsname())));
12571254

12581255
extract_entropy(base_crng.key, sizeof(base_crng.key));
1256+
++base_crng.generation;
1257+
12591258
if (arch_init && trust_cpu && crng_init < 2) {
1260-
invalidate_batched_entropy();
12611259
crng_init = 2;
12621260
pr_notice("crng init done (trusting CPU's manufacturer)\n");
12631261
}
@@ -1607,8 +1605,6 @@ static int __init random_sysctls_init(void)
16071605
device_initcall(random_sysctls_init);
16081606
#endif /* CONFIG_SYSCTL */
16091607

1610-
static atomic_t batch_generation = ATOMIC_INIT(0);
1611-
16121608
struct batched_entropy {
16131609
union {
16141610
/*
@@ -1622,8 +1618,8 @@ struct batched_entropy {
16221618
u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
16231619
};
16241620
local_lock_t lock;
1621+
unsigned long generation;
16251622
unsigned int position;
1626-
int generation;
16271623
};
16281624

16291625
/*
@@ -1643,14 +1639,14 @@ u64 get_random_u64(void)
16431639
unsigned long flags;
16441640
struct batched_entropy *batch;
16451641
static void *previous;
1646-
int next_gen;
1642+
unsigned long next_gen;
16471643

16481644
warn_unseeded_randomness(&previous);
16491645

16501646
local_lock_irqsave(&batched_entropy_u64.lock, flags);
16511647
batch = raw_cpu_ptr(&batched_entropy_u64);
16521648

1653-
next_gen = atomic_read(&batch_generation);
1649+
next_gen = READ_ONCE(base_crng.generation);
16541650
if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
16551651
next_gen != batch->generation) {
16561652
_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
@@ -1677,14 +1673,14 @@ u32 get_random_u32(void)
16771673
unsigned long flags;
16781674
struct batched_entropy *batch;
16791675
static void *previous;
1680-
int next_gen;
1676+
unsigned long next_gen;
16811677

16821678
warn_unseeded_randomness(&previous);
16831679

16841680
local_lock_irqsave(&batched_entropy_u32.lock, flags);
16851681
batch = raw_cpu_ptr(&batched_entropy_u32);
16861682

1687-
next_gen = atomic_read(&batch_generation);
1683+
next_gen = READ_ONCE(base_crng.generation);
16881684
if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
16891685
next_gen != batch->generation) {
16901686
_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
@@ -1700,15 +1696,6 @@ u32 get_random_u32(void)
17001696
}
17011697
EXPORT_SYMBOL(get_random_u32);
17021698

1703-
/* It's important to invalidate all potential batched entropy that might
1704-
* be stored before the crng is initialized, which we can do lazily by
1705-
* bumping the generation counter.
1706-
*/
1707-
static void invalidate_batched_entropy(void)
1708-
{
1709-
atomic_inc(&batch_generation);
1710-
}
1711-
17121699
/**
17131700
* randomize_page - Generate a random, page aligned address
17141701
* @start: The smallest acceptable address the caller will take.

0 commit comments

Comments
 (0)