Commit 2cb0850

riastradh authored and committed
arc4random(3): Reseed if system entropy epoch changes.
This can happen, for example, if the system is a VM instance, and the
VM is cloned.

This incurs the cost of a system call on every arc4random call, which
is unfortunate, but:

1. We don't currently have a (machine-independent) mechanism for
   exposing a read-only page shared by the kernel with userland to
   enable a cheaper access path to the entropy epoch; and

2. the algorithm here -- a simple application of ChaCha -- is likely
   also a bottleneck and could be much cheaper by (a) using
   sys/crypto/chacha for machine-dependent vectorized ChaCha code,
   and (b) filling a buffer (somewhere between a cipher block and a
   page) a batch at a time, instead of running ChaCha to generate
   only 32 bytes at a time.

So although this might be a performance hit, the security benefit is
worthwhile and we have a clear path to do better than reversing the
performance hit later.

PR kern/58632: getentropy(2) and arc4random(3) do not reseed on VM fork
1 parent 098c0b8 commit 2cb0850
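The reseed check at the heart of this change is just a read of the
kern.entropy.epoch sysctl node. Here is a minimal standalone sketch of
that read, using sysctlbyname(3) for brevity; this is an illustration,
not code from the commit, which instead caches the resolved MIB via
sysctlnametomib(3) so repeated calls skip the name lookup:

#include <sys/param.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	unsigned epoch;
	size_t len = sizeof(epoch);

	/*
	 * kern.entropy.epoch: bumped by the kernel when it reseeds,
	 * e.g. after detecting a VM clone.
	 */
	if (sysctlbyname("kern.entropy.epoch", &epoch, &len, NULL, 0)
	    == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("entropy epoch: %u\n", epoch);
	return 0;
}

Comparing two successive reads of this value is how the library decides
whether its per-thread PRNG state is stale, as the diff below shows.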

File tree

1 file changed: +63, -5 lines changed

lib/libc/gen/arc4random.c

Lines changed: 63 additions & 5 deletions
@@ -1,4 +1,4 @@
-/*	$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $	*/
+/*	$NetBSD: arc4random.c,v 1.35 2024/08/26 15:19:22 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -52,7 +52,7 @@
  */
 
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $");
+__RCSID("$NetBSD: arc4random.c,v 1.35 2024/08/26 15:19:22 riastradh Exp $");
 
 #include "namespace.h"
 #include "reentrant.h"
@@ -65,6 +65,7 @@ __RCSID("$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $");
 
 #include <assert.h>
 #include <sha2.h>
+#include <stdatomic.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -398,11 +399,67 @@ crypto_onetimestream(const void *seed, void *buf, size_t n)
 	(void)explicit_memset(block, 0, sizeof block);
 }
 
+/*
+ * entropy_epoch()
+ *
+ *	Return the current entropy epoch, from the sysctl node
+ *	kern.entropy.epoch.
+ *
+ *	The entropy epoch is never zero.  Initially, or on error, it is
+ *	(unsigned)-1.  It may wrap around but it skips (unsigned)-1 and
+ *	0 when it does.  Changes happen less than once per second, so
+ *	wraparound will only affect systems after 136 years of uptime.
+ *
+ *	XXX This should get it from a page shared read-only by kernel
+ *	with userland, but until we implement such a mechanism, this
+ *	sysctl -- incurring the cost of a syscall -- will have to
+ *	serve.
+ */
+static unsigned
+entropy_epoch(void)
+{
+	static atomic_int mib0[3];
+	static atomic_bool initialized = false;
+	int mib[3];
+	unsigned epoch = -1;
+	size_t epochlen = sizeof(epoch);
+
+	/*
+	 * Resolve kern.entropy.epoch if we haven't already.  Cache it
+	 * for the next caller.  Initialization is idempotent, so it's
+	 * OK if two threads do it at once.
+	 */
+	if (atomic_load_explicit(&initialized, memory_order_acquire)) {
+		mib[0] = atomic_load_explicit(&mib0[0], memory_order_relaxed);
+		mib[1] = atomic_load_explicit(&mib0[1], memory_order_relaxed);
+		mib[2] = atomic_load_explicit(&mib0[2], memory_order_relaxed);
+	} else {
+		size_t nmib = __arraycount(mib);
+
+		if (sysctlnametomib("kern.entropy.epoch", mib, &nmib) == -1)
+			return -1;
+		if (nmib != __arraycount(mib))
+			return -1;
+		atomic_store_explicit(&mib0[0], mib[0], memory_order_relaxed);
+		atomic_store_explicit(&mib0[1], mib[1], memory_order_relaxed);
+		atomic_store_explicit(&mib0[2], mib[2], memory_order_relaxed);
+		atomic_store_explicit(&initialized, true,
+		    memory_order_release);
+	}
+
+	if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
+		return -1;
+	if (epochlen != sizeof(epoch))
+		return -1;
+
+	return epoch;
+}
+
 /* arc4random state: per-thread, per-process (zeroed in child on fork) */
 
 struct arc4random_prng {
 	struct crypto_prng	arc4_prng;
-	bool			arc4_seeded;
+	unsigned		arc4_epoch;
 };
 
 static void
@@ -413,6 +470,7 @@ arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
 	SHA256_CTX ctx;
 	uint8_t buf[crypto_prng_SEEDBYTES];
 	size_t buflen = sizeof buf;
+	unsigned epoch = entropy_epoch();
 
 	__CTASSERT(sizeof buf == SHA256_DIGEST_LENGTH);
 
@@ -436,7 +494,7 @@ arc4random_prng_addrandom(struct arc4random_prng *prng, const void *data,
 	/* reseed(SHA256(prng() || sysctl(KERN_ARND) || data)) */
 	crypto_prng_seed(&prng->arc4_prng, buf);
 	(void)explicit_memset(buf, 0, sizeof buf);
-	prng->arc4_seeded = true;
+	prng->arc4_epoch = epoch;
 }
 
 #ifdef _REENTRANT
@@ -567,7 +625,7 @@ arc4random_prng_get(void)
 	}
 
 	/* Guarantee the PRNG is seeded. */
-	if (__predict_false(!prng->arc4_seeded))
+	if (__predict_false(prng->arc4_epoch != entropy_epoch()))
 		arc4random_prng_addrandom(prng, NULL, 0);
 
 	return prng;
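Item 2(b) of the commit message, batching keystream generation, could
look roughly like the following sketch. This is hypothetical and not
code from the tree: prng_fill() is a toy placeholder standing in for a
vectorized ChaCha keystream generator (such as one built on
sys/crypto/chacha), and the 4096-byte batch size is an arbitrary pick
from the suggested cipher-block-to-page range.

#include <stddef.h>
#include <string.h>

#define BATCH_BYTES 4096	/* between a cipher block and a page */

static unsigned char batch[BATCH_BYTES];
static size_t batch_avail;	/* unconsumed keystream bytes left */

/* Toy placeholder, NOT cryptographic: stands in for vectorized ChaCha. */
static void
prng_fill(unsigned char *buf, size_t len)
{
	static unsigned char ctr;

	while (len--)
		*buf++ = ctr++;	/* illustration only */
}

static void
batched_random_buf(void *out, size_t len)
{
	unsigned char *p = out;

	while (len > 0) {
		size_t off, n;

		if (batch_avail == 0) {
			/* One bulk cipher call, amortized over many
			 * small requests. */
			prng_fill(batch, sizeof batch);
			batch_avail = sizeof batch;
		}
		off = sizeof batch - batch_avail;
		n = len < batch_avail ? len : batch_avail;
		memcpy(p, batch + off, n);
		/* Erase keystream as it is handed out. */
		memset(batch + off, 0, n);
		batch_avail -= n;
		p += n;
		len -= n;
	}
}

int
main(void)
{
	unsigned char out[32];

	batched_random_buf(out, sizeof out);	/* one 32-byte request */
	return 0;
}

Erasing each slice of keystream as it is consumed mirrors the
explicit_memset() hygiene in the diff above, so a later memory
disclosure cannot reveal previously returned output.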
