@@ -1,8 +1,7 @@
 /*
  * random.c -- A strong random number generator
  *
- * Copyright (C) 2017 Jason A. Donenfeld <[email protected]>. All
- * Rights Reserved.
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
  *
  * Copyright Matt Mackall <[email protected]>, 2003, 2004, 2005
  *
@@ -78,12 +77,12 @@
  * an *estimate* of how many bits of randomness have been stored into
  * the random number generator's internal state.
  *
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
+ * When random bytes are desired, they are obtained by taking the BLAKE2s
+ * hash of the contents of the "entropy pool". The BLAKE2s hash avoids
  * exposing the internal state of the entropy pool. It is believed to
  * be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
+ * about the input of BLAKE2s from its output. Even if it is possible to
+ * analyze BLAKE2s in some clever way, as long as the amount of data
  * returned from the generator is less than the inherent entropy in
  * the pool, the output data is totally unpredictable. For this
  * reason, the routine decreases its internal estimate of how many
@@ -93,7 +92,7 @@
  * If this estimate goes to zero, the routine can still generate
  * random numbers; however, an attacker may (at least in theory) be
  * able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
+ * outputs. This requires successful cryptanalysis of BLAKE2s, which is
  * not believed to be feasible, but there is a remote possibility.
  * Nonetheless, these numbers should be useful for the vast majority
  * of purposes.
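The hashing step described above maps directly onto the in-kernel BLAKE2s library interface this patch adopts (init/update/final). Below is a minimal sketch of that usage pattern, assuming kernel context; `demo_pool` and `demo_digest_pool()` are hypothetical names for illustration only and are not part of this commit:

```c
#include <crypto/blake2s.h>	/* struct blake2s_state, BLAKE2S_HASH_SIZE */

static u32 demo_pool[128];	/* hypothetical stand-in for the entropy pool */

/* Hash the whole pool into a 32-byte digest using init/update/final. */
static void demo_digest_pool(u8 out[BLAKE2S_HASH_SIZE])
{
	struct blake2s_state state;

	blake2s_init(&state, BLAKE2S_HASH_SIZE);
	blake2s_update(&state, (const u8 *)demo_pool, sizeof(demo_pool));
	blake2s_final(&state, out);	/* also wipes the hash state */
}
```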
@@ -347,7 +346,7 @@
 #include <linux/completion.h>
 #include <linux/uuid.h>
 #include <crypto/chacha.h>
-#include <crypto/sha1.h>
+#include <crypto/blake2s.h>
 
 #include <asm/processor.h>
 #include <linux/uaccess.h>
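The header swapped in here provides the state type and primitives used by the rest of the patch. Paraphrased from <crypto/blake2s.h> of this era; treat exact field types and qualifiers as approximate, not verbatim:

```c
/* Paraphrase of the relevant pieces of <crypto/blake2s.h>; not verbatim. */
#define BLAKE2S_BLOCK_SIZE	64
#define BLAKE2S_HASH_SIZE	32

struct blake2s_state {
	u32 h[8];			/* chaining value */
	u32 t[2];			/* message byte counter */
	u32 f[2];			/* finalization flags */
	u8 buf[BLAKE2S_BLOCK_SIZE];	/* partial-block buffer */
	unsigned int buflen;
	unsigned int outlen;
};

void blake2s_init(struct blake2s_state *state, size_t outlen);
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
void blake2s_final(struct blake2s_state *state, u8 *out);
```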
@@ -367,10 +366,7 @@
 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
 #define OUTPUT_POOL_SHIFT 10
 #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
-
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+#define EXTRACT_SIZE (BLAKE2S_HASH_SIZE / 2)
 
 /*
  * To allow fractional bits to be tracked, the entropy_count field is
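Since BLAKE2S_HASH_SIZE is 32, EXTRACT_SIZE becomes 16 bytes per extraction, up from the old fixed 10 (half of SHA-1's folded 20-byte digest). A compile-time sanity check of that arithmetic, shown only as an illustration and not part of the commit:

```c
#include <crypto/blake2s.h>	/* BLAKE2S_HASH_SIZE == 32 */
#include <linux/build_bug.h>	/* BUILD_BUG_ON() */

static inline void demo_check_extract_size(void)
{
	/* Half of a 32-byte BLAKE2s digest is emitted per extraction. */
	BUILD_BUG_ON(BLAKE2S_HASH_SIZE / 2 != 16);
}
```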
@@ -406,7 +402,7 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
  * Thanks to Colin Plumb for suggesting this.
  *
  * The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
+ * where we use BLAKE2s. All that we want of mixing operation is that
  * it be a good non-cryptographic hash; i.e. it not produce collisions
  * when fed "random" data of the sort we expect to see. As long as
  * the pool state differs for different inputs, we have preserved the
@@ -1384,56 +1380,49 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
  */
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
-        int i;
-        union {
-                __u32 w[5];
-                unsigned long l[LONGS(20)];
-        } hash;
-        __u32 workspace[SHA1_WORKSPACE_WORDS];
+        struct blake2s_state state __aligned(__alignof__(unsigned long));
+        u8 hash[BLAKE2S_HASH_SIZE];
+        unsigned long *salt;
         unsigned long flags;
 
+        blake2s_init(&state, sizeof(hash));
+
         /*
          * If we have an architectural hardware random number
-         * generator, use it for SHA's initial vector
+         * generator, use it for BLAKE2's salt & personal fields.
          */
-        sha1_init(hash.w);
-        for (i = 0; i < LONGS(20); i++) {
+        for (salt = (unsigned long *)&state.h[4];
+             salt < (unsigned long *)&state.h[8]; ++salt) {
                 unsigned long v;
                 if (!arch_get_random_long(&v))
                         break;
-                hash.l[i] = v;
+                *salt ^= v;
         }
 
-        /* Generate a hash across the pool, 16 words (512 bits) at a time */
+        /* Generate a hash across the pool */
         spin_lock_irqsave(&r->lock, flags);
-        for (i = 0; i < r->poolinfo->poolwords; i += 16)
-                sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+        blake2s_update(&state, (const u8 *)r->pool,
+                       r->poolinfo->poolwords * sizeof(*r->pool));
+        blake2s_final(&state, hash); /* final zeros out state */
 
         /*
          * We mix the hash back into the pool to prevent backtracking
          * attacks (where the attacker knows the state of the pool
          * plus the current outputs, and attempts to find previous
-         * ouputs), unless the hash function can be inverted. By
-         * mixing at least a SHA1 worth of hash data back, we make
+         * outputs), unless the hash function can be inverted. By
+         * mixing at least a hash worth of hash data back, we make
          * brute-forcing the feedback as hard as brute-forcing the
          * hash.
          */
-        __mix_pool_bytes(r, hash.w, sizeof(hash.w));
+        __mix_pool_bytes(r, hash, sizeof(hash));
         spin_unlock_irqrestore(&r->lock, flags);
 
-        memzero_explicit(workspace, sizeof(workspace));
-
-        /*
-         * In case the hash function has some recognizable output
-         * pattern, we fold it in half. Thus, we always feed back
-         * twice as much data as we output.
+        /* Note that EXTRACT_SIZE is half of hash size here, because above
+         * we've dumped the full length back into mixer. By reducing the
+         * amount that we emit, we retain a level of forward secrecy.
          */
-        hash.w[0] ^= hash.w[3];
-        hash.w[1] ^= hash.w[4];
-        hash.w[2] ^= rol32(hash.w[2], 16);
-
-        memcpy(out, &hash, EXTRACT_SIZE);
-        memzero_explicit(&hash, sizeof(hash));
+        memcpy(out, hash, EXTRACT_SIZE);
+        memzero_explicit(hash, sizeof(hash));
 }
 
 static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
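One subtlety in the new extract_buf() above: after blake2s_init(), state.h[] holds the BLAKE2s IV XORed with the parameter block, and parameter words 4-7 are where the salt and personal fields live. XORing arch_get_random_long() output into state.h[4..7] therefore perturbs the hash much as a per-call salt and personal value would, without needing a salted init variant. The same loop as in the hunk above, restated with extra comments for clarity (no code beyond what the commit already contains):

```c
	/* state.h[4..7] are the words a BLAKE2s salt/personal would land in;
	 * fold in whatever the CPU's hardware RNG is willing to provide. */
	for (salt = (unsigned long *)&state.h[4];
	     salt < (unsigned long *)&state.h[8]; ++salt) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;	/* no arch RNG available: keep the plain IV words */
		*salt ^= v;
	}
```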