Skip to content

Commit 327b18b

Browse files
zx2c4 authored and akpm00 committed
mm/kfence: select random number before taking raw lock
The RNG uses vanilla spinlocks, not raw spinlocks, so kfence should pick its random numbers before taking its raw spinlocks. This also has the nice effect of doing less work inside the lock. It should fix a splat that Geert saw with CONFIG_PROVE_RAW_LOCK_NESTING: dump_backtrace.part.0+0x98/0xc0 show_stack+0x14/0x28 dump_stack_lvl+0xac/0xec dump_stack+0x14/0x2c __lock_acquire+0x388/0x10a0 lock_acquire+0x190/0x2c0 _raw_spin_lock_irqsave+0x6c/0x94 crng_make_state+0x148/0x1e4 _get_random_bytes.part.0+0x4c/0xe8 get_random_u32+0x4c/0x140 __kfence_alloc+0x460/0x5c4 kmem_cache_alloc_trace+0x194/0x1dc __kthread_create_on_node+0x5c/0x1a8 kthread_create_on_node+0x58/0x7c printk_start_kthread.part.0+0x34/0xa8 printk_activate_kthreads+0x4c/0x54 do_one_initcall+0xec/0x278 kernel_init_freeable+0x11c/0x214 kernel_init+0x24/0x124 ret_from_fork+0x10/0x20 Link: https://lkml.kernel.org/r/[email protected] Fixes: d415077 ("random32: use real rng for non-deterministic randomness") Signed-off-by: Jason A. Donenfeld <[email protected]> Reported-by: Geert Uytterhoeven <[email protected]> Tested-by: Geert Uytterhoeven <[email protected]> Reviewed-by: Marco Elver <[email protected]> Reviewed-by: Petr Mladek <[email protected]> Cc: John Ogness <[email protected]> Cc: Alexander Potapenko <[email protected]> Cc: Dmitry Vyukov <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 8a6f62a commit 327b18b

File tree

1 file changed

+5
-2
lines changed

1 file changed

+5
-2
lines changed

mm/kfence/core.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
360360
unsigned long flags;
361361
struct slab *slab;
362362
void *addr;
363+
const bool random_right_allocate = prandom_u32_max(2);
364+
const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
365+
!prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
363366

364367
/* Try to obtain a free object. */
365368
raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
404407
* is that the out-of-bounds accesses detected are deterministic for
405408
* such allocations.
406409
*/
407-
if (prandom_u32_max(2)) {
410+
if (random_right_allocate) {
408411
/* Allocate on the "right" side, re-calculate address. */
409412
meta->addr += PAGE_SIZE - size;
410413
meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
444447
if (cache->ctor)
445448
cache->ctor(addr);
446449

447-
if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
450+
if (random_fault)
448451
kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
449452

450453
atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);

0 commit comments

Comments (0)