
Commit 227ca9f

LoongArch: Disable KASAN if PGDIR_SIZE is too large for cpu_vabits
If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will overflow UINTPTR_MAX because KASAN_SHADOW_START/KASAN_SHADOW_END are aligned up by PGDIR_SIZE, and the overflowed KASAN_SHADOW_END then looks like a user-space address. For example, the PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too large for the Loongson-2K series, whose cpu_vabits = 39.

Since CONFIG_4KB_4LEVEL is completely legal for CPUs with cpu_vabits <= 39, we just disable KASAN via an early return in kasan_init(); otherwise we get a boot failure.

Moreover, we change KASAN_SHADOW_END from the first address after the KASAN shadow area to the last address in the KASAN shadow area, in order to avoid the end address overflowing to exactly 0 (which is a legal case). We don't need to worry about alignment because pgd_addr_end() can handle it.

Cc: [email protected]
Reviewed-by: Jiaxun Yang <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
Parent: a410656

File tree

2 files changed (+14, -3 lines)

arch/loongarch/include/asm/kasan.h

Lines changed: 1 addition & 1 deletion

@@ -51,7 +51,7 @@
 /* KAsan shadow memory start right after vmalloc. */
 #define KASAN_SHADOW_START	round_up(KFENCE_AREA_END, PGDIR_SIZE)
 #define KASAN_SHADOW_SIZE	(XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
-#define KASAN_SHADOW_END	round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+#define KASAN_SHADOW_END	(round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
 
 #define XKPRANGE_CC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
 #define XKPRANGE_UC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
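
To make the arithmetic behind this change concrete, here is a minimal user-space sketch (not kernel code): the 2^39 PGDIR_SIZE matches CONFIG_4KB_4LEVEL as described in the commit message, but the sample shadow-end address and the ROUND_UP macro are made up for illustration. It shows how rounding an address near the top of the 64-bit space up by PGDIR_SIZE wraps past UINTPTR_MAX, and how the new "last address" definition stays representable when the shadow area ends exactly at 2^64.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same rounding the kernel's round_up() performs for a power-of-two alignment. */
#define ROUND_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	const uint64_t pgdir_size = 1ULL << 39;            /* PGDIR_SIZE for CONFIG_4KB_4LEVEL */
	const uint64_t shadow_top = 0xffffffffc0000000ULL; /* made-up shadow end near UINTPTR_MAX */

	/* Old definition: rounds up past 2^64 and wraps, here to 0. */
	uint64_t old_end = ROUND_UP(shadow_top, pgdir_size);
	/* New definition: last byte of the shadow area, still a kernel address. */
	uint64_t new_end = ROUND_UP(shadow_top, pgdir_size) - 1;

	printf("old KASAN_SHADOW_END = 0x%016" PRIx64 "\n", old_end); /* 0x0000000000000000 */
	printf("new KASAN_SHADOW_END = 0x%016" PRIx64 "\n", new_end); /* 0xffffffffffffffff */
	return 0;
}

When the shadow area genuinely does not fit, as in the Loongson-2K case described in the commit message, the wrapped KASAN_SHADOW_END still compares below vm_map_base, which is exactly the condition the new check in kasan_init() uses to disable KASAN.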

arch/loongarch/mm/kasan_init.c

Lines changed: 13 additions & 2 deletions

@@ -238,7 +238,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
 asmlinkage void __init kasan_early_init(void)
 {
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
 }
 
 static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -253,7 +253,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end)
 	 * swapper_pg_dir. pgd_clear() can't be used
 	 * here because it's nop on 2,3-level pagetable setups
 	 */
-	for (; start < end; start += PGDIR_SIZE)
+	for (; start < end; start = pgd_addr_end(start, end))
 		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
 }
 
@@ -262,6 +262,17 @@ void __init kasan_init(void)
 	u64 i;
 	phys_addr_t pa_start, pa_end;
 
+	/*
+	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+	 * overflow UINTPTR_MAX and then looks like a user space address.
+	 * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
+	 * large for Loongson-2K series whose cpu_vabits = 39.
+	 */
+	if (KASAN_SHADOW_END < vm_map_base) {
+		pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
+		return;
+	}
+
 	/*
 	 * PGD was populated as invalid_pmd_table or invalid_pud_table
 	 * in pagetable_init() which depends on how many levels of page
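
The clear_pgds() change relies on pgd_addr_end() coping with an end address that is no longer PGDIR-aligned, as the commit message notes. Below is a rough sketch of that behaviour, simplified from the generic pgd_addr_end() pattern in include/linux/pgtable.h; the demo_ names, the PGDIR_SHIFT of 39, and the sample address range are assumptions for illustration only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PGDIR_SHIFT	39
#define DEMO_PGDIR_SIZE		(1ULL << DEMO_PGDIR_SHIFT)
#define DEMO_PGDIR_MASK		(~(DEMO_PGDIR_SIZE - 1))

/*
 * Simplified model of pgd_addr_end(): step to the next PGDIR boundary,
 * but clamp to 'end'.  The "- 1" comparison keeps the result correct
 * even when the next boundary wraps past UINTPTR_MAX.
 */
static uint64_t demo_pgd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + DEMO_PGDIR_SIZE) & DEMO_PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* Made-up range whose final PGDIR step would wrap past UINTPTR_MAX. */
	uint64_t start = 0xffffff0000000000ULL;
	uint64_t end   = 0xffffffffffffffffULL; /* "last address" style end, like the new KASAN_SHADOW_END */

	for (; start < end; start = demo_pgd_addr_end(start, end))
		printf("clear pgd covering 0x%016" PRIx64 "\n", start);
	return 0;
}

Because KASAN_SHADOW_END is now the last address of the shadow area rather than the first address past it, the final iteration simply steps to end and the loop terminates, instead of overshooting by a whole PGDIR_SIZE.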
