Skip to content

Commit c6975d7

Browse files
Qian Cai authored and Will Deacon committed
arm64: Track no early_pgtable_alloc() for kmemleak
After switching the page size from 64KB to 4KB on several arm64 servers here, kmemleak starts to run out of the early memory pool due to a huge number of those early_pgtable_alloc() calls: kmemleak_alloc_phys() memblock_alloc_range_nid() memblock_phys_alloc_range() early_pgtable_alloc() init_pmd() alloc_init_pud() __create_pgd_mapping() __map_memblock() paging_init() setup_arch() start_kernel() Even increasing the default value of DEBUG_KMEMLEAK_MEM_POOL_SIZE by 4 times won't be enough for a server with 200GB+ memory. There isn't much interest in checking memory leaks for those early page tables, and those early memory mappings should not reference other memory. Hence, there are no kmemleak false positives, and we can safely skip tracking those early allocations from kmemleak, as we did in commit fed84c7 ("mm/memblock.c: skip kmemleak for kasan_init()"), without needing to introduce complications to automatically scale the value depending on the runtime memory size, etc. After the patch, the default value of DEBUG_KMEMLEAK_MEM_POOL_SIZE becomes sufficient again. Signed-off-by: Qian Cai <[email protected]> Reviewed-by: Catalin Marinas <[email protected]> Reviewed-by: Mike Rapoport <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent aedad3e commit c6975d7

File tree

5 files changed

+13
-8
lines changed

5 files changed

+13
-8
lines changed

arch/arm/mm/kasan_init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
3232
static __init void *kasan_alloc_block(size_t size)
3333
{
3434
return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
35-
MEMBLOCK_ALLOC_KASAN, NUMA_NO_NODE);
35+
MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
3636
}
3737

3838
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,

arch/arm64/mm/kasan_init.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
3636
{
3737
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
3838
__pa(MAX_DMA_ADDRESS),
39-
MEMBLOCK_ALLOC_KASAN, node);
39+
MEMBLOCK_ALLOC_NOLEAKTRACE, node);
4040
if (!p)
4141
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
4242
__func__, PAGE_SIZE, PAGE_SIZE, node,
@@ -49,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
4949
{
5050
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
5151
__pa(MAX_DMA_ADDRESS),
52-
MEMBLOCK_ALLOC_KASAN, node);
52+
MEMBLOCK_ALLOC_NOLEAKTRACE,
53+
node);
5354
if (!p)
5455
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
5556
__func__, PAGE_SIZE, PAGE_SIZE, node,

arch/arm64/mm/mmu.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
9696
phys_addr_t phys;
9797
void *ptr;
9898

99-
phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
99+
phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
100+
MEMBLOCK_ALLOC_NOLEAKTRACE);
100101
if (!phys)
101102
panic("Failed to allocate page table page\n");
102103

include/linux/memblock.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -372,7 +372,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
372372
/* Flags for memblock allocation APIs */
373373
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
374374
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
375-
#define MEMBLOCK_ALLOC_KASAN 1
375+
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
376376

377377
/* We are using top down, so it is safe to use 0 here */
378378
#define MEMBLOCK_LOW_LIMIT 0

mm/memblock.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,7 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
287287
{
288288
/* pump up @end */
289289
if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
290-
end == MEMBLOCK_ALLOC_KASAN)
290+
end == MEMBLOCK_ALLOC_NOLEAKTRACE)
291291
end = memblock.current_limit;
292292

293293
/* avoid allocating the first page */
@@ -1379,8 +1379,11 @@ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
13791379
return 0;
13801380

13811381
done:
1382-
/* Skip kmemleak for kasan_init() due to high volume. */
1383-
if (end != MEMBLOCK_ALLOC_KASAN)
1382+
/*
1383+
* Skip kmemleak for those places like kasan_init() and
1384+
* early_pgtable_alloc() due to high volume.
1385+
*/
1386+
if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
13841387
/*
13851388
* The min_count is set to 0 so that memblock allocated
13861389
* blocks are never reported as leaks. This is because many

0 commit comments

Comments
 (0)