Skip to content

Commit 04a2a7a

Browse files
Baoquan He authored and Will Deacon committed
arm64: kdump: do not map crashkernel region specifically
After taking off the protection functions on crashkernel memory region, there's no need to map crashkernel region with page granularity during linear mapping. With this change, the system can make use of block or section mapping on linear region to largely improve performance during system bootup and running. Signed-off-by: Baoquan He <[email protected]> Acked-by: Catalin Marinas <[email protected]> Acked-by: Mike Rapoport (IBM) <[email protected]> Reviewed-by: Zhen Lei <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent 0d124e9 commit 04a2a7a

File tree

1 file changed

+0
-43
lines changed

1 file changed

+0
-43
lines changed

arch/arm64/mm/mmu.c

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -510,21 +510,6 @@ void __init mark_linear_text_alias_ro(void)
510510
PAGE_KERNEL_RO);
511511
}
512512

513-
static bool crash_mem_map __initdata;
514-
515-
static int __init enable_crash_mem_map(char *arg)
516-
{
517-
/*
518-
* Proper parameter parsing is done by reserve_crashkernel(). We only
519-
* need to know if the linear map has to avoid block mappings so that
520-
* the crashkernel reservations can be unmapped later.
521-
*/
522-
crash_mem_map = true;
523-
524-
return 0;
525-
}
526-
early_param("crashkernel", enable_crash_mem_map);
527-
528513
static void __init map_mem(pgd_t *pgdp)
529514
{
530515
static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -554,16 +539,6 @@ static void __init map_mem(pgd_t *pgdp)
554539
*/
555540
memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
556541

557-
#ifdef CONFIG_KEXEC_CORE
558-
if (crash_mem_map) {
559-
if (defer_reserve_crashkernel())
560-
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
561-
else if (crashk_res.end)
562-
memblock_mark_nomap(crashk_res.start,
563-
resource_size(&crashk_res));
564-
}
565-
#endif
566-
567542
/* map all the memory banks */
568543
for_each_mem_range(i, &start, &end) {
569544
if (start >= end)
@@ -590,24 +565,6 @@ static void __init map_mem(pgd_t *pgdp)
590565
__map_memblock(pgdp, kernel_start, kernel_end,
591566
PAGE_KERNEL, NO_CONT_MAPPINGS);
592567
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
593-
594-
/*
595-
* Use page-level mappings here so that we can shrink the region
596-
* in page granularity and put back unused memory to buddy system
597-
* through /sys/kernel/kexec_crash_size interface.
598-
*/
599-
#ifdef CONFIG_KEXEC_CORE
600-
if (crash_mem_map && !defer_reserve_crashkernel()) {
601-
if (crashk_res.end) {
602-
__map_memblock(pgdp, crashk_res.start,
603-
crashk_res.end + 1,
604-
PAGE_KERNEL,
605-
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
606-
memblock_clear_nomap(crashk_res.start,
607-
resource_size(&crashk_res));
608-
}
609-
}
610-
#endif
611568
}
612569

613570
void mark_rodata_ro(void)

0 commit comments

Comments
 (0)