Skip to content

Commit f8863bc

Browse files
committed
Merge branch 'for-next/kdump' into for-next/core
* for-next/kdump:
  arm64: kdump: defer the crashkernel reservation for platforms with no DMA memory zones
  arm64: kdump: do not map crashkernel region specifically
  arm64: kdump : take off the protection on crashkernel memory region
2 parents ea88dc9 + 504cae4 commit f8863bc

File tree

5 files changed

+3
-105
lines changed

5 files changed

+3
-105
lines changed

arch/arm64/include/asm/kexec.h

Lines changed: 0 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -102,12 +102,6 @@ void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
102102

103103
int machine_kexec_post_load(struct kimage *image);
104104
#define machine_kexec_post_load machine_kexec_post_load
105-
106-
void arch_kexec_protect_crashkres(void);
107-
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
108-
109-
void arch_kexec_unprotect_crashkres(void);
110-
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
111105
#endif
112106

113107
#define ARCH_HAS_KIMAGE_ARCH

arch/arm64/include/asm/memory.h

Lines changed: 0 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -374,11 +374,6 @@ static inline void *phys_to_virt(phys_addr_t x)
374374
})
375375

376376
void dump_mem_limit(void);
377-
378-
static inline bool defer_reserve_crashkernel(void)
379-
{
380-
return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
381-
}
382377
#endif /* !ASSEMBLY */
383378

384379
/*

arch/arm64/kernel/machine_kexec.c

Lines changed: 0 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -268,26 +268,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
268268
pr_info("Starting crashdump kernel...\n");
269269
}
270270

271-
void arch_kexec_protect_crashkres(void)
272-
{
273-
int i;
274-
275-
for (i = 0; i < kexec_crash_image->nr_segments; i++)
276-
set_memory_valid(
277-
__phys_to_virt(kexec_crash_image->segment[i].mem),
278-
kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
279-
}
280-
281-
void arch_kexec_unprotect_crashkres(void)
282-
{
283-
int i;
284-
285-
for (i = 0; i < kexec_crash_image->nr_segments; i++)
286-
set_memory_valid(
287-
__phys_to_virt(kexec_crash_image->segment[i].mem),
288-
kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
289-
}
290-
291271
#ifdef CONFIG_HIBERNATION
292272
/*
293273
* To preserve the crash dump kernel image, the relevant memory segments

arch/arm64/mm/init.c

Lines changed: 3 additions & 31 deletions
Original file line number | Diff line number | Diff line change
@@ -61,34 +61,8 @@ EXPORT_SYMBOL(memstart_addr);
6161
* unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
6262
* In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
6363
* otherwise it is empty.
64-
*
65-
* Memory reservation for crash kernel either done early or deferred
66-
* depending on DMA memory zones configs (ZONE_DMA) --
67-
*
68-
* In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
69-
* here instead of max_zone_phys(). This lets early reservation of
70-
* crash kernel memory which has a dependency on arm64_dma_phys_limit.
71-
* Reserving memory early for crash kernel allows linear creation of block
72-
* mappings (greater than page-granularity) for all the memory bank rangs.
73-
* In this scheme a comparatively quicker boot is observed.
74-
*
75-
* If ZONE_DMA configs are defined, crash kernel memory reservation
76-
* is delayed until DMA zone memory range size initialization performed in
77-
* zone_sizes_init(). The defer is necessary to steer clear of DMA zone
78-
* memory range to avoid overlap allocation. So crash kernel memory boundaries
79-
* are not known when mapping all bank memory ranges, which otherwise means
80-
* not possible to exclude crash kernel range from creating block mappings
81-
* so page-granularity mappings are created for the entire memory range.
82-
* Hence a slightly slower boot is observed.
83-
*
84-
* Note: Page-granularity mappings are necessary for crash kernel memory
85-
* range for shrinking its size via /sys/kernel/kexec_crash_size interface.
8664
*/
87-
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
8865
phys_addr_t __ro_after_init arm64_dma_phys_limit;
89-
#else
90-
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
91-
#endif
9266

9367
/* Current arm64 boot protocol requires 2MB alignment */
9468
#define CRASH_ALIGN SZ_2M
@@ -248,6 +222,8 @@ static void __init zone_sizes_init(void)
248222
if (!arm64_dma_phys_limit)
249223
arm64_dma_phys_limit = dma32_phys_limit;
250224
#endif
225+
if (!arm64_dma_phys_limit)
226+
arm64_dma_phys_limit = PHYS_MASK + 1;
251227
max_zone_pfns[ZONE_NORMAL] = max_pfn;
252228

253229
free_area_init(max_zone_pfns);
@@ -408,9 +384,6 @@ void __init arm64_memblock_init(void)
408384

409385
early_init_fdt_scan_reserved_mem();
410386

411-
if (!defer_reserve_crashkernel())
412-
reserve_crashkernel();
413-
414387
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
415388
}
416389

@@ -457,8 +430,7 @@ void __init bootmem_init(void)
457430
* request_standard_resources() depends on crashkernel's memory being
458431
* reserved, so do it here.
459432
*/
460-
if (defer_reserve_crashkernel())
461-
reserve_crashkernel();
433+
reserve_crashkernel();
462434

463435
memblock_dump_all();
464436
}

arch/arm64/mm/mmu.c

Lines changed: 0 additions & 43 deletions
Original file line number | Diff line number | Diff line change
@@ -510,21 +510,6 @@ void __init mark_linear_text_alias_ro(void)
510510
PAGE_KERNEL_RO);
511511
}
512512

513-
static bool crash_mem_map __initdata;
514-
515-
static int __init enable_crash_mem_map(char *arg)
516-
{
517-
/*
518-
* Proper parameter parsing is done by reserve_crashkernel(). We only
519-
* need to know if the linear map has to avoid block mappings so that
520-
* the crashkernel reservations can be unmapped later.
521-
*/
522-
crash_mem_map = true;
523-
524-
return 0;
525-
}
526-
early_param("crashkernel", enable_crash_mem_map);
527-
528513
static void __init map_mem(pgd_t *pgdp)
529514
{
530515
static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -554,16 +539,6 @@ static void __init map_mem(pgd_t *pgdp)
554539
*/
555540
memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
556541

557-
#ifdef CONFIG_KEXEC_CORE
558-
if (crash_mem_map) {
559-
if (defer_reserve_crashkernel())
560-
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
561-
else if (crashk_res.end)
562-
memblock_mark_nomap(crashk_res.start,
563-
resource_size(&crashk_res));
564-
}
565-
#endif
566-
567542
/* map all the memory banks */
568543
for_each_mem_range(i, &start, &end) {
569544
if (start >= end)
@@ -590,24 +565,6 @@ static void __init map_mem(pgd_t *pgdp)
590565
__map_memblock(pgdp, kernel_start, kernel_end,
591566
PAGE_KERNEL, NO_CONT_MAPPINGS);
592567
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
593-
594-
/*
595-
* Use page-level mappings here so that we can shrink the region
596-
* in page granularity and put back unused memory to buddy system
597-
* through /sys/kernel/kexec_crash_size interface.
598-
*/
599-
#ifdef CONFIG_KEXEC_CORE
600-
if (crash_mem_map && !defer_reserve_crashkernel()) {
601-
if (crashk_res.end) {
602-
__map_memblock(pgdp, crashk_res.start,
603-
crashk_res.end + 1,
604-
PAGE_KERNEL,
605-
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
606-
memblock_clear_nomap(crashk_res.start,
607-
resource_size(&crashk_res));
608-
}
609-
}
610-
#endif
611568
}
612569

613570
void mark_rodata_ro(void)

0 commit comments

Comments (0)