Skip to content

Commit c2c9402

Browse files
committed
Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
    arm64/mm: use lm_alias() with addresses passed to memblock_free()
    mm: arm64: document why pte is not advanced in contpte_ptep_set_access_flags()
    arm64: Expose the end of the linear map in PHYSMEM_END
    arm64: trans_pgd: mark PTEs entries as valid to avoid dead kexec()
    arm64/mm: Delete __init region from memblock.reserved
2 parents f661eb5 + c02e7c5 commit c2c9402

File tree

4 files changed

+22
-4
lines changed

4 files changed

+22
-4
lines changed

arch/arm64/include/asm/memory.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,8 @@
110110
#define PAGE_END (_PAGE_END(VA_BITS_MIN))
111111
#endif /* CONFIG_KASAN */
112112

113+
#define PHYSMEM_END __pa(PAGE_END - 1)
114+
113115
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
114116

115117
/*

arch/arm64/mm/contpte.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -421,6 +421,12 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
421421
ptep = contpte_align_down(ptep);
422422
start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
423423

424+
/*
425+
* We are not advancing entry because __ptep_set_access_flags()
426+
* only consumes access flags from entry. And since we have checked
427+
* for the whole contpte block and returned early, pte_same()
428+
* within __ptep_set_access_flags() is likely false.
429+
*/
424430
for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
425431
__ptep_set_access_flags(vma, addr, ptep, entry, 0);
426432

arch/arm64/mm/init.c

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -414,8 +414,16 @@ void __init mem_init(void)
414414

415415
void free_initmem(void)
416416
{
417-
free_reserved_area(lm_alias(__init_begin),
418-
lm_alias(__init_end),
417+
void *lm_init_begin = lm_alias(__init_begin);
418+
void *lm_init_end = lm_alias(__init_end);
419+
420+
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
421+
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
422+
423+
/* Delete __init region from memblock.reserved. */
424+
memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
425+
426+
free_reserved_area(lm_init_begin, lm_init_end,
419427
POISON_FREE_INITMEM, "unused kernel");
420428
/*
421429
* Unmap the __init region but leave the VM area in place. This

arch/arm64/mm/trans_pgd.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,14 +42,16 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
4242
* the temporary mappings we use during restore.
4343
*/
4444
__set_pte(dst_ptep, pte_mkwrite_novma(pte));
45-
} else if ((debug_pagealloc_enabled() ||
46-
is_kfence_address((void *)addr)) && !pte_none(pte)) {
45+
} else if (!pte_none(pte)) {
4746
/*
4847
* debug_pagealloc will have removed the PTE_VALID bit if
4948
* the page isn't in use by the resume kernel. It may have
5049
* been in use by the original kernel, in which case we need
5150
* to put it back in our copy to do the restore.
5251
*
52+
* Other cases include kfence / vmalloc / memfd_secret which
53+
* may call `set_direct_map_invalid_noflush()`.
54+
*
5355
* Before marking this entry valid, check the pfn should
5456
* be mapped.
5557
*/

0 commit comments

Comments
 (0)