
Commit 44562c7

Ryan Roberts authored and willdeacon committed
mm/vmalloc: Enter lazy mmu mode while manipulating vmalloc ptes
Wrap vmalloc's pte table manipulation loops with arch_enter_lazy_mmu_mode() / arch_leave_lazy_mmu_mode(). This provides the arch code with the opportunity to optimize the pte manipulations.

Note that vmap_pfn() already uses lazy mmu mode since it delegates to apply_to_page_range(), which enters lazy mmu mode for both user and kernel mappings.

These hooks will shortly be used by arm64 to improve vmalloc performance.

Reviewed-by: Uladzislau Rezki (Sony) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Reviewed-by: Anshuman Khandual <[email protected]>
Signed-off-by: Ryan Roberts <[email protected]>
Tested-by: Luiz Capitulino <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
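For context: arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are generic hooks from include/linux/pgtable.h that default to no-ops; an architecture that opts in can use them to defer the maintenance work behind a run of pte updates and complete it in one batch on leave. A minimal sketch of the idea, assuming a hypothetical backend (all demo_* names are invented for illustration; this is not the arm64 implementation the message refers to):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(bool, demo_lazy_mmu_active);

	/* Hypothetical helper: do the deferred maintenance for the whole batch. */
	static void demo_flush_deferred_pte_maintenance(void)
	{
		/* e.g. one barrier/TLB operation instead of one per pte */
	}

	void arch_enter_lazy_mmu_mode(void)
	{
		/* Per-pte maintenance may be deferred from this point on. */
		__this_cpu_write(demo_lazy_mmu_active, true);
	}

	void arch_leave_lazy_mmu_mode(void)
	{
		__this_cpu_write(demo_lazy_mmu_active, false);
		demo_flush_deferred_pte_maintenance();
	}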
1 parent 06fc959 commit 44562c7

File tree

1 file changed (+14, -0 lines changed)


mm/vmalloc.c

Lines changed: 14 additions & 0 deletions
@@ -104,6 +104,9 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
+
+	arch_enter_lazy_mmu_mode();
+
 	do {
 		if (unlikely(!pte_none(ptep_get(pte)))) {
 			if (pfn_valid(pfn)) {
@@ -127,6 +130,8 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 		pfn++;
 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
@@ -354,6 +359,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned long size = PAGE_SIZE;
 
 	pte = pte_offset_kernel(pmd, addr);
+	arch_enter_lazy_mmu_mode();
+
 	do {
 #ifdef CONFIG_HUGETLB_PAGE
 		size = arch_vmap_pte_range_unmap_size(addr, pte);
@@ -370,6 +377,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		ptent = ptep_get_and_clear(&init_mm, addr, pte);
 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 }
 
@@ -515,6 +524,9 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
+
+	arch_enter_lazy_mmu_mode();
+
 	do {
 		struct page *page = pages[*nr];
 
@@ -528,6 +540,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
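Condensed, the three hunks share the same shape: enter lazy mode only after any allocation that can fail, bracket exactly the pte-store loop, and leave before *mask is updated. An illustrative extract of the map paths above (loop body elided; the unmap path has no allocation step):

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	arch_enter_lazy_mmu_mode();

	do {
		/* ... install (or clear) one pte ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	*mask |= PGTBL_PTE_MODIFIED;

Keeping the lazy section limited to the pte stores means nothing inside it can sleep or fail, which is what allows an architecture to safely defer the associated maintenance until arch_leave_lazy_mmu_mode().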
