
Commit ef493d2

Ryan Roberts authored and willdeacon committed
arm64/mm: Refactor __set_ptes() and __ptep_get_and_clear()
Refactor __set_ptes(), set_pmd_at() and set_pud_at() so that they are all thin wrappers around a new common __set_ptes_anysz(), which takes a pgsize parameter. Additionally, refactor __ptep_get_and_clear() and pmdp_huge_get_and_clear() to use a new common __ptep_get_and_clear_anysz(), which also takes a pgsize parameter.

These changes will permit the huge_pte API to efficiently batch-set pgtable entries and take advantage of the future barrier optimizations.

Additionally, since the new *_anysz() helpers call the correct page_table_check_*_set() API based on pgsize, huge_ptes will get proper page_table_check coverage. Currently the huge_pte API always uses the pte API, which assumes an entry only covers a single page.

Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: Ryan Roberts <[email protected]>
Reviewed-by: Anshuman Khandual <[email protected]>
Tested-by: Luiz Capitulino <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
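As a rough illustration of the batching this refactor is meant to enable (not part of this commit), a hugetlb-style caller could install a run of contiguous PMD-sized entries with a single call to the new helper. The wrapper name below is hypothetical; only the __set_ptes_anysz() signature and semantics come from this patch.

/*
 * Hypothetical sketch only: set "nr" contiguous PMD-sized entries in one
 * call. __set_ptes_anysz() advances the PFN by PMD_SIZE >> PAGE_SHIFT per
 * entry and performs the PMD-level page_table_check accounting, rather
 * than the per-page PTE accounting used when pgsize == PAGE_SIZE.
 */
static inline void example_set_contig_pmds(struct mm_struct *mm, pmd_t *pmdp,
                                           pmd_t pmd, unsigned int nr)
{
        __set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
}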
1 parent 91e4066 commit ef493d2

File tree

1 file changed: +73 −41 lines


arch/arm64/include/asm/pgtable.h

Lines changed: 73 additions & 41 deletions
@@ -423,23 +423,6 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
 	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
 }
 
-static inline void __set_ptes(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
-			      pte_t *ptep, pte_t pte, unsigned int nr)
-{
-	page_table_check_ptes_set(mm, ptep, pte, nr);
-	__sync_cache_and_tags(pte, nr);
-
-	for (;;) {
-		__check_safe_pte_update(mm, ptep, pte);
-		__set_pte(ptep, pte);
-		if (--nr == 0)
-			break;
-		ptep++;
-		pte = pte_advance_pfn(pte, 1);
-	}
-}
-
 /*
  * Hugetlb definitions.
  */
@@ -649,30 +632,62 @@ static inline pgprot_t pud_pgprot(pud_t pud)
 	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
 }
 
-static inline void __set_pte_at(struct mm_struct *mm,
-				unsigned long __always_unused addr,
-				pte_t *ptep, pte_t pte, unsigned int nr)
+static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
+				    pte_t pte, unsigned int nr,
+				    unsigned long pgsize)
 {
-	__sync_cache_and_tags(pte, nr);
-	__check_safe_pte_update(mm, ptep, pte);
-	__set_pte(ptep, pte);
+	unsigned long stride = pgsize >> PAGE_SHIFT;
+
+	switch (pgsize) {
+	case PAGE_SIZE:
+		page_table_check_ptes_set(mm, ptep, pte, nr);
+		break;
+	case PMD_SIZE:
+		page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr);
+		break;
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SIZE:
+		page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr);
+		break;
+#endif
+	default:
+		VM_WARN_ON(1);
+	}
+
+	__sync_cache_and_tags(pte, nr * stride);
+
+	for (;;) {
+		__check_safe_pte_update(mm, ptep, pte);
+		__set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte = pte_advance_pfn(pte, stride);
+	}
+}
+
+static inline void __set_ptes(struct mm_struct *mm,
+			      unsigned long __always_unused addr,
+			      pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	__set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
 }
 
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-			      pmd_t *pmdp, pmd_t pmd)
+static inline void __set_pmds(struct mm_struct *mm,
+			      unsigned long __always_unused addr,
+			      pmd_t *pmdp, pmd_t pmd, unsigned int nr)
 {
-	page_table_check_pmd_set(mm, pmdp, pmd);
-	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
-				PMD_SIZE >> PAGE_SHIFT);
+	__set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
 }
+#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
 
-static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
-			      pud_t *pudp, pud_t pud)
+static inline void __set_puds(struct mm_struct *mm,
+			      unsigned long __always_unused addr,
+			      pud_t *pudp, pud_t pud, unsigned int nr)
 {
-	page_table_check_pud_set(mm, pudp, pud);
-	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
-				PUD_SIZE >> PAGE_SHIFT);
+	__set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
 }
+#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
 
 #define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
 #define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)
@@ -1301,16 +1316,37 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
 
-static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
-					 unsigned long address, pte_t *ptep)
+static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
+					       pte_t *ptep,
+					       unsigned long pgsize)
 {
 	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
 
-	page_table_check_pte_clear(mm, pte);
+	switch (pgsize) {
+	case PAGE_SIZE:
+		page_table_check_pte_clear(mm, pte);
+		break;
+	case PMD_SIZE:
+		page_table_check_pmd_clear(mm, pte_pmd(pte));
+		break;
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SIZE:
+		page_table_check_pud_clear(mm, pte_pud(pte));
+		break;
+#endif
+	default:
+		VM_WARN_ON(1);
+	}
 
 	return pte;
 }
 
+static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
+					 unsigned long address, pte_t *ptep)
+{
+	return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE);
+}
+
 static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep, unsigned int nr, int full)
 {
@@ -1347,11 +1383,7 @@ static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long address, pmd_t *pmdp)
 {
-	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
-
-	page_table_check_pmd_clear(mm, pmd);
-
-	return pmd;
+	return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE));
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
