@@ -2021,12 +2021,14 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
 	struct pagemapread *pm = walk->private;
 	struct vm_area_struct *vma = walk->vma;
 	u64 flags = 0, frame = 0;
+	spinlock_t *ptl;
 	int err = 0;
 	pte_t pte;
 
 	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags |= PM_SOFT_DIRTY;
 
+	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
 	pte = huge_ptep_get(walk->mm, addr, ptep);
 	if (pte_present(pte)) {
 		struct folio *folio = page_folio(pte_page(pte));
@@ -2054,11 +2056,12 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
 
 		err = add_to_pagemap(&pme, pm);
 		if (err)
-			return err;
+			break;
 		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
 	}
 
+	spin_unlock(ptl);
 	cond_resched();
 
 	return err;
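Taken together, the two hunks above bracket the hugetlb walk in pagemap_hugetlb_range() with the page table lock, which is why the early "return err" had to become "break": the function must fall out of the per-page loop and reach the new spin_unlock(ptl) instead of returning with the lock still held. A rough sketch of how the function reads afterwards, assembled from the diff lines plus assumed surrounding context (the per-page loop and the make_pme() call are not visible in the hunks and are only assumptions here):

static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	spinlock_t *ptl;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	/* New: read the hugetlb entry under its page table lock. */
	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
	pte = huge_ptep_get(walk->mm, addr, ptep);
	if (pte_present(pte)) {
		/* flag/frame setup elided -- unchanged by this diff */
	}

	/* Per-page loop assumed from context; not part of the hunks shown. */
	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);	/* assumed helper */

		err = add_to_pagemap(&pme, pm);
		if (err)
			break;	/* was "return err"; must not leave ptl locked */
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	/* New: every exit path now drops the lock before returning. */
	spin_unlock(ptl);
	cond_resched();

	return err;
}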
@@ -3132,17 +3135,22 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
 		unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
-	pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
+	pte_t huge_pte;
 	struct numa_maps *md;
 	struct page *page;
+	spinlock_t *ptl;
 
+	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
+	huge_pte = huge_ptep_get(walk->mm, addr, pte);
 	if (!pte_present(huge_pte))
-		return 0;
+		goto out;
 
 	page = pte_page(huge_pte);
 
 	md = walk->private;
 	gather_stats(page, md, pte_dirty(huge_pte), 1);
+out:
+	spin_unlock(ptl);
 	return 0;
 }
 
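The same pattern lands in gather_hugetlb_stats(): the hugetlb PTE is now read under huge_pte_lock(), and the early bail-out becomes "goto out" so the lock is released on that path as well. With the hunk applied, the function reads roughly as below (reassembled from the diff lines only; kernel context assumed):

static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte;
	struct numa_maps *md;
	struct page *page;
	spinlock_t *ptl;

	/* Read the hugetlb entry under its page table lock. */
	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	huge_pte = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(huge_pte))
		goto out;	/* was "return 0"; still unlocks below */

	page = pte_page(huge_pte);

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
out:
	spin_unlock(ptl);
	return 0;
}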