@@ -120,9 +120,8 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,

static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
			   pte_t oldpte, pte_t *pte, int target_node,
-			   struct folio **foliop)
+			   struct folio *folio)
{
-	struct folio *folio = NULL;
	bool ret = true;
	bool toptier;
	int nid;
@@ -131,7 +130,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
	if (pte_protnone(oldpte))
		goto skip;

-	folio = vm_normal_folio(vma, addr, oldpte);
	if (!folio)
		goto skip;

@@ -173,7 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));

skip:
-	*foliop = folio;
	return ret;
}

@@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
 * retrieve sub-batches.
 */
static void commit_anon_folio_batch(struct vm_area_struct *vma,
-		struct folio *folio, unsigned long addr, pte_t *ptep,
+		struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
{
-	struct page *first_page = folio_page(folio, 0);
	bool expected_anon_exclusive;
	int sub_batch_idx = 0;
	int len;
@@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
}

static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
-		struct folio *folio, unsigned long addr, pte_t *ptep,
+		struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
{
	bool set_write;
@@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
				/* idx = */ 0, set_write, tlb);
		return;
	}
-	commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb);
+	commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
}

static long change_pte_range(struct mmu_gather *tlb,
@@ -305,15 +301,19 @@ static long change_pte_range(struct mmu_gather *tlb,
			const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
			int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
			struct folio *folio = NULL;
+			struct page *page;
			pte_t ptent;

+			page = vm_normal_page(vma, addr, oldpte);
+			if (page)
+				folio = page_folio(page);
			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				int ret = prot_numa_skip(vma, addr, oldpte, pte,
-							 target_node, &folio);
+							 target_node, folio);
				if (ret) {

					/* determine batch to skip */
@@ -323,9 +323,6 @@ static long change_pte_range(struct mmu_gather *tlb,
				}
			}

-			if (!folio)
-				folio = vm_normal_folio(vma, addr, oldpte);
-
			nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);

			oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
@@ -351,7 +348,7 @@ static long change_pte_range(struct mmu_gather *tlb,
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent))
-				set_write_prot_commit_flush_ptes(vma, folio,
+				set_write_prot_commit_flush_ptes(vma, folio, page,
						addr, pte, oldpte, ptent, nr_ptes, tlb);
			else
				prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
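
Net effect of the hunks above: change_pte_range() now resolves the page once per PTE with vm_normal_page() and derives the folio via page_folio(), then passes both down, so prot_numa_skip() no longer returns a folio through an output parameter and commit_anon_folio_batch() no longer re-derives first_page with folio_page(). Below is a minimal user-space sketch of that "resolve once in the caller, pass it down" shape; the stand-in types and helper names (lookup_page, page_to_folio, should_skip, commit) are hypothetical illustrations, not the kernel API.

/*
 * Minimal sketch: the caller resolves the page and its folio once, then
 * hands both to the helpers, instead of each helper re-deriving them.
 * Hypothetical user-space stand-ins only; not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct page  { int id; };
struct folio { struct page *head; };

/* Stand-in for vm_normal_page(): may return NULL for "special" entries. */
static struct page *lookup_page(struct page *pages, size_t n, size_t idx)
{
	return idx < n ? &pages[idx] : NULL;
}

/* Stand-in for page_folio(): derive the folio that contains the page. */
static struct folio *page_to_folio(struct page *page, struct folio *folio)
{
	folio->head = page;
	return folio;
}

/* Helpers take the already-resolved folio/page as plain arguments. */
static int should_skip(const struct folio *folio)
{
	return folio == NULL;	/* mirrors "if (!folio) goto skip;" */
}

static void commit(const struct folio *folio, const struct page *first_page)
{
	(void)folio;
	printf("commit folio of page %d\n", first_page->id);
}

int main(void)
{
	struct page pages[2] = { { .id = 0 }, { .id = 1 } };
	struct folio f;

	/* Resolve once in the caller ... */
	struct page *page = lookup_page(pages, 2, 1);
	struct folio *folio = page ? page_to_folio(page, &f) : NULL;

	/* ... and pass the results down, as change_pte_range() now does. */
	if (!should_skip(folio))
		commit(folio, page);
	return 0;
}

The kernel change follows the same shape: when vm_normal_page() returns NULL (special mappings such as the zero page), folio stays NULL and prot_numa_skip() treats it as a skip, matching the old "if (!folio) goto skip;" behaviour without a second lookup.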