@@ -3227,7 +3227,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 	return false;
 }
 
-static struct page *next_uptodate_page(struct folio *folio,
+static struct folio *next_uptodate_page(struct folio *folio,
 				       struct address_space *mapping,
 				       struct xa_state *xas, pgoff_t end_pgoff)
 {
@@ -3258,7 +3258,7 @@ static struct page *next_uptodate_page(struct folio *folio,
 		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
 		if (xas->xa_index >= max_idx)
 			goto unlock;
-		return &folio->page;
+		return folio;
 unlock:
 		folio_unlock(folio);
 skip:
@@ -3268,15 +3268,15 @@ static struct page *next_uptodate_page(struct folio *folio,
 	return NULL;
 }
 
-static inline struct page *first_map_page(struct address_space *mapping,
+static inline struct folio *first_map_page(struct address_space *mapping,
 					  struct xa_state *xas,
 					  pgoff_t end_pgoff)
 {
 	return next_uptodate_page(xas_find(xas, end_pgoff),
 				  mapping, xas, end_pgoff);
 }
 
-static inline struct page *next_map_page(struct address_space *mapping,
+static inline struct folio *next_map_page(struct address_space *mapping,
 					 struct xa_state *xas,
 					 pgoff_t end_pgoff)
 {
@@ -3293,24 +3293,25 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
-	struct page *head, *page;
+	struct folio *folio;
+	struct page *page;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
 
 	rcu_read_lock();
-	head = first_map_page(mapping, &xas, end_pgoff);
-	if (!head)
+	folio = first_map_page(mapping, &xas, end_pgoff);
+	if (!folio)
 		goto out;
 
-	if (filemap_map_pmd(vmf, head)) {
+	if (filemap_map_pmd(vmf, &folio->page)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
 
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
-		page = find_subpage(head, xas.xa_index);
+		page = folio_file_page(folio, xas.xa_index);
 		if (PageHWPoison(page))
 			goto unlock;
 
@@ -3331,12 +3332,12 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		do_set_pte(vmf, page, addr);
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, addr, vmf->pte);
-		unlock_page(head);
+		folio_unlock(folio);
 		continue;
 unlock:
-		unlock_page(head);
-		put_page(head);
-	} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+		folio_unlock(folio);
+		folio_put(folio);
+	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
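
For context, a minimal standalone sketch (not kernel code) of the calling-convention change this patch makes: the helpers now hand back a struct folio * and callers ask the folio for the sub-page at a given file index (folio_file_page) instead of doing find_subpage() on a head page, and they pair folio_unlock()/folio_put() with that handle. The stand-in types and *_sketch helpers below are illustrative assumptions only, modelling a folio as a small fixed-size run of pages; they are not the real kernel definitions.

#include <stdio.h>

/* Stand-in types for the sketch only. */
struct page { unsigned long index; };
struct folio { struct page pages[4]; };	/* pretend 4-page folio */

/* Old style: callers carry the compound head page around. */
static struct page *find_subpage_sketch(struct page *head, unsigned long index)
{
	return head + (index - head->index);
}

/* New style: callers carry the folio and ask it for the sub-page. */
static struct page *folio_file_page_sketch(struct folio *folio, unsigned long index)
{
	return &folio->pages[index - folio->pages[0].index];
}

int main(void)
{
	struct folio folio;
	unsigned long i;

	for (i = 0; i < 4; i++)
		folio.pages[i].index = 8 + i;	/* folio covers file indices 8..11 */

	/* Both helpers resolve file index 10 to the same sub-page. */
	printf("old helper -> index %lu, new helper -> index %lu\n",
	       find_subpage_sketch(&folio.pages[0], 10)->index,
	       folio_file_page_sketch(&folio, 10)->index);
	return 0;
}

The practical point of the real conversion is the same as in this toy: once the loop holds a folio rather than a head page, the lock/unlock/refcount calls operate on the folio directly and no per-iteration compound_head() lookups are needed.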