@@ -376,23 +376,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
 }
 
 /**
- * gfs2_allocate_page_backing - Allocate blocks for a write fault
- * @page: The (locked) page to allocate backing for
+ * gfs2_allocate_folio_backing - Allocate blocks for a write fault
+ * @folio: The (locked) folio to allocate backing for
  * @length: Size of the allocation
  *
- * We try to allocate all the blocks required for the page in one go.  This
+ * We try to allocate all the blocks required for the folio in one go.  This
  * might fail for various reasons, so we keep trying until all the blocks to
- * back this page are allocated.  If some of the blocks are already allocated,
+ * back this folio are allocated.  If some of the blocks are already allocated,
  * that is ok too.
  */
-static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
+static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
 {
-	u64 pos = page_offset(page);
+	u64 pos = folio_pos(folio);
 
 	do {
 		struct iomap iomap = { };
 
-		if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
+		if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
 			return -EIO;
 
 		if (length < iomap.length)
@@ -414,16 +414,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
 
 static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_alloc_parms ap = {};
-	u64 offset = page_offset(page);
+	u64 pos = folio_pos(folio);
 	unsigned int data_blocks, ind_blocks, rblocks;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 	struct gfs2_holder gh;
-	unsigned int length;
+	size_t length;
 	loff_t size;
 	int err;
 
@@ -436,23 +436,23 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		goto out_uninit;
 	}
 
-	/* Check page index against inode size */
+	/* Check folio index against inode size */
 	size = i_size_read(inode);
-	if (offset >= size) {
+	if (pos >= size) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
-	/* Update file times before taking page lock */
+	/* Update file times before taking folio lock */
 	file_update_time(vmf->vma->vm_file);
 
-	/* page is wholly or partially inside EOF */
-	if (size - offset < PAGE_SIZE)
-		length = size - offset;
+	/* folio is wholly or partially inside EOF */
+	if (size - pos < folio_size(folio))
+		length = size - pos;
 	else
-		length = PAGE_SIZE;
+		length = folio_size(folio);
 
-	gfs2_size_hint(vmf->vma->vm_file, offset, length);
+	gfs2_size_hint(vmf->vma->vm_file, pos, length);
 
 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 	set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -463,11 +463,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	 */
 
 	if (!gfs2_is_stuffed(ip) &&
-	    !gfs2_write_alloc_required(ip, offset, length)) {
-		lock_page(page);
-		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+	    !gfs2_write_alloc_required(ip, pos, length)) {
+		folio_lock(folio);
+		if (!folio_test_uptodate(folio) ||
+		    folio->mapping != inode->i_mapping) {
 			ret = VM_FAULT_NOPAGE;
-			unlock_page(page);
+			folio_unlock(folio);
 		}
 		goto out_unlock;
 	}
@@ -504,7 +505,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		goto out_trans_fail;
 	}
 
-	/* Unstuff, if required, and allocate backing blocks for page */
+	/* Unstuff, if required, and allocate backing blocks for folio */
 	if (gfs2_is_stuffed(ip)) {
 		err = gfs2_unstuff_dinode(ip);
 		if (err) {
@@ -513,22 +514,22 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		}
 	}
 
-	lock_page(page);
+	folio_lock(folio);
 	/* If truncated, we must retry the operation, we may have raced
 	 * with the glock demotion code.
 	 */
-	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+	if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
 		ret = VM_FAULT_NOPAGE;
 		goto out_page_locked;
 	}
 
-	err = gfs2_allocate_page_backing(page, length);
+	err = gfs2_allocate_folio_backing(folio, length);
 	if (err)
 		ret = vmf_fs_error(err);
 
 out_page_locked:
 	if (ret != VM_FAULT_LOCKED)
-		unlock_page(page);
+		folio_unlock(folio);
 out_trans_end:
 	gfs2_trans_end(sdp);
 out_trans_fail:
@@ -540,8 +541,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 out_uninit:
 	gfs2_holder_uninit(&gh);
 	if (ret == VM_FAULT_LOCKED) {
-		set_page_dirty(page);
-		wait_for_stable_page(page);
+		folio_mark_dirty(folio);
+		folio_wait_stable(folio);
 	}
 	sb_end_pagefault(inode->i_sb);
 	return ret;
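As a reading aid, here is a minimal sketch (not the gfs2 code; example_mkwrite is a hypothetical name and the filesystem-specific allocation step is elided) of the folio-based ->page_mkwrite pattern this patch adopts: derive the folio from vmf->page with page_folio(), clamp the dirtied length to EOF using folio_pos() and folio_size() instead of assuming PAGE_SIZE, and use the folio_*() helpers for locking, uptodate checks, dirtying, and writeback stability.

/*
 * Illustrative sketch only, not the gfs2 implementation: a generic
 * ->page_mkwrite-style handler using the folio helpers this patch
 * switches to.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t example_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);	/* was: vmf->page */
	struct inode *inode = file_inode(vmf->vma->vm_file);
	loff_t size = i_size_read(inode);
	u64 pos = folio_pos(folio);		/* was: page_offset(page) */
	size_t length;

	if (pos >= size)
		return VM_FAULT_SIGBUS;	/* folio lies entirely beyond EOF */

	/* Clamp to EOF; folio_size() replaces the old PAGE_SIZE assumption,
	 * since a large folio may span more than one page. */
	if (size - pos < folio_size(folio))
		length = size - pos;
	else
		length = folio_size(folio);

	folio_lock(folio);			/* was: lock_page(page) */
	if (!folio_test_uptodate(folio) ||
	    folio->mapping != inode->i_mapping) {
		/* Raced with truncation; make the caller retry the fault. */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}

	/* ... allocate backing blocks for [pos, pos + length) here ... */

	folio_mark_dirty(folio);	/* was: set_page_dirty(page) */
	folio_wait_stable(folio);	/* was: wait_for_stable_page(page) */
	return VM_FAULT_LOCKED;		/* folio stays locked for the caller */
}

Returning VM_FAULT_LOCKED with the folio still locked, dirtied, and stabilized matches the contract gfs2_page_mkwrite follows in the hunks above.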