@@ -243,7 +243,7 @@ static int orangefs_writepages(struct address_space *mapping,
 	return ret;
 }
 
-static int orangefs_launder_page(struct page *);
+static int orangefs_launder_folio(struct folio *);
 
 static void orangefs_readahead(struct readahead_control *rac)
 {
@@ -290,14 +290,15 @@ static void orangefs_readahead(struct readahead_control *rac)
 
 static int orangefs_readpage(struct file *file, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = page->mapping->host;
 	struct iov_iter iter;
 	struct bio_vec bv;
 	ssize_t ret;
 	loff_t off; /* offset into this page */
 
-	if (PageDirty(page))
-		orangefs_launder_page(page);
+	if (folio_test_dirty(folio))
+		orangefs_launder_folio(folio);
 
 	off = page_offset(page);
 	bv.bv_page = page;
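Note (not part of the patch): ->readpage, like ->page_mkwrite further down, still hands the filesystem a struct page, so the conversion bridges to folios with page_folio(), which returns the folio containing a given page; the folio_test_*() helpers then stand in for the old Page*() flag tests. A minimal sketch of that idiom, with my_readpage() as a purely hypothetical stand-in:

#include <linux/pagemap.h>

/* Hypothetical example of the page_folio() bridging idiom; not OrangeFS code. */
static int my_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);	/* folio that contains this page */

	/* folio_test_dirty() is the folio-native replacement for PageDirty() */
	if (folio_test_dirty(folio))
		pr_debug("folio at index %lu is dirty\n", folio->index);

	return 0;
}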
@@ -330,6 +331,7 @@ static int orangefs_write_begin(struct file *file,
 		void **fsdata)
 {
 	struct orangefs_write_range *wr;
+	struct folio *folio;
 	struct page *page;
 	pgoff_t index;
 	int ret;
@@ -341,27 +343,28 @@ static int orangefs_write_begin(struct file *file,
 		return -ENOMEM;
 
 	*pagep = page;
+	folio = page_folio(page);
 
-	if (PageDirty(page) && !PagePrivate(page)) {
+	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
 		/*
 		 * Should be impossible. If it happens, launder the page
 		 * since we don't know what's dirty. This will WARN in
 		 * orangefs_writepage_locked.
 		 */
-		ret = orangefs_launder_page(page);
+		ret = orangefs_launder_folio(folio);
 		if (ret)
 			return ret;
 	}
-	if (PagePrivate(page)) {
+	if (folio_test_private(folio)) {
 		struct orangefs_write_range *wr;
-		wr = (struct orangefs_write_range *)page_private(page);
+		wr = folio_get_private(folio);
 		if (wr->pos + wr->len == pos &&
 		    uid_eq(wr->uid, current_fsuid()) &&
 		    gid_eq(wr->gid, current_fsgid())) {
 			wr->len += len;
 			goto okay;
 		} else {
-			ret = orangefs_launder_page(page);
+			ret = orangefs_launder_folio(folio);
 			if (ret)
 				return ret;
 		}
@@ -375,7 +378,7 @@ static int orangefs_write_begin(struct file *file,
 	wr->len = len;
 	wr->uid = current_fsuid();
 	wr->gid = current_fsgid();
-	attach_page_private(page, wr);
+	folio_attach_private(folio, wr);
 okay:
 	return 0;
 }
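For context (not part of the patch): folio_get_private() returns the folio's ->private pointer as a void *, which is why the explicit cast that page_private() required disappears above, and folio_attach_private() both stores the pointer and takes a reference on the folio. A minimal sketch of the attach/get/detach lifecycle, using a made-up my_private structure purely for illustration:

#include <linux/pagemap.h>
#include <linux/slab.h>

struct my_private {			/* illustrative stand-in for orangefs_write_range */
	loff_t pos;
	size_t len;
};

static void my_track(struct folio *folio, loff_t pos, size_t len)
{
	struct my_private *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return;
	p->pos = pos;
	p->len = len;
	folio_attach_private(folio, p);	/* sets PG_private and grabs a folio reference */
}

static void my_untrack(struct folio *folio)
{
	struct my_private *p = folio_get_private(folio);	/* typed read, no cast */

	if (p)
		kfree(folio_detach_private(folio));	/* clears ->private, drops the reference */
}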
@@ -481,7 +484,7 @@ static void orangefs_invalidate_folio(struct folio *folio,
 	 * Thus the following runs if wr was modified above.
 	 */
 
-	orangefs_launder_page(&folio->page);
+	orangefs_launder_folio(folio);
 }
 
 static int orangefs_releasepage(struct page *page, gfp_t foo)
@@ -494,17 +497,17 @@ static void orangefs_freepage(struct page *page)
 	kfree(detach_page_private(page));
 }
 
-static int orangefs_launder_page(struct page *page)
+static int orangefs_launder_folio(struct folio *folio)
 {
 	int r = 0;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
 	};
-	wait_on_page_writeback(page);
-	if (clear_page_dirty_for_io(page)) {
-		r = orangefs_writepage_locked(page, &wbc);
-		end_page_writeback(page);
+	folio_wait_writeback(folio);
+	if (folio_clear_dirty_for_io(folio)) {
+		r = orangefs_writepage_locked(&folio->page, &wbc);
+		folio_end_writeback(folio);
 	}
 	return r;
 }
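Roughly how this hook is driven (a simplified sketch for orientation, not the actual mm/truncate.c code): invalidate_inode_pages2_range() can meet a locked folio that is still dirty and cannot simply be dropped, so it asks the filesystem to write it out through ->launder_folio, with the folio staying locked across the call and the method returning 0 or an errno:

/* Simplified, illustrative caller; the real logic lives in mm/truncate.c. */
static int launder(struct address_space *mapping, struct folio *folio)
{
	if (!folio_test_dirty(folio))
		return 0;				/* nothing to write back */
	if (!mapping->a_ops->launder_folio)
		return 0;				/* filesystem opted out */
	/* folio is locked here; the method must clean it before we invalidate it */
	return mapping->a_ops->launder_folio(folio);
}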
@@ -637,13 +640,13 @@ static const struct address_space_operations orangefs_address_operations = {
 	.invalidate_folio = orangefs_invalidate_folio,
 	.releasepage = orangefs_releasepage,
 	.freepage = orangefs_freepage,
-	.launder_page = orangefs_launder_page,
+	.launder_folio = orangefs_launder_folio,
 	.direct_IO = orangefs_direct_IO,
 };
 
 vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
 	unsigned long *bitlock = &orangefs_inode->bitlock;
@@ -657,27 +660,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
 		goto out;
 	}
 
-	lock_page(page);
-	if (PageDirty(page) && !PagePrivate(page)) {
+	folio_lock(folio);
+	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
 		/*
-		 * Should be impossible. If it happens, launder the page
+		 * Should be impossible. If it happens, launder the folio
 		 * since we don't know what's dirty. This will WARN in
 		 * orangefs_writepage_locked.
 		 */
-		if (orangefs_launder_page(page)) {
+		if (orangefs_launder_folio(folio)) {
 			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
 			goto out;
 		}
 	}
-	if (PagePrivate(page)) {
-		wr = (struct orangefs_write_range *)page_private(page);
+	if (folio_test_private(folio)) {
+		wr = folio_get_private(folio);
 		if (uid_eq(wr->uid, current_fsuid()) &&
 		    gid_eq(wr->gid, current_fsgid())) {
-			wr->pos = page_offset(page);
+			wr->pos = page_offset(vmf->page);
 			wr->len = PAGE_SIZE;
 			goto okay;
 		} else {
-			if (orangefs_launder_page(page)) {
+			if (orangefs_launder_folio(folio)) {
 				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
 				goto out;
 			}
@@ -688,27 +691,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
 		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
 		goto out;
 	}
-	wr->pos = page_offset(page);
+	wr->pos = page_offset(vmf->page);
 	wr->len = PAGE_SIZE;
 	wr->uid = current_fsuid();
 	wr->gid = current_fsgid();
-	attach_page_private(page, wr);
+	folio_attach_private(folio, wr);
 okay:
 
 	file_update_time(vmf->vma->vm_file);
-	if (page->mapping != inode->i_mapping) {
-		unlock_page(page);
+	if (folio->mapping != inode->i_mapping) {
+		folio_unlock(folio);
 		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
 		goto out;
 	}
 
 	/*
-	 * We mark the page dirty already here so that when freeze is in
+	 * We mark the folio dirty already here so that when freeze is in
 	 * progress, we are guaranteed that writeback during freezing will
-	 * see the dirty page and writeprotect it again.
+	 * see the dirty folio and writeprotect it again.
 	 */
-	set_page_dirty(page);
-	wait_for_stable_page(page);
+	folio_mark_dirty(folio);
+	folio_wait_stable(folio);
 	ret = VM_FAULT_LOCKED;
 out:
 	sb_end_pagefault(inode->i_sb);