@@ -481,19 +481,16 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
 
 static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
				     struct nilfs_recovery_block *rb,
-				     loff_t pos, struct page *page)
+				     loff_t pos, struct folio *folio)
 {
	struct buffer_head *bh_org;
-	size_t from = pos & ~PAGE_MASK;
-	void *kaddr;
+	size_t from = offset_in_folio(folio, pos);
 
	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh_org))
		return -EIO;
 
-	kaddr = kmap_local_page(page);
-	memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
-	kunmap_local(kaddr);
+	memcpy_to_folio(folio, from, bh_org->b_data, bh_org->b_size);
	brelse(bh_org);
	return 0;
 }
@@ -531,13 +528,13 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
			goto failed_inode;
		}
 
-		err = nilfs_recovery_copy_block(nilfs, rb, pos, &folio->page);
+		err = nilfs_recovery_copy_block(nilfs, rb, pos, folio);
		if (unlikely(err))
-			goto failed_page;
+			goto failed_folio;
 
		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
-			goto failed_page;
+			goto failed_folio;
 
		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, folio, NULL);
@@ -548,7 +545,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
		(*nr_salvaged_blocks)++;
		goto next;
 
- failed_page:
+ failed_folio:
		folio_unlock(folio);
		folio_put(folio);
 
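For context on the two helpers introduced above: offset_in_folio(folio, pos) replaces the open-coded "pos & ~PAGE_MASK" and computes the byte offset of pos within a folio that may span more than one page, and memcpy_to_folio() replaces the kmap_local_page()/memcpy()/kunmap_local() triple while also handling multi-page highmem folios. A rough sketch of the copy helper, modeled on include/linux/highmem.h rather than quoted verbatim:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Sketch of memcpy_to_folio(): copy @len bytes from @from into @folio
 * starting at byte @offset.  See include/linux/highmem.h for the real one.
 */
static void memcpy_to_folio_sketch(struct folio *folio, size_t offset,
				   const char *from, size_t len)
{
	do {
		/* Map the single page of the folio that contains @offset. */
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		/*
		 * Highmem folios can only be mapped one page at a time,
		 * so clamp each copy to the end of the current page.
		 */
		if (folio_test_highmem(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}

On configurations without CONFIG_HIGHMEM the loop runs exactly once over the whole folio, so the new call in nilfs_recovery_copy_block() costs about the same as the removed kmap_local_page() sequence while staying correct for folios larger than a page.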