@@ -709,26 +709,31 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
709
709
return __dax_invalidate_entry (mapping , index , false);
710
710
}
711
711
712
- static int copy_cow_page_dax (struct block_device * bdev , struct dax_device * dax_dev ,
713
- sector_t sector , struct page * to , unsigned long vaddr )
712
+ static sector_t dax_iomap_sector (const struct iomap * iomap , loff_t pos )
714
713
{
714
+ return (iomap -> addr + (pos & PAGE_MASK ) - iomap -> offset ) >> 9 ;
715
+ }
716
+
717
/*
 * Copy the on-media page backing a CoW fault into the destination page
 * (@vmf->cow_page) that will be installed for the write.
 *
 * The source location is taken from the iomap iterator: the sector is
 * derived from iter->iomap / iter->pos and mapped through the iomap's
 * bdev and dax_dev.
 *
 * Returns 0 on success or a negative errno from bdev_dax_pgoff() or
 * dax_direct_access().
 */
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	/* Translate the device sector into a page offset on the DAX device. */
	rc = bdev_dax_pgoff(iter->iomap.bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	/*
	 * dax_direct_access() and the subsequent copy from @kaddr must both
	 * happen under the dax read lock, so the mapping stays valid.
	 */
	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	/* Copy one page from DAX media into the (kmapped) CoW page. */
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
@@ -1005,11 +1010,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
1005
1010
}
1006
1011
EXPORT_SYMBOL_GPL (dax_writeback_mapping_range );
1007
1012
1008
- static sector_t dax_iomap_sector (const struct iomap * iomap , loff_t pos )
1009
- {
1010
- return (iomap -> addr + (pos & PAGE_MASK ) - iomap -> offset ) >> 9 ;
1011
- }
1012
-
1013
1013
static int dax_iomap_pfn (const struct iomap * iomap , loff_t pos , size_t size ,
1014
1014
pfn_t * pfnp )
1015
1015
{
@@ -1332,19 +1332,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1332
1332
static vm_fault_t dax_fault_cow_page (struct vm_fault * vmf ,
1333
1333
const struct iomap_iter * iter )
1334
1334
{
1335
- sector_t sector = dax_iomap_sector (& iter -> iomap , iter -> pos );
1336
- unsigned long vaddr = vmf -> address ;
1337
1335
vm_fault_t ret ;
1338
1336
int error = 0 ;
1339
1337
1340
1338
switch (iter -> iomap .type ) {
1341
1339
case IOMAP_HOLE :
1342
1340
case IOMAP_UNWRITTEN :
1343
- clear_user_highpage (vmf -> cow_page , vaddr );
1341
+ clear_user_highpage (vmf -> cow_page , vmf -> address );
1344
1342
break ;
1345
1343
case IOMAP_MAPPED :
1346
- error = copy_cow_page_dax (iter -> iomap .bdev , iter -> iomap .dax_dev ,
1347
- sector , vmf -> cow_page , vaddr );
1344
+ error = copy_cow_page_dax (vmf , iter );
1348
1345
break ;
1349
1346
default :
1350
1347
WARN_ON_ONCE (1 );
0 commit comments