@@ -1252,7 +1252,7 @@ xfs_file_llseek(
1252
1252
}
1253
1253
1254
1254
static inline vm_fault_t
1255
- xfs_dax_fault (
1255
+ xfs_dax_fault_locked (
1256
1256
struct vm_fault * vmf ,
1257
1257
unsigned int order ,
1258
1258
bool write_fault )
@@ -1273,6 +1273,45 @@ xfs_dax_fault(
1273
1273
return ret ;
1274
1274
}
1275
1275
1276
+ static vm_fault_t
1277
+ xfs_dax_read_fault (
1278
+ struct vm_fault * vmf ,
1279
+ unsigned int order )
1280
+ {
1281
+ struct xfs_inode * ip = XFS_I (file_inode (vmf -> vma -> vm_file ));
1282
+ unsigned int lock_mode ;
1283
+ vm_fault_t ret ;
1284
+
1285
+ lock_mode = xfs_ilock_for_write_fault (ip );
1286
+ ret = xfs_dax_fault_locked (vmf , order , false);
1287
+ xfs_iunlock (ip , lock_mode );
1288
+
1289
+ return ret ;
1290
+ }
1291
+
1292
+ static vm_fault_t
1293
+ xfs_write_fault (
1294
+ struct vm_fault * vmf ,
1295
+ unsigned int order )
1296
+ {
1297
+ struct inode * inode = file_inode (vmf -> vma -> vm_file );
1298
+ unsigned int lock_mode ;
1299
+ vm_fault_t ret ;
1300
+
1301
+ sb_start_pagefault (inode -> i_sb );
1302
+ file_update_time (vmf -> vma -> vm_file );
1303
+
1304
+ lock_mode = xfs_ilock_for_write_fault (XFS_I (inode ));
1305
+ if (IS_DAX (inode ))
1306
+ ret = xfs_dax_fault_locked (vmf , order , true);
1307
+ else
1308
+ ret = iomap_page_mkwrite (vmf , & xfs_page_mkwrite_iomap_ops );
1309
+ xfs_iunlock (XFS_I (inode ), lock_mode );
1310
+
1311
+ sb_end_pagefault (inode -> i_sb );
1312
+ return ret ;
1313
+ }
1314
+
1276
1315
/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
@@ -1290,34 +1329,14 @@ __xfs_filemap_fault(
1290
1329
bool write_fault )
1291
1330
{
1292
1331
struct inode * inode = file_inode (vmf -> vma -> vm_file );
1293
- struct xfs_inode * ip = XFS_I (inode );
1294
- vm_fault_t ret ;
1295
- unsigned int lock_mode = 0 ;
1296
-
1297
- trace_xfs_filemap_fault (ip , order , write_fault );
1298
1332
1299
- if (write_fault ) {
1300
- sb_start_pagefault (inode -> i_sb );
1301
- file_update_time (vmf -> vma -> vm_file );
1302
- }
1303
-
1304
- if (IS_DAX (inode ) || write_fault )
1305
- lock_mode = xfs_ilock_for_write_fault (XFS_I (inode ));
1306
-
1307
- if (IS_DAX (inode )) {
1308
- ret = xfs_dax_fault (vmf , order , write_fault );
1309
- } else if (write_fault ) {
1310
- ret = iomap_page_mkwrite (vmf , & xfs_page_mkwrite_iomap_ops );
1311
- } else {
1312
- ret = filemap_fault (vmf );
1313
- }
1314
-
1315
- if (lock_mode )
1316
- xfs_iunlock (XFS_I (inode ), lock_mode );
1333
+ trace_xfs_filemap_fault (XFS_I (inode ), order , write_fault );
1317
1334
1318
1335
if (write_fault )
1319
- sb_end_pagefault (inode -> i_sb );
1320
- return ret ;
1336
+ return xfs_write_fault (vmf , order );
1337
+ if (IS_DAX (inode ))
1338
+ return xfs_dax_read_fault (vmf , order );
1339
+ return filemap_fault (vmf );
1321
1340
}
1322
1341
1323
1342
static inline bool
0 commit comments