@@ -5,7 +5,6 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
-#include <linux/writeback.h>	/* generic_writepages */
 #include <linux/slab.h>
 #include <linux/pagevec.h>
 #include <linux/task_io_accounting_ops.h>
@@ -384,6 +383,38 @@ static void ceph_readahead(struct readahead_control *ractl)
 	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
 }
 
+#ifdef CONFIG_CEPH_FSCACHE
+static void ceph_set_page_fscache(struct page *page)
+{
+	set_page_fscache(page);
+}
+
+static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
+{
+	struct inode *inode = priv;
+
+	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
+		ceph_fscache_invalidate(inode, false);
+}
+
+static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
+
+	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
+			       ceph_fscache_write_terminated, inode, caching);
+}
+#else
+static inline void ceph_set_page_fscache(struct page *page)
+{
+}
+
+static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
+{
+}
+#endif /* CONFIG_CEPH_FSCACHE */
+
 struct ceph_writeback_ctl
 {
 	loff_t i_size;
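[Annotation, not part of the diff] ceph_set_page_fscache() marks PG_fscache on a page whose contents are also being copied to the local cache; ceph_fscache_write_terminated() is the completion callback passed to fscache_write_to_cache() and invalidates the ceph fscache cookie on any hard error (anything other than -ENOBUFS, which here is taken to mean the cache simply declined the write); ceph_fscache_write_to_cache() hands one contiguous byte range of the inode to fscache. With CONFIG_CEPH_FSCACHE disabled the stubs compile to no-ops, so the writeback paths below can call them unconditionally.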
@@ -499,6 +530,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	struct ceph_writeback_ctl ceph_wbc;
 	struct ceph_osd_client *osdc = &fsc->client->osdc;
 	struct ceph_osd_request *req;
+	bool caching = ceph_is_cache_enabled(inode);
 
 	dout("writepage %p idx %lu\n", page, page->index);
 
@@ -537,16 +569,17 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
 		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
 
-	set_page_writeback(page);
 	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
 				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
 				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
 				    true);
-	if (IS_ERR(req)) {
-		redirty_page_for_writepage(wbc, page);
-		end_page_writeback(page);
+	if (IS_ERR(req))
 		return PTR_ERR(req);
-	}
+
+	set_page_writeback(page);
+	if (caching)
+		ceph_set_page_fscache(page);
+	ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
 	/* it may be a short write due to an object boundary */
 	WARN_ON_ONCE(len > thp_size(page));
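[Annotation, not part of the diff] set_page_writeback() is now deferred until ceph_osdc_new_request() has succeeded, so the error path no longer needs the redirty_page_for_writepage()/end_page_writeback() pair; on failure the page simply stays dirty. Once the request exists, the page is marked for writeback (and, when caching is enabled, PG_fscache), and the same range is handed to fscache alongside the OSD write.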
@@ -605,6 +638,9 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode = page->mapping->host;
 	BUG_ON(!inode);
 	ihold(inode);
+
+	wait_on_page_fscache(page);
+
 	err = writepage_nounlock(page, wbc);
 	if (err == -ERESTARTSYS) {
 		/* direct memory reclaimer was killed by SIGKILL. return 0
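[Annotation, not part of the diff] ->writepage() must not overwrite or recycle a page while a cache write may still be reading from it, so it first waits for PG_fscache to clear. As a rough sketch from memory (not quoted from this patch), the helper in this kernel era boils down to waiting on that page bit:

static inline void wait_on_page_fscache(struct page *page)
{
	if (PageFsCache(page))		/* PG_fscache aliases PG_private_2 */
		wait_on_page_bit(page, PG_fscache);
}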
@@ -726,6 +762,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 	struct ceph_writeback_ctl ceph_wbc;
 	bool should_loop, range_whole = false;
 	bool done = false;
+	bool caching = ceph_is_cache_enabled(inode);
 
 	dout("writepages_start %p (mode=%s)\n", inode,
 	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
@@ -849,14 +886,15 @@ static int ceph_writepages_start(struct address_space *mapping,
 				unlock_page(page);
 				break;
 			}
-			if (PageWriteback(page)) {
+			if (PageWriteback(page) || PageFsCache(page)) {
 				if (wbc->sync_mode == WB_SYNC_NONE) {
 					dout("%p under writeback\n", page);
 					unlock_page(page);
 					continue;
 				}
 				dout("waiting on writeback %p\n", page);
 				wait_on_page_writeback(page);
+				wait_on_page_fscache(page);
 			}
 
 			if (!clear_page_dirty_for_io(page)) {
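[Annotation, not part of the diff] The writepages scan now treats a page under a cache write like one under ordinary writeback: WB_SYNC_NONE skips it, while data-integrity writeback waits for both PG_writeback and PG_fscache before reusing it.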
@@ -989,9 +1027,19 @@ static int ceph_writepages_start(struct address_space *mapping,
 		op_idx = 0;
 		for (i = 0; i < locked_pages; i++) {
 			u64 cur_offset = page_offset(pages[i]);
+			/*
+			 * Discontinuity in page range? Ceph can handle that by just passing
+			 * multiple extents in the write op.
+			 */
 			if (offset + len != cur_offset) {
+				/* If it's full, stop here */
 				if (op_idx + 1 == req->r_num_ops)
 					break;
+
+				/* Kick off an fscache write with what we have so far. */
+				ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+				/* Start a new extent */
 				osd_req_op_extent_dup_last(req, op_idx,
 							   cur_offset - offset);
 				dout("writepages got pages at %llu~%llu\n",
@@ -1002,14 +1050,17 @@ static int ceph_writepages_start(struct address_space *mapping,
 				osd_req_op_extent_update(req, op_idx, len);
 
 				len = 0;
-				offset = cur_offset; 
+				offset = cur_offset;
 				data_pages = pages + i;
 				op_idx++;
 			}
 
 			set_page_writeback(pages[i]);
+			if (caching)
+				ceph_set_page_fscache(pages[i]);
 			len += thp_size(page);
 		}
+		ceph_fscache_write_to_cache(inode, offset, len, caching);
 
 		if (ceph_wbc.size_stable) {
 			len = min(len, ceph_wbc.i_size - offset);
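[Annotation, not part of the diff] In the batched path the cache write is issued once per contiguous extent: at each discontinuity, just before osd_req_op_extent_dup_last() opens a new extent, and once more after the loop for the final extent, so fscache is given the same extents that make up the OSD write op. Each page in the batch gets PG_fscache (when caching is enabled) at the same point it gets PG_writeback.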