 #include <linux/netfs.h>
 #include "internal.h"
 
+static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
+			       loff_t i_size, bool caching);
+
+#ifdef CONFIG_AFS_FSCACHE
 /*
- * mark a page as having been made dirty and thus needing writeback
+ * Mark a page as having been made dirty and thus needing writeback.  We also
+ * need to pin the cache object to write back to.
  */
 int afs_set_page_dirty(struct page *page)
 {
-	_enter("");
-	return __set_page_dirty_nobuffers(page);
+	return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
+}
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
+	if (caching)
+		folio_start_fscache(folio);
+}
+#else
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
 }
+#endif
 
 /*
  * prepare to perform part of a write to a page
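
The hunk above gives afs_folio_start_fscache() two definitions: a real body when CONFIG_AFS_FSCACHE is set and an empty stub otherwise, so the call sites added later in this patch never need an #ifdef of their own. Below is a minimal userspace sketch of that compile-out-stub pattern; WITH_CACHE, cache_start_write() and struct item are invented for the illustration and are not kernel names.

#include <stdio.h>
#include <stdbool.h>

struct item { int id; };

#ifdef WITH_CACHE
/* Real helper: only touches the cache when caching is enabled. */
static void cache_start_write(bool caching, struct item *it)
{
	if (caching)
		printf("start cache write for item %d\n", it->id);
}
#else
/* Stub: compiles to nothing, so callers need no #ifdef of their own. */
static void cache_start_write(bool caching, struct item *it)
{
	(void)caching;
	(void)it;
}
#endif

int main(void)
{
	struct item it = { .id = 1 };

	/* The call site is identical however the build is configured. */
	cache_start_write(true, &it);
	return 0;
}

Built with -DWITH_CACHE the call does the work; built without it the same call collapses into an empty function, which is why the AFS writeback paths below can call afs_folio_start_fscache() unconditionally.
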
@@ -113,7 +127,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 	unsigned long priv;
 	unsigned int f, from = offset_in_folio(folio, pos);
 	unsigned int t, to = from + copied;
-	loff_t i_size, maybe_i_size;
+	loff_t i_size, write_end_pos;
 
 	_enter("{%llx:%llu},{%lx}",
 	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
@@ -130,15 +144,16 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 	if (copied == 0)
 		goto out;
 
-	maybe_i_size = pos + copied;
+	write_end_pos = pos + copied;
 
 	i_size = i_size_read(&vnode->vfs_inode);
-	if (maybe_i_size > i_size) {
+	if (write_end_pos > i_size) {
 		write_seqlock(&vnode->cb_lock);
 		i_size = i_size_read(&vnode->vfs_inode);
-		if (maybe_i_size > i_size)
-			afs_set_i_size(vnode, maybe_i_size);
+		if (write_end_pos > i_size)
+			afs_set_i_size(vnode, write_end_pos);
 		write_sequnlock(&vnode->cb_lock);
+		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
 	}
 
 	if (folio_test_private(folio)) {
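
In the afs_write_end() hunk above, i_size is read once without cb_lock, and only if the write would extend the file is the lock taken and the size re-checked before afs_set_i_size() commits it. A small sketch of that optimistic check-then-recheck idiom, assuming nothing beyond POSIX threads; file_size, size_lock and advance_size() are invented stand-ins for i_size, cb_lock and the update path.

#include <pthread.h>
#include <stdio.h>

static long long file_size;	/* models the inode size */
static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;	/* models cb_lock */

/* Grow file_size to write_end_pos if that is larger; never shrink it. */
static void advance_size(long long write_end_pos)
{
	/* Cheap unlocked test first: most writes do not extend the file. */
	if (write_end_pos <= file_size)
		return;

	pthread_mutex_lock(&size_lock);
	/* Re-check under the lock: a concurrent writer may have extended it. */
	if (write_end_pos > file_size)
		file_size = write_end_pos;
	pthread_mutex_unlock(&size_lock);
}

int main(void)
{
	advance_size(4096);
	advance_size(1024);	/* no effect: the file is already larger */
	printf("size = %lld\n", file_size);
	return 0;
}

The unlocked first test keeps the common case off the lock, and the second test under the lock makes the update safe against a racing extender, which is why afs_write_end() reads i_size twice.
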
@@ -417,6 +432,7 @@ static void afs_extend_writeback(struct address_space *mapping,
 				 loff_t start,
 				 loff_t max_len,
 				 bool new_content,
+				 bool caching,
 				 unsigned int *_len)
 {
 	struct pagevec pvec;
@@ -463,7 +479,9 @@ static void afs_extend_writeback(struct address_space *mapping,
463
479
folio_put (folio );
464
480
break ;
465
481
}
466
- if (!folio_test_dirty (folio ) || folio_test_writeback (folio )) {
482
+ if (!folio_test_dirty (folio ) ||
483
+ folio_test_writeback (folio ) ||
484
+ folio_test_fscache (folio )) {
467
485
folio_unlock (folio );
468
486
folio_put (folio );
469
487
break ;
@@ -511,6 +529,7 @@ static void afs_extend_writeback(struct address_space *mapping,
 				BUG();
 			if (folio_start_writeback(folio))
 				BUG();
+			afs_folio_start_fscache(caching, folio);
 
 			*_count -= folio_nr_pages(folio);
 			folio_unlock(folio);
@@ -538,13 +557,15 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	unsigned int offset, to, len, max_len;
 	loff_t i_size = i_size_read(&vnode->vfs_inode);
 	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
 	long count = wbc->nr_to_write;
 	int ret;
 
 	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);
 
 	if (folio_start_writeback(folio))
 		BUG();
+	afs_folio_start_fscache(caching, folio);
 
 	count -= folio_nr_pages(folio);
 
@@ -571,7 +592,8 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 		if (len < max_len &&
 		    (to == folio_size(folio) || new_content))
 			afs_extend_writeback(mapping, vnode, &count,
-					     start, max_len, new_content, &len);
+					     start, max_len, new_content,
+					     caching, &len);
 		len = min_t(loff_t, len, max_len);
 	}
 
@@ -584,12 +606,19 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	if (start < i_size) {
 		_debug("write back %x @%llx [%llx]", len, start, i_size);
 
+		/* Speculatively write to the cache.  We have to fix this up
+		 * later if the store fails.
+		 */
+		afs_write_to_cache(vnode, start, len, i_size, caching);
+
 		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
 		ret = afs_store_data(vnode, &iter, start, false);
 	} else {
 		_debug("write discard %x @%llx [%llx]", len, start, i_size);
 
 		/* The dirty region was entirely beyond the EOF. */
+		fscache_clear_page_bits(afs_vnode_cache(vnode),
+					mapping, start, len, caching);
 		afs_pages_written_back(vnode, start, len);
 		ret = 0;
 	}
@@ -648,6 +677,10 @@ int afs_writepage(struct page *subpage, struct writeback_control *wbc)
 
 	_enter("{%lx},", folio_index(folio));
 
+#ifdef CONFIG_AFS_FSCACHE
+	folio_wait_fscache(folio);
+#endif
+
 	start = folio_index(folio) * PAGE_SIZE;
 	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
 					       folio, start, LLONG_MAX - start);
@@ -713,10 +746,15 @@ static int afs_writepages_region(struct address_space *mapping,
 			continue;
 		}
 
-		if (folio_test_writeback(folio)) {
+		if (folio_test_writeback(folio) ||
+		    folio_test_fscache(folio)) {
 			folio_unlock(folio);
-			if (wbc->sync_mode != WB_SYNC_NONE)
+			if (wbc->sync_mode != WB_SYNC_NONE) {
 				folio_wait_writeback(folio);
+#ifdef CONFIG_AFS_FSCACHE
+				folio_wait_fscache(folio);
+#endif
+			}
 			folio_put(folio);
 			continue;
 		}
@@ -969,3 +1007,28 @@ int afs_launder_page(struct page *subpage)
 	folio_wait_fscache(folio);
 	return ret;
 }
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+				    bool was_async)
+{
+	struct afs_vnode *vnode = priv;
+
+	if (IS_ERR_VALUE(transferred_or_error) &&
+	    transferred_or_error != -ENOBUFS)
+		afs_invalidate_cache(vnode, 0);
+}
+
+/*
+ * Save the write to the cache also.
+ */
+static void afs_write_to_cache(struct afs_vnode *vnode,
+			       loff_t start, size_t len, loff_t i_size,
+			       bool caching)
+{
+	fscache_write_to_cache(afs_vnode_cache(vnode),
+			       vnode->vfs_inode.i_mapping, start, len, i_size,
+			       afs_write_to_cache_done, vnode, caching);
+}
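
afs_write_to_cache_done() above follows the convention that one ssize_t carries either a byte count or a negative errno, and only errors other than -ENOBUFS (the cache was not available, so nothing stale was stored) lead to invalidating the cached copy. A self-contained userspace sketch of that convention; is_err_value() is a hand-rolled stand-in for the kernel's IS_ERR_VALUE() and write_done() is an invented analogue of the completion handler, not AFS code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

#define MAX_ERRNO 4095

/* Treat the top 4095 values of the unsigned range as negative error codes. */
static bool is_err_value(ssize_t x)
{
	return (unsigned long)x >= (unsigned long)-MAX_ERRNO;
}

/* Completion handler in the style of afs_write_to_cache_done(). */
static void write_done(void *priv, ssize_t transferred_or_error)
{
	const char *name = priv;

	if (is_err_value(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		printf("%s: error %zd, would invalidate the cached copy\n",
		       name, transferred_or_error);
	else
		printf("%s: ok (%zd)\n", name, transferred_or_error);
}

int main(void)
{
	char tag[] = "vnode";

	write_done(tag, 8192);		/* success: byte count */
	write_done(tag, -ENOBUFS);	/* cache unavailable: tolerated */
	write_done(tag, -EIO);		/* real failure: invalidate */
	return 0;
}

Tolerating -ENOBUFS matches the write-back path earlier in the patch: if the cache never accepted the data, there is no stale copy to invalidate.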