@@ -14,6 +14,11 @@
 #include <linux/netfs.h>
 #include "internal.h"
 
+static int afs_writepages_region(struct address_space *mapping,
+				 struct writeback_control *wbc,
+				 loff_t start, loff_t end, loff_t *_next,
+				 bool max_one_loop);
+
 static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
 			       loff_t i_size, bool caching);
 
@@ -38,6 +43,25 @@ static void afs_folio_start_fscache(bool caching, struct folio *folio)
 }
 #endif
 
+/*
+ * Flush out a conflicting write.  This may extend the write to the
+ * surrounding pages if they are also dirty and contiguous to the
+ * conflicting region.
+ */
+static int afs_flush_conflicting_write(struct address_space *mapping,
+				       struct folio *folio)
+{
+	struct writeback_control wbc = {
+		.sync_mode	= WB_SYNC_ALL,
+		.nr_to_write	= LONG_MAX,
+		.range_start	= folio_pos(folio),
+		.range_end	= LLONG_MAX,
+	};
+	loff_t next;
+
+	return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
+				     &next, true);
+}
+
 /*
  * prepare to perform part of a write to a page
  */
@@ -80,7 +104,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 
 	if (folio_test_writeback(folio)) {
 		trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
-		goto flush_conflicting_write;
+		folio_unlock(folio);
+		goto wait_for_writeback;
 	}
 	/* If the file is being filled locally, allow inter-write
 	 * spaces to be merged into writes.  If it's not, only write
@@ -99,8 +124,15 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 	 * flush the page out.
 	 */
 flush_conflicting_write:
-	_debug("flush conflict");
-	ret = folio_write_one(folio);
+	trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
+	folio_unlock(folio);
+
+	ret = afs_flush_conflicting_write(mapping, folio);
+	if (ret < 0)
+		goto error;
+
+wait_for_writeback:
+	ret = folio_wait_writeback_killable(folio);
 	if (ret < 0)
 		goto error;
 
@@ -663,40 +695,13 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	return ret;
 }
 
-/*
- * write a page back to the server
- * - the caller locked the page for us
- */
-int afs_writepage(struct page *subpage, struct writeback_control *wbc)
-{
-	struct folio *folio = page_folio(subpage);
-	ssize_t ret;
-	loff_t start;
-
-	_enter("{%lx},", folio_index(folio));
-
-#ifdef CONFIG_AFS_FSCACHE
-	folio_wait_fscache(folio);
-#endif
-
-	start = folio_index(folio) * PAGE_SIZE;
-	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
-					       folio, start, LLONG_MAX - start);
-	if (ret < 0) {
-		_leave(" = %zd", ret);
-		return ret;
-	}
-
-	_leave(" = 0");
-	return 0;
-}
-
 /*
  * write a region of pages back to the server
  */
 static int afs_writepages_region(struct address_space *mapping,
 				 struct writeback_control *wbc,
-				 loff_t start, loff_t end, loff_t *_next)
+				 loff_t start, loff_t end, loff_t *_next,
+				 bool max_one_loop)
 {
 	struct folio *folio;
 	struct page *head_page;
@@ -775,6 +780,9 @@ static int afs_writepages_region(struct address_space *mapping,
 
 		start += ret;
 
+		if (max_one_loop)
+			break;
+
 		cond_resched();
 	} while (wbc->nr_to_write > 0);
 
@@ -806,24 +814,27 @@ int afs_writepages(struct address_space *mapping,
 
 	if (wbc->range_cyclic) {
 		start = mapping->writeback_index * PAGE_SIZE;
-		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
+		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
+					    &next, false);
 		if (ret == 0) {
 			mapping->writeback_index = next / PAGE_SIZE;
 			if (start > 0 && wbc->nr_to_write > 0) {
 				ret = afs_writepages_region(mapping, wbc, 0,
-							    start, &next);
+							    start, &next, false);
 				if (ret == 0)
 					mapping->writeback_index =
 						next / PAGE_SIZE;
 			}
 		}
 	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
-		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
+		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
+					    &next, false);
 		if (wbc->nr_to_write > 0 && ret == 0)
 			mapping->writeback_index = next / PAGE_SIZE;
 	} else {
 		ret = afs_writepages_region(mapping, wbc,
-					    wbc->range_start, wbc->range_end, &next);
+					    wbc->range_start, wbc->range_end,
+					    &next, false);
 	}
 
 	up_read(&vnode->validate_lock);
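For contrast, here is a minimal sketch of what the conflict flush would look like if it leaned on the generic range-writeback helper instead of afs_writepages_region(). This is an illustration only, not part of the patch: afs_flush_conflicting_write_generic() is a hypothetical name, and the assumption is simply that filemap_fdatawrite_range(), folio_pos() and folio_size() behave as in mainline.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical alternative, for illustration only: push out just the
 * range covered by the conflicting folio via the generic helper.  This
 * gives WB_SYNC_ALL data writeback for that range, but offers no
 * single-pass cap and no chance to widen the write to surrounding
 * dirty folios.
 */
static int afs_flush_conflicting_write_generic(struct address_space *mapping,
					       struct folio *folio)
{
	loff_t start = folio_pos(folio);

	return filemap_fdatawrite_range(mapping, start,
					start + folio_size(folio) - 1);
}

The patch instead reuses afs_writepages_region() with max_one_loop set, so the flush stops after a single batch while still being able to absorb adjacent dirty, contiguous folios into that one write.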