Skip to content

Commit a9eb558

Browse files
committed
afs: Stop implementing ->writepage()
We're trying to get rid of the ->writepage() hook[1]. Stop afs from using it by unlocking the page and calling afs_writepages_region() rather than folio_write_one(). A flag is passed to afs_writepages_region() to indicate that it should only write a single region so that we don't flush the entire file in ->write_begin(), but do add other dirty data to the region being written to try and reduce the number of RPC ops. This requires ->migrate_folio() to be implemented, so point that at filemap_migrate_folio() for files and also for symlinks and directories. This can be tested by turning on the afs_folio_dirty tracepoint and then doing something like: xfs_io -c "w 2223 7000" -c "w 15000 22222" -c "w 23 7" /afs/my/test/foo and then looking in the trace to see if the write at position 15000 gets stored before page 0 gets dirtied for the write at position 23. Signed-off-by: David Howells <[email protected]> cc: Marc Dionne <[email protected]> cc: Christoph Hellwig <[email protected]> cc: Matthew Wilcox <[email protected]> cc: [email protected] Link: https://lore.kernel.org/r/[email protected]/ [1] Link: https://lore.kernel.org/r/166876785552.222254.4403222906022558715.stgit@warthog.procyon.org.uk/ # v1
1 parent b3d3ca5 commit a9eb558

File tree

3 files changed

+50
-37
lines changed

3 files changed

+50
-37
lines changed

fs/afs/dir.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,7 @@ const struct address_space_operations afs_dir_aops = {
7777
.dirty_folio = afs_dir_dirty_folio,
7878
.release_folio = afs_dir_release_folio,
7979
.invalidate_folio = afs_dir_invalidate_folio,
80+
.migrate_folio = filemap_migrate_folio,
8081
};
8182

8283
const struct dentry_operations afs_fs_dentry_operations = {

fs/afs/file.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,14 +58,15 @@ const struct address_space_operations afs_file_aops = {
5858
.invalidate_folio = afs_invalidate_folio,
5959
.write_begin = afs_write_begin,
6060
.write_end = afs_write_end,
61-
.writepage = afs_writepage,
6261
.writepages = afs_writepages,
62+
.migrate_folio = filemap_migrate_folio,
6363
};
6464

6565
const struct address_space_operations afs_symlink_aops = {
6666
.read_folio = afs_symlink_read_folio,
6767
.release_folio = afs_release_folio,
6868
.invalidate_folio = afs_invalidate_folio,
69+
.migrate_folio = filemap_migrate_folio,
6970
};
7071

7172
static const struct vm_operations_struct afs_vm_ops = {

fs/afs/write.c

Lines changed: 47 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,11 @@
1414
#include <linux/netfs.h>
1515
#include "internal.h"
1616

17+
static int afs_writepages_region(struct address_space *mapping,
18+
struct writeback_control *wbc,
19+
loff_t start, loff_t end, loff_t *_next,
20+
bool max_one_loop);
21+
1722
static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
1823
loff_t i_size, bool caching);
1924

@@ -38,6 +43,25 @@ static void afs_folio_start_fscache(bool caching, struct folio *folio)
3843
}
3944
#endif
4045

46+
/*
47+
* Flush out a conflicting write. This may extend the write to the surrounding
48+
* pages if also dirty and contiguous to the conflicting region.
49+
*/
50+
static int afs_flush_conflicting_write(struct address_space *mapping,
51+
struct folio *folio)
52+
{
53+
struct writeback_control wbc = {
54+
.sync_mode = WB_SYNC_ALL,
55+
.nr_to_write = LONG_MAX,
56+
.range_start = folio_pos(folio),
57+
.range_end = LLONG_MAX,
58+
};
59+
loff_t next;
60+
61+
return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
62+
&next, true);
63+
}
64+
4165
/*
4266
* prepare to perform part of a write to a page
4367
*/
@@ -80,7 +104,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
80104

81105
if (folio_test_writeback(folio)) {
82106
trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
83-
goto flush_conflicting_write;
107+
folio_unlock(folio);
108+
goto wait_for_writeback;
84109
}
85110
/* If the file is being filled locally, allow inter-write
86111
* spaces to be merged into writes. If it's not, only write
@@ -99,8 +124,15 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
99124
* flush the page out.
100125
*/
101126
flush_conflicting_write:
102-
_debug("flush conflict");
103-
ret = folio_write_one(folio);
127+
trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
128+
folio_unlock(folio);
129+
130+
ret = afs_flush_conflicting_write(mapping, folio);
131+
if (ret < 0)
132+
goto error;
133+
134+
wait_for_writeback:
135+
ret = folio_wait_writeback_killable(folio);
104136
if (ret < 0)
105137
goto error;
106138

@@ -663,40 +695,13 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
663695
return ret;
664696
}
665697

666-
/*
667-
* write a page back to the server
668-
* - the caller locked the page for us
669-
*/
670-
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
671-
{
672-
struct folio *folio = page_folio(subpage);
673-
ssize_t ret;
674-
loff_t start;
675-
676-
_enter("{%lx},", folio_index(folio));
677-
678-
#ifdef CONFIG_AFS_FSCACHE
679-
folio_wait_fscache(folio);
680-
#endif
681-
682-
start = folio_index(folio) * PAGE_SIZE;
683-
ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
684-
folio, start, LLONG_MAX - start);
685-
if (ret < 0) {
686-
_leave(" = %zd", ret);
687-
return ret;
688-
}
689-
690-
_leave(" = 0");
691-
return 0;
692-
}
693-
694698
/*
695699
* write a region of pages back to the server
696700
*/
697701
static int afs_writepages_region(struct address_space *mapping,
698702
struct writeback_control *wbc,
699-
loff_t start, loff_t end, loff_t *_next)
703+
loff_t start, loff_t end, loff_t *_next,
704+
bool max_one_loop)
700705
{
701706
struct folio *folio;
702707
struct page *head_page;
@@ -775,6 +780,9 @@ static int afs_writepages_region(struct address_space *mapping,
775780

776781
start += ret;
777782

783+
if (max_one_loop)
784+
break;
785+
778786
cond_resched();
779787
} while (wbc->nr_to_write > 0);
780788

@@ -806,24 +814,27 @@ int afs_writepages(struct address_space *mapping,
806814

807815
if (wbc->range_cyclic) {
808816
start = mapping->writeback_index * PAGE_SIZE;
809-
ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
817+
ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
818+
&next, false);
810819
if (ret == 0) {
811820
mapping->writeback_index = next / PAGE_SIZE;
812821
if (start > 0 && wbc->nr_to_write > 0) {
813822
ret = afs_writepages_region(mapping, wbc, 0,
814-
start, &next);
823+
start, &next, false);
815824
if (ret == 0)
816825
mapping->writeback_index =
817826
next / PAGE_SIZE;
818827
}
819828
}
820829
} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
821-
ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
830+
ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
831+
&next, false);
822832
if (wbc->nr_to_write > 0 && ret == 0)
823833
mapping->writeback_index = next / PAGE_SIZE;
824834
} else {
825835
ret = afs_writepages_region(mapping, wbc,
826-
wbc->range_start, wbc->range_end, &next);
836+
wbc->range_start, wbc->range_end,
837+
&next, false);
827838
}
828839

829840
up_read(&vnode->validate_lock);

0 commit comments

Comments
 (0)