Skip to content

Commit 3003bbd

Browse files
committed
afs: Use the netfs_write_begin() helper
Make AFS use the new netfs_write_begin() helper to do the pre-reading
required before the write.  If successful, the helper returns with the
required page filled in and locked.  It may read more than just one page,
expanding the read to meet cache granularity requirements as necessary.

Note: A more advanced version of this could be made that does
generic_perform_write() for a whole cache granule.  This would make it
easier to avoid doing the download/read for the data to be overwritten.

Signed-off-by: David Howells <[email protected]>
Tested-By: Marc Dionne <[email protected]>
cc: [email protected]
cc: [email protected]
cc: [email protected]
Link: https://lore.kernel.org/r/160588546422.3465195.1546354372589291098.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161539563244.286939.16537296241609909980.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653819291.2770958.406013201547420544.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789102743.6155.17396591236631761195.stgit@warthog.procyon.org.uk/ # v6
1 parent 5cbf039 commit 3003bbd

File tree

3 files changed

+31
-97
lines changed

3 files changed

+31
-97
lines changed

fs/afs/file.c

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -333,21 +333,38 @@ static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file)
333333
rreq->netfs_priv = key_get(afs_file_key(file));
334334
}
335335

336+
static bool afs_is_cache_enabled(struct inode *inode)
337+
{
338+
struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
339+
340+
return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
341+
}
342+
336343
static int afs_begin_cache_operation(struct netfs_read_request *rreq)
337344
{
338345
struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
339346

340347
return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode));
341348
}
342349

350+
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
351+
struct page *page, void **_fsdata)
352+
{
353+
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
354+
355+
return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
356+
}
357+
343358
static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
344359
{
345360
key_put(netfs_priv);
346361
}
347362

348-
static const struct netfs_read_request_ops afs_req_ops = {
363+
const struct netfs_read_request_ops afs_req_ops = {
349364
.init_rreq = afs_init_rreq,
365+
.is_cache_enabled = afs_is_cache_enabled,
350366
.begin_cache_operation = afs_begin_cache_operation,
367+
.check_write_begin = afs_check_write_begin,
351368
.issue_op = afs_req_issue_op,
352369
.cleanup = afs_priv_cleanup,
353370
};

fs/afs/internal.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1045,6 +1045,7 @@ extern void afs_dynroot_depopulate(struct super_block *);
10451045
extern const struct address_space_operations afs_fs_aops;
10461046
extern const struct inode_operations afs_file_inode_operations;
10471047
extern const struct file_operations afs_file_operations;
1048+
extern const struct netfs_read_request_ops afs_req_ops;
10481049

10491050
extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
10501051
extern void afs_put_wb_key(struct afs_wb_key *);

fs/afs/write.c

Lines changed: 12 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,8 @@
1111
#include <linux/pagemap.h>
1212
#include <linux/writeback.h>
1313
#include <linux/pagevec.h>
14+
#include <linux/netfs.h>
15+
#include <linux/fscache.h>
1416
#include "internal.h"
1517

1618
/*
@@ -22,68 +24,6 @@ int afs_set_page_dirty(struct page *page)
2224
return __set_page_dirty_nobuffers(page);
2325
}
2426

25-
/*
26-
* Handle completion of a read operation to fill a page.
27-
*/
28-
static void afs_fill_hole(struct afs_read *req)
29-
{
30-
if (iov_iter_count(req->iter) > 0)
31-
/* The read was short - clear the excess buffer. */
32-
iov_iter_zero(iov_iter_count(req->iter), req->iter);
33-
}
34-
35-
/*
36-
* partly or wholly fill a page that's under preparation for writing
37-
*/
38-
static int afs_fill_page(struct file *file,
39-
loff_t pos, unsigned int len, struct page *page)
40-
{
41-
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
42-
struct afs_read *req;
43-
size_t p;
44-
void *data;
45-
int ret;
46-
47-
_enter(",,%llu", (unsigned long long)pos);
48-
49-
if (pos >= vnode->vfs_inode.i_size) {
50-
p = pos & ~PAGE_MASK;
51-
ASSERTCMP(p + len, <=, PAGE_SIZE);
52-
data = kmap(page);
53-
memset(data + p, 0, len);
54-
kunmap(page);
55-
return 0;
56-
}
57-
58-
req = kzalloc(sizeof(struct afs_read), GFP_KERNEL);
59-
if (!req)
60-
return -ENOMEM;
61-
62-
refcount_set(&req->usage, 1);
63-
req->vnode = vnode;
64-
req->done = afs_fill_hole;
65-
req->key = key_get(afs_file_key(file));
66-
req->pos = pos;
67-
req->len = len;
68-
req->nr_pages = 1;
69-
req->iter = &req->def_iter;
70-
iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len);
71-
72-
ret = afs_fetch_data(vnode, req);
73-
afs_put_read(req);
74-
if (ret < 0) {
75-
if (ret == -ENOENT) {
76-
_debug("got NOENT from server"
77-
" - marking file deleted and stale");
78-
set_bit(AFS_VNODE_DELETED, &vnode->flags);
79-
ret = -ESTALE;
80-
}
81-
}
82-
83-
_leave(" = %d", ret);
84-
return ret;
85-
}
86-
8727
/*
8828
* prepare to perform part of a write to a page
8929
*/
@@ -102,24 +42,14 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
10242
_enter("{%llx:%llu},%llx,%x",
10343
vnode->fid.vid, vnode->fid.vnode, pos, len);
10444

105-
page = grab_cache_page_write_begin(mapping, pos / PAGE_SIZE, flags);
106-
if (!page)
107-
return -ENOMEM;
108-
109-
if (!PageUptodate(page) && len != PAGE_SIZE) {
110-
ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page);
111-
if (ret < 0) {
112-
unlock_page(page);
113-
put_page(page);
114-
_leave(" = %d [prep]", ret);
115-
return ret;
116-
}
117-
SetPageUptodate(page);
118-
}
119-
120-
#ifdef CONFIG_AFS_FSCACHE
121-
wait_on_page_fscache(page);
122-
#endif
45+
/* Prefetch area to be written into the cache if we're caching this
46+
* file. We need to do this before we get a lock on the page in case
47+
* there's more than one writer competing for the same cache block.
48+
*/
49+
ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
50+
&afs_req_ops, NULL);
51+
if (ret < 0)
52+
return ret;
12353

12454
index = page->index;
12555
from = pos - index * PAGE_SIZE;
@@ -184,7 +114,6 @@ int afs_write_end(struct file *file, struct address_space *mapping,
184114
unsigned int f, from = pos & (thp_size(page) - 1);
185115
unsigned int t, to = from + copied;
186116
loff_t i_size, maybe_i_size;
187-
int ret = 0;
188117

189118
_enter("{%llx:%llu},{%lx}",
190119
vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -203,19 +132,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
203132
write_sequnlock(&vnode->cb_lock);
204133
}
205134

206-
if (!PageUptodate(page)) {
207-
if (copied < len) {
208-
/* Try and load any missing data from the server. The
209-
* unmarshalling routine will take care of clearing any
210-
* bits that are beyond the EOF.
211-
*/
212-
ret = afs_fill_page(file, pos + copied,
213-
len - copied, page);
214-
if (ret < 0)
215-
goto out;
216-
}
217-
SetPageUptodate(page);
218-
}
135+
ASSERT(PageUptodate(page));
219136

220137
if (PagePrivate(page)) {
221138
priv = page_private(page);
@@ -236,12 +153,11 @@ int afs_write_end(struct file *file, struct address_space *mapping,
236153

237154
if (set_page_dirty(page))
238155
_debug("dirtied %lx", page->index);
239-
ret = copied;
240156

241157
out:
242158
unlock_page(page);
243159
put_page(page);
244-
return ret;
160+
return copied;
245161
}
246162

247163
/*

0 commit comments

Comments (0)