Skip to content

Commit 052e04a

Browse files
dhowells authored and smfrench committed
cifs: Transition from ->readpages() to ->readahead()
Transition the cifs filesystem from using the old ->readpages() method to using the new ->readahead() method. For the moment, this removes any invocation of fscache to read data from the local cache, leaving that to another patch. Signed-off-by: David Howells <[email protected]> cc: Steve French <[email protected]> cc: Shyam Prasad N <[email protected]> cc: Matthew Wilcox <[email protected]> cc: Jeff Layton <[email protected]> cc: [email protected] cc: [email protected] Reviewed-by: Rohith Surabattula <[email protected]> Acked-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]>
1 parent 489f710 commit 052e04a

File tree

1 file changed

+35
-137
lines changed

1 file changed

+35
-137
lines changed

fs/cifs/file.c

Lines changed: 35 additions & 137 deletions
Original file line number | Diff line number | Diff line change
@@ -4269,8 +4269,6 @@ cifs_readv_complete(struct work_struct *work)
42694269
for (i = 0; i < rdata->nr_pages; i++) {
42704270
struct page *page = rdata->pages[i];
42714271

4272-
lru_cache_add(page);
4273-
42744272
if (rdata->result == 0 ||
42754273
(rdata->result == -EAGAIN && got_bytes)) {
42764274
flush_dcache_page(page);
@@ -4340,7 +4338,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
43404338
* fill them until the writes are flushed.
43414339
*/
43424340
zero_user(page, 0, PAGE_SIZE);
4343-
lru_cache_add(page);
43444341
flush_dcache_page(page);
43454342
SetPageUptodate(page);
43464343
unlock_page(page);
@@ -4350,7 +4347,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
43504347
continue;
43514348
} else {
43524349
/* no need to hold page hostage */
4353-
lru_cache_add(page);
43544350
unlock_page(page);
43554351
put_page(page);
43564352
rdata->pages[i] = NULL;
@@ -4393,92 +4389,16 @@ cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
43934389
return readpages_fill_pages(server, rdata, iter, iter->count);
43944390
}
43954391

4396-
static int
4397-
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4398-
unsigned int rsize, struct list_head *tmplist,
4399-
unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4392+
static void cifs_readahead(struct readahead_control *ractl)
44004393
{
4401-
struct page *page, *tpage;
4402-
unsigned int expected_index;
44034394
int rc;
4404-
gfp_t gfp = readahead_gfp_mask(mapping);
4405-
4406-
INIT_LIST_HEAD(tmplist);
4407-
4408-
page = lru_to_page(page_list);
4409-
4410-
/*
4411-
* Lock the page and put it in the cache. Since no one else
4412-
* should have access to this page, we're safe to simply set
4413-
* PG_locked without checking it first.
4414-
*/
4415-
__SetPageLocked(page);
4416-
rc = add_to_page_cache_locked(page, mapping,
4417-
page->index, gfp);
4418-
4419-
/* give up if we can't stick it in the cache */
4420-
if (rc) {
4421-
__ClearPageLocked(page);
4422-
return rc;
4423-
}
4424-
4425-
/* move first page to the tmplist */
4426-
*offset = (loff_t)page->index << PAGE_SHIFT;
4427-
*bytes = PAGE_SIZE;
4428-
*nr_pages = 1;
4429-
list_move_tail(&page->lru, tmplist);
4430-
4431-
/* now try and add more pages onto the request */
4432-
expected_index = page->index + 1;
4433-
list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4434-
/* discontinuity ? */
4435-
if (page->index != expected_index)
4436-
break;
4437-
4438-
/* would this page push the read over the rsize? */
4439-
if (*bytes + PAGE_SIZE > rsize)
4440-
break;
4441-
4442-
__SetPageLocked(page);
4443-
rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
4444-
if (rc) {
4445-
__ClearPageLocked(page);
4446-
break;
4447-
}
4448-
list_move_tail(&page->lru, tmplist);
4449-
(*bytes) += PAGE_SIZE;
4450-
expected_index++;
4451-
(*nr_pages)++;
4452-
}
4453-
return rc;
4454-
}
4455-
4456-
static int cifs_readpages(struct file *file, struct address_space *mapping,
4457-
struct list_head *page_list, unsigned num_pages)
4458-
{
4459-
int rc;
4460-
int err = 0;
4461-
struct list_head tmplist;
4462-
struct cifsFileInfo *open_file = file->private_data;
4463-
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
4395+
struct cifsFileInfo *open_file = ractl->file->private_data;
4396+
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
44644397
struct TCP_Server_Info *server;
44654398
pid_t pid;
4466-
unsigned int xid;
4399+
unsigned int xid, last_batch_size = 0;
44674400

44684401
xid = get_xid();
4469-
/*
4470-
* Reads as many pages as possible from fscache. Returns -ENOBUFS
4471-
* immediately if the cookie is negative
4472-
*
4473-
* After this point, every page in the list might have PG_fscache set,
4474-
* so we will need to clean that up off of every page we don't use.
4475-
*/
4476-
rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4477-
&num_pages);
4478-
if (rc == 0) {
4479-
free_xid(xid);
4480-
return rc;
4481-
}
44824402

44834403
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
44844404
pid = open_file->pid;
@@ -4489,93 +4409,72 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
44894409
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
44904410

44914411
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4492-
__func__, file, mapping, num_pages);
4412+
__func__, ractl->file, ractl->mapping, readahead_count(ractl));
44934413

44944414
/*
4495-
* Start with the page at end of list and move it to private
4496-
* list. Do the same with any following pages until we hit
4497-
* the rsize limit, hit an index discontinuity, or run out of
4498-
* pages. Issue the async read and then start the loop again
4499-
* until the list is empty.
4500-
*
4501-
* Note that list order is important. The page_list is in
4502-
* the order of declining indexes. When we put the pages in
4503-
* the rdata->pages, then we want them in increasing order.
4415+
* Chop the readahead request up into rsize-sized read requests.
45044416
*/
4505-
while (!list_empty(page_list) && !err) {
4506-
unsigned int i, nr_pages, bytes, rsize;
4507-
loff_t offset;
4508-
struct page *page, *tpage;
4417+
while (readahead_count(ractl) - last_batch_size) {
4418+
unsigned int i, nr_pages, got, rsize;
4419+
struct page *page;
45094420
struct cifs_readdata *rdata;
45104421
struct cifs_credits credits_on_stack;
45114422
struct cifs_credits *credits = &credits_on_stack;
45124423

45134424
if (open_file->invalidHandle) {
45144425
rc = cifs_reopen_file(open_file, true);
4515-
if (rc == -EAGAIN)
4516-
continue;
4517-
else if (rc)
4426+
if (rc) {
4427+
if (rc == -EAGAIN)
4428+
continue;
45184429
break;
4430+
}
45194431
}
45204432

45214433
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
45224434
&rsize, credits);
45234435
if (rc)
45244436
break;
4437+
nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
45254438

45264439
/*
45274440
* Give up immediately if rsize is too small to read an entire
45284441
* page. The VFS will fall back to readpage. We should never
45294442
* reach this point however since we set ra_pages to 0 when the
45304443
* rsize is smaller than a cache page.
45314444
*/
4532-
if (unlikely(rsize < PAGE_SIZE)) {
4533-
add_credits_and_wake_if(server, credits, 0);
4534-
free_xid(xid);
4535-
return 0;
4536-
}
4537-
4538-
nr_pages = 0;
4539-
err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4540-
&nr_pages, &offset, &bytes);
4541-
if (!nr_pages) {
4445+
if (unlikely(!nr_pages)) {
45424446
add_credits_and_wake_if(server, credits, 0);
45434447
break;
45444448
}
45454449

45464450
rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
45474451
if (!rdata) {
45484452
/* best to give up if we're out of mem */
4549-
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4550-
list_del(&page->lru);
4551-
lru_cache_add(page);
4552-
unlock_page(page);
4553-
put_page(page);
4554-
}
4555-
rc = -ENOMEM;
45564453
add_credits_and_wake_if(server, credits, 0);
45574454
break;
45584455
}
45594456

4560-
rdata->cfile = cifsFileInfo_get(open_file);
4561-
rdata->server = server;
4562-
rdata->mapping = mapping;
4563-
rdata->offset = offset;
4564-
rdata->bytes = bytes;
4565-
rdata->pid = pid;
4566-
rdata->pagesz = PAGE_SIZE;
4567-
rdata->tailsz = PAGE_SIZE;
4457+
got = __readahead_batch(ractl, rdata->pages, nr_pages);
4458+
if (got != nr_pages) {
4459+
pr_warn("__readahead_batch() returned %u/%u\n",
4460+
got, nr_pages);
4461+
nr_pages = got;
4462+
}
4463+
4464+
rdata->nr_pages = nr_pages;
4465+
rdata->bytes = readahead_batch_length(ractl);
4466+
rdata->cfile = cifsFileInfo_get(open_file);
4467+
rdata->server = server;
4468+
rdata->mapping = ractl->mapping;
4469+
rdata->offset = readahead_pos(ractl);
4470+
rdata->pid = pid;
4471+
rdata->pagesz = PAGE_SIZE;
4472+
rdata->tailsz = PAGE_SIZE;
45684473
rdata->read_into_pages = cifs_readpages_read_into_pages;
45694474
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
4570-
rdata->credits = credits_on_stack;
4571-
4572-
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4573-
list_del(&page->lru);
4574-
rdata->pages[rdata->nr_pages++] = page;
4575-
}
4475+
rdata->credits = credits_on_stack;
45764476

45774477
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4578-
45794478
if (!rc) {
45804479
if (rdata->cfile->invalidHandle)
45814480
rc = -EAGAIN;
@@ -4587,7 +4486,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
45874486
add_credits_and_wake_if(server, &rdata->credits, 0);
45884487
for (i = 0; i < rdata->nr_pages; i++) {
45894488
page = rdata->pages[i];
4590-
lru_cache_add(page);
45914489
unlock_page(page);
45924490
put_page(page);
45934491
}
@@ -4597,10 +4495,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
45974495
}
45984496

45994497
kref_put(&rdata->refcount, cifs_readdata_release);
4498+
last_batch_size = nr_pages;
46004499
}
46014500

46024501
free_xid(xid);
4603-
return rc;
46044502
}
46054503

46064504
/*
@@ -4924,7 +4822,7 @@ void cifs_oplock_break(struct work_struct *work)
49244822
* In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
49254823
* so this method should never be called.
49264824
*
4927-
* Direct IO is not yet supported in the cached mode.
4825+
* Direct IO is not yet supported in the cached mode.
49284826
*/
49294827
static ssize_t
49304828
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
@@ -5006,7 +4904,7 @@ static int cifs_set_page_dirty(struct page *page)
50064904

50074905
const struct address_space_operations cifs_addr_ops = {
50084906
.readpage = cifs_readpage,
5009-
.readpages = cifs_readpages,
4907+
.readahead = cifs_readahead,
50104908
.writepage = cifs_writepage,
50114909
.writepages = cifs_writepages,
50124910
.write_begin = cifs_write_begin,

0 commit comments

Comments
 (0)