Commit 44a65a0

Author/committer: Trond Myklebust

NFS: Reverse the submission order of requests in __nfs_pageio_add_request()

If we have to split the request up into subrequests, we have to submit the
request pointed to by the function call parameter last, in case there is an
error or other issue that causes us to exit before the last request is
submitted. The reason is that the caller is expected to perform cleanup in
those cases.

Signed-off-by: Trond Myklebust <[email protected]>
1 parent a62f8e3 commit 44a65a0
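
The ordering matters because of the error path: if __nfs_pageio_add_request() has to bail out partway through, the caller still owns the request it passed in and is expected to clean it up, which only works if that request has not already been consumed. A rough sketch of that contract with a hypothetical caller (the name submit_one and the nfs_release_request() cleanup are illustrative, not the actual upstream caller):

/* Hypothetical caller, for illustration only */
static int submit_one(struct nfs_pageio_descriptor *desc, struct nfs_page *req)
{
	if (!__nfs_pageio_add_request(desc, req)) {
		/*
		 * Failure: because @req is always submitted last, it is still
		 * a valid, unsubmitted request here, so the caller can release
		 * it (illustrative cleanup, not the real error handling).
		 */
		nfs_release_request(req);
		return desc->pg_error;
	}
	return 0;
}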


fs/nfs/pagelist.c

Lines changed: 64 additions & 69 deletions
@@ -454,15 +454,23 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 }
 
 static struct nfs_page *
-nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
-		  unsigned int pgbase, unsigned int offset,
+nfs_create_subreq(struct nfs_page *req,
+		  unsigned int pgbase,
+		  unsigned int offset,
 		  unsigned int count)
 {
+	struct nfs_page *last;
 	struct nfs_page *ret;
 
 	ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
 			pgbase, offset, count);
 	if (!IS_ERR(ret)) {
+		/* find the last request */
+		for (last = req->wb_head;
+		     last->wb_this_page != req->wb_head;
+		     last = last->wb_this_page)
+			;
+
 		nfs_lock_request(ret);
 		ret->wb_index = req->wb_index;
 		nfs_page_group_init(ret, last);
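
The list walk added to nfs_create_subreq() above is the same one removed from nfs_pageio_add_request() further down: requests in a page group form a circular, singly linked list through wb_this_page, anchored at wb_head, so the last member is the one whose link points back to the head. A minimal standalone sketch of that walk, using a made-up node type rather than struct nfs_page:

struct node {
	struct node *next;	/* stands in for wb_this_page */
};

/* Return the last node of a circular singly linked list headed by @head. */
static struct node *find_last(struct node *head)
{
	struct node *last = head;

	while (last->next != head)
		last = last->next;
	return last;
}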
@@ -988,7 +996,7 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
 }
 
 /**
- * nfs_can_coalesce_requests - test two requests for compatibility
+ * nfs_coalesce_size - test two requests for compatibility
  * @prev: pointer to nfs_page
  * @req: pointer to nfs_page
  * @pgio: pointer to nfs_pagio_descriptor
@@ -997,57 +1005,53 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
  * page data area they describe is contiguous, and that their RPC
  * credentials, NFSv4 open state, and lockowners are the same.
  *
- * Return 'true' if this is the case, else return 'false'.
+ * Returns size of the request that can be coalesced
  */
-static bool nfs_can_coalesce_requests(struct nfs_page *prev,
+static unsigned int nfs_coalesce_size(struct nfs_page *prev,
 				      struct nfs_page *req,
 				      struct nfs_pageio_descriptor *pgio)
 {
-	size_t size;
 	struct file_lock_context *flctx;
 
 	if (prev) {
 		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
-			return false;
+			return 0;
 		flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
 		if (flctx != NULL &&
 		    !(list_empty_careful(&flctx->flc_posix) &&
 		      list_empty_careful(&flctx->flc_flock)) &&
 		    !nfs_match_lock_context(req->wb_lock_context,
 					    prev->wb_lock_context))
-			return false;
+			return 0;
 		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
-			return false;
+			return 0;
 		if (req->wb_page == prev->wb_page) {
 			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
-				return false;
+				return 0;
 		} else {
 			if (req->wb_pgbase != 0 ||
 			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
-				return false;
+				return 0;
 		}
 	}
-	size = pgio->pg_ops->pg_test(pgio, prev, req);
-	WARN_ON_ONCE(size > req->wb_bytes);
-	if (size && size < req->wb_bytes)
-		req->wb_bytes = size;
-	return size > 0;
+	return pgio->pg_ops->pg_test(pgio, prev, req);
 }
 
 /**
  * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
  * @desc: destination io descriptor
  * @req: request
  *
- * Returns true if the request 'req' was successfully coalesced into the
- * existing list of pages 'desc'.
+ * If the request 'req' was successfully coalesced into the existing list
+ * of pages 'desc', it returns the size of req.
  */
-static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
-				     struct nfs_page *req)
+static unsigned int
+nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
+			  struct nfs_page *req)
 {
 	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
 	struct nfs_page *prev = NULL;
+	unsigned int size;
 
 	if (mirror->pg_count != 0) {
 		prev = nfs_list_entry(mirror->pg_list.prev);
@@ -1067,11 +1071,12 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 			return 0;
 	}
 
-	if (!nfs_can_coalesce_requests(prev, req, desc))
-		return 0;
+	size = nfs_coalesce_size(prev, req, desc);
+	if (size < req->wb_bytes)
+		return size;
 	nfs_list_move_request(req, &mirror->pg_list);
 	mirror->pg_count += req->wb_bytes;
-	return 1;
+	return req->wb_bytes;
 }
 
 /*
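
With nfs_coalesce_size() returning a byte count instead of a bool, its caller now distinguishes three outcomes, and the trimming of req->wb_bytes that pg_test() used to trigger inside nfs_can_coalesce_requests() is gone; deciding what to do with a partial fit moves up into __nfs_pageio_add_request(). The new caller logic from the hunk above, re-quoted with the outcomes spelled out in comments (no new upstream code here):

	size = nfs_coalesce_size(prev, req, desc);	/* 0, partial, or req->wb_bytes */
	if (size < req->wb_bytes)
		return size;	/* 0: nothing fits; otherwise only 'size' bytes fit */
	/* the whole request fits: queue it and report its full size */
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;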
@@ -1111,7 +1116,8 @@ nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
  * @req: request
  *
  * This may split a request into subrequests which are all part of the
- * same page group.
+ * same page group. If so, it will submit @req as the last one, to ensure
+ * the pointer to @req is still valid in case of failure.
  *
  * Returns true if the request 'req' was successfully coalesced into the
  * existing list of pages 'desc'.
@@ -1120,62 +1126,57 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 				    struct nfs_page *req)
 {
 	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
 	struct nfs_page *subreq;
-	unsigned int bytes_left = 0;
-	unsigned int offset, pgbase;
+	unsigned int size, subreq_size;
 
 	nfs_page_group_lock(req);
 
 	subreq = req;
-	bytes_left = subreq->wb_bytes;
-	offset = subreq->wb_offset;
-	pgbase = subreq->wb_pgbase;
-
-	do {
-		if (!nfs_pageio_do_add_request(desc, subreq)) {
-			/* make sure pg_test call(s) did nothing */
-			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
-			WARN_ON_ONCE(subreq->wb_offset != offset);
-			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
-
+	subreq_size = subreq->wb_bytes;
+	for(;;) {
+		size = nfs_pageio_do_add_request(desc, subreq);
+		if (size == subreq_size) {
+			/* We successfully submitted a request */
+			if (subreq == req)
+				break;
+			req->wb_pgbase += size;
+			req->wb_bytes -= size;
+			req->wb_offset += size;
+			subreq_size = req->wb_bytes;
+			subreq = req;
+			continue;
+		}
+		if (WARN_ON_ONCE(subreq != req)) {
+			nfs_page_group_unlock(req);
+			nfs_pageio_cleanup_request(desc, subreq);
+			subreq = req;
+			subreq_size = req->wb_bytes;
+			nfs_page_group_lock(req);
+		}
+		if (!size) {
+			/* Can't coalesce any more, so do I/O */
 			nfs_page_group_unlock(req);
 			desc->pg_moreio = 1;
 			nfs_pageio_doio(desc);
 			if (desc->pg_error < 0 || mirror->pg_recoalesce)
-				goto out_cleanup_subreq;
+				return 0;
 			/* retry add_request for this subreq */
 			nfs_page_group_lock(req);
 			continue;
 		}
-
-		/* check for buggy pg_test call(s) */
-		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
-		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
-		WARN_ON_ONCE(subreq->wb_bytes == 0);
-
-		bytes_left -= subreq->wb_bytes;
-		offset += subreq->wb_bytes;
-		pgbase += subreq->wb_bytes;
-
-		if (bytes_left) {
-			subreq = nfs_create_subreq(req, subreq, pgbase,
-					offset, bytes_left);
-			if (IS_ERR(subreq))
-				goto err_ptr;
-		}
-	} while (bytes_left > 0);
+		subreq = nfs_create_subreq(req, req->wb_pgbase,
+				req->wb_offset, size);
+		if (IS_ERR(subreq))
+			goto err_ptr;
+		subreq_size = size;
+	}
 
 	nfs_page_group_unlock(req);
 	return 1;
 err_ptr:
 	desc->pg_error = PTR_ERR(subreq);
 	nfs_page_group_unlock(req);
 	return 0;
-out_cleanup_subreq:
-	if (req != subreq)
-		nfs_pageio_cleanup_request(desc, subreq);
-	return 0;
 }
 
 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
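
The net effect of the rewritten loop: pieces that fit are carved off the front of @req and submitted first, while @req itself always holds the unsubmitted tail and is queued last, so an early exit leaves @req intact for the caller to clean up. A toy model of that ordering with made-up types (not kernel code, just an illustration of the splitting order):

#include <stdio.h>

struct toy_req {
	unsigned int offset;
	unsigned int bytes;
};

/* Pretend the I/O descriptor accepts at most 'limit' bytes per piece. */
static unsigned int toy_try_add(const struct toy_req *r, unsigned int limit)
{
	return r->bytes < limit ? r->bytes : limit;
}

static void toy_add_request(struct toy_req *req, unsigned int limit)
{
	for (;;) {
		unsigned int size = toy_try_add(req, limit);

		if (size == req->bytes) {
			/* the remainder (req itself) goes out last */
			printf("submit req    off=%u len=%u\n", req->offset, size);
			break;
		}
		/* carve a piece off the front and submit it first */
		printf("submit subreq off=%u len=%u\n", req->offset, size);
		req->offset += size;
		req->bytes -= size;
	}
}

int main(void)
{
	struct toy_req req = { .offset = 0, .bytes = 10 };

	toy_add_request(&req, 4);	/* two 4-byte subreqs first, then the 2-byte req */
	return 0;
}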
@@ -1244,7 +1245,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 {
 	u32 midx;
 	unsigned int pgbase, offset, bytes;
-	struct nfs_page *dupreq, *lastreq;
+	struct nfs_page *dupreq;
 
 	pgbase = req->wb_pgbase;
 	offset = req->wb_offset;
@@ -1258,13 +1259,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
 		nfs_page_group_lock(req);
 
-		/* find the last request */
-		for (lastreq = req->wb_head;
-		     lastreq->wb_this_page != req->wb_head;
-		     lastreq = lastreq->wb_this_page)
-			;
-
-		dupreq = nfs_create_subreq(req, lastreq,
+		dupreq = nfs_create_subreq(req,
 					   pgbase, offset, bytes);
 
 		nfs_page_group_unlock(req);
