Commit c4b50cd

svcrdma: Revert 2a1e4f2 ("svcrdma: Normalize Send page handling")
Get rid of the completion wait in svc_rdma_sendto(), and release pages
in the send completion handler again. A subsequent patch will handle
releasing those pages more efficiently.

Reverted by hand: patch -R would not apply cleanly.

Reviewed-by: Jeff Layton <[email protected]>
Signed-off-by: Chuck Lever <[email protected]>
1 parent a944209 commit c4b50cd
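In outline, the revert turns the Send path from a synchronous wait back into a fire-and-forget post. A condensed sketch of the two shapes, taken from the hunks below (error handling and surrounding code elided):

	/* Before this revert: the sender blocked until the Send completed. */
	ret = svc_rdma_send(rdma, sctxt);
	if (ret < 0)
		return ret;
	ret = wait_for_completion_killable(&sctxt->sc_done);
	svc_rdma_send_ctxt_put(rdma, sctxt);	/* sender releases the ctxt and its pages */
	return ret;

	/* After this revert: the sender posts and returns; svc_rdma_wc_send()
	 * owns the ctxt from here and releases its pages on completion.
	 */
	return svc_rdma_send(rdma, sctxt);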

3 files changed, 13 insertions(+), 23 deletions(-)

include/linux/sunrpc/svc_rdma.h

Lines changed: 0 additions & 1 deletion
@@ -154,7 +154,6 @@ struct svc_rdma_send_ctxt {
 
 	struct ib_send_wr	sc_send_wr;
 	struct ib_cqe		sc_cqe;
-	struct completion	sc_done;
 	struct xdr_buf		sc_hdrbuf;
 	struct xdr_stream	sc_stream;
 	void			*sc_xprt_buf;
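For context, sc_done was driven by the kernel's completion API, which this revert removes from the Send path. A minimal sketch of the pattern being reverted, assuming only the standard <linux/completion.h> interface:

	#include <linux/completion.h>

	/* One completion per in-flight Send context. */
	struct completion done;

	init_completion(&done);		/* once, when the ctxt is allocated */
	reinit_completion(&done);	/* before each reuse of the ctxt */

	/* Sender side: sleep until the CQ handler signals; returns an
	 * error if interrupted by a fatal signal.
	 */
	ret = wait_for_completion_killable(&done);

	/* CQ handler side: wake the sender. */
	complete(&done);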

net/sunrpc/xprtrdma/svc_rdma_backchannel.c

Lines changed: 1 addition & 7 deletions
@@ -93,13 +93,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	 */
 	get_page(virt_to_page(rqst->rq_buffer));
 	sctxt->sc_send_wr.opcode = IB_WR_SEND;
-	ret = svc_rdma_send(rdma, sctxt);
-	if (ret < 0)
-		return ret;
-
-	ret = wait_for_completion_killable(&sctxt->sc_done);
-	svc_rdma_send_ctxt_put(rdma, sctxt);
-	return ret;
+	return svc_rdma_send(rdma, sctxt);
 }
 
 /* Server-side transport endpoint wants a whole page for its send
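Read together with the context lines, svc_rdma_bc_sendto() after the revert reduces to roughly the following (reconstructed from the hunk above; the parameter list past the first argument and the elided body are assumptions):

	static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
				      struct rpc_rqst *rqst,		/* assumed */
				      struct svc_rdma_send_ctxt *sctxt)	/* assumed */
	{
		...
		get_page(virt_to_page(rqst->rq_buffer));
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
		return svc_rdma_send(rdma, sctxt);
	}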

net/sunrpc/xprtrdma/svc_rdma_sendto.c

Lines changed: 12 additions & 15 deletions
@@ -147,7 +147,6 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
 	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
 	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
-	init_completion(&ctxt->sc_done);
 	ctxt->sc_cqe.done = svc_rdma_wc_send;
 	ctxt->sc_xprt_buf = buffer;
 	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
@@ -286,19 +285,20 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
 
 	svc_rdma_wake_send_waiters(rdma, 1);
-	complete(&ctxt->sc_done);
 
 	if (unlikely(wc->status != IB_WC_SUCCESS))
 		goto flushed;
 
 	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
+	svc_rdma_send_ctxt_put(rdma, ctxt);
 	return;
 
 flushed:
 	if (wc->status != IB_WC_WR_FLUSH_ERR)
 		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
 	else
 		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
+	svc_rdma_send_ctxt_put(rdma, ctxt);
 	svc_xprt_deferred_close(&rdma->sc_xprt);
 }
 
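The resulting completion handler, reconstructed from the hunk above, now puts the ctxt on both the success and the flushed paths, so page release always happens here rather than in the sender. The two local-variable initializations above the hunk are assumptions:

	static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct svcxprt_rdma *rdma = cq->cq_context;	/* assumed */
		struct ib_cqe *cqe = wc->wr_cqe;		/* assumed */
		struct svc_rdma_send_ctxt *ctxt =
			container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

		svc_rdma_wake_send_waiters(rdma, 1);

		if (unlikely(wc->status != IB_WC_SUCCESS))
			goto flushed;

		trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
		svc_rdma_send_ctxt_put(rdma, ctxt);
		return;

	flushed:
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
		else
			trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
		svc_rdma_send_ctxt_put(rdma, ctxt);
		svc_xprt_deferred_close(&rdma->sc_xprt);
	}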
@@ -315,8 +315,6 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 	struct ib_send_wr *wr = &ctxt->sc_send_wr;
 	int ret;
 
-	reinit_completion(&ctxt->sc_done);
-
 	/* Sync the transport header buffer */
 	ib_dma_sync_single_for_device(rdma->sc_pd->device,
 				      wr->sg_list[0].addr,
@@ -808,8 +806,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
  * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
  * so they are released by the Send completion handler.
  */
-static inline void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
-					  struct svc_rdma_send_ctxt *ctxt)
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+				   struct svc_rdma_send_ctxt *ctxt)
 {
 	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;
 
@@ -852,20 +850,16 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
 	if (ret < 0)
 		return ret;
 
+	svc_rdma_save_io_pages(rqstp, sctxt);
+
 	if (rctxt->rc_inv_rkey) {
 		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
 		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
 	} else {
 		sctxt->sc_send_wr.opcode = IB_WR_SEND;
 	}
 
-	ret = svc_rdma_send(rdma, sctxt);
-	if (ret < 0)
-		return ret;
-
-	ret = wait_for_completion_killable(&sctxt->sc_done);
-	svc_rdma_send_ctxt_put(rdma, sctxt);
-	return ret;
+	return svc_rdma_send(rdma, sctxt);
 }
 
 /**
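Note the ordering this hunk establishes: the response pages are handed to the ctxt before the Send is posted. Once the post succeeds, svc_rdma_wc_send() may run at any moment and will release those pages, so transferring ownership first keeps the sender from touching pages that are already gone. An annotated excerpt of the new tail (comments are mine, not in the patch):

	/* Ownership of rq_respages must move to the ctxt before posting:
	 * after the post, the completion handler may free them at any time.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	return svc_rdma_send(rdma, sctxt);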
@@ -931,8 +925,7 @@ void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
 	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
 	if (svc_rdma_send(rdma, sctxt))
 		goto put_ctxt;
-
-	wait_for_completion_killable(&sctxt->sc_done);
+	return;
 
 put_ctxt:
 	svc_rdma_send_ctxt_put(rdma, sctxt);
@@ -1006,6 +999,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	if (ret != -E2BIG && ret != -EINVAL)
 		goto put_ctxt;
 
+	/* Send completion releases payload pages that were part
+	 * of previously posted RDMA Writes.
+	 */
+	svc_rdma_save_io_pages(rqstp, sctxt);
 	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
 	return 0;
 
