@@ -147,7 +147,6 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
 	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
 	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
-	init_completion(&ctxt->sc_done);
 	ctxt->sc_cqe.done = svc_rdma_wc_send;
 	ctxt->sc_xprt_buf = buffer;
 	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
@@ -286,19 +285,20 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
 
 	svc_rdma_wake_send_waiters(rdma, 1);
-	complete(&ctxt->sc_done);
 
 	if (unlikely(wc->status != IB_WC_SUCCESS))
 		goto flushed;
 
 	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
+	svc_rdma_send_ctxt_put(rdma, ctxt);
 	return;
 
 flushed:
 	if (wc->status != IB_WC_WR_FLUSH_ERR)
 		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
 	else
 		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
+	svc_rdma_send_ctxt_put(rdma, ctxt);
 	svc_xprt_deferred_close(&rdma->sc_xprt);
 }
 
@@ -315,8 +315,6 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 	struct ib_send_wr *wr = &ctxt->sc_send_wr;
 	int ret;
 
-	reinit_completion(&ctxt->sc_done);
-
 	/* Sync the transport header buffer */
 	ib_dma_sync_single_for_device(rdma->sc_pd->device,
 				      wr->sg_list[0].addr,
@@ -808,8 +806,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
  * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
  * so they are released by the Send completion handler.
  */
-static inline void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
-					  struct svc_rdma_send_ctxt *ctxt)
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+				   struct svc_rdma_send_ctxt *ctxt)
 {
 	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;
 
@@ -852,20 +850,16 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
 	if (ret < 0)
 		return ret;
 
+	svc_rdma_save_io_pages(rqstp, sctxt);
+
 	if (rctxt->rc_inv_rkey) {
 		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
 		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
 	} else {
 		sctxt->sc_send_wr.opcode = IB_WR_SEND;
 	}
 
-	ret = svc_rdma_send(rdma, sctxt);
-	if (ret < 0)
-		return ret;
-
-	ret = wait_for_completion_killable(&sctxt->sc_done);
-	svc_rdma_send_ctxt_put(rdma, sctxt);
-	return ret;
+	return svc_rdma_send(rdma, sctxt);
 }
 
 /**
@@ -931,8 +925,7 @@ void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
 	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
 	if (svc_rdma_send(rdma, sctxt))
 		goto put_ctxt;
-
-	wait_for_completion_killable(&sctxt->sc_done);
+	return;
 
 put_ctxt:
 	svc_rdma_send_ctxt_put(rdma, sctxt);
@@ -1006,6 +999,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	if (ret != -E2BIG && ret != -EINVAL)
 		goto put_ctxt;
 
+	/* Send completion releases payload pages that were part
+	 * of previously posted RDMA Writes.
+	 */
+	svc_rdma_save_io_pages(rqstp, sctxt);
 	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
 	return 0;
 