@@ -230,6 +230,28 @@ static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
 	queue_work(svcrdma_wq, &info->wi_work);
 }
 
+/**
+ * svc_rdma_write_chunk_release - Release Write chunk I/O resources
+ * @rdma: controlling transport
+ * @ctxt: Send context that is being released
+ */
+void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
+				  struct svc_rdma_send_ctxt *ctxt)
+{
+	struct svc_rdma_write_info *info;
+	struct svc_rdma_chunk_ctxt *cc;
+
+	while (!list_empty(&ctxt->sc_write_info_list)) {
+		info = list_first_entry(&ctxt->sc_write_info_list,
+					struct svc_rdma_write_info, wi_list);
+		list_del(&info->wi_list);
+
+		cc = &info->wi_cc;
+		svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
+		svc_rdma_write_info_free(info);
+	}
+}
+
 /**
  * svc_rdma_reply_chunk_release - Release Reply chunk I/O resources
  * @rdma: controlling transport
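
The release helper above drains ctxt->sc_write_info_list with the kernel's
standard intrusive-list drain idiom: take the head entry with
list_first_entry(), unlink it with list_del(), then free it. Below is a
rough, self-contained userspace sketch of that idiom; the list primitives
and the write_info struct are simplified stand-ins, not the kernel's own
definitions.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    /* Minimal stand-ins for the kernel's intrusive-list primitives. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_first_entry(head, type, member) \
            container_of((head)->next, type, member)

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev;
            n->next = h;
            h->prev->next = n;
            h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }

    /* Stand-in for svc_rdma_write_info: one queued Write chunk. */
    struct write_info {
            int sqecount;
            struct list_head wi_list;
    };

    int main(void)
    {
            struct list_head pending;
            int i;

            list_init(&pending);
            for (i = 1; i <= 3; i++) {
                    struct write_info *info = malloc(sizeof(*info));

                    info->sqecount = i;
                    list_add_tail(&info->wi_list, &pending);
            }

            /* The same drain idiom as svc_rdma_write_chunk_release(). */
            while (!list_empty(&pending)) {
                    struct write_info *info =
                            list_first_entry(&pending, struct write_info,
                                             wi_list);

                    list_del(&info->wi_list);
                    printf("releasing %d SQEs\n", info->sqecount);
                    free(info);
            }
            return 0;
    }
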
@@ -286,26 +308,23 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct ib_cqe *cqe = wc->wr_cqe;
 	struct svc_rdma_chunk_ctxt *cc =
 			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
-	struct svc_rdma_write_info *info =
-			container_of(cc, struct svc_rdma_write_info, wi_cc);
 
 	switch (wc->status) {
 	case IB_WC_SUCCESS:
 		trace_svcrdma_wc_write(&cc->cc_cid);
-		break;
+		return;
 	case IB_WC_WR_FLUSH_ERR:
 		trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
 		break;
 	default:
 		trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
 	}
 
-	svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
-
-	if (unlikely(wc->status != IB_WC_SUCCESS))
-		svc_xprt_deferred_close(&rdma->sc_xprt);
-
-	svc_rdma_write_info_free(info);
+	/* The RDMA Write has flushed, so the client won't get
+	 * some of the outgoing RPC message. Signal the loss
+	 * to the client by closing the connection.
+	 */
+	svc_xprt_deferred_close(&rdma->sc_xprt);
 }
 
 /**
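
With the per-chunk wake/free work moved into svc_rdma_write_chunk_release(),
the completion handler above reduces to pure control flow: a successful
Write completion returns immediately, and every non-success status falls
through to svc_xprt_deferred_close(). A minimal sketch of that switch
shape, with hypothetical stand-ins for the tracepoints and the close path:

    #include <stdio.h>

    enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR, WC_OTHER_ERR };

    /* Hypothetical stand-ins, not the kernel's functions. */
    static void trace_ok(void)       { puts("trace: write done"); }
    static void trace_flush(void)    { puts("trace: write flushed"); }
    static void trace_err(void)      { puts("trace: write error"); }
    static void deferred_close(void) { puts("deferred close"); }

    static void write_done(enum wc_status status)
    {
            switch (status) {
            case WC_SUCCESS:
                    trace_ok();
                    return;         /* fast path: no cleanup needed here */
            case WC_WR_FLUSH_ERR:
                    trace_flush();
                    break;
            default:
                    trace_err();
            }

            /* Every non-success status falls through to the close. */
            deferred_close();
    }

    int main(void)
    {
            write_done(WC_SUCCESS);
            write_done(WC_WR_FLUSH_ERR);
            write_done(WC_OTHER_ERR);
            return 0;
    }
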
@@ -601,13 +620,19 @@ static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
 	return xdr->len;
 }
 
-static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
-				     const struct svc_rdma_chunk *chunk,
-				     const struct xdr_buf *xdr)
+/* Link Write WRs for @chunk onto @sctxt's WR chain.
+ */
+static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
+					struct svc_rdma_send_ctxt *sctxt,
+					const struct svc_rdma_chunk *chunk,
+					const struct xdr_buf *xdr)
 {
 	struct svc_rdma_write_info *info;
 	struct svc_rdma_chunk_ctxt *cc;
+	struct ib_send_wr *first_wr;
 	struct xdr_buf payload;
+	struct list_head *pos;
+	struct ib_cqe *cqe;
 	int ret;
 
 	if (xdr_buf_subsegment(xdr, &payload, chunk->ch_position,
@@ -623,10 +648,25 @@ static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
 	if (ret != payload.len)
 		goto out_err;
 
-	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
-	ret = svc_rdma_post_chunk_ctxt(rdma, cc);
-	if (ret < 0)
+	ret = -EINVAL;
+	if (unlikely(cc->cc_sqecount > rdma->sc_sq_depth))
 		goto out_err;
+
+	first_wr = sctxt->sc_wr_chain;
+	cqe = &cc->cc_cqe;
+	list_for_each(pos, &cc->cc_rwctxts) {
+		struct svc_rdma_rw_ctxt *rwc;
+
+		rwc = list_entry(pos, struct svc_rdma_rw_ctxt, rw_list);
+		first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp,
+					   rdma->sc_port_num, cqe, first_wr);
+		cqe = NULL;
+	}
+	sctxt->sc_wr_chain = first_wr;
+	sctxt->sc_sqecount += cc->cc_sqecount;
+	list_add(&info->wi_list, &sctxt->sc_write_info_list);
+
+	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
 	return 0;
 
 out_err:
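
The new loop above is the heart of the change: instead of posting each
Write chunk separately via svc_rdma_post_chunk_ctxt(), it uses
rdma_rw_ctx_wrs() to splice each R/W context's WRs onto the front of the
Send context's sc_wr_chain, passing the chunk's CQE only on the first
iteration so a single Write WR per chunk is signaled. The toy model below
mimics that chaining with one WR per context; the real rdma_rw_ctx_wrs()
links all of a context's WRs and arms the CQE on one of them. The structs
here are stand-ins, not the RDMA core API.

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for ib_cqe and ib_send_wr. */
    struct cqe { const char *name; };
    struct send_wr {
            struct send_wr *next;
            struct cqe *wr_cqe;     /* set only on the signaled WR */
            const char *label;
    };

    /* Rough model of what the loop does with rdma_rw_ctx_wrs():
     * put this context's WR in front of @chain, arm it with @cqe
     * (which may be NULL), and return the new chain head. */
    static struct send_wr *prepend_wr(struct send_wr *wr, struct cqe *cqe,
                                      struct send_wr *chain)
    {
            wr->wr_cqe = cqe;
            wr->next = chain;
            return wr;
    }

    int main(void)
    {
            struct cqe write_cqe = { "write-completion" };
            struct send_wr send = { NULL, NULL, "Send WR" };
            struct send_wr w1 = { NULL, NULL, "Write WR 1" };
            struct send_wr w2 = { NULL, NULL, "Write WR 2" };
            struct send_wr *wrs[] = { &w1, &w2 };
            struct send_wr *head = &send;   /* sctxt->sc_wr_chain */
            struct cqe *cqe = &write_cqe;
            struct send_wr *wr;
            size_t i;

            for (i = 0; i < sizeof(wrs) / sizeof(wrs[0]); i++) {
                    head = prepend_wr(wrs[i], cqe, head);
                    cqe = NULL;     /* only one Write WR is signaled */
            }

            for (wr = head; wr; wr = wr->next)
                    printf("%s%s\n", wr->label,
                           wr->wr_cqe ? " (signaled)" : "");
            return 0;
    }

The whole chain, Writes plus the final Send, can then be posted with one
call, and the Send completion is the point at which
svc_rdma_write_chunk_release() can safely reclaim the Write resources.
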
@@ -635,25 +675,27 @@ static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
 }
 
 /**
- * svc_rdma_send_write_list - Send all chunks on the Write list
+ * svc_rdma_prepare_write_list - Construct WR chain for sending Write list
  * @rdma: controlling RDMA transport
- * @rctxt: Write list provisioned by the client
+ * @write_pcl: Write list provisioned by the client
+ * @sctxt: Send WR resources
  * @xdr: xdr_buf containing an RPC Reply message
  *
  * Returns zero on success, or a negative errno if one or more
  * Write chunks could not be sent.
  */
-int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
-			     const struct svc_rdma_recv_ctxt *rctxt,
-			     const struct xdr_buf *xdr)
+int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
+				const struct svc_rdma_pcl *write_pcl,
+				struct svc_rdma_send_ctxt *sctxt,
+				const struct xdr_buf *xdr)
 {
 	struct svc_rdma_chunk *chunk;
 	int ret;
 
-	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
+	pcl_for_each_chunk(chunk, write_pcl) {
 		if (!chunk->ch_payload_length)
 			break;
-		ret = svc_rdma_send_write_chunk(rdma, chunk, xdr);
+		ret = svc_rdma_prepare_write_chunk(rdma, sctxt, chunk, xdr);
 		if (ret < 0)
 			return ret;
 	}
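
One behavior carried over unchanged: the Write list walk stops at the
first chunk with a zero ch_payload_length, so any trailing unused chunks
the client provisioned are ignored. A trivial standalone illustration of
that termination rule (the lengths array is made up):

    #include <stdio.h>

    int main(void)
    {
            /* A zero payload length terminates the provisioned list. */
            unsigned int lengths[] = { 1024, 512, 0, 256 };
            size_t i;

            for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                    if (!lengths[i])
                            break;  /* trailing entries stay unused */
                    printf("prepare chunk of %u bytes\n", lengths[i]);
            }
            return 0;
    }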