@@ -230,28 +230,6 @@ static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
 	queue_work(svcrdma_wq, &info->wi_work);
 }
 
-/**
- * svc_rdma_write_chunk_release - Release Write chunk I/O resources
- * @rdma: controlling transport
- * @ctxt: Send context that is being released
- */
-void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
-				  struct svc_rdma_send_ctxt *ctxt)
-{
-	struct svc_rdma_write_info *info;
-	struct svc_rdma_chunk_ctxt *cc;
-
-	while (!list_empty(&ctxt->sc_write_info_list)) {
-		info = list_first_entry(&ctxt->sc_write_info_list,
-					struct svc_rdma_write_info, wi_list);
-		list_del(&info->wi_list);
-
-		cc = &info->wi_cc;
-		svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
-		svc_rdma_write_info_free(info);
-	}
-}
-
 /**
  * svc_rdma_reply_chunk_release - Release Reply chunk I/O resources
  * @rdma: controlling transport
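The helper removed here drained every svc_rdma_write_info still queued on a Send context, returning its SQ credits and freeing it. For readers unfamiliar with the idiom, below is a minimal userspace sketch of the same detach-and-free drain loop; the node type, the drain() helper, and main() are illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int payload;
	struct node *next;
};

/* Detach and free entries one at a time, mirroring the deleted
 * list_first_entry()/list_del()/svc_rdma_write_info_free() loop. */
static void drain(struct node **head)
{
	while (*head) {				/* !list_empty() */
		struct node *first = *head;	/* list_first_entry() */

		*head = first->next;		/* list_del() */
		printf("releasing node %d\n", first->payload);
		free(first);			/* svc_rdma_write_info_free() */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		n->next = head;
		head = n;
	}
	drain(&head);
	return 0;
}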
@@ -308,23 +286,26 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct ib_cqe *cqe = wc->wr_cqe;
 	struct svc_rdma_chunk_ctxt *cc =
 			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
+	struct svc_rdma_write_info *info =
+			container_of(cc, struct svc_rdma_write_info, wi_cc);
 
 	switch (wc->status) {
 	case IB_WC_SUCCESS:
 		trace_svcrdma_wc_write(&cc->cc_cid);
-		return;
+		break;
 	case IB_WC_WR_FLUSH_ERR:
 		trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
 		break;
 	default:
 		trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
 	}
 
-	/* The RDMA Write has flushed, so the client won't get
-	 * some of the outgoing RPC message. Signal the loss
-	 * to the client by closing the connection.
-	 */
-	svc_xprt_deferred_close(&rdma->sc_xprt);
+	svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
+
+	if (unlikely(wc->status != IB_WC_SUCCESS))
+		svc_xprt_deferred_close(&rdma->sc_xprt);
+
+	svc_rdma_write_info_free(info);
 }
 
 /**
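With this hunk the completion handler itself releases the Write chunk's resources: it recovers the enclosing svc_rdma_write_info from the embedded chunk context, wakes senders waiting on SQ space, closes the connection only on error, and frees the info. The double container_of() walk is the load-bearing trick, so here is a standalone sketch of it using simplified stand-in types (cqe, chunk_ctxt, write_info), not the kernel's definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe { int id; };				/* stand-in for struct ib_cqe */
struct chunk_ctxt { struct cqe cc_cqe; };	/* embeds the CQE */
struct write_info {				/* embeds the chunk ctxt */
	int wi_payload_len;
	struct chunk_ctxt wi_cc;
};

/* The CQ layer hands back only the embedded CQE pointer; walk
 * outward twice to reach the enclosing write_info, as
 * svc_rdma_write_done() now does. */
static void write_done(struct cqe *cqe)
{
	struct chunk_ctxt *cc = container_of(cqe, struct chunk_ctxt, cc_cqe);
	struct write_info *info = container_of(cc, struct write_info, wi_cc);

	printf("completed write of %d bytes\n", info->wi_payload_len);
}

int main(void)
{
	struct write_info info = { .wi_payload_len = 4096 };

	write_done(&info.wi_cc.cc_cqe);
	return 0;
}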
@@ -620,19 +601,13 @@ static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
 	return xdr->len;
 }
 
-/* Link Write WRs for @chunk onto @sctxt's WR chain.
- */
-static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
-					struct svc_rdma_send_ctxt *sctxt,
-					const struct svc_rdma_chunk *chunk,
-					const struct xdr_buf *xdr)
+static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
+				     const struct svc_rdma_chunk *chunk,
+				     const struct xdr_buf *xdr)
 {
 	struct svc_rdma_write_info *info;
 	struct svc_rdma_chunk_ctxt *cc;
-	struct ib_send_wr *first_wr;
 	struct xdr_buf payload;
-	struct list_head *pos;
-	struct ib_cqe *cqe;
 	int ret;
 
 	if (xdr_buf_subsegment(xdr, &payload, chunk->ch_position,
@@ -648,25 +623,10 @@ static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
 	if (ret != payload.len)
 		goto out_err;
 
-	ret = -EINVAL;
-	if (unlikely(cc->cc_sqecount > rdma->sc_sq_depth))
-		goto out_err;
-
-	first_wr = sctxt->sc_wr_chain;
-	cqe = &cc->cc_cqe;
-	list_for_each(pos, &cc->cc_rwctxts) {
-		struct svc_rdma_rw_ctxt *rwc;
-
-		rwc = list_entry(pos, struct svc_rdma_rw_ctxt, rw_list);
-		first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp,
-					   rdma->sc_port_num, cqe, first_wr);
-		cqe = NULL;
-	}
-	sctxt->sc_wr_chain = first_wr;
-	sctxt->sc_sqecount += cc->cc_sqecount;
-	list_add(&info->wi_list, &sctxt->sc_write_info_list);
-
 	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
+	ret = svc_rdma_post_chunk_ctxt(rdma, cc);
+	if (ret < 0)
+		goto out_err;
 	return 0;
 
 out_err:
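These two hunks restore per-chunk posting: instead of linking each chunk's Write WRs onto the Send context's WR chain, where only one WR carried a completion entry, svc_rdma_send_write_chunk() now hands the chunk context straight to svc_rdma_post_chunk_ctxt() so each chunk signals its own completion. The sketch below loosely mirrors the removed chaining logic with stand-in types; exactly one WR in the chain carries a CQE, just as cqe was passed once and then set to NULL after the first rdma_rw_ctx_wrs() call:

#include <stdio.h>

struct send_wr {
	struct send_wr *next;
	const void *cqe;	/* only one WR in the chain signals */
	const char *label;
};

/* Prepend @wr to @chain, attaching @cqe only if one is supplied. */
static struct send_wr *chain_wr(struct send_wr *wr, struct send_wr *chain,
				const void *cqe)
{
	wr->next = chain;
	wr->cqe = cqe;
	return wr;
}

int main(void)
{
	static int write_cqe;
	struct send_wr send = { .label = "Send" };
	struct send_wr w1 = { .label = "Write-1" };
	struct send_wr w2 = { .label = "Write-2" };
	struct send_wr *head = &send;

	/* Removed scheme: Write WRs are linked ahead of the Send WR,
	 * and only the first one processed gets a completion entry. */
	head = chain_wr(&w1, head, &write_cqe);
	head = chain_wr(&w2, head, NULL);

	for (struct send_wr *p = head; p; p = p->next)
		printf("%-8s cqe=%s\n", p->label, p->cqe ? "yes" : "none");
	return 0;
}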
@@ -675,27 +635,25 @@ static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
 }
 
 /**
- * svc_rdma_prepare_write_list - Construct WR chain for sending Write list
+ * svc_rdma_send_write_list - Send all chunks on the Write list
  * @rdma: controlling RDMA transport
- * @write_pcl: Write list provisioned by the client
- * @sctxt: Send WR resources
+ * @rctxt: Write list provisioned by the client
  * @xdr: xdr_buf containing an RPC Reply message
  *
  * Returns zero on success, or a negative errno if one or more
  * Write chunks could not be sent.
  */
-int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
-				const struct svc_rdma_pcl *write_pcl,
-				struct svc_rdma_send_ctxt *sctxt,
-				const struct xdr_buf *xdr)
+int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+			     const struct svc_rdma_recv_ctxt *rctxt,
+			     const struct xdr_buf *xdr)
 {
 	struct svc_rdma_chunk *chunk;
 	int ret;
 
-	pcl_for_each_chunk(chunk, write_pcl) {
+	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
 		if (!chunk->ch_payload_length)
 			break;
-		ret = svc_rdma_prepare_write_chunk(rdma, sctxt, chunk, xdr);
+		ret = svc_rdma_send_write_chunk(rdma, chunk, xdr);
 		if (ret < 0)
 			return ret;
 	}
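Design note on this last hunk: the renamed svc_rdma_send_write_list() takes the Receive context itself and iterates rctxt->rc_write_pcl internally, so callers no longer extract the Write pcl or thread a Send context through the call. Presumably the reply path (in svc_rdma_sendto.c) now passes only the transport, the Receive context, and the Reply's xdr_buf, and no longer needs to order Write WRs relative to the Send WR.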