@@ -366,9 +366,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);
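A minimal sketch of the completion-handler pattern this hunk and the ones below switch to (struct and function names here are illustrative stand-ins, not the real rpcrdma definitions): with the ib_cqe embedded directly in the MR, a single container_of() on wc->wr_cqe recovers the MR, with no detour through struct rpcrdma_frwr.

	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	/* Stand-in for a per-MR object that embeds its own CQE. */
	struct demo_mr {
		struct ib_cqe	mr_cqe;		/* completion hook lives in the MR */
		u32		mr_handle;	/* rkey this MR registers */
	};

	static void demo_wc_handler(struct ib_cq *cq, struct ib_wc *wc)
	{
		/* WARNING: only wr_cqe and status are reliable at this point */
		struct ib_cqe *cqe = wc->wr_cqe;
		struct demo_mr *mr = container_of(cqe, struct demo_mr, mr_cqe);

		pr_debug("rkey %u: wc status %d\n", mr->mr_handle, wc->status);
	}

	/* Posting side: the WR points at the embedded CQE, and ->done is
	 * chosen per posting, as frwr_send() and frwr_unmap_*() do below.
	 */
	static void demo_attach_cqe(struct demo_mr *mr, struct ib_send_wr *wr)
	{
		mr->mr_cqe.done = demo_wc_handler;
		wr->wr_cqe = &mr->mr_cqe;
	}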
@@ -405,9 +403,9 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		trace_xprtrdma_mr_fastreg(mr);
 		frwr = &mr->frwr;
 
-		frwr->fr_cqe.done = frwr_wc_fastreg;
+		mr->mr_cqe.done = frwr_wc_fastreg;
 		frwr->fr_regwr.wr.next = post_wr;
-		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
+		frwr->fr_regwr.wr.wr_cqe = &mr->mr_cqe;
 		frwr->fr_regwr.wr.num_sge = 0;
 		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
 		frwr->fr_regwr.wr.send_flags = 0;
@@ -463,9 +461,7 @@ static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
 static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
@@ -484,9 +480,8 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
+	struct rpcrdma_frwr *frwr = &mr->frwr;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
 	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
@@ -529,16 +524,17 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		r_xprt->rx_stats.local_inv_needed++;
 
 		frwr = &mr->frwr;
-		frwr->fr_cqe.done = frwr_wc_localinv;
 		last = &frwr->fr_invwr;
 		last->next = NULL;
-		last->wr_cqe = &frwr->fr_cqe;
+		last->wr_cqe = &mr->mr_cqe;
 		last->sg_list = NULL;
 		last->num_sge = 0;
 		last->opcode = IB_WR_LOCAL_INV;
 		last->send_flags = IB_SEND_SIGNALED;
 		last->ex.invalidate_rkey = mr->mr_handle;
 
+		last->wr_cqe->done = frwr_wc_localinv;
+
 		*prev = last;
 		prev = &last->next;
 	}
@@ -547,7 +543,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * last WR in the chain completes, all WRs in the chain
 	 * are complete.
 	 */
-	frwr->fr_cqe.done = frwr_wc_localinv_wake;
+	last->wr_cqe->done = frwr_wc_localinv_wake;
 	reinit_completion(&frwr->fr_linv_done);
 
 	/* Transport disconnect drains the receive CQ before it
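A hedged sketch of the chaining idiom used in frwr_unmap_sync() above, reusing the illustrative struct demo_mr and demo_wc_handler from the earlier sketch (demo_wake_handler is likewise hypothetical): one LOCAL_INV WR per MR is linked into a chain, every WR points at its MR's embedded CQE, and only the last WR's ->done is swapped to the waking handler, so a single completion marks the whole chain as finished.

	/* Hypothetical handler that completes a waiter, standing in for
	 * frwr_wc_localinv_wake().
	 */
	static void demo_wake_handler(struct ib_cq *cq, struct ib_wc *wc);

	static struct ib_send_wr *demo_build_li_chain(struct demo_mr *mrs,
						      struct ib_send_wr *wrs,
						      int count)
	{
		struct ib_send_wr *first = NULL, *last = NULL;
		struct ib_send_wr **prev = &first;
		int i;

		for (i = 0; i < count; i++) {
			last = &wrs[i];
			last->next = NULL;
			last->wr_cqe = &mrs[i].mr_cqe;
			last->sg_list = NULL;
			last->num_sge = 0;
			last->opcode = IB_WR_LOCAL_INV;
			last->send_flags = IB_SEND_SIGNALED;
			last->ex.invalidate_rkey = mrs[i].mr_handle;
			last->wr_cqe->done = demo_wc_handler;

			*prev = last;
			prev = &last->next;
		}

		/* Strong ordering: only the final completion wakes the waiter. */
		if (last)
			last->wr_cqe->done = demo_wake_handler;
		return first;
	}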
@@ -579,9 +575,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct ib_cqe *cqe = wc->wr_cqe;
-	struct rpcrdma_frwr *frwr =
-		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
-	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
 	struct rpcrdma_rep *rep;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
@@ -630,16 +624,17 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		r_xprt->rx_stats.local_inv_needed++;
 
 		frwr = &mr->frwr;
-		frwr->fr_cqe.done = frwr_wc_localinv;
 		last = &frwr->fr_invwr;
 		last->next = NULL;
-		last->wr_cqe = &frwr->fr_cqe;
+		last->wr_cqe = &mr->mr_cqe;
 		last->sg_list = NULL;
 		last->num_sge = 0;
 		last->opcode = IB_WR_LOCAL_INV;
 		last->send_flags = IB_SEND_SIGNALED;
 		last->ex.invalidate_rkey = mr->mr_handle;
 
+		last->wr_cqe->done = frwr_wc_localinv;
+
 		*prev = last;
 		prev = &last->next;
 	}
@@ -649,7 +644,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * are complete. The last completion will wake up the
 	 * RPC waiter.
 	 */
-	frwr->fr_cqe.done = frwr_wc_localinv_done;
+	last->wr_cqe->done = frwr_wc_localinv_done;
 
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us