@@ -78,7 +78,7 @@ static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
 static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
+static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
 static struct rpcrdma_regbuf *
 rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		      gfp_t flags);
@@ -407,8 +407,6 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_req *req;
 
-	cancel_work_sync(&buf->rb_refresh_worker);
-
 	/* This is similar to rpcrdma_ep_destroy, but:
 	 * - Don't cancel the connect worker.
 	 * - Don't call rpcrdma_ep_disconnect, which waits
@@ -435,7 +433,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
 		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
 		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
 	}
-	rpcrdma_mrs_destroy(buf);
+	rpcrdma_mrs_destroy(r_xprt);
 	ib_dealloc_pd(ia->ri_pd);
 	ia->ri_pd = NULL;
 
@@ -628,8 +626,6 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
 		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
 		goto out3;
 	}
-
-	rpcrdma_mrs_create(r_xprt);
 	return 0;
 
 out3:
@@ -703,7 +699,6 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 	memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
 	switch (ep->rep_connected) {
 	case 0:
-		dprintk("RPC: %s: connecting...\n", __func__);
 		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
 		if (rc) {
 			rc = -ENETUNREACH;
@@ -741,7 +736,7 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 		goto out;
 	}
 
-	dprintk("RPC: %s: connected\n", __func__);
+	rpcrdma_mrs_create(r_xprt);
 
 out:
 	if (rc)
@@ -756,11 +751,8 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
  * @ep: endpoint to disconnect
  * @ia: associated interface adapter
  *
- * This is separate from destroy to facilitate the ability
- * to reconnect without recreating the endpoint.
- *
- * This call is not reentrant, and must not be made in parallel
- * on the same endpoint.
+ * Caller serializes. Either the transport send lock is held,
+ * or we're being called to destroy the transport.
  */
 void
 rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
@@ -780,6 +772,7 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 
 	rpcrdma_xprt_drain(r_xprt);
 	rpcrdma_reqs_reset(r_xprt);
+	rpcrdma_mrs_destroy(r_xprt);
 }
 
 /* Fixed-size circular FIFO queue. This implementation is wait-free and
@@ -986,6 +979,28 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
 	xprt_write_space(&r_xprt->rx_xprt);
 }
 
+/**
+ * rpcrdma_mrs_refresh - Wake the MR refresh worker
+ * @r_xprt: controlling transport instance
+ *
+ */
+void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
+{
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+
+	/* If there is no underlying device, it's no use to
+	 * wake the refresh worker.
+	 */
+	if (ep->rep_connected != -ENODEV) {
+		/* The work is scheduled on a WQ_MEM_RECLAIM
+		 * workqueue in order to prevent MR allocation
+		 * from recursing into NFS during direct reclaim.
+		 */
+		queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
+	}
+}
+
 /**
  * rpcrdma_req_create - Allocate an rpcrdma_req object
  * @r_xprt: controlling r_xprt
@@ -1145,8 +1160,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	INIT_LIST_HEAD(&buf->rb_all_mrs);
 	INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);
 
-	rpcrdma_mrs_create(r_xprt);
-
 	INIT_LIST_HEAD(&buf->rb_send_bufs);
 	INIT_LIST_HEAD(&buf->rb_allreqs);
 
@@ -1177,8 +1190,8 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
  * rpcrdma_req_destroy - Destroy an rpcrdma_req object
  * @req: unused object to be destroyed
  *
- * This function assumes that the caller prevents concurrent device
- * unload and transport tear-down.
+ * Relies on caller holding the transport send lock to protect
+ * removing req->rl_all from buf->rb_all_reqs safely.
  */
 void rpcrdma_req_destroy(struct rpcrdma_req *req)
 {
@@ -1204,17 +1217,18 @@ void rpcrdma_req_destroy(struct rpcrdma_req *req)
 
 /**
  * rpcrdma_mrs_destroy - Release all of a transport's MRs
- * @buf: controlling buffer instance
+ * @r_xprt: controlling transport instance
  *
  * Relies on caller holding the transport send lock to protect
  * removing mr->mr_list from req->rl_free_mrs safely.
  */
-static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
+static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
 {
-	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
-						   rx_buf);
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_mr *mr;
 
+	cancel_work_sync(&buf->rb_refresh_worker);
+
 	spin_lock(&buf->rb_lock);
 	while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
 					      struct rpcrdma_mr,
@@ -1224,10 +1238,10 @@ static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
 		spin_unlock(&buf->rb_lock);
 
 		frwr_release_mr(mr);
+
 		spin_lock(&buf->rb_lock);
 	}
 	spin_unlock(&buf->rb_lock);
-	r_xprt->rx_stats.mrs_allocated = 0;
 }
 
 /**
@@ -1241,8 +1255,6 @@ static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
 void
 rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 {
-	cancel_work_sync(&buf->rb_refresh_worker);
-
 	rpcrdma_sendctxs_destroy(buf);
 	rpcrdma_reps_destroy(buf);
 
@@ -1254,8 +1266,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 		list_del(&req->rl_list);
 		rpcrdma_req_destroy(req);
 	}
-
-	rpcrdma_mrs_destroy(buf);
 }
 
 /**
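For readers following the MR lifecycle change above: the new rpcrdma_mrs_refresh() kernel-doc explains that refresh work is queued on a WQ_MEM_RECLAIM workqueue (xprtiod_workqueue) so MR allocation cannot recurse into NFS during direct reclaim. The sketch below is a minimal, hypothetical standalone module illustrating that deferral pattern; the names (example_wq, example_refresh, example_need_more) are illustrative only and are not part of this commit, which reuses the existing xprtiod_workqueue rather than allocating its own queue.

/* Hypothetical sketch -- not part of this commit. Shows deferring
 * resource allocation to a WQ_MEM_RECLAIM workqueue so the allocation
 * never runs inline in a context that may be in direct reclaim.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical */
static struct work_struct example_refresh;	/* hypothetical */

/* Runs in worker context. The rescuer thread that WQ_MEM_RECLAIM
 * reserves guarantees this work can make forward progress even
 * when the system is deep in memory reclaim.
 */
static void example_refresh_worker(struct work_struct *work)
{
	/* replenish the resource pool here */
}

/* Caller path (e.g. on resource exhaustion): defer the allocation
 * instead of performing it in the caller's context.
 */
static void example_need_more(void)
{
	queue_work(example_wq, &example_refresh);
}

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM reserves a rescuer thread for this queue */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_refresh, example_refresh_worker);
	example_need_more();	/* prime the pool once at startup */
	return 0;
}

static void __exit example_exit(void)
{
	/* Mirror the ordering this commit moves into rpcrdma_mrs_destroy():
	 * stop the refresh work before tearing anything down.
	 */
	cancel_work_sync(&example_refresh);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The commit itself is narrower than the sketch: MR creation and destruction now bracket each connection (rpcrdma_mrs_create() moves into rpcrdma_ep_connect(), rpcrdma_mrs_destroy() into rpcrdma_ep_disconnect()), and cancel_work_sync() moves into rpcrdma_mrs_destroy() so the refresh worker can never run against buffers that are being torn down.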