@@ -74,7 +74,7 @@ static void frwr_mr_recycle(struct rpcrdma_mr *mr)
 
 	if (mr->mr_dir != DMA_NONE) {
 		trace_xprtrdma_mr_unmap(mr);
-		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
+		ib_dma_unmap_sg(r_xprt->rx_ep.re_id->device,
 				mr->mr_sg, mr->mr_nents, mr->mr_dir);
 		mr->mr_dir = DMA_NONE;
 	}
@@ -115,13 +115,13 @@ void frwr_reset(struct rpcrdma_req *req)
  */
 int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 {
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	unsigned int depth = ia->ri_max_frwr_depth;
+	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+	unsigned int depth = ep->re_max_fr_depth;
 	struct scatterlist *sg;
 	struct ib_mr *frmr;
 	int rc;
 
-	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
+	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
 	if (IS_ERR(frmr))
 		goto out_mr_err;
 
@@ -151,29 +151,24 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 
 /**
  * frwr_query_device - Prepare a transport for use with FRWR
- * @r_xprt: controlling transport instance
+ * @ep: endpoint to fill in
  * @device: RDMA device to query
  *
  * On success, sets:
- *	ep->rep_attr
- *	ep->rep_max_requests
- *	ia->ri_max_rdma_segs
- *
- * And these FRWR-related fields:
- *	ia->ri_max_frwr_depth
- *	ia->ri_mrtype
+ *	ep->re_attr
+ *	ep->re_max_requests
+ *	ep->re_max_rdma_segs
+ *	ep->re_max_fr_depth
+ *	ep->re_mrtype
  *
  * Return values:
  *   On success, returns zero.
  *   %-EINVAL - the device does not support FRWR memory registration
  *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
  */
-int frwr_query_device(struct rpcrdma_xprt *r_xprt,
-		      const struct ib_device *device)
+int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
 {
 	const struct ib_device_attr *attrs = &device->attrs;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	int max_qp_wr, depth, delta;
 	unsigned int max_sge;
 
@@ -190,23 +185,23 @@ int frwr_query_device(struct rpcrdma_xprt *r_xprt,
 		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
 		return -ENOMEM;
 	}
-	ep->rep_attr.cap.max_send_sge = max_sge;
-	ep->rep_attr.cap.max_recv_sge = 1;
+	ep->re_attr.cap.max_send_sge = max_sge;
+	ep->re_attr.cap.max_recv_sge = 1;
 
-	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
+	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
 	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
-		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
+		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;
 
 	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
 	 * capability, but perform optimally when the MRs are not larger
 	 * than a page.
 	 */
 	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
-		ia->ri_max_frwr_depth = attrs->max_sge_rd;
+		ep->re_max_fr_depth = attrs->max_sge_rd;
 	else
-		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
-	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
-		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
+		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
+	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
+		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;
 
 	/* Add room for frwr register and invalidate WRs.
 	 * 1. FRWR reg WR for head
@@ -222,11 +217,11 @@ int frwr_query_device(struct rpcrdma_xprt *r_xprt,
 	/* Calculate N if the device max FRWR depth is smaller than
 	 * RPCRDMA_MAX_DATA_SEGS.
 	 */
-	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
-		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
+	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
+		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
 		do {
 			depth += 2; /* FRWR reg + invalidate */
-			delta -= ia->ri_max_frwr_depth;
+			delta -= ep->re_max_fr_depth;
 		} while (delta > 0);
 	}
 
@@ -235,34 +230,34 @@ int frwr_query_device(struct rpcrdma_xprt *r_xprt,
 	max_qp_wr -= 1;
 	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
 		return -ENOMEM;
-	if (ep->rep_max_requests > max_qp_wr)
-		ep->rep_max_requests = max_qp_wr;
-	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
-	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
-		ep->rep_max_requests = max_qp_wr / depth;
-		if (!ep->rep_max_requests)
+	if (ep->re_max_requests > max_qp_wr)
+		ep->re_max_requests = max_qp_wr;
+	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
+	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
+		ep->re_max_requests = max_qp_wr / depth;
+		if (!ep->re_max_requests)
 			return -ENOMEM;
-		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
+		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
 	}
-	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
-	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
-	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
-	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
-	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
-
-	ia->ri_max_rdma_segs =
-		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
+	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
+	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
+	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
+	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
+	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
+
+	ep->re_max_rdma_segs =
+		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
 	/* Reply chunks require segments for head and tail buffers */
-	ia->ri_max_rdma_segs += 2;
-	if (ia->ri_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
-		ia->ri_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
+	ep->re_max_rdma_segs += 2;
+	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
+		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
 
 	/* Ensure the underlying device is capable of conveying the
 	 * largest r/wsize NFS will ask for. This guarantees that
 	 * failing over from one RDMA device to another will not
 	 * break NFS I/O.
 	 */
-	if ((ia->ri_max_rdma_segs * ia->ri_max_frwr_depth) < RPCRDMA_MAX_SEGS)
+	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
 		return -ENOMEM;
 
 	return 0;
@@ -288,14 +283,14 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 				int nsegs, bool writing, __be32 xid,
 				struct rpcrdma_mr *mr)
 {
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	struct ib_reg_wr *reg_wr;
 	int i, n, dma_nents;
 	struct ib_mr *ibmr;
 	u8 key;
 
-	if (nsegs > ia->ri_max_frwr_depth)
-		nsegs = ia->ri_max_frwr_depth;
+	if (nsegs > ep->re_max_fr_depth)
+		nsegs = ep->re_max_fr_depth;
 	for (i = 0; i < nsegs;) {
 		if (seg->mr_page)
 			sg_set_page(&mr->mr_sg[i],
@@ -308,7 +303,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 
 		++seg;
 		++i;
-		if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
+		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
 			continue;
 		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
 		    offset_in_page((seg - 1)->mr_offset + (seg - 1)->mr_len))
@@ -317,7 +312,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 	mr->mr_dir = rpcrdma_data_dir(writing);
 	mr->mr_nents = i;
 
-	dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
+	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
 				  mr->mr_dir);
 	if (!dma_nents)
 		goto out_dmamap_err;
@@ -391,7 +386,6 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
  */
 int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct ib_send_wr *post_wr;
 	struct rpcrdma_mr *mr;
 
@@ -411,7 +405,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		post_wr = &frwr->fr_regwr.wr;
 	}
 
-	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
+	return ib_post_send(r_xprt->rx_ep.re_id->qp, post_wr, NULL);
 }
 
 /**
@@ -538,10 +532,10 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
-	 * unless ri_id->qp is a valid pointer.
+	 * unless re_id->qp is a valid pointer.
 	 */
 	bad_wr = NULL;
-	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
+	rc = ib_post_send(r_xprt->rx_ep.re_id->qp, first, &bad_wr);
 
 	/* The final LOCAL_INV WR in the chain is supposed to
 	 * do the wake. If it was never posted, the wake will
@@ -643,10 +637,10 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
-	 * unless ri_id->qp is a valid pointer.
+	 * unless re_id->qp is a valid pointer.
 	 */
 	bad_wr = NULL;
-	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
+	rc = ib_post_send(r_xprt->rx_ep.re_id->qp, first, &bad_wr);
 	if (!rc)
 		return;
 
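Note: every hunk above applies the same field migration from struct rpcrdma_ia to struct rpcrdma_ep: ri_id -> re_id, ri_pd -> re_pd, rep_attr -> re_attr, rep_max_requests -> re_max_requests, ri_max_rdma_segs -> re_max_rdma_segs, ri_max_frwr_depth -> re_max_fr_depth, ri_mrtype -> re_mrtype. The sketch below is only an illustration of the endpoint fields this patch reads or writes, with types inferred from how the diff uses them; it is not the actual definition from xprtrdma.h.

	/* Hypothetical sketch, not the xprtrdma.h definition: the subset of
	 * struct rpcrdma_ep that frwr_ops.c touches after this patch.
	 */
	#include <rdma/ib_verbs.h>
	#include <rdma/rdma_cm.h>

	struct rpcrdma_ep_sketch {
		struct rdma_cm_id	*re_id;		/* re_id->device, re_id->qp */
		struct ib_pd		*re_pd;		/* protection domain for ib_alloc_mr() */
		struct ib_qp_init_attr	re_attr;	/* QP caps: max_send_wr, max_recv_wr, SGEs */
		enum ib_mr_type		re_mrtype;	/* IB_MR_TYPE_MEM_REG or IB_MR_TYPE_SG_GAPS */
		unsigned int		re_max_requests;	/* credit limit, bounded by max_qp_wr */
		unsigned int		re_max_rdma_segs;	/* RDMA segments per RPC */
		unsigned int		re_max_fr_depth;	/* pages per fast-register MR */
	};

The new frwr_query_device(ep, device) signature follows the same pattern: the function only needs the endpoint it fills in plus the device being queried, not the whole rpcrdma_xprt.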