@@ -96,6 +96,7 @@ struct nvme_rdma_queue {
 	int			cm_error;
 	struct completion	cm_done;
 	bool			pi_support;
+	int			cq_size;
 };
 
 struct nvme_rdma_ctrl {
@@ -275,6 +276,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
 	init_attr.recv_cq = queue->ib_cq;
 	if (queue->pi_support)
 		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+	init_attr.qp_context = queue;
 
 	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
 
@@ -409,6 +411,14 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	return NULL;
 }
 
+static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
+{
+	if (nvme_rdma_poll_queue(queue))
+		ib_free_cq(queue->ib_cq);
+	else
+		ib_cq_pool_put(queue->ib_cq, queue->cq_size);
+}
+
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_device *dev;
@@ -430,7 +440,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 	 * the destruction of the QP shouldn't use rdma_cm API.
 	 */
 	ib_destroy_qp(queue->qp);
-	ib_free_cq(queue->ib_cq);
+	nvme_rdma_free_cq(queue);
 
 	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
 			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
@@ -450,13 +460,42 @@ static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
 	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
 }
 
+static int nvme_rdma_create_cq(struct ib_device *ibdev,
+		struct nvme_rdma_queue *queue)
+{
+	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
+	enum ib_poll_context poll_ctx;
+
+	/*
+	 * Spread I/O queues completion vectors according their queue index.
+	 * Admin queues can always go on completion vector 0.
+	 */
+	comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
+
+	/* Polling queues need direct cq polling context */
+	if (nvme_rdma_poll_queue(queue)) {
+		poll_ctx = IB_POLL_DIRECT;
+		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
+					   comp_vector, poll_ctx);
+	} else {
+		poll_ctx = IB_POLL_SOFTIRQ;
+		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
+					      comp_vector, poll_ctx);
+	}
+
+	if (IS_ERR(queue->ib_cq)) {
+		ret = PTR_ERR(queue->ib_cq);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
 	struct ib_device *ibdev;
 	const int send_wr_factor = 3;			/* MR, SEND, INV */
 	const int cq_factor = send_wr_factor + 1;	/* + RECV */
-	int comp_vector, idx = nvme_rdma_queue_idx(queue);
-	enum ib_poll_context poll_ctx;
 	int ret, pages_per_mr;
 
 	queue->device = nvme_rdma_find_get_device(queue->cm_id);
@@ -467,26 +506,12 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 	}
 	ibdev = queue->device->dev;
 
-	/*
-	 * Spread I/O queues completion vectors according their queue index.
-	 * Admin queues can always go on completion vector 0.
-	 */
-	comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
-
-	/* Polling queues need direct cq polling context */
-	if (nvme_rdma_poll_queue(queue))
-		poll_ctx = IB_POLL_DIRECT;
-	else
-		poll_ctx = IB_POLL_SOFTIRQ;
-
 	/* +1 for ib_stop_cq */
-	queue->ib_cq = ib_alloc_cq(ibdev, queue,
-				cq_factor * queue->queue_size + 1,
-				comp_vector, poll_ctx);
-	if (IS_ERR(queue->ib_cq)) {
-		ret = PTR_ERR(queue->ib_cq);
+	queue->cq_size = cq_factor * queue->queue_size + 1;
+
+	ret = nvme_rdma_create_cq(ibdev, queue);
+	if (ret)
 		goto out_put_dev;
-	}
 
 	ret = nvme_rdma_create_qp(queue, send_wr_factor);
 	if (ret)
@@ -512,7 +537,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 	if (ret) {
 		dev_err(queue->ctrl->ctrl.device,
 			"failed to initialize MR pool sized %d for QID %d\n",
-			queue->queue_size, idx);
+			queue->queue_size, nvme_rdma_queue_idx(queue));
 		goto out_destroy_ring;
 	}
 
@@ -523,7 +548,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 		if (ret) {
 			dev_err(queue->ctrl->ctrl.device,
 				"failed to initialize PI MR pool sized %d for QID %d\n",
-				queue->queue_size, nvme_rdma_queue_idx(queue));
 			goto out_destroy_mr_pool;
 		}
 	}
@@ -540,7 +565,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 out_destroy_qp:
 	rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
-	ib_free_cq(queue->ib_cq);
+	nvme_rdma_free_cq(queue);
 out_put_dev:
 	nvme_rdma_dev_put(queue->device);
 	return ret;
@@ -1163,7 +1188,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req)
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
 		const char *op)
 {
-	struct nvme_rdma_queue *queue = cq->cq_context;
+	struct nvme_rdma_queue *queue = wc->qp->qp_context;
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -1706,7 +1731,7 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvme_rdma_qe *qe =
 		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
-	struct nvme_rdma_queue *queue = cq->cq_context;
+	struct nvme_rdma_queue *queue = wc->qp->qp_context;
 	struct ib_device *ibdev = queue->device->dev;
 	struct nvme_completion *cqe = qe->data;
 	const size_t len = sizeof(struct nvme_completion);
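The pattern this patch adopts is the RDMA core's shared CQ pool: instead of allocating a private completion queue per queue pair with ib_alloc_cq(), a consumer borrows cq_size entries from a per-device pool with ib_cq_pool_get() and returns them with ib_cq_pool_put(). Because a pooled CQ can be shared by several queues, cq->cq_context no longer identifies a single queue; the patch therefore stores the queue in the QP's qp_context and recovers it in completion handlers via wc->qp->qp_context. Polling queues keep a private CQ, since the pool does not serve IB_POLL_DIRECT contexts. The following is a minimal sketch of that pattern, assuming only the in-kernel ib_cq_pool_get()/ib_cq_pool_put() API; the my_* names are hypothetical and not part of the patch.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-queue state, playing the role of nvme_rdma_queue. */
struct my_queue {
	struct ib_cq	*ib_cq;
	struct ib_qp	*qp;
	int		cq_size;
};

/*
 * Completion handler: on a shared CQ, cq->cq_context is meaningless to
 * any single user, so the owning queue is recovered from the QP that
 * generated the work completion.
 */
static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_queue *queue = wc->qp->qp_context;

	/* ... per-queue completion handling ... */
}

static int my_queue_init(struct ib_device *ibdev, struct my_queue *queue,
			 int comp_vector)
{
	struct ib_qp_init_attr init_attr = {};

	/* Borrow cq_size entries from the device's shared CQ pool. */
	queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, comp_vector,
				      IB_POLL_SOFTIRQ);
	if (IS_ERR(queue->ib_cq))
		return PTR_ERR(queue->ib_cq);

	/* qp_context lets my_done() map a completion back to this queue. */
	init_attr.qp_context = queue;
	init_attr.send_cq = queue->ib_cq;
	init_attr.recv_cq = queue->ib_cq;
	/* ... fill cap/qp_type and call rdma_create_qp() as usual ... */

	return 0;
}

static void my_queue_teardown(struct my_queue *queue)
{
	/* Return the borrowed entries; the CQ itself may remain shared. */
	ib_cq_pool_put(queue->ib_cq, queue->cq_size);
}

The same split as in the patch applies: a queue that needs IB_POLL_DIRECT would fall back to ib_alloc_cq()/ib_free_cq(), which is exactly the branch nvme_rdma_create_cq() and nvme_rdma_free_cq() take for polling queues above.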