
Commit ca0f1a8

yaminf authored and Christoph Hellwig committed
nvmet-rdma: use new shared CQ mechanism
Switch the driver to shared CQs, providing a ~10%-20% performance improvement when multiple disks are used. Instead of opening a CQ for each QP per controller, the RDMA core driver provides a CQ for each core that is shared between the QPs on that core, reducing interrupt overhead.

Signed-off-by: Yamin Friedman <[email protected]>
Signed-off-by: Max Gurtovoy <[email protected]>
Reviewed-by: Or Gerlitz <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 287f329
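As context for the diff below, here is a minimal before/after sketch of the allocation change. It reuses the names from the driver code (ndev, queue, nr_cqe, comp_vector) and illustrates the API swap; it is a sketch, not a drop-in patch:

/* Before: allocate a private CQ per queue, released with ib_free_cq(). */
queue->cq = ib_alloc_cq(ndev->device, queue, nr_cqe + 1,
			queue->comp_vector, IB_POLL_WORKQUEUE);
/* ... queue lifetime ... */
ib_free_cq(queue->cq);

/* After: reserve nr_cqe + 1 entries on a per-core CQ owned by the
 * RDMA core pool. The pool tracks per-user reservations, so the same
 * CQE count must be passed back when the CQ is returned. */
queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
			   queue->comp_vector, IB_POLL_WORKQUEUE);
/* ... queue lifetime ... */
ib_cq_pool_put(queue->cq, nr_cqe + 1);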

1 file changed, 7 insertions(+), 7 deletions(-)

drivers/nvme/target/rdma.c

Lines changed: 7 additions & 7 deletions
@@ -752,7 +752,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 	u16 status = 0;
 
 	WARN_ON(rsp->n_rdma <= 0);
@@ -1008,7 +1008,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_cmd *cmd =
 		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 	struct nvmet_rdma_rsp *rsp;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -1258,9 +1258,8 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 	 */
 	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
 
-	queue->cq = ib_alloc_cq(ndev->device, queue,
-			nr_cqe + 1, queue->comp_vector,
-			IB_POLL_WORKQUEUE);
+	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+				   queue->comp_vector, IB_POLL_WORKQUEUE);
 	if (IS_ERR(queue->cq)) {
 		ret = PTR_ERR(queue->cq);
 		pr_err("failed to create CQ cqe= %d ret= %d\n",
@@ -1322,7 +1321,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 err_destroy_qp:
 	rdma_destroy_qp(queue->cm_id);
 err_destroy_cq:
-	ib_free_cq(queue->cq);
+	ib_cq_pool_put(queue->cq, nr_cqe + 1);
 	goto out;
 }
 
@@ -1332,7 +1331,8 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 	if (queue->cm_id)
 		rdma_destroy_id(queue->cm_id);
 	ib_destroy_qp(queue->qp);
-	ib_free_cq(queue->cq);
+	ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
+		       queue->send_queue_size + 1);
 }
 
 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)

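A note on the first two hunks: once the CQ is shared between QPs, possibly belonging to different controllers, cq->cq_context can no longer identify a single queue, so the completion handlers recover it from the QP that produced the work completion. A minimal sketch of that pattern, assuming the queue pointer was stored in ib_qp_init_attr.qp_context before rdma_create_qp() (the handler name here is hypothetical):

static void example_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* The CQ is shared, so cq->cq_context cannot point at this
	 * queue; the owning queue hangs off the completed QP. */
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		return; /* per-queue error handling would go here */

	/* normal per-queue completion handling follows, as in the
	 * nvmet_rdma_read_data_done()/nvmet_rdma_recv_done() hunks */
}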
Comments (0)