
Commit c15d780

longlimsft authored and rleon committed
RDMA/mana_ib: Add CQ interrupt support for RAW QP
At probing time, the MANA core code allocates EQs for supporting interrupts on Ethernet queues. The same interrupt mechanism is also used by RAW QPs. Use the same EQs for delivering interrupts on the CQs of RAW QPs.

Signed-off-by: Long Li <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Leon Romanovsky <[email protected]>
1 parent 2c20e20 commit c15d780
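With an EQ attached to the CQs of a RAW QP, completion interrupts can reach userspace through the usual verbs notification path. Below is a minimal, illustrative sketch of how a consumer might exercise this with standard rdma-core calls; it is not part of this commit, and the CQ size and completion vector values are arbitrary.

/* Sketch: arm a CQ for interrupt-driven completions. All numeric values
 * are illustrative; error cleanup is omitted for brevity. */
#include <infiniband/verbs.h>

static int arm_cq_example(struct ibv_context *ctx)
{
	struct ibv_comp_channel *ch = ibv_create_comp_channel(ctx);
	struct ibv_cq *cq, *ev_cq;
	void *ev_ctx;

	if (!ch)
		return -1;

	/* comp_vector selects which interrupt/EQ the provider attaches the
	 * CQ to; with this commit the mana provider maps it onto the EQs
	 * created for the Ethernet queues. */
	cq = ibv_create_cq(ctx, 64, NULL, ch, 0 /* comp_vector */);
	if (!cq)
		return -1;

	ibv_req_notify_cq(cq, 0);              /* ask for the next completion event */
	ibv_get_cq_event(ch, &ev_cq, &ev_ctx); /* blocks until the CQ interrupt fires */
	ibv_ack_cq_events(ev_cq, 1);
	return 0;
}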

File tree: 3 files changed, +102 -7 lines changed

drivers/infiniband/hw/mana/cq.c

Lines changed: 31 additions & 1 deletion
@@ -12,13 +12,20 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_create_cq ucmd = {};
 	struct mana_ib_dev *mdev;
+	struct gdma_context *gc;
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+	gc = mdev->gdma_dev->gdma_context;
 
 	if (udata->inlen < sizeof(ucmd))
 		return -EINVAL;
 
+	if (attr->comp_vector > gc->max_num_queues)
+		return -EINVAL;
+
+	cq->comp_vector = attr->comp_vector;
+
 	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
 	if (err) {
 		ibdev_dbg(ibdev,
@@ -56,6 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	/*
 	 * The CQ ID is not known at this time. The ID is generated at create_qp
 	 */
+	cq->id = INVALID_QUEUE_ID;
 
 	return 0;
 
@@ -69,11 +77,33 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_dev *mdev;
+	struct gdma_context *gc;
+	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+	gc = mdev->gdma_dev->gdma_context;
+
+	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+	if (err) {
+		ibdev_dbg(ibdev,
+			  "Failed to destroy dma region, %d\n", err);
+		return err;
+	}
+
+	if (cq->id != INVALID_QUEUE_ID) {
+		kfree(gc->cq_table[cq->id]);
+		gc->cq_table[cq->id] = NULL;
+	}
 
-	mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
 	ib_umem_release(cq->umem);
 
 	return 0;
 }
+
+void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+{
+	struct mana_ib_cq *cq = ctx;
+
+	if (cq->ibcq.comp_handler)
+		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
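mana_ib_cq_handler() is not called directly by the RDMA driver; it is registered via gc->cq_table below and invoked from the MANA core's EQ interrupt path when a completion EQE arrives for the CQ. The following is a simplified sketch of that dispatch as implied by the registration in this patch; the function name and surrounding control flow are illustrative, only cq_table, cq.callback, and cq.context are taken from the diff.

/* Illustrative sketch of the EQ-side dispatch implied by the cq_table
 * registration in this patch; not the literal MANA core code. */
static void example_handle_completion_eqe(struct gdma_context *gc, u32 cq_id)
{
	struct gdma_queue *cq = gc->cq_table[cq_id];

	/* CQs registered by this patch carry mana_ib_cq_handler as the
	 * callback and the mana_ib_cq as the context. */
	if (cq && cq->cq.callback)
		cq->cq.callback(cq->cq.context, cq);
}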

drivers/infiniband/hw/mana/mana_ib.h

Lines changed: 3 additions & 0 deletions
@@ -86,6 +86,7 @@ struct mana_ib_cq {
 	int cqe;
 	u64 gdma_region;
 	u64 id;
+	u32 comp_vector;
 };
 
 struct mana_ib_qp {
@@ -209,4 +210,6 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
 void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
 
 int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+
+void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq);
 #endif

drivers/infiniband/hw/mana/qp.c

Lines changed: 68 additions & 6 deletions
@@ -102,21 +102,26 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
 	struct mana_ib_create_qp_rss_resp resp = {};
 	struct mana_ib_create_qp_rss ucmd = {};
+	struct gdma_queue **gdma_cq_allocated;
 	mana_handle_t *mana_ind_table;
 	struct mana_port_context *mpc;
+	struct gdma_queue *gdma_cq;
 	unsigned int ind_tbl_size;
 	struct mana_context *mc;
 	struct net_device *ndev;
+	struct gdma_context *gc;
 	struct mana_ib_cq *cq;
 	struct mana_ib_wq *wq;
 	struct gdma_dev *gd;
+	struct mana_eq *eq;
 	struct ib_cq *ibcq;
 	struct ib_wq *ibwq;
 	int i = 0;
 	u32 port;
 	int ret;
 
-	gd = &mdev->gdma_dev->gdma_context->mana;
+	gc = mdev->gdma_dev->gdma_context;
+	gd = &gc->mana;
 	mc = gd->driver_data;
 
 	if (!udata || udata->inlen < sizeof(ucmd))
@@ -179,6 +184,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		goto fail;
 	}
 
+	gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
+				    GFP_KERNEL);
+	if (!gdma_cq_allocated) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
 	qp->port = port;
 
 	for (i = 0; i < ind_tbl_size; i++) {
@@ -197,12 +209,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		cq_spec.gdma_region = cq->gdma_region;
 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
 		cq_spec.modr_ctx_id = 0;
-		cq_spec.attached_eq = GDMA_CQ_NO_EQ;
+		eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
+		cq_spec.attached_eq = eq->eq->id;
 
 		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
 					 &wq_spec, &cq_spec, &wq->rx_object);
-		if (ret)
+		if (ret) {
+			/* Do cleanup starting with index i-1 */
+			i--;
 			goto fail;
+		}
 
 		/* The GDMA regions are now owned by the WQ object */
 		wq->gdma_region = GDMA_INVALID_DMA_REGION;
@@ -219,6 +235,21 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		resp.entries[i].wqid = wq->id;
 
 		mana_ind_table[i] = wq->rx_object;
+
+		/* Create CQ table entry */
+		WARN_ON(gc->cq_table[cq->id]);
+		gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+		if (!gdma_cq) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		gdma_cq_allocated[i] = gdma_cq;
+
+		gdma_cq->cq.context = cq;
+		gdma_cq->type = GDMA_CQ;
+		gdma_cq->cq.callback = mana_ib_cq_handler;
+		gdma_cq->id = cq->id;
+		gc->cq_table[cq->id] = gdma_cq;
 	}
 	resp.num_entries = i;
 
@@ -238,17 +269,25 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		goto fail;
 	}
 
+	kfree(gdma_cq_allocated);
 	kfree(mana_ind_table);
 
 	return 0;
 
 fail:
 	while (i-- > 0) {
 		ibwq = ind_tbl->ind_tbl[i];
+		ibcq = ibwq->cq;
 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+
+		gc->cq_table[cq->id] = NULL;
+		kfree(gdma_cq_allocated[i]);
+
 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
 	}
 
+	kfree(gdma_cq_allocated);
 	kfree(mana_ind_table);
 
 	return ret;
@@ -270,14 +309,17 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
 	struct mana_ib_create_qp_resp resp = {};
 	struct mana_ib_create_qp ucmd = {};
+	struct gdma_queue *gdma_cq = NULL;
 	struct mana_obj_spec wq_spec = {};
 	struct mana_obj_spec cq_spec = {};
 	struct mana_port_context *mpc;
 	struct mana_context *mc;
 	struct net_device *ndev;
 	struct ib_umem *umem;
-	int err;
+	struct mana_eq *eq;
+	int eq_vec;
 	u32 port;
+	int err;
 
 	mc = gd->driver_data;
 
@@ -354,7 +396,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	cq_spec.gdma_region = send_cq->gdma_region;
 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
 	cq_spec.modr_ctx_id = 0;
-	cq_spec.attached_eq = GDMA_CQ_NO_EQ;
+	eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
+	eq = &mc->eqs[eq_vec];
+	cq_spec.attached_eq = eq->eq->id;
 
 	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
 				 &cq_spec, &qp->tx_object);
@@ -372,6 +416,20 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	qp->sq_id = wq_spec.queue_index;
 	send_cq->id = cq_spec.queue_index;
 
+	/* Create CQ table entry */
+	WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
+	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+	if (!gdma_cq) {
+		err = -ENOMEM;
+		goto err_destroy_wq_obj;
+	}
+
+	gdma_cq->cq.context = send_cq;
+	gdma_cq->type = GDMA_CQ;
+	gdma_cq->cq.callback = mana_ib_cq_handler;
+	gdma_cq->id = send_cq->id;
+	gd->gdma_context->cq_table[send_cq->id] = gdma_cq;
+
 	ibdev_dbg(&mdev->ib_dev,
 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
 		  qp->tx_object, qp->sq_id, send_cq->id);
@@ -385,11 +443,15 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 		ibdev_dbg(&mdev->ib_dev,
 			  "Failed copy udata for create qp-raw, %d\n",
 			  err);
-		goto err_destroy_wq_obj;
+		goto err_release_gdma_cq;
 	}
 
 	return 0;
 
+err_release_gdma_cq:
+	kfree(gdma_cq);
+	gd->gdma_context->cq_table[send_cq->id] = NULL;
+
 err_destroy_wq_obj:
 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
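Both the RSS path and the RAW path above pick the EQ with the same modulo rule, comp_vector % max_num_queues, so any completion vector supplied at CQ creation lands on one of the EQs the MANA Ethernet driver already allocated at probe time. A small hypothetical helper expressing that mapping is sketched below; the helper itself does not exist in the patch, only the fields it reads do.

/* Hypothetical helper expressing the EQ selection used in both
 * mana_ib_create_qp_rss() and mana_ib_create_qp_raw() above. */
static struct mana_eq *example_pick_eq(struct mana_context *mc,
				       struct gdma_context *gc,
				       u32 comp_vector)
{
	/* Wrap the requested completion vector onto the EQs created by the
	 * MANA Ethernet driver at probe time. */
	return &mc->eqs[comp_vector % gc->max_num_queues];
}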
