@@ -102,21 +102,26 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
 	struct mana_ib_create_qp_rss_resp resp = {};
 	struct mana_ib_create_qp_rss ucmd = {};
+	struct gdma_queue **gdma_cq_allocated;
 	mana_handle_t *mana_ind_table;
 	struct mana_port_context *mpc;
+	struct gdma_queue *gdma_cq;
 	unsigned int ind_tbl_size;
 	struct mana_context *mc;
 	struct net_device *ndev;
+	struct gdma_context *gc;
 	struct mana_ib_cq *cq;
 	struct mana_ib_wq *wq;
 	struct gdma_dev *gd;
+	struct mana_eq *eq;
 	struct ib_cq *ibcq;
 	struct ib_wq *ibwq;
 	int i = 0;
 	u32 port;
 	int ret;

-	gd = &mdev->gdma_dev->gdma_context->mana;
+	gc = mdev->gdma_dev->gdma_context;
+	gd = &gc->mana;
 	mc = gd->driver_data;

 	if (!udata || udata->inlen < sizeof(ucmd))
@@ -179,6 +184,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		goto fail;
 	}

+	gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
+				    GFP_KERNEL);
+	if (!gdma_cq_allocated) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
 	qp->port = port;

 	for (i = 0; i < ind_tbl_size; i++) {
@@ -197,12 +209,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		cq_spec.gdma_region = cq->gdma_region;
 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
 		cq_spec.modr_ctx_id = 0;
-		cq_spec.attached_eq = GDMA_CQ_NO_EQ;
+		eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
+		cq_spec.attached_eq = eq->eq->id;

 		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
 					 &wq_spec, &cq_spec, &wq->rx_object);
-		if (ret)
+		if (ret) {
+			/* Do cleanup starting with index i-1 */
+			i--;
 			goto fail;
+		}

 		/* The GDMA regions are now owned by the WQ object */
 		wq->gdma_region = GDMA_INVALID_DMA_REGION;
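
A note on the EQ selection introduced above: taking the CQ's comp_vector modulo the device's max_num_queues maps each completion queue onto one of the EQs the mana Ethernet driver already owns (mc->eqs), so completion interrupts are spread round-robin across event queues. For example, assuming a device exposing 8 EQs, a CQ created with comp_vector 10 would attach to EQ 10 % 8 = 2.
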
@@ -219,6 +235,21 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		resp.entries[i].wqid = wq->id;

 		mana_ind_table[i] = wq->rx_object;
+
+		/* Create CQ table entry */
+		WARN_ON(gc->cq_table[cq->id]);
+		gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+		if (!gdma_cq) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		gdma_cq_allocated[i] = gdma_cq;
+
+		gdma_cq->cq.context = cq;
+		gdma_cq->type = GDMA_CQ;
+		gdma_cq->cq.callback = mana_ib_cq_handler;
+		gdma_cq->id = cq->id;
+		gc->cq_table[cq->id] = gdma_cq;
 	}
 	resp.num_entries = i;

@@ -238,17 +269,25 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		goto fail;
 	}

+	kfree(gdma_cq_allocated);
 	kfree(mana_ind_table);

 	return 0;

 fail:
 	while (i-- > 0) {
 		ibwq = ind_tbl->ind_tbl[i];
+		ibcq = ibwq->cq;
 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+
+		gc->cq_table[cq->id] = NULL;
+		kfree(gdma_cq_allocated[i]);
+
 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
 	}

+	kfree(gdma_cq_allocated);
 	kfree(mana_ind_table);

 	return ret;
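
The five-line CQ-table installation added to the loop above is repeated verbatim in mana_ib_create_qp_raw below. A minimal sketch of how it could be factored into a shared helper, assuming the field layout used in this patch (the helper name and the -EEXIST policy are hypothetical, not part of the change):

static int mana_ib_install_cq_cb(struct gdma_context *gc, struct mana_ib_cq *cq)
{
	struct gdma_queue *gdma_cq;

	/* A slot for this CQ id must not already be populated */
	if (WARN_ON(gc->cq_table[cq->id]))
		return -EEXIST;

	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	/* Same field setup as the open-coded blocks in this patch */
	gdma_cq->cq.context = cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = cq->id;
	gc->cq_table[cq->id] = gdma_cq;
	return 0;
}
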
@@ -270,14 +309,17 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
 	struct mana_ib_create_qp_resp resp = {};
 	struct mana_ib_create_qp ucmd = {};
+	struct gdma_queue *gdma_cq = NULL;
 	struct mana_obj_spec wq_spec = {};
 	struct mana_obj_spec cq_spec = {};
 	struct mana_port_context *mpc;
 	struct mana_context *mc;
 	struct net_device *ndev;
 	struct ib_umem *umem;
-	int err;
+	struct mana_eq *eq;
+	int eq_vec;
 	u32 port;
+	int err;

 	mc = gd->driver_data;

@@ -354,7 +396,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	cq_spec.gdma_region = send_cq->gdma_region;
 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
 	cq_spec.modr_ctx_id = 0;
-	cq_spec.attached_eq = GDMA_CQ_NO_EQ;
+	eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
+	eq = &mc->eqs[eq_vec];
+	cq_spec.attached_eq = eq->eq->id;

 	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
 				 &cq_spec, &qp->tx_object);
@@ -372,6 +416,20 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	qp->sq_id = wq_spec.queue_index;
 	send_cq->id = cq_spec.queue_index;

+	/* Create CQ table entry */
+	WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
+	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+	if (!gdma_cq) {
+		err = -ENOMEM;
+		goto err_destroy_wq_obj;
+	}
+
+	gdma_cq->cq.context = send_cq;
+	gdma_cq->type = GDMA_CQ;
+	gdma_cq->cq.callback = mana_ib_cq_handler;
+	gdma_cq->id = send_cq->id;
+	gd->gdma_context->cq_table[send_cq->id] = gdma_cq;
+
 	ibdev_dbg(&mdev->ib_dev,
 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
 		  qp->tx_object, qp->sq_id, send_cq->id);
@@ -385,11 +443,15 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 		ibdev_dbg(&mdev->ib_dev,
 			  "Failed copy udata for create qp-raw, %d\n",
 			  err);
-		goto err_destroy_wq_obj;
+		goto err_release_gdma_cq;
 	}

 	return 0;

+err_release_gdma_cq:
+	kfree(gdma_cq);
+	gd->gdma_context->cq_table[send_cq->id] = NULL;
+
 err_destroy_wq_obj:
 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

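
With attached_eq now set, completion notifications for these CQs arrive on a hardware EQ instead of going unmonitored (GDMA_CQ_NO_EQ), and the GDMA interrupt path can look the CQ up in gc->cq_table by id and invoke the registered callback. A sketch of what mana_ib_cq_handler plausibly does, assuming the callback convention of (context, queue) and that the context is the mana_ib_cq installed above, is to relay the event to the ib_cq consumer:

static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	/* Relay the hardware completion event to the ib_cq consumer */
	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}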