@@ -73,17 +73,8 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
 	}
 }
 
-static int hns_roce_hw_create_cq(struct hns_roce_dev *dev,
-				 struct hns_roce_cmd_mailbox *mailbox,
-				 unsigned long cq_num)
-{
-	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
-				 HNS_ROCE_CMD_CREATE_CQ,
-				 HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
-			     struct hns_roce_cq *hr_cq)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+			      struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_hem_table *mtt_table;
@@ -140,7 +131,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
 	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
 
 	/* Send mailbox to hw */
-	ret = hns_roce_hw_create_cq(hr_dev, mailbox, hr_cq->cqn);
+	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+				HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret) {
 		dev_err(dev,
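(Reader's note, not part of the patch: both deleted helpers were thin single-caller shims over hns_roce_cmd_mbox(). Its declaration in hns_roce_cmd.h, which this patch leaves untouched, reads to the best of my reading:

	int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
			      u64 out_param, unsigned long in_modifier,
			      u8 op_modifier, u16 op, unsigned long timeout);

So the inlined CREATE_CQC call above maps the mailbox DMA address to in_param, the CQN to in_modifier, and 0 to op_modifier.)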
@@ -168,22 +160,15 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_hw_destroy_cq(struct hns_roce_dev *dev,
-				  struct hns_roce_cmd_mailbox *mailbox,
-				  unsigned long cq_num)
-{
-	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_CQ,
-				 HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = hr_dev->dev;
 	int ret;
 
-	ret = hns_roce_hw_destroy_cq(hr_dev, NULL, hr_cq->cqn);
+	ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+				HNS_ROCE_CMD_DESTROY_CQC,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
 	if (ret)
 		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);
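(Reader's note: the deleted hns_roce_hw_destroy_cq() was only ever called with a NULL mailbox, as the removed call above shows, so its two conditionals always collapsed the same way:

	/*
	 * With mailbox == NULL in the sole caller:
	 *	mailbox ? mailbox->dma : 0	evaluates to 0 (out_param)
	 *	mailbox ? 0 : 1			evaluates to 1 (op_modifier,
	 *					i.e. no output mailbox expected)
	 */

which is exactly what the inlined hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1, ...) call now hard-codes.)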
@@ -202,10 +187,9 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
 
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
-				   struct hns_roce_cq *hr_cq,
-				   struct hns_roce_ib_create_cq ucmd,
-				   struct ib_udata *udata)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+		       struct hns_roce_ib_create_cq ucmd,
+		       struct ib_udata *udata)
 {
 	struct hns_roce_buf *buf = &hr_cq->buf;
 	struct hns_roce_mtt *mtt = &hr_cq->mtt;
@@ -243,8 +227,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq *hr_cq)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_buf *buf = &hr_cq->buf;
 	struct hns_roce_mtt *mtt = &hr_cq->mtt;
@@ -280,8 +263,7 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq *hr_cq)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
 }
@@ -303,7 +285,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
 	}
 
 	/* Get user space address, write it into mtt table */
-	ret = hns_roce_ib_get_cq_umem(hr_dev, hr_cq, ucmd, udata);
+	ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
 	if (ret) {
 		dev_err(dev, "Failed to get_cq_umem.\n");
 		return ret;
@@ -347,7 +329,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
 	}
 
 	/* Init mtt table and write buff address to mtt table */
-	ret = hns_roce_ib_alloc_cq_buf(hr_dev, hr_cq);
+	ret = alloc_cq_buf(hr_dev, hr_cq);
 	if (ret) {
 		dev_err(dev, "Failed to alloc_cq_buf.\n");
 		goto err_db;
@@ -385,15 +367,14 @@ static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cq *hr_cq)
 {
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-	hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
+	free_cq_buf(hr_dev, hr_cq);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 		hns_roce_free_db(hr_dev, &hr_cq->db);
 }
 
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
-			  const struct ib_cq_init_attr *attr,
-			  struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+		       struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_ib_create_cq_resp resp = {};
@@ -438,8 +419,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 		}
 	}
 
-	/* Allocate cq index, fill cq_context */
-	ret = hns_roce_cq_alloc(hr_dev, hr_cq);
+	ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
 	if (ret) {
 		dev_err(dev, "Alloc CQ failed(%d).\n", ret);
 		goto err_dbmap;
@@ -468,7 +448,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	return 0;
 
 err_cqc:
-	hns_roce_free_cq(hr_dev, hr_cq);
+	hns_roce_free_cqc(hr_dev, hr_cq);
 
 err_dbmap:
 	if (udata)
@@ -480,7 +460,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	return ret;
 }
 
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -490,7 +470,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 		return;
 	}
 
-	hns_roce_free_cq(hr_dev, hr_cq);
+	hns_roce_free_cqc(hr_dev, hr_cq);
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 
 	ib_umem_release(hr_cq->umem);
@@ -503,7 +483,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 				   &hr_cq->db);
 	} else {
 		/* Free the buff of stored cq */
-		hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
+		free_cq_buf(hr_dev, hr_cq);
 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 			hns_roce_free_db(hr_dev, &hr_cq->db);
 	}