@@ -39,40 +39,6 @@
 #include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
-        struct ib_cq *ibcq = &hr_cq->ib_cq;
-
-        ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
-                                 enum hns_roce_event event_type)
-{
-        struct hns_roce_dev *hr_dev;
-        struct ib_event event;
-        struct ib_cq *ibcq;
-
-        ibcq = &hr_cq->ib_cq;
-        hr_dev = to_hr_dev(ibcq->device);
-
-        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
-            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
-            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-                dev_err(hr_dev->dev,
-                        "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
-                        event_type, hr_cq->cqn);
-                return;
-        }
-
-        if (ibcq->event_handler) {
-                event.device = ibcq->device;
-                event.event = IB_EVENT_CQ_ERR;
-                event.element.cq = ibcq;
-                ibcq->event_handler(&event, ibcq->cq_context);
-        }
-}
-
 static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cq *hr_cq)
 {
@@ -434,10 +400,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
         if (!udata && hr_cq->tptr_addr)
                 *hr_cq->tptr_addr = 0;
 
-        /* Get created cq handler and carry out event */
-        hr_cq->comp = hns_roce_ib_cq_comp;
-        hr_cq->event = hns_roce_ib_cq_event;
-
         if (udata) {
                 resp.cqn = hr_cq->cqn;
                 ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -491,38 +453,57 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-        struct device *dev = hr_dev->dev;
-        struct hns_roce_cq *cq;
+        struct hns_roce_cq *hr_cq;
+        struct ib_cq *ibcq;
 
-        cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
-        if (!cq) {
-                dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+        hr_cq = xa_load(&hr_dev->cq_table.array,
+                        cqn & (hr_dev->caps.num_cqs - 1));
+        if (!hr_cq) {
+                dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+                         cqn);
                 return;
         }
 
-        ++cq->arm_sn;
-        cq->comp(cq);
+        ++hr_cq->arm_sn;
+        ibcq = &hr_cq->ib_cq;
+        if (ibcq->comp_handler)
+                ibcq->comp_handler(ibcq, ibcq->cq_context);
 }
 
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
-        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
         struct device *dev = hr_dev->dev;
-        struct hns_roce_cq *cq;
+        struct hns_roce_cq *hr_cq;
+        struct ib_event event;
+        struct ib_cq *ibcq;
 
-        cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
-        if (cq)
-                atomic_inc(&cq->refcount);
+        hr_cq = xa_load(&hr_dev->cq_table.array,
+                        cqn & (hr_dev->caps.num_cqs - 1));
+        if (!hr_cq) {
+                dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+                return;
+        }
 
-        if (!cq) {
-                dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+                dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+                        event_type, cqn);
                 return;
         }
 
-        cq->event(cq, (enum hns_roce_event)event_type);
+        atomic_inc(&hr_cq->refcount);
 
-        if (atomic_dec_and_test(&cq->refcount))
-                complete(&cq->free);
+        ibcq = &hr_cq->ib_cq;
+        if (ibcq->event_handler) {
+                event.device = ibcq->device;
+                event.element.cq = ibcq;
+                event.event = IB_EVENT_CQ_ERR;
+                ibcq->event_handler(&event, ibcq->cq_context);
+        }
+
+        if (atomic_dec_and_test(&hr_cq->refcount))
+                complete(&hr_cq->free);
 }
 
 int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
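For context on the handlers the reworked functions now invoke: `ibcq->comp_handler` and `ibcq->event_handler` are supplied by the kernel consumer when it creates the CQ, so dispatching to them directly removes the indirection through the driver-private `hr_cq->comp`/`hr_cq->event` pointers deleted in the first hunk. Below is a minimal sketch of such a consumer; `my_comp_handler`, `my_event_handler`, and `create_example_cq` are hypothetical names, while `ib_create_cq()`, `ib_poll_cq()`, and `struct ib_cq_init_attr` are the standard in-kernel verbs API.

    #include <rdma/ib_verbs.h>

    /* Invoked from hns_roce_cq_completion() through ibcq->comp_handler. */
    static void my_comp_handler(struct ib_cq *cq, void *cq_context)
    {
            struct ib_wc wc;

            /* Drain completions; real consumers usually defer this to a
             * workqueue or tasklet rather than polling inline. */
            while (ib_poll_cq(cq, 1, &wc) > 0)
                    pr_info("CQE completed with status %d\n", wc.status);
    }

    /* Invoked from hns_roce_cq_event() through ibcq->event_handler. */
    static void my_event_handler(struct ib_event *event, void *cq_context)
    {
            pr_warn("CQ async error event %d\n", event->event);
    }

    static struct ib_cq *create_example_cq(struct ib_device *device, void *ctx)
    {
            struct ib_cq_init_attr cq_attr = { .cqe = 64, .comp_vector = 0 };

            /* The handlers registered here are what the driver code above
             * dereferences as ibcq->comp_handler and ibcq->event_handler. */
            return ib_create_cq(device, my_comp_handler, my_event_handler,
                                ctx, &cq_attr);
    }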
|