 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <net/addrconf.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
@@ -6140,33 +6141,11 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
 		!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
 }
 
-static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
-				       struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_eq *eq)
 {
-	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
-	irqreturn_t ceqe_found = IRQ_NONE;
-	u32 cqn;
-
-	while (ceqe) {
-		/* Make sure we read CEQ entry after we have checked the
-		 * ownership bit
-		 */
-		dma_rmb();
-
-		cqn = hr_reg_read(ceqe, CEQE_CQN);
-
-		hns_roce_cq_completion(hr_dev, cqn);
-
-		++eq->cons_index;
-		ceqe_found = IRQ_HANDLED;
-		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]);
-
-		ceqe = next_ceqe_sw_v2(eq);
-	}
+	queue_work(system_bh_wq, &eq->work);
 
-	update_eq_db(eq);
-
-	return IRQ_RETVAL(ceqe_found);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
@@ -6177,7 +6156,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
 
 	if (eq->type_flag == HNS_ROCE_CEQ)
 		/* Completion event interrupt */
-		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+		int_work = hns_roce_v2_ceq_int(eq);
 	else
 		/* Asynchronous event interrupt */
 		int_work = hns_roce_v2_aeq_int(hr_dev, eq);
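The two hunks above move CEQ handling out of hard-IRQ context: the MSI-X handler now only queues the event queue's work item on the BH workqueue (system_bh_wq) and returns IRQ_HANDLED, and the actual CEQE draining runs later in softirq context. A minimal sketch of that defer-to-BH-workqueue pattern follows; the names demo_eq, demo_eq_work and demo_eq_irq are illustrative only and are not part of the driver.

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_eq {
	struct work_struct work;
	/* ... per-queue state drained by the work function ... */
};

/* Runs later in BH (softirq) context, on system_bh_wq. */
static void demo_eq_work(struct work_struct *work)
{
	struct demo_eq *eq = from_work(eq, work, work);

	/* drain events for eq, update the consumer index, ring the doorbell */
	pr_debug("draining eq %p\n", eq);
}

/* The hard-IRQ handler stays minimal: defer the work and acknowledge. */
static irqreturn_t demo_eq_irq(int irq, void *dev_id)
{
	struct demo_eq *eq = dev_id;

	queue_work(system_bh_wq, &eq->work);
	return IRQ_HANDLED;
}

The work item has to be set up with INIT_WORK() before the IRQ is requested, which is exactly what the __hns_roce_request_irq() hunk further down adds.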
@@ -6545,6 +6524,34 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
+static void hns_roce_ceq_work(struct work_struct *work)
+{
+	struct hns_roce_eq *eq = from_work(eq, work, work);
+	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
+	struct hns_roce_dev *hr_dev = eq->hr_dev;
+	int ceqe_num = 0;
+	u32 cqn;
+
+	while (ceqe && ceqe_num < hr_dev->caps.ceqe_depth) {
+		/* Make sure we read CEQ entry after we have checked the
+		 * ownership bit
+		 */
+		dma_rmb();
+
+		cqn = hr_reg_read(ceqe, CEQE_CQN);
+
+		hns_roce_cq_completion(hr_dev, cqn);
+
+		++eq->cons_index;
+		++ceqe_num;
+		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]);
+
+		ceqe = next_ceqe_sw_v2(eq);
+	}
+
+	update_eq_db(eq);
+}
+
 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
 				  int comp_num, int aeq_num, int other_num)
 {
@@ -6576,21 +6583,24 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
 					 j - other_num - aeq_num);
 
 	for (j = 0; j < irq_num; j++) {
-		if (j < other_num)
+		if (j < other_num) {
 			ret = request_irq(hr_dev->irq[j],
 					  hns_roce_v2_msix_interrupt_abn,
 					  0, hr_dev->irq_names[j], hr_dev);
-
-		else if (j < (other_num + comp_num))
+		} else if (j < (other_num + comp_num)) {
+			INIT_WORK(&eq_table->eq[j - other_num].work,
+				  hns_roce_ceq_work);
 			ret = request_irq(eq_table->eq[j - other_num].irq,
 					  hns_roce_v2_msix_interrupt_eq,
 					  0, hr_dev->irq_names[j + aeq_num],
 					  &eq_table->eq[j - other_num]);
-		else
+		} else {
 			ret = request_irq(eq_table->eq[j - other_num].irq,
 					  hns_roce_v2_msix_interrupt_eq,
 					  0, hr_dev->irq_names[j - comp_num],
 					  &eq_table->eq[j - other_num]);
+		}
+
 		if (ret) {
 			dev_err(hr_dev->dev, "request irq error!\n");
 			goto err_request_failed;
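In the request path above, INIT_WORK() for the completion vectors runs before request_irq(), so the interrupt handler can never queue an uninitialized work item. Illustrative ordering only, reusing the hypothetical names from the earlier sketch:

static int demo_eq_setup(struct demo_eq *eq, int irq, const char *name)
{
	/* Initialize the work item before the IRQ can fire. */
	INIT_WORK(&eq->work, demo_eq_work);

	return request_irq(irq, demo_eq_irq, 0, name, eq);
}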
@@ -6600,12 +6610,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
 	return 0;
 
 err_request_failed:
-	for (j -= 1; j >= 0; j--)
-		if (j < other_num)
+	for (j -= 1; j >= 0; j--) {
+		if (j < other_num) {
 			free_irq(hr_dev->irq[j], hr_dev);
-		else
-			free_irq(eq_table->eq[j - other_num].irq,
-				 &eq_table->eq[j - other_num]);
+			continue;
+		}
+		free_irq(eq_table->eq[j - other_num].irq,
+			 &eq_table->eq[j - other_num]);
+		if (j < other_num + comp_num)
+			cancel_work_sync(&eq_table->eq[j - other_num].work);
+	}
 
 err_kzalloc_failed:
 	for (i -= 1; i >= 0; i--)
@@ -6626,8 +6640,11 @@ static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
 	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
 		free_irq(hr_dev->irq[i], hr_dev);
 
-	for (i = 0; i < eq_num; i++)
+	for (i = 0; i < eq_num; i++) {
 		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
+		if (i < hr_dev->caps.num_comp_vectors)
+			cancel_work_sync(&hr_dev->eq_table.eq[i].work);
+	}
 
 	for (i = 0; i < irq_num; i++)
 		kfree(hr_dev->irq_names[i]);
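On both teardown paths above (the error unwind in __hns_roce_request_irq() and __hns_roce_free_irq()), free_irq() comes before cancel_work_sync(): once the IRQ is released nothing can re-queue the work, and the cancel then waits for any CEQ work still pending or running. A sketch of that ordering, again with the hypothetical names from the earlier examples:

static void demo_eq_teardown(struct demo_eq *eq, int irq)
{
	/* After free_irq() returns the handler cannot run again, so
	 * nothing can queue eq->work behind our back.
	 */
	free_irq(irq, eq);

	/* Wait for any already-queued or running work to finish. */
	cancel_work_sync(&eq->work);
}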