@@ -290,7 +290,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	struct bnxt_qplib_hwq *hwq;
 	u32 sw_prod, cmdq_prod;
 	struct pci_dev *pdev;
-	unsigned long flags;
 	u16 cookie;
 	u8 *preq;
 
@@ -301,7 +300,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	/* Cmdq are in 16-byte units, each request can consume 1 or more
 	 * cmdqe
 	 */
-	spin_lock_irqsave(&hwq->lock, flags);
+	spin_lock_bh(&hwq->lock);
 	required_slots = bnxt_qplib_get_cmd_slots(msg->req);
 	free_slots = HWQ_FREE_SLOTS(hwq);
 	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
@@ -311,7 +310,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 		dev_info_ratelimited(&pdev->dev,
 				     "CMDQ is full req/free %d/%d!",
 				     required_slots, free_slots);
-		spin_unlock_irqrestore(&hwq->lock, flags);
+		spin_unlock_bh(&hwq->lock);
 		return -EAGAIN;
 	}
 	if (msg->block)
@@ -367,7 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	wmb();
 	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
 	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
-	spin_unlock_irqrestore(&hwq->lock, flags);
+	spin_unlock_bh(&hwq->lock);
 	/* Return the CREQ response pointer */
 	return 0;
 }
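The hunks above switch hwq->lock in __send_message() from the irqsave variants to the _bh variants, presumably because the lock is only contended between process context and the CREQ tasklet (softirq), never from hard-IRQ context, so the on-stack flags word can be dropped. A minimal standalone sketch of that pattern follows; demo_hwq and demo_post() are illustrative names, not driver symbols.

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_hwq {
	spinlock_t lock;
	u32 prod;
};

static void demo_post(struct demo_hwq *hwq)
{
	/* Before: unsigned long flags; spin_lock_irqsave(&hwq->lock, flags); */
	spin_lock_bh(&hwq->lock);	/* also keeps the softirq-side consumer out */
	hwq->prod++;
	spin_unlock_bh(&hwq->lock);
}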
@@ -486,7 +485,6 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 {
	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
	struct bnxt_qplib_crsqe *crsqe;
-	unsigned long flags;
	u16 cookie;
	int rc;
	u8 opcode;
@@ -512,12 +510,12 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		rc = __poll_for_resp(rcfw, cookie);
 
 	if (rc) {
-		spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
+		spin_lock_bh(&rcfw->cmdq.hwq.lock);
 		crsqe = &rcfw->crsqe_tbl[cookie];
 		crsqe->is_waiter_alive = false;
 		if (rc == -ENODEV)
 			set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
-		spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
+		spin_unlock_bh(&rcfw->cmdq.hwq.lock);
 		return -ETIMEDOUT;
 	}
 
@@ -628,7 +626,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	u16 cookie, blocked = 0;
 	bool is_waiter_alive;
 	struct pci_dev *pdev;
-	unsigned long flags;
 	u32 wait_cmds = 0;
 	int rc = 0;
 
@@ -637,17 +634,21 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
 		err_event = (struct creq_qp_error_notification *)qp_event;
 		qp_id = le32_to_cpu(err_event->xid);
+		spin_lock(&rcfw->tbl_lock);
 		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
 		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
+		if (!qp) {
+			spin_unlock(&rcfw->tbl_lock);
+			break;
+		}
+		bnxt_qplib_mark_qp_error(qp);
+		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
+		spin_unlock(&rcfw->tbl_lock);
 		dev_dbg(&pdev->dev, "Received QP error notification\n");
 		dev_dbg(&pdev->dev,
 			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
 			qp_id, err_event->req_err_state_reason,
 			err_event->res_err_state_reason);
-		if (!qp)
-			break;
-		bnxt_qplib_mark_qp_error(qp);
-		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
 		break;
 	default:
 		/*
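In this hunk the qp_tbl lookup, the NULL check, and the aeq_handler() call all move under the new rcfw->tbl_lock, so a concurrent destroy path that clears the table slot cannot free the handle while the error notification is being processed. The sketch below shows the same lookup-under-lock pattern with illustrative names (demo_tbl, demo_handle_error); it is not driver code.

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_tbl {
	spinlock_t tbl_lock;
	void *handles[64];
};

static void demo_handle_error(struct demo_tbl *tbl, u32 idx)
{
	void *qp;

	spin_lock(&tbl->tbl_lock);
	qp = tbl->handles[idx];
	if (!qp) {			/* entry already torn down */
		spin_unlock(&tbl->tbl_lock);
		return;
	}
	/* ... act on qp; the slot cannot change under us ... */
	spin_unlock(&tbl->tbl_lock);
}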
@@ -659,8 +660,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 		 *
 		 */
 
-		spin_lock_irqsave_nested(&hwq->lock, flags,
-					 SINGLE_DEPTH_NESTING);
+		spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
 		cookie = le16_to_cpu(qp_event->cookie);
 		blocked = cookie & RCFW_CMD_IS_BLOCKING;
 		cookie &= RCFW_MAX_COOKIE_VALUE;
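This hunk keeps the SINGLE_DEPTH_NESTING annotation while dropping the irqsave form: this path presumably runs from the CREQ tasklet, which already holds the CREQ hwq lock taken with spin_lock_bh, so a plain nested acquisition of the second hwq lock (same lock class) is enough. The sketch below only illustrates what the annotation is for, using assumed names (demo_q, demo_take_both).

#include <linux/spinlock.h>

struct demo_q {
	spinlock_t lock;
};

static struct demo_q demo_qs[2];

static void demo_init(void)
{
	int i;

	/* Same spin_lock_init() call site => both locks share one lockdep class. */
	for (i = 0; i < 2; i++)
		spin_lock_init(&demo_qs[i].lock);
}

static void demo_take_both(void)
{
	spin_lock(&demo_qs[0].lock);
	/* Second lock of the same class: tell lockdep this nesting is intended. */
	spin_lock_nested(&demo_qs[1].lock, SINGLE_DEPTH_NESTING);
	/* ... work while holding both locks ... */
	spin_unlock(&demo_qs[1].lock);
	spin_unlock(&demo_qs[0].lock);
}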
@@ -672,7 +672,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 			dev_info(&pdev->dev,
 				 "rcfw timedout: cookie = %#x, free_slots = %d",
 				 cookie, crsqe->free_slots);
-			spin_unlock_irqrestore(&hwq->lock, flags);
+			spin_unlock(&hwq->lock);
 			return rc;
 		}
 
@@ -720,7 +720,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 			__destroy_timedout_ah(rcfw,
 					      (struct creq_create_ah_resp *)
 					      qp_event);
-		spin_unlock_irqrestore(&hwq->lock, flags);
+		spin_unlock(&hwq->lock);
 	}
 	*num_wait += wait_cmds;
 	return rc;
@@ -734,12 +734,11 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
 	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
 	struct bnxt_qplib_hwq *hwq = &creq->hwq;
 	struct creq_base *creqe;
-	unsigned long flags;
 	u32 num_wakeup = 0;
 	u32 hw_polled = 0;
 
 	/* Service the CREQ until budget is over */
-	spin_lock_irqsave(&hwq->lock, flags);
+	spin_lock_bh(&hwq->lock);
 	while (budget > 0) {
 		creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
 		if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
@@ -782,7 +781,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
 	if (hw_polled)
 		bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
 				      rcfw->res->cctx, true);
-	spin_unlock_irqrestore(&hwq->lock, flags);
+	spin_unlock_bh(&hwq->lock);
 	if (num_wakeup)
 		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
 }
@@ -978,6 +977,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 				  GFP_KERNEL);
 	if (!rcfw->qp_tbl)
 		goto fail;
+	spin_lock_init(&rcfw->tbl_lock);
 
 	rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
 
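The final hunk initializes the new tbl_lock immediately after the qp table it protects is allocated, before any event handler can take it. A minimal sketch of that init ordering, with assumed names (demo_ctx, demo_alloc), follows.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_ctx {
	void **qp_tbl;
	spinlock_t tbl_lock;
};

static int demo_alloc(struct demo_ctx *ctx, unsigned int entries)
{
	ctx->qp_tbl = kcalloc(entries, sizeof(*ctx->qp_tbl), GFP_KERNEL);
	if (!ctx->qp_tbl)
		return -ENOMEM;
	spin_lock_init(&ctx->tbl_lock);	/* usable before the first lookup */
	return 0;
}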