@@ -489,7 +489,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 
 	do {
 		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
-			return -EBUSY;
+			break;
 
 		if (stop || mt76_txq_stopped(q))
			break;
@@ -522,24 +522,16 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 static int
 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
-	struct mt76_queue *q = phy->q_tx[qid];
 	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq;
 	struct mt76_txq *mtxq;
 	struct mt76_wcid *wcid;
+	struct mt76_queue *q;
 	int ret = 0;
 
 	while (1) {
 		int n_frames = 0;
 
-		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
-			return -EBUSY;
-
-		if (dev->queue_ops->tx_cleanup &&
-		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
-			dev->queue_ops->tx_cleanup(dev, q, false);
-		}
-
 		txq = ieee80211_next_txq(phy->hw, qid);
 		if (!txq)
 			break;
@@ -549,6 +541,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
 			continue;
 
+		phy = mt76_dev_phy(dev, wcid->phy_idx);
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
+			continue;
+
+		q = phy->q_tx[qid];
+		if (dev->queue_ops->tx_cleanup &&
+		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+			dev->queue_ops->tx_cleanup(dev, q, false);
+		}
+
 		if (mtxq->send_bar && mtxq->aggr) {
 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 			struct ieee80211_sta *sta = txq->sta;
@@ -578,7 +580,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
 	int len;
 
-	if (qid >= 4 || phy->offchannel)
+	if (qid >= 4)
 		return;
 
 	local_bh_disable();
@@ -680,9 +682,14 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
 
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
+	struct mt76_phy *main_phy = &phy->dev->phy;
 	int i;
 
 	mt76_txq_schedule_pending(phy);
+
+	if (phy != main_phy && phy->hw == main_phy->hw)
+		return;
+
 	for (i = 0; i <= MT_TXQ_BK; i++)
 		mt76_txq_schedule(phy, i);
 }
@@ -693,6 +700,7 @@ void mt76_tx_worker_run(struct mt76_dev *dev)
 	struct mt76_phy *phy;
 	int i;
 
+	mt76_txq_schedule_all(&dev->phy);
 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
 		phy = dev->phys[i];
 		if (!phy)
@@ -748,9 +756,6 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 	struct mt76_phy *phy = hw->priv;
 	struct mt76_dev *dev = phy->dev;
 
-	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
-		return;
-
 	mt76_worker_schedule(&dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);