@@ -330,6 +330,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	struct mt76_wcid *wcid, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct sk_buff_head *head;
 
 	if (mt76_testmode_enabled(phy)) {
 		ieee80211_free_txskb(phy->hw, skb);
@@ -345,9 +346,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 
 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
 
-	spin_lock_bh(&wcid->tx_pending.lock);
-	__skb_queue_tail(&wcid->tx_pending, skb);
-	spin_unlock_bh(&wcid->tx_pending.lock);
+	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+		head = &wcid->tx_offchannel;
+	else
+		head = &wcid->tx_pending;
+
+	spin_lock_bh(&head->lock);
+	__skb_queue_tail(head, skb);
+	spin_unlock_bh(&head->lock);
 
 	spin_lock_bh(&phy->tx_lock);
 	if (list_empty(&wcid->tx_list))
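Frames flagged IEEE80211_TX_CTL_TX_OFFCHAN (off-channel transmissions such as remain-on-channel mgmt tx) or IEEE80211_TX_CTRL_DONT_USE_RATE_MASK (set by mac80211 on frames such as scan probe requests) are now diverted to the new per-wcid tx_offchannel queue; everything else keeps using tx_pending. The companion declarations are not part of this file; a minimal sketch of what the rest of the series presumably adds in mt76.h, with field placement assumed:

#include <linux/list.h>
#include <linux/skbuff.h>

struct mt76_wcid {
	/* ... existing fields ... */
	struct list_head tx_list;		/* linkage into phy->tx_list (existing) */
	struct sk_buff_head tx_pending;		/* on-channel frames (existing) */
	struct sk_buff_head tx_offchannel;	/* off-channel frames (new; placement assumed) */
};

Like tx_pending, the new queue presumably gets skb_queue_head_init(&wcid->tx_offchannel) wherever the wcid is set up, since the code here relies on the queue's built-in lock.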
@@ -478,7 +485,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
 		return idx;
 
 	do {
-		if (test_bit(MT76_RESET, &phy->state))
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
 			return -EBUSY;
 
 		if (stop || mt76_txq_stopped(q))
@@ -522,7 +529,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 	while (1) {
 		int n_frames = 0;
 
-		if (test_bit(MT76_RESET, &phy->state))
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
 			return -EBUSY;
 
 		if (dev->queue_ops->tx_cleanup &&
@@ -568,7 +575,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
 	int len;
 
-	if (qid >= 4)
+	if (qid >= 4 || phy->offchannel)
 		return;
 
 	local_bh_disable();
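With a dedicated off-channel queue in place, the regular ieee80211_txq schedulers back off while the phy is off-channel: the three hunks above make mt76_txq_send_burst() and mt76_txq_schedule_list() treat phy->offchannel like MT76_RESET and return -EBUSY, and mt76_txq_schedule() now returns before doing any work. Only the pending-queue scheduler below runs until the phy is back on its operating channel.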
@@ -586,16 +593,17 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
 
 static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+			       struct sk_buff_head *head)
 {
 	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_sta *sta;
 	struct mt76_queue *q;
 	struct sk_buff *skb;
 	int ret = 0;
 
-	spin_lock(&wcid->tx_pending.lock);
-	while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+	spin_lock(&head->lock);
+	while ((skb = skb_peek(head)) != NULL) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 		int qid = skb_get_queue_mapping(skb);
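mt76_txq_schedule_pending_wcid() no longer hard-codes wcid->tx_pending; it drains whatever sk_buff_head the caller passes in, so the same walker serves both per-wcid queues. As before, it returns 0 once the queue is fully drained and -1 when it has to stop early.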
@@ -607,52 +615,59 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
 			qid = MT_TXQ_PSD;
 
 		q = phy->q_tx[qid];
-		if (mt76_txq_stopped(q)) {
+		if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
 			ret = -1;
 			break;
 		}
 
-		__skb_unlink(skb, &wcid->tx_pending);
-		spin_unlock(&wcid->tx_pending.lock);
+		__skb_unlink(skb, head);
+		spin_unlock(&head->lock);
 
 		sta = wcid_to_sta(wcid);
 		spin_lock(&q->lock);
 		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
 		dev->queue_ops->kick(dev, q);
 		spin_unlock(&q->lock);
 
-		spin_lock(&wcid->tx_pending.lock);
+		spin_lock(&head->lock);
 	}
-	spin_unlock(&wcid->tx_pending.lock);
+	spin_unlock(&head->lock);
 
 	return ret;
 }
 
 static void mt76_txq_schedule_pending(struct mt76_phy *phy)
 {
+	LIST_HEAD(tx_list);
+
 	if (list_empty(&phy->tx_list))
 		return;
 
 	local_bh_disable();
 	rcu_read_lock();
 
 	spin_lock(&phy->tx_lock);
-	while (!list_empty(&phy->tx_list)) {
-		struct mt76_wcid *wcid = NULL;
+	list_splice_init(&phy->tx_list, &tx_list);
+	while (!list_empty(&tx_list)) {
+		struct mt76_wcid *wcid;
 		int ret;
 
-		wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
 		list_del_init(&wcid->tx_list);
 
 		spin_unlock(&phy->tx_lock);
-		ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+		ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+		if (ret >= 0 && !phy->offchannel)
+			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
 		spin_lock(&phy->tx_lock);
 
-		if (ret) {
-			if (list_empty(&wcid->tx_list))
-				list_add_tail(&wcid->tx_list, &phy->tx_list);
+		if (!skb_queue_empty(&wcid->tx_pending) &&
+		    !skb_queue_empty(&wcid->tx_offchannel) &&
+		    list_empty(&wcid->tx_list))
+			list_add_tail(&wcid->tx_list, &phy->tx_list);
+
+		if (ret < 0)
 			break;
-		}
 	}
 	spin_unlock(&phy->tx_lock);
 
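The LIST_HEAD()/list_splice_init() idiom introduced here bounds each scheduling pass: the shared phy->tx_list is emptied under tx_lock into a stack-local list, so wcids that are re-added while the lock is dropped are picked up on the next pass rather than spinning within this one. A standalone illustration of the pattern, using generic names rather than driver code:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	/* payload */
};

/* Take ownership of everything on a shared list under its lock, then
 * process the entries without holding the lock for the whole walk.
 * Items added to shared_list afterwards wait for the next pass.
 */
static void process_all(struct list_head *shared_list, spinlock_t *lock)
{
	LIST_HEAD(local);	/* empty stack-local list head */

	spin_lock(lock);
	list_splice_init(shared_list, &local);	/* shared_list is left empty */
	spin_unlock(lock);

	while (!list_empty(&local)) {
		struct item *it = list_first_entry(&local, struct item, node);

		list_del_init(&it->node);
		/* work on it, lock not held */
	}
}

Within the loop itself, off-channel frames are always drained first, on-channel frames only while the phy is on its home channel; the wcid is put back on phy->tx_list (under tx_lock) when its queues still hold frames, and a hard failure (ret < 0) ends the pass.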