Skip to content
Permalink
Browse files

mt76: add support for reporting tx status with skb

MT76x2/MT76x0 has somewhat unreliable tx status reporting, and for that
reason the driver currently does not report per-skb tx ack status at all.
This breaks things like client idle polling, which relies on the tx ack
status of a transmitted nullfunc frame.

This patch adds code to report skb-attached tx status if requested by
mac80211 or the rate control module. Since tx status is polled from a
simple FIFO register, the code needs to account for the possibility of
tx status events getting lost.

The code keeps a list of skbs for which tx status is required and passes
them to mac80211 once tx status has been filled in and the DMA queue is
done with it.
If a tx status event is not received after one second, the status rates
are cleared, and a successful ACK is indicated to avoid spurious disassoc
during assoc or client polling.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
  • Loading branch information
nbd168 committed Oct 25, 2018
1 parent d83ac6e commit 23abe5d2bbd8e1e769f78838fc93de4f961f2a83
Showing with 208 additions and 57 deletions.
  1. +3 −0 mac80211.c
  2. +48 −0 mt76.h
  3. +0 −1 mt76x2.h
  4. +37 −34 mt76x2_mac.c
  5. +0 −19 mt76x2_mac.h
  6. +1 −0 mt76x2_main.c
  7. +7 −3 mt76x2_tx.c
  8. +112 −0 tx.c
@@ -284,6 +284,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
init_waitqueue_head(&dev->tx_wait);
skb_queue_head_init(&dev->status_list);

return dev;
}
@@ -331,6 +332,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, TX_FRAG_LIST);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

@@ -362,6 +364,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;

mt76_tx_status_flush(dev, NULL);
ieee80211_unregister_hw(hw);
mt76_tx_free(dev);
}
48 mt76.h
@@ -147,6 +147,8 @@ struct mt76_wcid {
u8 tx_rate_nss;
s8 max_txpwr_adj;
bool sw_iv;

u8 packet_id;
};

struct mt76_txq {
@@ -185,6 +187,22 @@ struct mt76_rx_tid {
struct sk_buff *reorder_buf[];
};

/* Flag bits for mt76_tx_cb.flags: an skb is only handed back to mac80211
 * once both the DMA queue and the tx-status FIFO are done with it. */
#define MT_TX_CB_DMA_DONE BIT(0)
#define MT_TX_CB_TXS_DONE BIT(1)
#define MT_TX_CB_TXS_FAILED BIT(2)

/* Packet ids are 8 bits wide; the all-ones value marks frames for which
 * no per-skb tx status is tracked. */
#define MT_PACKET_ID_MASK GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK

/* After this long without a status event, a queued skb is timed out
 * (rates cleared, ACK assumed) instead of waiting forever. */
#define MT_TX_STATUS_SKB_TIMEOUT HZ

/* Driver-private per-skb tx status state, stored in the mac80211 skb
 * control block (see mt76_tx_skb_cb()). */
struct mt76_tx_cb {
unsigned long jiffies;	/* enqueue time, for timeout detection */
u8 wcid;		/* wcid index the frame was sent to */
u8 pktid;		/* packet id matched against status events */
u8 flags;		/* MT_TX_CB_* completion bits */
};

enum {
MT76_STATE_INITIALIZED,
MT76_STATE_RUNNING,
@@ -253,6 +271,7 @@ struct mt76_dev {
const struct mt76_queue_ops *queue_ops;

wait_queue_head_t tx_wait;
struct sk_buff_head status_list;

u8 macaddr[ETH_ALEN];
u32 rev;
@@ -410,6 +429,13 @@ wcid_to_sta(struct mt76_wcid *wcid)
return container_of(ptr, struct ieee80211_sta, drv_priv);
}

/* Return the driver-private tx status area inside the mac80211 skb
 * control block (status.status_driver_data).  The BUILD_BUG_ON proves at
 * compile time that struct mt76_tx_cb fits in that reserved space. */
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
@@ -440,6 +466,28 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
struct mt76_wcid *wcid, int pktid);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);

/* Periodic sweep of dev->status_list (called from the mac work loop).
 * NOTE(review): pktid 0 presumably matches no pending frame, so
 * mt76_tx_status_skb_get() only reaps entries older than
 * MT_TX_STATUS_SKB_TIMEOUT — confirm against its definition in tx.c. */
static inline void
mt76_tx_status_check(struct mt76_dev *dev)
{
spin_lock_bh(&dev->status_list.lock);
mt76_tx_status_skb_get(dev, NULL, 0);
spin_unlock_bh(&dev->status_list.lock);
}

/* Complete all pending status skbs for @wcid, or for every wcid when
 * @wcid is NULL (used on sta remove and device unregister).
 * NOTE(review): pktid -1 appears to act as a match-all wildcard in
 * mt76_tx_status_skb_get() — verify in tx.c. */
static inline void
mt76_tx_status_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
spin_lock_bh(&dev->status_list.lock);
mt76_tx_status_skb_get(dev, wcid, -1);
spin_unlock_bh(&dev->status_list.lock);
}

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
@@ -216,7 +216,6 @@ int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq);
void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -199,8 +199,6 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
else
txwi->wcid = 0xff;

txwi->pktid = 1;

if (wcid && wcid->sw_iv && key) {
u64 pn = atomic64_inc_return(&key->tx_pn);
ccmp_pn[0] = pn;
@@ -246,8 +244,6 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->pktid |= MT_TXWI_PKTID_PROBE;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

@@ -483,9 +479,6 @@ mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
info->status.ampdu_len = n_frames;
info->status.ampdu_ack_len = st->success ? n_frames : 0;

if (st->pktid & MT_TXWI_PKTID_PROBE)
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;

if (st->aggr)
info->flags |= IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_STAT_AMPDU;
@@ -501,23 +494,36 @@ mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
u8 *update)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
struct ieee80211_tx_status status = {
.info = &info
};
struct mt76_dev *mdev = &dev->mt76;
struct mt76_wcid *wcid = NULL;
struct mt76x2_sta *msta = NULL;

if (stat->pktid == MT_PACKET_ID_NO_ACK)
return;

rcu_read_lock();
spin_lock_bh(&mdev->status_list.lock);

if (stat->wcid < ARRAY_SIZE(dev->wcid))
wcid = rcu_dereference(dev->wcid[stat->wcid]);

if (wcid && wcid->sta) {
void *priv;

priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
sta = container_of(priv, struct ieee80211_sta,
drv_priv);
status.sta = container_of(priv, struct ieee80211_sta,
drv_priv);
if (stat->pktid)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
stat->pktid);
if (status.skb)
status.info = IEEE80211_SKB_CB(status.skb);
}

if (msta && stat->aggr) {
if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache;

stat_val = stat->rate;
@@ -531,20 +537,24 @@ mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
goto out;
}

mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
mt76x2_mac_fill_tx_status(dev, status.info, &msta->status,
msta->n_frames);

msta->status = *stat;
msta->n_frames = 1;
*update = 0;
} else {
mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
mt76x2_mac_fill_tx_status(dev, status.info, stat, 1);
*update = 1;
}

ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
if (status.skb)
mt76_tx_status_skb_done(mdev, status.skb);
else
ieee80211_tx_status_ext(mt76_hw(dev), &status);

out:
spin_unlock_bh(&mdev->status_list.lock);
rcu_read_unlock();
}

@@ -591,23 +601,6 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
}
}

/* Removed by this patch: the per-skb mt76x2_tx_info bookkeeping kept in
 * the skb cb is superseded by the generic status_list machinery
 * (mt76_tx_status_skb_add/get) and mt76_tx_complete_skb(). */
static void
mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
void *txwi_ptr)
{
struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
struct mt76x2_txwi *txwi = txwi_ptr;

mt76x2_mac_poll_tx_status(dev, false);

txi->tries = 0;
txi->jiffies = jiffies;
txi->wcid = txwi->wcid;
txi->pktid = txwi->pktid;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76x2_tx_complete(dev, skb);
}

void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
{
struct mt76x2_tx_status stat;
@@ -621,11 +614,19 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush)
{
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
struct mt76x2_txwi *txwi;

if (e->txwi)
mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
else
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
return;
}

mt76x2_mac_poll_tx_status(dev, false);

txwi = (struct mt76x2_txwi *) &e->txwi->txwi;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);

mt76_tx_complete_skb(mdev, e->skb);
}

static enum mt76x2_cipher_type
@@ -866,6 +867,8 @@ void mt76x2_mac_work(struct work_struct *work)
dev->aggr_stats[idx++] += val >> 16;
}

mt76_tx_status_check(&dev->mt76);

ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
@@ -35,15 +35,6 @@ struct mt76x2_tx_status {
u16 rate;
} __packed __aligned(2);

/* Removed by this patch: replaced by the driver-generic struct
 * mt76_tx_cb, which occupies the same skb cb space (see mt76.h). */
struct mt76x2_tx_info {
unsigned long jiffies;
u8 tries;

u8 wcid;
u8 pktid;
u8 retry;
};

struct mt76x2_rxwi {
__le32 rxinfo;

@@ -133,8 +124,6 @@ enum mt76x2_phy_bandwidth {
#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)

#define MT_TXWI_PKTID_PROBE BIT(7)

struct mt76x2_txwi {
__le16 flags;
__le16 rate;
@@ -149,14 +138,6 @@ struct mt76x2_txwi {
u8 pktid;
} __packed __aligned(4);

/* Removed by this patch: accessor for the old per-skb mt76x2_tx_info;
 * callers now use mt76_tx_skb_cb() from mt76.h instead. */
static inline struct mt76x2_tx_info *
mt76x2_skb_tx_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

return (void *) info->status.status_driver_data;
}

int mt76x2_mac_start(struct mt76x2_dev *dev);
void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
void mt76x2_mac_resume(struct mt76x2_dev *dev);
@@ -314,6 +314,7 @@ mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int i;

mutex_lock(&dev->mutex);
mt76_tx_status_flush(&dev->mt76, &msta->wcid);
rcu_assign_pointer(dev->wcid[idx], NULL);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(&dev->mt76, sta->txq[i]);
@@ -148,26 +148,30 @@ static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
return 2;
}

int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info)
{
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76x2_txwi *txwi = txwi_ptr;
int qsel = MT_QSEL_EDCA;
int pid;
int ret;

if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);

mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);

pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;

ret = mt76x2_insert_hdr_pad(skb);
if (ret < 0)
return ret;

if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
if (pid && pid != MT_PACKET_ID_NO_ACK)
qsel = MT_QSEL_MGMT;

*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |

0 comments on commit 23abe5d

Please sign in to comment.
You can’t perform that action at this time.