mt76: use napi polling for tx cleanup
This allows tx scheduling and tx cleanup to run concurrently

Signed-off-by: Felix Fietkau <nbd@nbd.name>
nbd168 committed Mar 22, 2019
1 parent deacb8f commit c9402eb
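
In outline: the interrupt handler masks the TX-done interrupt source and schedules a NAPI context instead of the tx tasklet; the NAPI poll routine reaps completed TX descriptors, re-arms the interrupt through napi_complete_done(), and then kicks the existing tx tasklet, which keeps doing the queue scheduling. Because scheduling and cleanup no longer share one tasklet, they can run concurrently. Below is a minimal sketch of that pattern, not the driver code itself: the my_* names and the empty register stubs are placeholders (the real hooks are mt76x02_irq_handler, mt76x02_poll_tx and mt76x02_tx_tasklet in the diff that follows).

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_dev {
	struct napi_struct tx_napi;		/* runs tx cleanup */
	struct tasklet_struct tx_tasklet;	/* runs tx scheduling */
};

/* Placeholder register accessors; the real driver toggles MT_INT_TX_DONE_ALL. */
static void my_tx_irq_mask(struct my_dev *dev) { }
static void my_tx_irq_unmask(struct my_dev *dev) { }
static void my_reap_tx(struct my_dev *dev) { }

/* Hard IRQ: mask the TX-done source and hand the work to NAPI. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	my_tx_irq_mask(dev);
	napi_schedule(&dev->tx_napi);
	return IRQ_HANDLED;
}

/* NAPI poll: clean up finished frames, re-arm the IRQ, kick scheduling. */
static int my_poll_tx(struct napi_struct *napi, int budget)
{
	struct my_dev *dev = container_of(napi, struct my_dev, tx_napi);

	my_reap_tx(dev);

	if (napi_complete_done(napi, 0))
		my_tx_irq_unmask(dev);

	/* Scheduling stays in the tasklet, so it can run concurrently with
	 * the next cleanup pass.
	 */
	tasklet_schedule(&dev->tx_tasklet);
	return 0;
}

Registration of the poll context happens in the mt76x02_dma_init hunk below.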
Showing 2 changed files with 37 additions and 14 deletions.
mt76x02.h: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ struct mt76x02_dev {
 
 	struct sk_buff *rx_head;
 
-	struct tasklet_struct tx_tasklet;
+	struct napi_struct tx_napi;
 	struct tasklet_struct pre_tbtt_tasklet;
 	struct delayed_work cal_work;
 	struct delayed_work mac_work;
mt76x02_mmio.c: 36 additions & 13 deletions
@@ -203,18 +203,32 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
 static void mt76x02_tx_tasklet(unsigned long data)
 {
 	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
-	int i;
 
 	mt76x02_mac_poll_tx_status(dev, false);
 	mt76x02_process_tx_status_fifo(dev);
 
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+int mt76x02_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, tx_napi);
+	int i;
+
+	mt76x02_mac_poll_tx_status(dev, false);
+
 	for (i = MT_TXQ_MCU; i >= 0; i--)
 		mt76_queue_tx_cleanup(dev, i, false);
 
-	mt76x02_mac_poll_tx_status(dev, false);
+	if (napi_complete_done(napi, 0))
+		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
 
-	mt76_txq_schedule_all(&dev->mt76);
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
 
-	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+	tasklet_schedule(&dev->mt76.tx_tasklet);
+
+	return 0;
 }
 
 int mt76x02_dma_init(struct mt76x02_dev *dev)
@@ -274,7 +288,15 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
 	if (ret)
 		return ret;
 
-	return mt76_init_queues(dev);
+	ret = mt76_init_queues(dev);
+	if (ret)
+		return ret;
+
+	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
+			  NAPI_POLL_WEIGHT);
+	napi_enable(&dev->tx_napi);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(mt76x02_dma_init);

@@ -302,11 +324,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
 
 	intr &= dev->mt76.mmio.irqmask;
 
-	if (intr & MT_INT_TX_DONE_ALL) {
-		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
-		tasklet_schedule(&dev->mt76.tx_tasklet);
-	}
-
 	if (intr & MT_INT_RX_DONE(0)) {
 		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
 		napi_schedule(&dev->mt76.napi[0]);
@@ -328,9 +345,12 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
 		mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
 	}
 
-	if (intr & MT_INT_TX_STAT) {
+	if (intr & MT_INT_TX_STAT)
 		mt76x02_mac_poll_tx_status(dev, true);
-		tasklet_schedule(&dev->mt76.tx_tasklet);
+
+	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
+		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		napi_schedule(&dev->tx_napi);
 	}
 
 	if (intr & MT_INT_GPTIMER) {
@@ -361,6 +381,7 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
 void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
 {
 	tasklet_kill(&dev->mt76.tx_tasklet);
+	netif_napi_del(&dev->tx_napi);
 	mt76_dma_cleanup(&dev->mt76);
 }
 EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
@@ -483,6 +504,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 
 	tasklet_disable(&dev->pre_tbtt_tasklet);
 	tasklet_disable(&dev->mt76.tx_tasklet);
+	napi_disable(&dev->tx_napi);
 
 	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
 		napi_disable(&dev->mt76.napi[i]);
@@ -536,7 +558,8 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 	clear_bit(MT76_RESET, &dev->mt76.state);
 
 	tasklet_enable(&dev->mt76.tx_tasklet);
-	tasklet_schedule(&dev->mt76.tx_tasklet);
+	napi_enable(&dev->tx_napi);
+	napi_schedule(&dev->tx_napi);
 
 	tasklet_enable(&dev->pre_tbtt_tasklet);
 
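Taken together, the remaining hunks are lifecycle plumbing for the new NAPI context: mt76x02_dma_init registers and enables it (netif_tx_napi_add() followed by napi_enable()), the interrupt handler now schedules it for both TX-status and TX-done interrupts, mt76x02_dma_cleanup removes it with netif_napi_del(), and mt76x02_watchdog_reset brackets the full-chip reset with napi_disable()/napi_enable() plus a final napi_schedule() to restart cleanup once the hardware is back up.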
