
Commit 7acfb68

Merge branch 'increase-maximum-mtu-to-9k-for-airoha-en7581-soc'
Lorenzo Bianconi says:

====================
Increase maximum MTU to 9k for Airoha EN7581 SoC

EN7581 SoC supports 9k maximum MTU. Enable the reception of
Scatter-Gather (SG) frames for Airoha EN7581. Introduce
airoha_dev_change_mtu callback.
====================

Link: https://patch.msgid.link/20250304-airoha-eth-rx-sg-v1-0-283ebc61120e@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents f732549 + 168ef0c commit 7acfb68
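Once this lands, the larger MTU is exercised through the normal dev_set_mtu() path: the kernel forwards an SIOCSIFMTU request (or an "ip link set dev ... mtu 9000") to the driver's new ndo_change_mtu callback. A minimal, hypothetical userspace sketch of such a request; "eth0" and the 9000-byte value are placeholders, not anything mandated by the patch:

/* Hypothetical userspace sketch: request a 9000-byte MTU via SIOCSIFMTU.
 * The kernel routes this to the driver's ndo_change_mtu callback. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_mtu = 9000;				/* placeholder MTU */

	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0) {
		perror("SIOCSIFMTU");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}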

File tree

3 files changed, +71 -34 lines


drivers/net/ethernet/airoha/airoha_eth.c

Lines changed: 64 additions & 33 deletions
@@ -138,15 +138,10 @@ static void airoha_fe_maccr_init(struct airoha_eth *eth)
 {
 	int p;
 
-	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
 		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
 			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
 			      GDM_DROP_CRC_ERR);
-		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
-			      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
-			      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
-			      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
-	}
 
 	airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
 		      FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
@@ -620,10 +615,10 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 		struct airoha_qdma_desc *desc = &q->desc[q->tail];
 		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
 		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+		struct page *page = virt_to_head_page(e->buf);
 		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
 		struct airoha_gdm_port *port;
-		struct sk_buff *skb;
-		int len, p;
+		int data_len, len, p;
 
 		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
 			break;
@@ -641,30 +636,41 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 		dma_sync_single_for_cpu(eth->dev, dma_addr,
 					SKB_WITH_OVERHEAD(q->buf_size), dir);
 
+		data_len = q->skb ? q->buf_size
+				  : SKB_WITH_OVERHEAD(q->buf_size);
+		if (data_len < len)
+			goto free_frag;
+
 		p = airoha_qdma_get_gdm_port(eth, desc);
-		if (p < 0 || !eth->ports[p]) {
-			page_pool_put_full_page(q->page_pool,
-						virt_to_head_page(e->buf),
-						true);
-			continue;
-		}
+		if (p < 0 || !eth->ports[p])
+			goto free_frag;
 
 		port = eth->ports[p];
-		skb = napi_build_skb(e->buf, q->buf_size);
-		if (!skb) {
-			page_pool_put_full_page(q->page_pool,
-						virt_to_head_page(e->buf),
-						true);
-			break;
+		if (!q->skb) { /* first buffer */
+			q->skb = napi_build_skb(e->buf, q->buf_size);
+			if (!q->skb)
+				goto free_frag;
+
+			__skb_put(q->skb, len);
+			skb_mark_for_recycle(q->skb);
+			q->skb->dev = port->dev;
+			q->skb->protocol = eth_type_trans(q->skb, port->dev);
+			q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb_record_rx_queue(q->skb, qid);
+		} else { /* scattered frame */
+			struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+			int nr_frags = shinfo->nr_frags;
+
+			if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+				goto free_frag;
+
+			skb_add_rx_frag(q->skb, nr_frags, page,
+					e->buf - page_address(page), len,
+					q->buf_size);
 		}
 
-		skb_reserve(skb, 2);
-		__skb_put(skb, len);
-		skb_mark_for_recycle(skb);
-		skb->dev = port->dev;
-		skb->protocol = eth_type_trans(skb, skb->dev);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		skb_record_rx_queue(skb, qid);
+		if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+			continue;
 
 		if (netdev_uses_dsa(port->dev)) {
 			/* PPE module requires untagged packets to work
@@ -677,22 +683,27 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 
 			if (sptag < ARRAY_SIZE(port->dsa_meta) &&
 			    port->dsa_meta[sptag])
-				skb_dst_set_noref(skb,
+				skb_dst_set_noref(q->skb,
 						  &port->dsa_meta[sptag]->dst);
 		}
 
 		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
 		if (hash != AIROHA_RXD4_FOE_ENTRY)
-			skb_set_hash(skb, jhash_1word(hash, 0),
+			skb_set_hash(q->skb, jhash_1word(hash, 0),
 				     PKT_HASH_TYPE_L4);
 
 		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
 		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
 			airoha_ppe_check_skb(eth->ppe, hash);
 
-		napi_gro_receive(&q->napi, skb);
-
 		done++;
+		napi_gro_receive(&q->napi, q->skb);
+		q->skb = NULL;
+		continue;
+free_frag:
+		page_pool_put_full_page(q->page_pool, page, true);
+		dev_kfree_skb(q->skb);
+		q->skb = NULL;
 	}
 	airoha_qdma_fill_rx_queue(q);
 
@@ -767,6 +778,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
 			FIELD_PREP(RX_RING_THR_MASK, thr));
 	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
 			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+	airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
 
 	airoha_qdma_fill_rx_queue(q);
 
@@ -1166,7 +1178,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
 	}
 
 	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
-		       GLOBAL_CFG_RX_2B_OFFSET_MASK |
 		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
 		       GLOBAL_CFG_CPU_TXR_RR_MASK |
 		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
@@ -1520,9 +1531,9 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
 
 static int airoha_dev_open(struct net_device *dev)
 {
+	int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
 	struct airoha_gdm_port *port = netdev_priv(dev);
 	struct airoha_qdma *qdma = port->qdma;
-	int err;
 
 	netif_tx_start_all_queues(dev);
 	err = airoha_set_vip_for_gdm_port(port, true);
@@ -1536,6 +1547,11 @@ static int airoha_dev_open(struct net_device *dev)
 		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
 				GDM_STAG_EN_MASK);
 
+	airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
 	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
 			GLOBAL_CFG_TX_DMA_EN_MASK |
 			GLOBAL_CFG_RX_DMA_EN_MASK);
@@ -1689,6 +1705,20 @@ static void airoha_dev_get_stats64(struct net_device *dev,
 	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
 }
 
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+	struct airoha_gdm_port *port = netdev_priv(dev);
+	struct airoha_eth *eth = port->qdma->eth;
+	u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+		      GDM_LONG_LEN_MASK,
+		      FIELD_PREP(GDM_LONG_LEN_MASK, len));
+	WRITE_ONCE(dev->mtu, mtu);
+
+	return 0;
+}
+
 static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
 				   struct net_device *sb_dev)
 {
@@ -2384,6 +2414,7 @@ static const struct net_device_ops airoha_netdev_ops = {
 	.ndo_init		= airoha_dev_init,
 	.ndo_open		= airoha_dev_open,
 	.ndo_stop		= airoha_dev_stop,
+	.ndo_change_mtu		= airoha_dev_change_mtu,
 	.ndo_select_queue	= airoha_dev_select_queue,
 	.ndo_start_xmit		= airoha_dev_xmit,
 	.ndo_get_stats64	= airoha_dev_get_stats64,
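The heart of the airoha_eth.c change is the assembly loop above: the first buffer of a frame becomes the skb via napi_build_skb(), later buffers are attached as page fragments with skb_add_rx_frag(), and the skb is only handed to napi_gro_receive() once a descriptor without QDMA_DESC_MORE_MASK arrives. The snippet below is a hypothetical, userspace-only model of that control flow, not driver code; struct desc_t, MORE_BIT and the sample ring are invented for illustration:

/* Hypothetical userspace model of the scatter-gather Rx assembly flow:
 * buffers are accumulated until a descriptor without the "more" bit
 * arrives, then the assembled frame is delivered in one piece. */
#include <stdio.h>
#include <string.h>

#define MORE_BIT	0x1	/* stand-in for QDMA_DESC_MORE_MASK */
#define MAX_FRAME	4096

struct desc_t {
	const char *buf;	/* payload carried by this buffer */
	int len;		/* bytes used in this buffer */
	unsigned int ctrl;	/* MORE_BIT set => frame continues */
};

int main(void)
{
	/* One 3-buffer frame followed by a single-buffer frame. */
	struct desc_t ring[] = {
		{ "AAAA", 4, MORE_BIT },
		{ "BBBB", 4, MORE_BIT },
		{ "CC",   2, 0 },
		{ "DDDD", 4, 0 },
	};
	char frame[MAX_FRAME];
	int frame_len = 0;
	size_t i;

	for (i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		/* "first buffer" vs "scattered frame", as in the driver */
		memcpy(frame + frame_len, ring[i].buf, ring[i].len);
		frame_len += ring[i].len;

		if (ring[i].ctrl & MORE_BIT)
			continue;	/* more buffers belong to this frame */

		printf("deliver frame: %d bytes\n", frame_len);
		frame_len = 0;		/* mirror q->skb = NULL */
	}
	return 0;
}

Running it prints one 10-byte frame followed by one 4-byte frame, mirroring how q->skb persists across loop iterations until the final buffer of a scattered frame has been consumed.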

drivers/net/ethernet/airoha/airoha_eth.h

Lines changed: 2 additions & 1 deletion
@@ -20,7 +20,7 @@
 #define AIROHA_MAX_DSA_PORTS		7
 #define AIROHA_MAX_NUM_RSTS		3
 #define AIROHA_MAX_NUM_XSI_RSTS		5
-#define AIROHA_MAX_MTU			2000
+#define AIROHA_MAX_MTU			9216
 #define AIROHA_MAX_PACKET_SIZE		2048
 #define AIROHA_NUM_QOS_CHANNELS		4
 #define AIROHA_NUM_QOS_QUEUES		8
@@ -176,6 +176,7 @@ struct airoha_queue {
 
 	struct napi_struct napi;
 	struct page_pool *page_pool;
+	struct sk_buff *skb;
 };
 
 struct airoha_tx_irq_queue {
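For scale, the length the driver programs into GDM_LONG_LEN_MASK is ETH_HLEN + mtu + ETH_FCS_LEN (see airoha_dev_open() and airoha_dev_change_mtu() above), so the new 9216-byte AIROHA_MAX_MTU corresponds to a 9234-byte long-frame limit. A throwaway check, with the two Ethernet constants redefined locally (their standard Linux values) only so the snippet builds outside the kernel tree:

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header length, as in <linux/if_ether.h> */
#define ETH_FCS_LEN	4	/* frame check sequence length */
#define AIROHA_MAX_MTU	9216	/* new value from airoha_eth.h */

int main(void)
{
	/* prints 9234: the maximum value written to GDM_LONG_LEN_MASK */
	printf("max GDM long length: %d\n",
	       ETH_HLEN + AIROHA_MAX_MTU + ETH_FCS_LEN);
	return 0;
}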

drivers/net/ethernet/airoha/airoha_regs.h

Lines changed: 5 additions & 0 deletions
@@ -626,10 +626,15 @@
 #define REG_RX_DELAY_INT_IDX(_n)	\
 	(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
 
+#define REG_RX_SCATTER_CFG(_n)	\
+	(((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
 #define RX_DELAY_INT_MASK		GENMASK(15, 0)
 
 #define RX_RING_DMA_IDX_MASK		GENMASK(15, 0)
 
+#define RX_RING_SG_EN_MASK		BIT(0)
+
 #define REG_INGRESS_TRTCM_CFG		0x0070
 #define INGRESS_TRTCM_EN_MASK		BIT(31)
 #define INGRESS_TRTCM_MODE_MASK		BIT(30)
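The new REG_RX_SCATTER_CFG() macro follows the same two-bank layout as the neighbouring Rx ring registers: rings 0-15 map to 0x0214 + n * 0x20 and rings 16 and up to 0x0e14 + (n - 16) * 0x20. A small standalone illustration of the offsets it produces; the macro body is copied from the patch, everything else is scaffolding:

#include <stdio.h>

/* Copied from the patch: per-ring scatter-gather config register offset. */
#define REG_RX_SCATTER_CFG(_n)	\
	(((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))

int main(void)
{
	int n;

	/* prints 0x0214, 0x0314, 0x0e14, 0x0f14 for rings 0, 8, 16, 24 */
	for (n = 0; n < 32; n += 8)
		printf("ring %2d -> 0x%04x\n", n, REG_RX_SCATTER_CFG(n));
	return 0;
}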
