lantiq: xrx200: enable use of baby jumbo frames
The xrx200 max MTU is reduced so that the driver works correctly when the
MTU is set to the maximum, and the max MTU of the switch is increased to
match.

In 5.10, the switch driver now enables non-standard MTUs on a per-port
basis, with the overall frame size set based on the CPU port. When the
MTU is not changed, this should have no effect. The maximum packet size
is limited, as large packets cause the switch to lock up.

0702-net-lantiq-add-support-for-jumbo-frames.patch comes from net-next
commit 998ac358019e491217e752bc6dcbb3afb2a6fa3e.

In 5.4, all switch ports are configured to accept the max MTU, as 5.4
does not have port_max_mtu/port_change_mtu callbacks.

Signed-off-by: Thomas Nixon <tom@tomn.co.uk>
tomjnixon authored and hauke committed Jan 16, 2022
1 parent 607f06f commit 255268c
Showing 6 changed files with 484 additions and 12 deletions.
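
For orientation, the frame-size accounting that the patches below implement, as a minimal standalone sketch (not part of the commit; the constant values follow the kernel's if_vlan.h/if_ether.h, and GSWIP_SPECIAL_TAG is an assumed name for the 8-byte DSA header the patch descriptions mention):

#include <stdio.h>

#define VLAN_ETH_HLEN     18 /* Ethernet header plus one VLAN tag */
#define ETH_FCS_LEN        4 /* frame check sequence */
#define GSWIP_SPECIAL_TAG  8 /* DSA tag on the CPU port (assumed name) */

/* bytes on the wire for a frame carrying an MTU-sized payload */
static int frame_len(int mtu, int is_cpu_port)
{
	return mtu + VLAN_ETH_HLEN + ETH_FCS_LEN +
	       (is_cpu_port ? GSWIP_SPECIAL_TAG : 0);
}

int main(void)
{
	/* 2526 is the largest MTU the switch patch below allows */
	printf("user port: %d\n", frame_len(2526, 0)); /* 2548 */
	printf("cpu port:  %d\n", frame_len(2526, 1)); /* 2556 */
	return 0;
}

The CPU-port figure is exactly GSWIP_MAX_PACKET_LENGTH (2556) from the switch patch, which is why the MTU limit and the packet-length limit differ by 30 bytes.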
@@ -0,0 +1,145 @@
From 998ac358019e491217e752bc6dcbb3afb2a6fa3e Mon Sep 17 00:00:00 2001
From: Aleksander Jan Bajkowski <olek2@wp.pl>
Date: Sun, 19 Sep 2021 20:24:28 +0200
Subject: [PATCH] net: lantiq: add support for jumbo frames

Add support for jumbo frames. Full support for jumbo frames requires
changes in the DSA switch driver (lantiq_gswip.c).

Tested on BT Home Hub 5A.

Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/ethernet/lantiq_xrx200.c | 64 +++++++++++++++++++++++++---
1 file changed, 57 insertions(+), 7 deletions(-)

--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -14,13 +14,15 @@
#include <linux/clk.h>
#include <linux/delay.h>

+#include <linux/if_vlan.h>
+
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
-#define XRX200_DMA_DATA_LEN 0x600
+#define XRX200_DMA_DATA_LEN (SZ_64K - 1)
#define XRX200_DMA_RX 0
#define XRX200_DMA_TX 1

@@ -106,7 +108,8 @@ static void xrx200_flush_dma(struct xrx2
break;

desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
+ ETH_FCS_LEN);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -154,19 +157,20 @@ static int xrx200_close(struct net_devic

static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
dma_addr_t mapping;
int ret = 0;

ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- XRX200_DMA_DATA_LEN);
+ len);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}

mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
@@ -179,8 +183,7 @@ static int xrx200_alloc_skb(struct xrx20
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;

return ret;
}
@@ -340,10 +343,57 @@ err_drop:
return NETDEV_TX_OK;
}

+static int
+xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+ struct xrx200_chan *ch_rx = &priv->chan_rx;
+ int old_mtu = net_dev->mtu;
+ bool running = false;
+ struct sk_buff *skb;
+ int curr_desc;
+ int ret = 0;
+
+ net_dev->mtu = new_mtu;
+
+ if (new_mtu <= old_mtu)
+ return ret;
+
+ running = netif_running(net_dev);
+ if (running) {
+ napi_disable(&ch_rx->napi);
+ ltq_dma_close(&ch_rx->dma);
+ }
+
+ xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
+ curr_desc = ch_rx->dma.desc;
+
+ for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
+ ch_rx->dma.desc++) {
+ skb = ch_rx->skb[ch_rx->dma.desc];
+ ret = xrx200_alloc_skb(ch_rx);
+ if (ret) {
+ net_dev->mtu = old_mtu;
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ ch_rx->dma.desc = curr_desc;
+ if (running) {
+ napi_enable(&ch_rx->napi);
+ ltq_dma_open(&ch_rx->dma);
+ ltq_dma_enable_irq(&ch_rx->dma);
+ }
+
+ return ret;
+}
+
static const struct net_device_ops xrx200_netdev_ops = {
.ndo_open = xrx200_open,
.ndo_stop = xrx200_close,
.ndo_start_xmit = xrx200_start_xmit,
+ .ndo_change_mtu = xrx200_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -454,7 +504,7 @@ static int xrx200_probe(struct platform_
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;

/* load the memory ranges */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
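
A quick check of the new max_mtu bound above, as a standalone sketch (not from the commit; SZ_64K mirrors the kernel's 64 KiB size macro):

#include <stdio.h>

#define SZ_64K              (64 * 1024)
#define XRX200_DMA_DATA_LEN (SZ_64K - 1) /* 65535, the DMA length field cap */
#define VLAN_ETH_HLEN       18
#define ETH_FCS_LEN         4

int main(void)
{
	/* net_dev->max_mtu as xrx200_probe() now computes it */
	int max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;

	/* xrx200_alloc_skb() then sizes each rx buffer as
	 * mtu + VLAN_ETH_HLEN + ETH_FCS_LEN, i.e. back up to 65535
	 */
	printf("max_mtu = %d\n", max_mtu); /* 65513 */
	return 0;
}

The point of the subtraction is that the descriptor length field caps the whole buffer, so header and FCS overhead must come out of the advertised MTU.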
@@ -0,0 +1,101 @@
From 24a43ae2ac0ea06c474b1c80dc75651294d49321 Mon Sep 17 00:00:00 2001
From: Thomas Nixon <tom@tomn.co.uk>
Date: Sat, 2 Oct 2021 00:48:05 +0100
Subject: [PATCH 2/2] net: lantiq: enable jumbo frames on GSWIP

This enables non-standard MTUs on a per-port basis, with the overall
frame size set based on the CPU port.

When the MTU is not changed, this should have no effect.

Long packets crash the switch with MTUs greater than 2526, so the
maximum is limited for now.

Signed-off-by: Thomas Nixon <tom@tomn.co.uk>
---
drivers/net/dsa/lantiq_gswip.c | 46 +++++++++++++++++++++++++++++++---
1 file changed, 42 insertions(+), 4 deletions(-)

--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -238,6 +238,11 @@

#define XRX200_GPHY_FW_ALIGN (16 * 1024)

+/* maximum packet size supported by the switch; in theory this should be 9600,
+ * but long packets currently cause lock-ups with an MTU of over 2526
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2556
+
struct gswip_hw_info {
int max_ports;
int cpu_port;
@@ -851,10 +856,6 @@ static int gswip_setup(struct dsa_switch
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));

- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);

@@ -871,6 +872,8 @@ static int gswip_setup(struct dsa_switch
return err;
}

+ ds->mtu_enforcement_ingress = true;
+
gswip_port_enable(ds, cpu_port, NULL);
return 0;
}
@@ -1433,6 +1436,39 @@ static int gswip_port_fdb_dump(struct ds
return 0;
}

+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* includes 8 bytes for special header */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+ int cpu_port = priv->hw_info->cpu_port;
+
+ /* cpu port always has maximum mtu of user ports, so use it to set
+ * switch frame size, including 8 byte special header
+ */
+ if (port == cpu_port) {
+ new_mtu += 8;
+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
+ }
+
+ /* enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(port));
+ else
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+ GSWIP_MAC_CTRL_2p(port));
+
+ return 0;
+}
+
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1776,6 +1812,8 @@ static const struct dsa_switch_ops gswip
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_validate = gswip_phylink_validate,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
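
To sanity-check the 2526 limit against GSWIP_MAX_PACKET_LENGTH, here is the CPU-port frame-length arithmetic from gswip_port_change_mtu() as a standalone sketch (not kernel code; SPECIAL_HDR_LEN is an assumed name for the 8-byte header added in the patch):

#include <stdio.h>

#define GSWIP_MAX_PACKET_LENGTH 2556
#define VLAN_ETH_HLEN           18
#define ETH_FCS_LEN             4
#define SPECIAL_HDR_LEN         8 /* assumed name for the 8-byte header */

int main(void)
{
	int mtu = 2526; /* largest MTU that does not lock up the switch */

	/* the CPU-port branch does new_mtu += 8, then writes
	 * VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN to GSWIP_MAC_FLEN
	 */
	int flen = VLAN_ETH_HLEN + (mtu + SPECIAL_HDR_LEN) + ETH_FCS_LEN;

	printf("MAC_FLEN = %d of %d\n", flen, GSWIP_MAX_PACKET_LENGTH);
	return 0; /* prints: MAC_FLEN = 2556 of 2556 */
}

At the maximum MTU, the programmed frame length exactly fills the packet-length budget, so any larger MTU would exceed it.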
@@ -0,0 +1,122 @@
From 1488fc204568f707fe2a42a913788c00a95af30e Mon Sep 17 00:00:00 2001
From: Aleksander Jan Bajkowski <olek2@wp.pl>
Date: Fri, 17 Dec 2021 01:07:40 +0100
Subject: [PATCH] net: lantiq_xrx200: increase buffer reservation

If the user sets a lower MTU on the CPU port than on the switch,
then the DMA writes a few more bytes into the buffer than expected.
In the worst case, it may exceed the size of the buffer. Experiments
showed that the buffer size should be a multiple of the burst
length. This patch rounds the length of the rx buffer upwards and
fixes the bug. The reservation of FCS space in the buffer has been
removed, as the PMAC strips the FCS.

Fixes: 998ac358019e ("net: lantiq: add support for jumbo frames")
Reported-by: Thomas Nixon <tom@tomn.co.uk>
Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/ethernet/lantiq_xrx200.c | 34 ++++++++++++++++++++--------
1 file changed, 24 insertions(+), 10 deletions(-)

--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -70,6 +70,8 @@ struct xrx200_priv {
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;

+ u16 rx_buf_size;
+
struct net_device *net_dev;
struct device *dev;

@@ -96,6 +98,16 @@ static void xrx200_pmac_mask(struct xrx2
xrx200_pmac_w32(priv, val, offset);
}

+static int xrx200_max_frame_len(int mtu)
+{
+ return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+ return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -108,8 +120,7 @@ static void xrx200_flush_dma(struct xrx2
break;

desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
- ETH_FCS_LEN);
+ ch->priv->rx_buf_size;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -157,21 +168,21 @@ static int xrx200_close(struct net_devic

static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
- int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
+ struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;

- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- len);
+ ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+ priv->rx_buf_size);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}

- mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
@@ -183,7 +194,7 @@ static int xrx200_alloc_skb(struct xrx20
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

return ret;
}
@@ -355,6 +366,7 @@ xrx200_change_mtu(struct net_device *net
int ret = 0;

net_dev->mtu = new_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(new_mtu);

if (new_mtu <= old_mtu)
return ret;
@@ -374,6 +386,7 @@ xrx200_change_mtu(struct net_device *net
ret = xrx200_alloc_skb(ch_rx);
if (ret) {
net_dev->mtu = old_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(old_mtu);
break;
}
dev_kfree_skb_any(skb);
@@ -504,7 +517,8 @@ static int xrx200_probe(struct platform_
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+ priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);

/* load the memory ranges */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
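
The buffer rounding added above can be reproduced in isolation. A sketch assuming XRX200_DMA_BURST_LEN is 8, as in the mainline driver (the constant is not visible in this diff):

#include <stdio.h>

#define VLAN_ETH_HLEN        18
#define XRX200_DMA_BURST_LEN 8 /* assumed; defined elsewhere in the driver */

/* round x up to a multiple of step (step must be a power of two),
 * mirroring the kernel's round_up()
 */
#define ROUND_UP(x, step) (((x) + (step) - 1) & ~((step) - 1))

static int xrx200_buffer_size(int mtu)
{
	/* no ETH_FCS_LEN term here: the PMAC strips the FCS */
	return ROUND_UP(VLAN_ETH_HLEN + mtu, 4 * XRX200_DMA_BURST_LEN);
}

int main(void)
{
	printf("%d\n", xrx200_buffer_size(1500)); /* 18 + 1500 = 1518 -> 1536 */
	printf("%d\n", xrx200_buffer_size(2526)); /* 18 + 2526 = 2544 -> 2560 */
	return 0;
}

Rounding each buffer up to a multiple of four burst lengths (32 bytes here) leaves the slack the DMA engine needs when it writes past the expected frame end.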
