
Commit 115a731

Merge branch 'dpaa_eth-next' of git://git.freescale.com/ppc/upstream/linux
Madalin Bucur says:

====================
QorIQ DPAA 1 updates

This patch set introduces a series of fixes and features to the DPAA 1
drivers. Besides activating hardware Rx checksum offloading, four
traffic classes are added for Tx traffic prioritisation.

changes from v1: added patch to enable context-A stashing
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents: 5425077 + 7fe1e29

File tree: 5 files changed (+248 / -47 lines)


drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

Lines changed: 144 additions & 34 deletions
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* L4 Type field: TCP */
 #define FM_L4_PARSE_RESULT_TCP 0x20
 
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
 
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
          * For conformity, we'll still declare GSO explicitly.
          */
         net_dev->features |= NETIF_F_GSO;
+        net_dev->features |= NETIF_F_RXCSUM;
 
         net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
         /* we do not want shared skbs on TX */
@@ -334,6 +342,41 @@ static void dpaa_get_stats64(struct net_device *net_dev,
         }
 }
 
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+                         struct tc_to_netdev *tc)
+{
+        struct dpaa_priv *priv = netdev_priv(net_dev);
+        int i;
+
+        if (tc->type != TC_SETUP_MQPRIO)
+                return -EINVAL;
+
+        if (tc->tc == priv->num_tc)
+                return 0;
+
+        if (!tc->tc) {
+                netdev_reset_tc(net_dev);
+                goto out;
+        }
+
+        if (tc->tc > DPAA_TC_NUM) {
+                netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+                           DPAA_TC_NUM);
+                return -EINVAL;
+        }
+
+        netdev_set_num_tc(net_dev, tc->tc);
+
+        for (i = 0; i < tc->tc; i++)
+                netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+                                    i * DPAA_TC_TXQ_NUM);
+
+out:
+        priv->num_tc = tc->tc ? tc->tc : 1;
+        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+        return 0;
+}
+
 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
 {
         struct platform_device *of_dev;
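With this hook in place, the four hardware classes are driven through the standard mqprio mechanism. As a usage sketch (interface name illustrative, not taken from this commit), a command along the lines of "tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 hw 1" should reach dpaa_setup_tc() through .ndo_setup_tc and map skb priorities onto the four classes.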
@@ -557,16 +600,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
 
 /* Use multiple WQs for FQ assignment:
  * - Tx Confirmation queues go to WQ1.
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- *   to be scheduled, in case there are many more FQs in WQ3).
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- *   Rx and Tx traffic).
+ * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ *   to be scheduled, in case there are many more FQs in WQ6).
+ * - Rx Default goes to WQ6.
+ * - Tx queues go to different WQs depending on their priority. Equal
+ *   chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ *   WQ0 (highest priority).
  * This ensures that Tx-confirmed buffers are timely released. In particular,
  * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
  * are greatly outnumbered by other FQs in the system, while
  * dequeue scheduling is round-robin.
  */
-static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 {
         switch (fq->fq_type) {
         case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +620,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
                 break;
         case FQ_TYPE_RX_ERROR:
         case FQ_TYPE_TX_ERROR:
-                fq->wq = 2;
+                fq->wq = 5;
                 break;
         case FQ_TYPE_RX_DEFAULT:
+                fq->wq = 6;
+                break;
         case FQ_TYPE_TX:
-                fq->wq = 3;
+                switch (idx / DPAA_TC_TXQ_NUM) {
+                case 0:
+                        /* Low priority (best effort) */
+                        fq->wq = 6;
+                        break;
+                case 1:
+                        /* Medium priority */
+                        fq->wq = 2;
+                        break;
+                case 2:
+                        /* High priority */
+                        fq->wq = 1;
+                        break;
+                case 3:
+                        /* Very high priority */
+                        fq->wq = 0;
+                        break;
+                default:
+                        WARN(1, "Too many TX FQs: more than %d!\n",
+                             DPAA_ETH_TXQ_NUM);
+                }
                 break;
         default:
                 WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +674,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
         }
 
         for (i = 0; i < count; i++)
-                dpaa_assign_wq(dpaa_fq + i);
+                dpaa_assign_wq(dpaa_fq + i, i);
 
         return dpaa_fq;
 }
@@ -985,7 +1052,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
         /* Initialization common to all ingress queues */
         if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
                 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
-                initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+                initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+                                                  QM_FQCTRL_CTXASTASHING);
                 initfq.fqd.context_a.stashing.exclusive =
                         QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
                         QM_STASHING_EXCL_ANNOTATION;
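The new QM_FQCTRL_CTXASTASHING bit pairs with the stashing.exclusive settings that follow it: with context-A stashing enabled, QMan can prefetch the frame annotation, frame data and FQ context into the dequeueing core's cache ahead of the dequeue callback. This appears to be the "enable context-A stashing" change called out in the cover letter.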
@@ -1055,9 +1123,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
         return err;
 }
 
-static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
-                                  struct dpaa_fq *defq,
-                                  struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+                                 struct dpaa_fq *defq,
+                                 struct dpaa_buffer_layout *buf_layout)
 {
         struct fman_buffer_prefix_content buf_prefix_content;
         struct fman_port_params params;
@@ -1076,23 +1144,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
         params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
 
         err = fman_port_config(port, &params);
-        if (err)
+        if (err) {
                 pr_err("%s: fman_port_config failed\n", __func__);
+                return err;
+        }
 
         err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
-        if (err)
+        if (err) {
                 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                        __func__);
+                return err;
+        }
 
         err = fman_port_init(port);
         if (err)
                 pr_err("%s: fm_port_init failed\n", __func__);
+
+        return err;
 }
 
-static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-                                  size_t count, struct dpaa_fq *errq,
-                                  struct dpaa_fq *defq,
-                                  struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+                                 size_t count, struct dpaa_fq *errq,
+                                 struct dpaa_fq *defq,
+                                 struct dpaa_buffer_layout *buf_layout)
 {
         struct fman_buffer_prefix_content buf_prefix_content;
         struct fman_port_rx_params *rx_p;
@@ -1120,32 +1194,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
         }
 
         err = fman_port_config(port, &params);
-        if (err)
+        if (err) {
                 pr_err("%s: fman_port_config failed\n", __func__);
+                return err;
+        }
 
         err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
-        if (err)
+        if (err) {
                 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                        __func__);
+                return err;
+        }
 
         err = fman_port_init(port);
         if (err)
                 pr_err("%s: fm_port_init failed\n", __func__);
+
+        return err;
 }
 
-static void dpaa_eth_init_ports(struct mac_device *mac_dev,
-                                struct dpaa_bp **bps, size_t count,
-                                struct fm_port_fqs *port_fqs,
-                                struct dpaa_buffer_layout *buf_layout,
-                                struct device *dev)
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
+                               struct dpaa_bp **bps, size_t count,
+                               struct fm_port_fqs *port_fqs,
+                               struct dpaa_buffer_layout *buf_layout,
+                               struct device *dev)
 {
         struct fman_port *rxport = mac_dev->port[RX];
         struct fman_port *txport = mac_dev->port[TX];
+        int err;
+
+        err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+                                    port_fqs->tx_defq, &buf_layout[TX]);
+        if (err)
+                return err;
+
+        err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+                                    port_fqs->rx_defq, &buf_layout[RX]);
 
-        dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
-                              port_fqs->tx_defq, &buf_layout[TX]);
-        dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
-                              port_fqs->rx_defq, &buf_layout[RX]);
+        return err;
 }
 
 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1612,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
         return skb;
 }
 
+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+        /* The parser has run and performed L4 checksum validation.
+         * We know there were no parser errors (and implicitly no
+         * L4 csum error), otherwise we wouldn't be here.
+         */
+        if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+            (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+                return CHECKSUM_UNNECESSARY;
+
+        /* We're here because either the parser didn't run or the L4 checksum
+         * was not verified. This may include the case of a UDP frame with
+         * checksum zero or an L4 proto other than TCP/UDP
+         */
+        return CHECKSUM_NONE;
+}
+
 /* Build a linear skb around the received buffer.
  * We are guaranteed there is enough room at the end of the data buffer to
  * accommodate the shared info area of the skb.
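Because NETIF_F_RXCSUM is consulted per frame here, the offload can in principle be toggled at runtime (e.g. ethtool -K <iface> rx off), assuming the flag is also exposed through hw_features; with the bit cleared, rx_csum_offload() falls back to CHECKSUM_NONE and the stack verifies checksums in software.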
@@ -1556,7 +1659,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
         skb_reserve(skb, fd_off);
         skb_put(skb, qm_fd_get_length(fd));
 
-        skb->ip_summed = CHECKSUM_NONE;
+        skb->ip_summed = rx_csum_offload(priv, fd);
 
         return skb;
 
@@ -1616,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
         if (WARN_ON(unlikely(!skb)))
                 goto free_buffers;
 
-        skb->ip_summed = CHECKSUM_NONE;
+        skb->ip_summed = rx_csum_offload(priv, fd);
 
         /* Make sure forwarded skbs will have enough space
          * on Tx, if extra headers are added.
@@ -2093,7 +2196,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
         dma_addr_t addr = qm_fd_addr(fd);
         enum qm_fd_format fd_format;
         struct net_device *net_dev;
-        u32 fd_status = fd->status;
+        u32 fd_status;
         struct dpaa_bp *dpaa_bp;
         struct dpaa_priv *priv;
         unsigned int skb_len;
@@ -2350,6 +2453,7 @@ static const struct net_device_ops dpaa_ops = {
         .ndo_validate_addr = eth_validate_addr,
         .ndo_set_rx_mode = dpaa_set_rx_mode,
         .ndo_do_ioctl = dpaa_ioctl,
+        .ndo_setup_tc = dpaa_setup_tc,
 };
 
 static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2728,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
         /* All real interfaces need their ports initialized */
-        dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
-                            &priv->buf_layout[0], dev);
+        err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+                                  &priv->buf_layout[0], dev);
+        if (err)
+                goto init_ports_failed;
 
         priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
         if (!priv->percpu_priv) {
@@ -2638,6 +2744,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
                 memset(percpu_priv, 0, sizeof(*percpu_priv));
         }
 
+        priv->num_tc = 1;
+        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
         /* Initialize NAPI */
         err = dpaa_napi_add(net_dev);
         if (err < 0)
@@ -2658,6 +2767,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 napi_add_failed:
         dpaa_napi_del(net_dev);
 alloc_percpu_failed:
+init_ports_failed:
         dpaa_fq_free(dev, &priv->dpaa_fq_list);
 fq_alloc_failed:
         qman_delete_cgr_safe(&priv->ingress_cgr);

drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

Lines changed: 7 additions & 1 deletion
@@ -39,7 +39,12 @@
 #include "mac.h"
 #include "dpaa_eth_trace.h"
 
-#define DPAA_ETH_TXQ_NUM        NR_CPUS
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM             4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM         NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM        (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
 #define DPAA_BPS_NUM 3 /* number of bpools per interface */
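Worked through on an assumed 8-CPU build: DPAA_ETH_TXQ_NUM = 4 * 8 = 32 Tx queues in total, which dpaa_setup_tc() exposes to the stack in chunks of DPAA_TC_TXQ_NUM = 8 per traffic class (a single chunk, num_tc = 1, until mqprio is configured).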

@@ -152,6 +157,7 @@ struct dpaa_priv {
         u16 channel;
         struct list_head dpaa_fq_list;
 
+        u8 num_tc;
         u32 msg_enable; /* net_device message level */
 
         struct {
