
Commit 0796004

Justin Lai authored and kuba-moo committed
rtase: Implement net_device_ops
1. Implement .ndo_set_rx_mode so that the device can change its address list filtering.
2. Implement .ndo_set_mac_address so that the MAC address can be changed.
3. Implement .ndo_change_mtu so that the MTU can be changed.
4. Implement .ndo_tx_timeout to perform related processing when the transmitter does not make any progress.
5. Implement .ndo_get_stats64 to provide the statistics that are reported when the user requests network device usage.
6. Implement .ndo_vlan_rx_add_vid to register a VLAN ID when the device supports VLAN filtering.
7. Implement .ndo_vlan_rx_kill_vid to unregister a VLAN ID when the device supports VLAN filtering.
8. Implement .ndo_setup_tc to enable setting any "tc" scheduler, classifier or action on dev.
9. Implement .ndo_fix_features to enable adjusting the requested feature flags based on device-specific constraints.
10. Implement .ndo_set_features to enable updating the device configuration to new features.

Signed-off-by: Justin Lai <justinlai0215@realtek.com>
Link: https://patch.msgid.link/20240904032114.247117-9-justinlai0215@realtek.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent cf7226c commit 0796004
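
For readers unfamiliar with net_device_ops: the hooks added below are not called by the driver itself, they are entry points the networking core invokes on behalf of user space (for example an MTU change requested via rtnetlink). The following is a minimal, hedged sketch of that caller side, not part of the patch; example_set_mtu() is a made-up name used purely for illustration, while dev_set_mtu(), rtnl_lock()/rtnl_unlock() and netdev_update_features() are existing core APIs.

	/* Hedged sketch: how an in-kernel caller would reach the new hooks. */
	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static int example_set_mtu(struct net_device *dev, int new_mtu)
	{
		int err;

		rtnl_lock();	/* ndo callbacks run under the RTNL lock */

		/* Core helper: validates the requested range, then calls
		 * dev->netdev_ops->ndo_change_mtu() (rtase_change_mtu() in
		 * this driver). rtase_change_mtu() in turn calls
		 * netdev_update_features(), which triggers
		 * .ndo_fix_features and .ndo_set_features.
		 */
		err = dev_set_mtu(dev, new_mtu);

		rtnl_unlock();

		return err;
	}

With this patch applied, raising the MTU above ETH_DATA_LEN therefore also clears NETIF_F_ALL_TSO via rtase_fix_features() the next time features are renegotiated.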


drivers/net/ethernet/realtek/rtase/rtase_main.c

Lines changed: 235 additions & 0 deletions
@@ -1417,6 +1417,11 @@ static netdev_tx_t rtase_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static void rtase_set_rx_mode(struct net_device *dev)
+{
+	rtase_hw_set_rx_packet_filter(dev);
+}
+
 static void rtase_enable_eem_write(const struct rtase_private *tp)
 {
 	u8 val;
@@ -1449,10 +1454,240 @@ static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
 	rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
 }
 
+static int rtase_set_mac_address(struct net_device *dev, void *p)
+{
+	struct rtase_private *tp = netdev_priv(dev);
+	int ret;
+
+	ret = eth_mac_addr(dev, p);
+	if (ret)
+		return ret;
+
+	rtase_rar_set(tp, dev->dev_addr);
+
+	return 0;
+}
+
+static int rtase_change_mtu(struct net_device *dev, int new_mtu)
+{
+	dev->mtu = new_mtu;
+
+	netdev_update_features(dev);
+
+	return 0;
+}
+
+static void rtase_wait_for_quiescence(const struct net_device *dev)
+{
+	struct rtase_private *tp = netdev_priv(dev);
+	struct rtase_int_vector *ivec;
+	u32 i;
+
+	for (i = 0; i < tp->int_nums; i++) {
+		ivec = &tp->int_vector[i];
+		synchronize_irq(ivec->irq);
+		/* wait for any pending NAPI task to complete */
+		napi_disable(&ivec->napi);
+	}
+
+	rtase_irq_dis_and_clear(tp);
+
+	for (i = 0; i < tp->int_nums; i++) {
+		ivec = &tp->int_vector[i];
+		napi_enable(&ivec->napi);
+	}
+}
+
+static void rtase_sw_reset(struct net_device *dev)
+{
+	struct rtase_private *tp = netdev_priv(dev);
+	int ret;
+
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
+	rtase_hw_reset(dev);
+
+	/* let's wait a bit while any (async) irq lands on */
+	rtase_wait_for_quiescence(dev);
+	rtase_tx_clear(tp);
+	rtase_rx_clear(tp);
+
+	ret = rtase_init_ring(dev);
+	if (ret) {
+		netdev_err(dev, "unable to init ring\n");
+		rtase_free_desc(tp);
+		return;
+	}
+
+	rtase_hw_config(dev);
+	/* always link, so start to transmit & receive */
+	rtase_hw_start(dev);
+
+	netif_carrier_on(dev);
+	netif_wake_queue(dev);
+}
+
+static void rtase_dump_tally_counter(const struct rtase_private *tp)
+{
+	dma_addr_t paddr = tp->tally_paddr;
+	u32 cmd = lower_32_bits(paddr);
+	u32 val;
+	int err;
+
+	rtase_w32(tp, RTASE_DTCCR4, upper_32_bits(paddr));
+	rtase_w32(tp, RTASE_DTCCR0, cmd);
+	rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_DUMP);
+
+	err = read_poll_timeout(rtase_r32, val, !(val & RTASE_COUNTER_DUMP),
+				10, 250, false, tp, RTASE_DTCCR0);
+
+	if (err == -ETIMEDOUT)
+		netdev_err(tp->dev, "error occurred in dump tally counter\n");
+}
+
+static void rtase_dump_state(const struct net_device *dev)
+{
+	const struct rtase_private *tp = netdev_priv(dev);
+	int max_reg_size = RTASE_PCI_REGS_SIZE;
+	const struct rtase_counters *counters;
+	const struct rtase_ring *ring;
+	u32 dword_rd;
+	int n = 0;
+
+	ring = &tp->tx_ring[0];
+	netdev_err(dev, "Tx descriptor info:\n");
+	netdev_err(dev, "Tx curIdx = 0x%x\n", ring->cur_idx);
+	netdev_err(dev, "Tx dirtyIdx = 0x%x\n", ring->dirty_idx);
+	netdev_err(dev, "Tx phyAddr = %pad\n", &ring->phy_addr);
+
+	ring = &tp->rx_ring[0];
+	netdev_err(dev, "Rx descriptor info:\n");
+	netdev_err(dev, "Rx curIdx = 0x%x\n", ring->cur_idx);
+	netdev_err(dev, "Rx dirtyIdx = 0x%x\n", ring->dirty_idx);
+	netdev_err(dev, "Rx phyAddr = %pad\n", &ring->phy_addr);
+
+	netdev_err(dev, "Device Registers:\n");
+	netdev_err(dev, "Chip Command = 0x%02x\n",
+		   rtase_r8(tp, RTASE_CHIP_CMD));
+	netdev_err(dev, "IMR = %08x\n", rtase_r32(tp, RTASE_IMR0));
+	netdev_err(dev, "ISR = %08x\n", rtase_r32(tp, RTASE_ISR0));
+	netdev_err(dev, "Boot Ctrl Reg(0xE004) = %04x\n",
+		   rtase_r16(tp, RTASE_BOOT_CTL));
+	netdev_err(dev, "EPHY ISR(0xE014) = %04x\n",
+		   rtase_r16(tp, RTASE_EPHY_ISR));
+	netdev_err(dev, "EPHY IMR(0xE016) = %04x\n",
+		   rtase_r16(tp, RTASE_EPHY_IMR));
+	netdev_err(dev, "CLKSW SET REG(0xE018) = %04x\n",
+		   rtase_r16(tp, RTASE_CLKSW_SET));
+
+	netdev_err(dev, "Dump PCI Registers:\n");
+
+	while (n < max_reg_size) {
+		if ((n % RTASE_DWORD_MOD) == 0)
+			netdev_err(tp->dev, "0x%03x:\n", n);
+
+		pci_read_config_dword(tp->pdev, n, &dword_rd);
+		netdev_err(tp->dev, "%08x\n", dword_rd);
+		n += 4;
+	}
+
+	netdev_err(dev, "Dump tally counter:\n");
+	counters = tp->tally_vaddr;
+	rtase_dump_tally_counter(tp);
+
+	netdev_err(dev, "tx_packets %lld\n",
+		   le64_to_cpu(counters->tx_packets));
+	netdev_err(dev, "rx_packets %lld\n",
+		   le64_to_cpu(counters->rx_packets));
+	netdev_err(dev, "tx_errors %lld\n",
+		   le64_to_cpu(counters->tx_errors));
+	netdev_err(dev, "rx_errors %d\n",
+		   le32_to_cpu(counters->rx_errors));
+	netdev_err(dev, "rx_missed %d\n",
+		   le16_to_cpu(counters->rx_missed));
+	netdev_err(dev, "align_errors %d\n",
+		   le16_to_cpu(counters->align_errors));
+	netdev_err(dev, "tx_one_collision %d\n",
+		   le32_to_cpu(counters->tx_one_collision));
+	netdev_err(dev, "tx_multi_collision %d\n",
+		   le32_to_cpu(counters->tx_multi_collision));
+	netdev_err(dev, "rx_unicast %lld\n",
+		   le64_to_cpu(counters->rx_unicast));
+	netdev_err(dev, "rx_broadcast %lld\n",
+		   le64_to_cpu(counters->rx_broadcast));
+	netdev_err(dev, "rx_multicast %d\n",
+		   le32_to_cpu(counters->rx_multicast));
+	netdev_err(dev, "tx_aborted %d\n",
+		   le16_to_cpu(counters->tx_aborted));
+	netdev_err(dev, "tx_underun %d\n",
+		   le16_to_cpu(counters->tx_underun));
+}
+
+static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+	rtase_dump_state(dev);
+	rtase_sw_reset(dev);
+}
+
+static void rtase_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *stats)
+{
+	const struct rtase_private *tp = netdev_priv(dev);
+	const struct rtase_counters *counters;
+
+	counters = tp->tally_vaddr;
+
+	dev_fetch_sw_netstats(stats, dev->tstats);
+
+	/* fetch additional counter values missing in stats collected by driver
+	 * from tally counter
+	 */
+	rtase_dump_tally_counter(tp);
+	stats->rx_errors = tp->stats.rx_errors;
+	stats->tx_errors = le64_to_cpu(counters->tx_errors);
+	stats->rx_dropped = tp->stats.rx_dropped;
+	stats->tx_dropped = tp->stats.tx_dropped;
+	stats->multicast = tp->stats.multicast;
+	stats->rx_length_errors = tp->stats.rx_length_errors;
+}
+
+static netdev_features_t rtase_fix_features(struct net_device *dev,
+					    netdev_features_t features)
+{
+	netdev_features_t features_fix = features;
+
+	/* not support TSO for jumbo frames */
+	if (dev->mtu > ETH_DATA_LEN)
+		features_fix &= ~NETIF_F_ALL_TSO;
+
+	return features_fix;
+}
+
+static int rtase_set_features(struct net_device *dev,
+			      netdev_features_t features)
+{
+	netdev_features_t features_set = features;
+
+	features_set &= NETIF_F_RXALL | NETIF_F_RXCSUM |
+			NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (features_set ^ dev->features)
+		rtase_hw_set_features(dev, features_set);
+
+	return 0;
+}
+
 static const struct net_device_ops rtase_netdev_ops = {
 	.ndo_open = rtase_open,
 	.ndo_stop = rtase_close,
 	.ndo_start_xmit = rtase_start_xmit,
+	.ndo_set_rx_mode = rtase_set_rx_mode,
+	.ndo_set_mac_address = rtase_set_mac_address,
+	.ndo_change_mtu = rtase_change_mtu,
+	.ndo_tx_timeout = rtase_tx_timeout,
+	.ndo_get_stats64 = rtase_get_stats64,
+	.ndo_fix_features = rtase_fix_features,
+	.ndo_set_features = rtase_set_features,
 };
 
 static void rtase_get_mac_address(struct net_device *dev)
