Skip to content
Permalink
Browse files
net: dsa: remove the "dsa_to_port in a loop" antipattern from drivers
Since the DSA conversion from the ds->ports array into the dst->ports
list, the DSA API has encouraged driver writers to write inefficient
code.

Currently, switch drivers which want to filter by a specific type of
port when iterating, like {!unused, user, cpu, dsa}, use the
dsa_is_*_port helpers. Under the hood, these use dsa_to_port which
iterates again through dst->ports. But the driver iterates through the
port list already, so the complexity is quadratic for the typical case
of a single-switch tree.

Many drivers also call dsa_to_port many times while iterating and do not
even cache the result, probably unaware of the hidden complexity of this
function.

When drivers need to iterate over pairs of {to, from} ports, and use
any of the functions above, the complexity is even higher.

Use the newly introduced DSA port iterators, and also introduce some
more which are not directly needed by the DSA core.

Note that the b53_br_{join,leave} functions have been converted to use
the dp-based iterators, but to preserve the original behavior, the
dev->enabled_ports check from b53_for_each_port has been split out and
open-coded. This will be addressed in the next patch.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
  • Loading branch information
vladimiroltean authored and intel-lab-lkp committed Aug 10, 2021
1 parent f9a2b28 commit ed3baa791b1827372c9df651fca62a9152f2c852
Show file tree
Hide file tree
Showing 18 changed files with 284 additions and 327 deletions.
@@ -1851,6 +1851,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct dsa_port *dp;
u16 pvlan, reg;
unsigned int i;

@@ -1873,8 +1874,13 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)

b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

b53_for_each_port(dev, i) {
if (dsa_to_port(ds, i)->bridge_dev != br)
dsa_switch_for_each_port(dp, ds) {
i = dp->index;

if (!(dev->enabled_ports & BIT(i)))
continue;

if (dp->bridge_dev != br)
continue;

/* Add this local port to the remote port VLAN control
@@ -1903,14 +1909,20 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
struct dsa_port *dp;
unsigned int i;

b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);

b53_for_each_port(dev, i) {
dsa_switch_for_each_port(dp, ds) {
i = dp->index;

if (!(dev->enabled_ports & BIT(i)))
continue;

/* Don't touch the remaining ports */
if (dsa_to_port(ds, i)->bridge_dev != br)
if (dp->bridge_dev != br)
continue;

b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -913,18 +913,16 @@ static void bcm_sf2_enable_acb(struct dsa_switch *ds)
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port;
struct dsa_port *dp;

bcm_sf2_intr_disable(priv);

/* Disable all ports physically present including the IMP
* port, the other ones have already been disabled during
* bcm_sf2_sw_setup
*/
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
bcm_sf2_port_disable(ds, port);
}
dsa_switch_for_each_available_port(dp, ds)
bcm_sf2_port_disable(ds, dp->index);

if (!priv->wol_ports_mask)
clk_disable_unprepare(priv->clk);
@@ -345,19 +345,16 @@ static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
int i;
struct dsa_port *dp;

dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port);

/* Restriction: Make sure that nobody uses the "private" VLANs. These
* VLANs are internally used by the driver to ensure port
* separation. Thus, they cannot be used by someone else.
*/
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
const u16 restricted_vid = hellcreek_private_vid(i);

if (!dsa_is_user_port(ds, i))
continue;
dsa_switch_for_each_user_port(dp, ds) {
const u16 restricted_vid = hellcreek_private_vid(dp->index);

if (vlan->vid == restricted_vid) {
NL_SET_ERR_MSG_MOD(extack, "VID restricted by driver");
@@ -1301,8 +1298,9 @@ static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds)
static int hellcreek_setup(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
struct dsa_port *dp;
u16 swcfg = 0;
int ret, i;
int ret;

dev_dbg(hellcreek->dev, "Set up the switch\n");

@@ -1328,12 +1326,8 @@ static int hellcreek_setup(struct dsa_switch *ds)
hellcreek_write(hellcreek, swcfg, HR_SWCFG);

/* Initial vlan membership to reflect port separation */
for (i = 0; i < ds->num_ports; ++i) {
if (!dsa_is_user_port(ds, i))
continue;

hellcreek_setup_vlan_membership(ds, i, true);
}
dsa_switch_for_each_user_port(dp, ds)
hellcreek_setup_vlan_membership(ds, dp->index, true);

/* Configure PCP <-> TC mapping */
hellcreek_setup_tc_identity_mapping(hellcreek);
@@ -1410,10 +1404,10 @@ hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,
struct netdev_notifier_changeupper_info *info)
{
struct hellcreek *hellcreek = ds->priv;
struct dsa_port *dp;
bool used = true;
int ret = -EBUSY;
u16 vid;
int i;

dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port);

@@ -1432,9 +1426,8 @@ hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,

/* For all ports, check bitmaps */
mutex_lock(&hellcreek->vlan_lock);
for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
if (!dsa_is_user_port(ds, i))
continue;
dsa_switch_for_each_user_port(dp, ds) {
int i = dp->index;

if (port == i)
continue;
@@ -354,13 +354,12 @@ long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
struct dsa_switch *ds = hellcreek->ds;
int i, restart = 0;
struct dsa_port *dp;
int restart = 0;

for (i = 0; i < ds->num_ports; i++) {
dsa_switch_for_each_user_port(dp, ds) {
struct hellcreek_port_hwtstamp *ps;

if (!dsa_is_user_port(ds, i))
continue;
int i = dp->index;

ps = &hellcreek->ports[i].port_hwtstamp;

@@ -459,15 +458,11 @@ static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port)
int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek)
{
struct dsa_switch *ds = hellcreek->ds;
int i;
struct dsa_port *dp;

/* Initialize timestamping ports. */
for (i = 0; i < ds->num_ports; ++i) {
if (!dsa_is_user_port(ds, i))
continue;

hellcreek_hwtstamp_port_setup(hellcreek, i);
}
dsa_switch_for_each_user_port(dp, ds)
hellcreek_hwtstamp_port_setup(hellcreek, dp->index);

/* Select the synchronized clock as the source timekeeper for the
* timestamps and enable inline timestamping.
@@ -1266,11 +1266,14 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
static void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
struct ksz_port *p;
int i;

for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
dsa_switch_for_each_port(dp, ds) {
i = dp->index;

if (dsa_port_is_cpu(dp) && (dev->cpu_ports & (1 << i))) {
phy_interface_t interface;
const char *prev_msg;
const char *prev_mode;
@@ -1609,18 +1612,22 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {

int ksz9477_switch_register(struct ksz_device *dev)
{
int ret, i;
struct phy_device *phydev;
struct dsa_switch *ds;
struct dsa_port *dp;
int ret;

ret = ksz_switch_register(dev, &ksz9477_dev_ops);
if (ret)
return ret;

for (i = 0; i < dev->phy_port_cnt; ++i) {
if (!dsa_is_user_port(dev->ds, i))
ds = dev->ds;

dsa_switch_for_each_user_port(dp, ds) {
if (dp->index >= dev->phy_port_cnt)
continue;

phydev = dsa_to_port(dev->ds, i)->slave->phydev;
phydev = dp->slave->phydev;

/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phydev,
@@ -68,28 +68,23 @@ static void ksz_mib_read_work(struct work_struct *work)
{
struct ksz_device *dev = container_of(work, struct ksz_device,
mib_read.work);
struct dsa_switch *ds = dev->ds;
struct ksz_port_mib *mib;
struct dsa_port *dp;
struct ksz_port *p;
int i;

for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;

p = &dev->ports[i];
dsa_switch_for_each_available_port(dp, ds) {
p = &dev->ports[dp->index];
mib = &p->mib;
mutex_lock(&mib->cnt_mutex);

/* Only read MIB counters when the port is told to do.
* If not, read only dropped counters when link is not up.
*/
if (!p->read) {
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
if (!p->read && !netif_carrier_ok(dp->slave))
mib->cnt_ptr = dev->reg_mib_cnt;

if (!netif_carrier_ok(dp->slave))
mib->cnt_ptr = dev->reg_mib_cnt;
}
port_r_cnt(dev, i);
port_r_cnt(dev, dp->index);
p->read = false;
mutex_unlock(&mib->cnt_mutex);
}
@@ -1195,25 +1195,27 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
u32 port_bitmap = BIT(MT7530_CPU_PORT);
int i;
struct dsa_port *dp;

mutex_lock(&priv->reg_mutex);

for (i = 0; i < MT7530_NUM_PORTS; i++) {
dsa_switch_for_each_user_port(dp, ds) {
if (dp->index == port)
continue;

if (dp->bridge_dev != bridge)
continue;

/* Add this port to the port matrix of the other ports in the
* same bridge. If the port is disabled, port matrix is kept
* and not being setup until the port becomes enabled.
*/
if (dsa_is_user_port(ds, i) && i != port) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
mt7530_set(priv, MT7530_PCR_P(i),
PCR_MATRIX(BIT(port)));
priv->ports[i].pm |= PCR_MATRIX(BIT(port));
if (priv->ports[dp->index].enable)
mt7530_set(priv, MT7530_PCR_P(dp->index),
PCR_MATRIX(BIT(port)));
priv->ports[dp->index].pm |= PCR_MATRIX(BIT(port));

port_bitmap |= BIT(i);
}
port_bitmap |= BIT(dp->index);
}

/* Add the all other ports to this port matrix. */
@@ -1236,7 +1238,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
bool all_user_ports_removed = true;
int i;
struct dsa_port *dp;

/* This is called after .port_bridge_leave when leaving a VLAN-aware
* bridge. Don't set standalone ports to fallback mode.
@@ -1255,9 +1257,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);

for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
dsa_switch_for_each_user_port(dp, ds) {
if (dsa_port_is_vlan_filtering(dp)) {
all_user_ports_removed = false;
break;
}
@@ -1307,26 +1308,31 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct mt7530_priv *priv = ds->priv;
int i;
struct dsa_port *dp;

mutex_lock(&priv->reg_mutex);

for (i = 0; i < MT7530_NUM_PORTS; i++) {
dsa_switch_for_each_user_port(dp, ds) {
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
* And the other port's port matrix cannot be broken when the
* other port is still a VLAN-aware port.
*/
if (dsa_is_user_port(ds, i) && i != port &&
!dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
mt7530_clear(priv, MT7530_PCR_P(i),
PCR_MATRIX(BIT(port)));
priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
}
if (dp->index == port)
continue;

if (dsa_port_is_vlan_filtering(dp))
continue;

if (dp->bridge_dev != bridge)
continue;

if (priv->ports[dp->index].enable)
mt7530_clear(priv, MT7530_PCR_P(dp->index),
PCR_MATRIX(BIT(port)));

priv->ports[dp->index].pm &= ~PCR_MATRIX(BIT(port));
}

/* Set the cpu port to be the only one in the port matrix of

0 comments on commit ed3baa7

Please sign in to comment.