@@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
 	.profile			= &mlxsw_sp_config_profile,
 };
 
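+/* Flush all FDB entries pointing to the given local port via the
+ * SFDF (Switch Filtering DB Flush) register.
+ */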
+static int
+mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
+	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
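+/* Flush only the FDB entries that match both the given local port and
+ * the specified FID (filtering identifier).
+ */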
+static int
+mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+				    u16 fid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
+	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
+						mlxsw_sp_port->local_port);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
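+/* Flush all FDB entries pointing to the LAG this port is a member of. */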
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
+	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
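+/* Flush the FDB entries that match both the port's LAG and the given FID. */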
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+				      u16 fid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
+	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
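+/* The port has vPorts on top of it, so flush per {port, FID} for each
+ * VID in the VLAN range rather than the entire port at once.
+ */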
+static int
+__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err, last_err = 0;
+	u16 vid;
+
+	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
+		if (err)
+			last_err = err;
+	}
+
+	return last_err;
+}
+
+static int
+__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err, last_err = 0;
+	u16 vid;
+
+	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
+		if (err)
+			last_err = err;
+	}
+
+	return last_err;
+}
+
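+/* Pick the flush method according to whether the port has vPorts and
+ * whether it is a LAG member: ports with vPorts are flushed per-FID,
+ * LAG members are flushed by LAG ID rather than by local port.
+ */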
+static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	if (!list_empty(&mlxsw_sp_port->vports_list))
+		if (mlxsw_sp_port->lagged)
+			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
+		else
+			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
+	else
+		if (mlxsw_sp_port->lagged)
+			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
+		else
+			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+}
+
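+/* A vPort's entries live in the FID its vFID is mapped to, so flush by
+ * {LAG, FID} or {port, FID} depending on whether the underlying port is
+ * a LAG member.
+ */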
+static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
+	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+
+	if (mlxsw_sp_vport->lagged)
+		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
+							     fid);
+	else
+		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+}
+
 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
 {
 	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+				      bool flush_fdb)
 {
 	struct net_device *dev = mlxsw_sp_port->dev;
 
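+	/* A failed flush is only logged; the bridge leave proceeds anyway. */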
+	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
+		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
+
 	mlxsw_sp_port->learning = 0;
 	mlxsw_sp_port->learning_sync = 0;
 	mlxsw_sp_port->uc_flood = 0;
@@ -2200,10 +2313,15 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
 	return err;
 }
 
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+				       struct net_device *br_dev,
+				       bool flush_fdb);
+
 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
 				   struct net_device *lag_dev)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_port *mlxsw_sp_vport;
 	struct mlxsw_sp_upper *lag;
 	u16 lag_id = mlxsw_sp_port->lag_id;
 	int err;
@@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (err)
 		return err;
 
+	/* In case we leave a LAG device that has bridges built on top,
+	 * then their teardown sequence is never issued and we need to
+	 * invoke the necessary cleanup routines ourselves.
+	 */
+	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+			    vport.list) {
+		struct net_device *br_dev;
+
+		if (!mlxsw_sp_vport->bridged)
+			continue;
+
+		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
+	}
+
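+	/* The port itself may also be bridged through the LAG device;
+	 * leave that bridge too, without a per-port FDB flush.
+	 */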
+	if (mlxsw_sp_port->bridged) {
+		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
+		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
+
+		if (lag->ref_count == 1)
+			mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+	}
+
 	if (lag->ref_count == 1) {
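+		/* Last port leaving: flush entries pointing to the LAG
+		 * before it is destroyed.
+		 */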
+		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
+			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
 		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
 		if (err)
 			return err;
@@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
 	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
 }
 
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-				       struct net_device *br_dev);
-
 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
 				   struct net_device *vlan_dev)
 {
@@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
 		struct net_device *br_dev;
 
 		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
-		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
 	}
 
 	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
 			}
 			mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
 		} else {
-			err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+			err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+							 true);
 			mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
 			if (err) {
 				netdev_err(dev, "Failed to leave bridge\n");
@@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-				       struct net_device *br_dev)
+				       struct net_device *br_dev,
+				       bool flush_fdb)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
 		goto err_vport_flood_set;
 	}
 
+	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
+		netdev_err(dev, "Failed to flush FDB\n");
+
 	/* Switch between the vFIDs and destroy the old one if needed. */
 	new_vfid->nr_vports++;
 	mlxsw_sp_vport->vport.vfid = new_vfid;
@@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
 		if (!mlxsw_sp_vport)
 			return NOTIFY_DONE;
 		err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
-						  upper_dev);
+						  upper_dev, true);
 		if (err) {
 			netdev_err(dev, "Failed to leave bridge\n");
 			return NOTIFY_BAD;