Skip to content
Permalink
Browse files
mlxsw: spectrum_fid: Configure layer 3 egress VID classification
After routing, the device always consults a table that determines the
packet's egress VID based on {eRIF, Port}. In the unified bridge model, it
is up to software to maintain this table via the REIV register.

The table needs to be updated in the following flows:
1. When a RIF is set on a FID, need to iterate over the FID's {Port, VID}
   list and issue REIV write to map the {RIF, Port} to the given VID.
2. When a {Port, VID} is mapped to a FID and the FID already has a RIF,
   need to issue REIV write with a single record to map the {RIF, Port}
   to the given VID.

REIV register supports a simultaneous update of 256 ports, so use this
capability for the first flow.

Handle the two above mentioned flows.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
  • Loading branch information
Amit Cohen authored and idosch committed Jan 18, 2022
1 parent 271ecc5 commit 6e0855a9b3824d98f266ac39a1a19c7bce925cd2
Showing 1 changed file with 107 additions and 4 deletions.
@@ -458,6 +458,57 @@ static int __mlxsw_sp_fid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
return err;
}

/* Walk the FID's {port, VID} list and program the {eRIF, port} -> egress VID
 * mapping for every entry via REIV, batching up to
 * MLXSW_REG_REIV_REC_MAX_COUNT records per register write. When @valid is
 * false the mappings are cleared (eVID 0).
 *
 * Fixes over the previous version: the old code advanced exactly one page
 * after flushing and re-packed the pending record without re-checking the
 * page bounds, so an entry whose local port skipped more than one page was
 * written into the wrong page; it also issued a write for pages that had no
 * records set. Both are handled by advancing pages in a loop and only
 * writing pages that carry records.
 */
static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
					      u16 rif_index, bool valid)
{
	struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
	struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
	char reiv_pl[MLXSW_REG_REIV_LEN] = {};
	bool records_to_write = false;
	u8 rec_num, current_page = 0;
	u16 last_local_port;
	int err;

	mlxsw_reg_reiv_pack(reiv_pl, current_page, rif_index);
	last_local_port = current_page * MLXSW_REG_REIV_REC_MAX_COUNT +
			  MLXSW_REG_REIV_REC_MAX_COUNT - 1;

	list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
		/* The list is sorted by local_port. Flush the current page
		 * (if it has pending records) and move forward until the
		 * page covering this entry is reached.
		 */
		while (port_vid->local_port > last_local_port) {
			if (records_to_write) {
				err = mlxsw_reg_write(mlxsw_sp->core,
						      MLXSW_REG(reiv),
						      reiv_pl);
				if (err)
					return err;
				records_to_write = false;
			}

			current_page++;
			memset(reiv_pl, 0, MLXSW_REG_REIV_LEN);
			mlxsw_reg_reiv_pack(reiv_pl, current_page, rif_index);
			last_local_port = current_page *
					  MLXSW_REG_REIV_REC_MAX_COUNT +
					  MLXSW_REG_REIV_REC_MAX_COUNT - 1;
		}

		rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
		mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
		/* eVID 0 invalidates the mapping. */
		mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
					    valid ? port_vid->vid : 0);
		records_to_write = true;
	}

	/* Flush the last, partially filled page. */
	if (records_to_write)
		return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv),
				       reiv_pl);

	return 0;
}

int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif,
u16 rif_index)
{
@@ -475,21 +526,33 @@ int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif,
goto err_vni_to_fid_rif_update;

err = __mlxsw_sp_fid_vid_to_fid_rif_update(fid, rif_index, true);
if (err)
goto err_vid_to_fid_rif_update;

err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);

out:
fid->rif = rif;
return err;

err_vid_to_fid_rif_update:
__mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif_index, false);
err_vni_to_fid_rif_update:
__mlxsw_sp_fid_to_fid_rif_update(fid, rif_index, false);
return err;
}

void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
{
u16 rif_index = 0;

if (fid->rif)
rif_index = mlxsw_sp_rif_index_get(fid->rif);

fid->rif = NULL;
if (!fid->fid_family->mlxsw_sp->ubridge)
return;

mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
__mlxsw_sp_fid_vid_to_fid_rif_update(fid, 0, false);
__mlxsw_sp_fid_vni_to_fid_rif_update(fid, 0, false);
__mlxsw_sp_fid_to_fid_rif_update(fid, 0, false);
@@ -607,22 +670,62 @@ static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid, __be32 vni,
return err;
}

/* Program a single {eRIF, local port} -> egress VID record via REIV.
 * Passing @valid == false clears the mapping (eVID 0).
 *
 * REIV pages hold MLXSW_REG_REIV_REC_MAX_COUNT records each, so the page
 * index is local_port / MLXSW_REG_REIV_REC_MAX_COUNT and the record index is
 * the remainder. The previous version divided by
 * (MLXSW_REG_REIV_REC_MAX_COUNT - 1), which paired the last record of each
 * page with the following page and thus updated the wrong port; it was also
 * inconsistent with the paging math in mlxsw_sp_fid_erif_eport_to_vid_map().
 */
static int
mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
				       u16 rif_index, u16 local_port, u16 vid,
				       bool valid)
{
	u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
	u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
	struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
	char reiv_pl[MLXSW_REG_REIV_LEN];

	mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
	mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
	mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
}

/* Map (or unmap, per @valid) a {port, VID} to the FID via SVFA. If the FID
 * already has a RIF, also set the ingress RIF in the SVFA record and, in the
 * unified bridge model, program the matching {eRIF, port} -> VID egress
 * entry via REIV. On REIV failure the SVFA mapping just added is rolled
 * back so hardware state stays consistent.
 *
 * The previous text was a mis-merged diff: it contained both the old
 * unconditional "return mlxsw_reg_write(...)" (which made everything after
 * it unreachable) and a duplicate mlxsw_reg_svfa_irif_set() call. This is
 * the coherent post-change version.
 */
static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
				       u16 local_port, u16 vid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];
	u16 fid_index = fid->fid_index;
	u16 rif_index = 0;
	int err;

	mlxsw_reg_svfa_vid_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
	if (fid->rif) {
		rif_index = mlxsw_sp_rif_index_get(fid->rif);
		mlxsw_reg_svfa_irif_v_set(svfa_pl, true);
		mlxsw_reg_svfa_irif_set(svfa_pl, rif_index);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
	if (err || !fid->rif)
		goto out;

	/* The REIV table is only maintained by software in the unified
	 * bridge model.
	 */
	if (!fid->fid_family->mlxsw_sp->ubridge)
		goto out;

	err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, rif_index, local_port,
						     vid, valid);
	if (err)
		goto err_erif_eport_to_vid_map_one;

	goto out;

err_erif_eport_to_vid_map_one:
	/* Roll back the SVFA mapping created above; nothing to undo when the
	 * caller was removing a mapping.
	 */
	if (valid) {
		mlxsw_reg_svfa_vid_pack(svfa_pl, local_port, mt, !valid,
					fid_index, vid);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
	}
out:
	return err;
}

static struct mlxsw_sp_fid_8021d *

0 comments on commit 6e0855a

Please sign in to comment.