Commit c57529e1 authored by Ido Schimmel, committed by David S. Miller

mlxsw: spectrum: Replace vPorts with Port-VLAN

As explained in the cover letter, since the introduction of the bridge
offload in the mlxsw driver, information related to the offloaded bridge
and bridge ports was stored in the individual port struct,
mlxsw_sp_port.

This led to a bloated struct storing both physical properties of the
port (e.g., autoneg status) and logical properties of an upper bridge
port (e.g., learning, mrouter indication). While this might work well
for simple devices, it proved hard to extend once stacked devices were
taken into account and more advanced use cases (e.g., IGMP snooping)
were considered.

This patch removes the excess information from the above struct and
instead stores it in more appropriate structs that represent the bridge
port, the bridge itself and a VLAN configured on the bridge port.

The membership of a port in a bridge is denoted using the Port-VLAN
struct, which points to the bridge port and is also a member of the
bridge VLAN group of the VLAN it represents. This allows us to
completely remove the vPort abstraction and consolidate many of the
code paths relating to VLAN-aware and VLAN-unaware bridges.
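
For illustration, this is the new Port-VLAN struct (copied from the
definition added by this patch; the comments are editorial annotations
of the relationships described above):

struct mlxsw_sp_port_vlan {
        struct list_head list;                  /* node in mlxsw_sp_port->vlans_list */
        struct mlxsw_sp_port *mlxsw_sp_port;    /* back-pointer to the port */
        struct mlxsw_sp_fid *fid;               /* FID the {Port, VID} is mapped to, if any */
        u16 vid;
        struct mlxsw_sp_bridge_port *bridge_port;       /* set while the VLAN is bridged */
        struct list_head bridge_vlan_node;      /* node in the bridge VLAN's port_vlan_list */
};

Its lifetime is managed through a get/put pair,
mlxsw_sp_port_vlan_get() / mlxsw_sp_port_vlan_put(), which is what
mlxsw_sp_port_add_vid() and mlxsw_sp_port_kill_vid() now use instead of
creating and destroying vPorts.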

Note that the FID / vFID code is currently duplicated, but this will
soon go away when the common FID core is introduced.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ed9ddd3a
@@ -1401,152 +1401,154 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid, last_visited_vid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_fid *fid;
u16 vid;
int err;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
vid);
if (err) {
last_visited_vid = vid;
list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
list) {
fid = mlxsw_sp_port_vlan->fid;
if (!fid || fid->fid >= MLXSW_SP_VFID_BASE)
continue;
vid = mlxsw_sp_port_vlan->vid;
err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true,
fid->fid, vid);
if (err)
goto err_port_vid_to_fid_set;
}
}
err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
if (err) {
last_visited_vid = VLAN_N_VID;
goto err_port_vid_to_fid_set;
}
if (err)
goto err_port_vp_mode_set;
return 0;
err_port_vp_mode_set:
err_port_vid_to_fid_set:
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
&mlxsw_sp_port->vlans_list, list) {
fid = mlxsw_sp_port_vlan->fid;
if (!fid || fid->fid >= MLXSW_SP_VFID_BASE)
continue;
vid = mlxsw_sp_port_vlan->vid;
mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid->fid,
vid);
}
return err;
}
int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
int err;
err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
if (err)
return err;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
vid, vid);
if (err)
return err;
list_for_each_entry_reverse(mlxsw_sp_port_vlan,
&mlxsw_sp_port->vlans_list, list) {
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
u16 vid = mlxsw_sp_port_vlan->vid;
if (!fid || fid->fid >= MLXSW_SP_VFID_BASE)
continue;
mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid->fid,
vid);
}
return 0;
}
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
if (!mlxsw_sp_vport)
return NULL;
/* dev will be set correctly after the VLAN device is linked
* with the real device. In case of bridge SELF invocation, dev
* will remain as is.
*/
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
mlxsw_sp_vport->vport.vid = vid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
return mlxsw_sp_vport;
}
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
list_del(&mlxsw_sp_vport->vport.list);
kfree(mlxsw_sp_vport);
list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
&mlxsw_sp_port->vlans_list, list)
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
bool untagged = vid == 1;
int err;
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
if (err)
return ERR_PTR(err);
mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
if (!mlxsw_sp_port_vlan)
return ERR_PTR(-ENOMEM);
if (!mlxsw_sp_port_vlan) {
err = -ENOMEM;
goto err_port_vlan_alloc;
}
mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
mlxsw_sp_port_vlan->vid = vid;
list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
return mlxsw_sp_port_vlan;
err_port_vlan_alloc:
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
return ERR_PTR(err);
}
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
u16 vid = mlxsw_sp_port_vlan->vid;
if (fid && !WARN_ON(!fid->leave))
fid->leave(mlxsw_sp_port_vlan);
list_del(&mlxsw_sp_port_vlan->list);
kfree(mlxsw_sp_port_vlan);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_vport;
bool untagged = vid == 1;
int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
* reserved in our case, so simply return.
*/
if (!vid)
return 0;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (mlxsw_sp_port_vlan)
return 0;
return mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
if (IS_ERR(mlxsw_sp_port_vlan))
return PTR_ERR(mlxsw_sp_port_vlan);
return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
if (!mlxsw_sp_vport) {
err = -ENOMEM;
goto err_port_vport_create;
}
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
if (mlxsw_sp_port_vlan->bridge_port)
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
else if (mlxsw_sp_port_vlan->fid)
mlxsw_sp_port_vlan->fid->leave(mlxsw_sp_port_vlan);
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
if (err)
goto err_port_add_vid;
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
/* VLAN 0 is added to HW filter when device goes up, but it is
* reserved in our case, so simply return.
*/
if (!vid)
return 0;
err_port_add_vid:
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
return err;
return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
@@ -1554,8 +1556,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -1564,25 +1564,9 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_vport))
if (!mlxsw_sp_port_vlan)
return 0;
mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
/* Drop FID reference. If this was the last reference the
* resources will be freed.
*/
f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
if (f && !WARN_ON(!f->leave))
f->leave(mlxsw_sp_port_vlan);
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return 0;
}
@@ -2720,24 +2704,12 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
mlxsw_sp_port->pvid = 1;
return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}
static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
size_t bytes;
int err;
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -2748,24 +2720,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = 1;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
mlxsw_sp_port->link.autoneg = 1;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
err = -ENOMEM;
goto err_port_active_vlans_alloc;
}
mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->untagged_vlans) {
err = -ENOMEM;
goto err_port_untagged_vlans_alloc;
}
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
@@ -2877,11 +2838,11 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_vp_mode_set;
}
err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
mlxsw_sp_port->local_port);
goto err_port_pvid_vport_create;
goto err_port_vlan_get;
}
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
@@ -2902,8 +2863,8 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
err_port_vp_mode_set:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
@@ -2922,10 +2883,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
free_netdev(dev);
return err;
}
@@ -2961,16 +2918,13 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
}
@@ -3622,16 +3576,14 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
return mlxsw_sp_fid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
}
static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
mlxsw_sp_fid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -3847,7 +3799,7 @@ static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
return ret;
}
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3899,166 +3851,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
u16 fid)
{
if (mlxsw_sp_fid_is_vfid(fid))
return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
else
return test_bit(fid, lag_port->active_vlans);
}
static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
u64 max_lag_members;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
for (i = 0; i < max_lag_members; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (!lag_port || lag_port->local_port == local_port)
continue;
if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
count++;
}
return !count;
}
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
mlxsw_sp_port->local_port);
netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
mlxsw_sp_port->local_port, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
mlxsw_sp_port->lag_id, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
return 0;
if (mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
fid);
else
return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
struct net_device *br_dev)
{
struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);
return !master_bridge->dev || master_bridge->dev == br_dev;
}
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
struct net_device *br_dev)
{
struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);
master_bridge->dev = br_dev;
master_bridge->ref_count++;
}
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);
if (--master_bridge->ref_count == 0)
master_bridge->dev = NULL;
}
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev)
{
struct net_device *dev = mlxsw_sp_port->dev;
int err;
/* When port is not bridged untagged packets are tagged with
* PVID=VID=1, thereby creating an implicit VLAN interface in
* the device. Remove it and let bridge code take care of its
* own VLANs.
*/
err = mlxsw_sp_port_kill_vid(dev, 0, 1);
if (err)
return err;
mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
mlxsw_sp_port->learning = 1;
mlxsw_sp_port->learning_sync = 1;
mlxsw_sp_port->uc_flood = 1;
mlxsw_sp_port->mc_flood = 1;
mlxsw_sp_port->mc_router = 0;
mlxsw_sp_port->mc_disabled = 1;
mlxsw_sp_port->bridged = 1;
return 0;
}
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev)
{
struct net_device *dev = mlxsw_sp_port->dev;
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
mlxsw_sp_port->mc_flood = 0;
mlxsw_sp_port->mc_router = 0;
mlxsw_sp_port->bridged = 0;
/* Add implicit VLAN interface in the device, so that untagged
* packets will be classified to the default vFID.
*/
mlxsw_sp_port_add_vid(dev, 0, 1);
}
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -4177,55 +3969,11 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev, u16 lag_id)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
if (WARN_ON(!mlxsw_sp_vport))
return;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
/* If vPort is assigned a RIF, then leave it since it's no
* longer valid.
*/
f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
if (f)
f->leave(mlxsw_sp_port_vlan);
mlxsw_sp_vport->lag_id = lag_id;
mlxsw_sp_vport->lagged = 1;
mlxsw_sp_vport->dev = lag_dev;
}
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
if (WARN_ON(!mlxsw_sp_vport))
return;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
if (f)
f->leave(mlxsw_sp_port_vlan);
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
mlxsw_sp_vport->lagged = 0;
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_upper *lag;
u16 lag_id;
u8 port_index;
@@ -4258,7 +4006,10 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
/* Port is no longer usable as a router interface */
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
if (mlxsw_sp_port_vlan->fid)
mlxsw_sp_port_vlan->fid->leave(mlxsw_sp_port_vlan);
return 0;
@@ -4285,10 +4036,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
mlxsw_sp_port_bridge_leave(mlxsw_sp_port, NULL, NULL);
}
/* Any VLANs configured on the port are no longer valid */
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -4298,7 +4047,9 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
/* Make sure untagged frames are allowed to ingress */
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -4340,34 +4091,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *vlan_dev)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
mlxsw_sp_vport->dev = vlan_dev;
return 0;
}
static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *vlan_dev)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_vport))
return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -4448,10 +4171,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
if (!info->linking)
break;
/* HW limitation forbids to put ports to multiple bridges. */
if (netif_is_bridge_master(upper_dev) &&
!mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4468,14 +4187,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (is_vlan_dev(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
else
mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
lower_dev,
@@ -4496,9 +4208,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
} else {
err = -EINVAL;
WARN_ON(1);
}
break;
}
@@ -4566,248 +4275,6 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
return 0;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
return find_first_zero_bit(mlxsw_sp->vfids.mapped,
MLXSW_SP_VFID_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
char sfmr_pl[MLXSW_REG_SFMR_LEN];
mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void
mlxsw_sp_port_vlan_vfid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
struct net_device *br_dev)
{
struct device *dev = mlxsw_sp->bus_info->dev;
struct mlxsw_sp_fid *f;
u16 vfid, fid;
int err;
vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
if (vfid == MLXSW_SP_VFID_MAX) {
dev_err(dev, "No available vFIDs\n");
return ERR_PTR(-ERANGE);
}
fid = mlxsw_sp_vfid_to_fid(vfid);
err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
if (err) {
dev_err(dev, "Failed to create FID=%d\n", fid);
return ERR_PTR(err);
}
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto err_allocate_vfid;
f->leave = mlxsw_sp_port_vlan_vfid_leave;
f->fid = fid;
f->dev = br_dev;
list_add(&f->list, &mlxsw_sp->vfids.list);
set_bit(vfid, mlxsw_sp->vfids.mapped);
return f;
err_allocate_vfid:
mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *f)
{
u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
u16 fid = f->fid;
clear_bit(vfid, mlxsw_sp->vfids.mapped);
list_del(&f->list);
if (f->rif)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
kfree(f);
mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
vid);
}
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp_fid *f;
int err;
f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
if (!f) {
f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
if (IS_ERR(f))
return PTR_ERR(f);
}
err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
if (err)
goto err_vport_flood_set;
err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
if (err)
goto err_vport_fid_map;
mlxsw_sp_port = mlxsw_sp_vport_port(mlxsw_sp_vport);
if (mlxsw_sp_port->nr_port_vid_map++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
goto err_port_vp_mode_trans;
}
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
f->ref_count++;
netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
return 0;
err_port_vp_mode_trans:
mlxsw_sp_port->nr_port_vid_map--;
mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
err_vport_fid_map:
mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
if (!f->ref_count)
mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
return err;
}
static void
mlxsw_sp_port_vlan_vfid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_port *mlxsw_sp_vport;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_fid *f;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
f->ref_count--;
if (mlxsw_sp_port->nr_port_vid_map == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp_port->nr_port_vid_map--;
mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
if (f->ref_count == 0)
mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *brport_dev,
struct net_device *br_dev)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = mlxsw_sp_vport->dev;
struct mlxsw_sp_port *mlxsw_sp_port;
int err;
mlxsw_sp_port = mlxsw_sp_vport_port(mlxsw_sp_vport);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (f && !WARN_ON(!f->leave))
f->leave(mlxsw_sp_port_vlan);
err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
if (err) {
netdev_err(dev, "Failed to join vFID\n");
return err;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
if (err) {
netdev_err(dev, "Failed to enable learning\n");
goto err_port_vid_learning_set;
}
mlxsw_sp_vport->learning = 1;
mlxsw_sp_vport->learning_sync = 1;
mlxsw_sp_vport->uc_flood = 1;
mlxsw_sp_vport->mc_flood = 1;
mlxsw_sp_vport->mc_router = 0;
mlxsw_sp_vport->mc_disabled = 1;
mlxsw_sp_vport->bridged = 1;
return 0;
err_port_vid_learning_set:
mlxsw_sp_port_vlan_vfid_leave(mlxsw_sp_port_vlan);
return err;
}
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *brport_dev,
struct net_device *br_dev)
{
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_port;
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
mlxsw_sp_port = mlxsw_sp_vport_port(mlxsw_sp_vport);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
mlxsw_sp_port_vlan_vfid_leave(mlxsw_sp_port_vlan);
mlxsw_sp_vport->learning = 0;
mlxsw_sp_vport->learning_sync = 0;
mlxsw_sp_vport->uc_flood = 0;
mlxsw_sp_vport->mc_flood = 0;
mlxsw_sp_vport->mc_router = 0;
mlxsw_sp_vport->bridged = 0;
}
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
const struct net_device *br_dev)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
if (dev && dev == br_dev)
return false;
}
return true;
}
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
struct net_device *dev,
unsigned long event, void *ptr,
@@ -4815,38 +4282,24 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
if (!mlxsw_sp_vport)
return 0;
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
if (!info->linking)
break;
/* We can't have multiple VLAN interfaces configured on
* the same port and being members in the same bridge.
*/
if (netif_is_bridge_master(upper_dev) &&
!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
upper_dev))
return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
upper_dev);
else
mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
} else {
@@ -204,11 +204,15 @@ struct mlxsw_sp_port_sample {
bool truncate;
};
struct mlxsw_sp_bridge_port;
struct mlxsw_sp_port_vlan {
struct list_head list;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp_fid *fid;
u16 vid;
struct mlxsw_sp_bridge_port *bridge_port;
struct list_head bridge_vlan_node;
};
struct mlxsw_sp_port {
@@ -216,23 +220,10 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
u8 local_port;
u8 stp_state;
u16 learning:1,
learning_sync:1,
uc_flood:1,
mc_flood:1,
mc_router:1,
mc_disabled:1,
bridged:1,
lagged:1,
u8 lagged:1,
split:1;
u16 pvid;
u16 lag_id;
struct {
struct list_head list;
struct mlxsw_sp_fid *f;
u16 vid;
} vport;
struct {
u8 tx_pause:1,
rx_pause:1,
@@ -248,11 +239,6 @@ struct mlxsw_sp_port {
u8 width;
u8 lane;
} mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -267,6 +253,7 @@ struct mlxsw_sp_port {
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -303,79 +290,6 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}
static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
return mlxsw_sp_vport->vport.vid;
}
static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
return vid != 0;
}
static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
struct mlxsw_sp_fid *f)
{
mlxsw_sp_vport->vport.f = f;
}
static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
return mlxsw_sp_vport->vport.f;
}
static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
return f ? f->dev : NULL;
}
static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
return mlxsw_sp_vport;
}
return NULL;
}
static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
if (f && f->fid == fid)
return mlxsw_sp_vport;
}
return NULL;
}
static inline struct mlxsw_sp_port *
mlxsw_sp_vport_port(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
return mlxsw_sp->ports[mlxsw_sp_vport->local_port];
}
static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
u16 fid)
{
@@ -444,10 +358,8 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -455,14 +367,19 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, bool valid);
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -481,6 +398,9 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -3244,7 +3244,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
if (is_vlan_dev(l3_dev))
fid_index = vlan_dev_vlan_id(l3_dev);
else if (mlxsw_sp_master_bridge(mlxsw_sp)->dev == l3_dev)
else if (br_vlan_enabled(l3_dev))
fid_index = 1;
else
return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
@@ -3437,7 +3437,6 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
@@ -3446,8 +3445,7 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
else if (netif_is_bridge_master(real_dev) &&
mlxsw_sp_master_bridge(mlxsw_sp)->dev == real_dev)
else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
event);
@@ -52,6 +52,8 @@
#include "core.h"
#include "reg.h"
struct mlxsw_sp_bridge_ops;
struct mlxsw_sp_bridge {
struct mlxsw_sp *mlxsw_sp;
struct {
@@ -63,58 +65,376 @@ struct mlxsw_sp_bridge {
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
bool vlan_enabled_exists;
struct list_head bridges_list;
struct list_head mids_list;
DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
u8 vlan_enabled:1,
multicast_enabled:1;
const struct mlxsw_sp_bridge_ops *ops;
};
struct mlxsw_sp_bridge_port {
struct net_device *dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
unsigned int ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
bool lagged;
union {
u16 lag_id;
u16 system_port;
};
};
struct mlxsw_sp_bridge_vlan {
struct list_head list;
struct list_head port_vlan_list;
u16 vid;
u8 egress_untagged:1,
pvid:1;
};
struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_bridge_ops {
int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
};
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
u16 fid_index);
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
const struct net_device *br_dev)
{
return &mlxsw_sp->bridge->master_bridge;
struct mlxsw_sp_bridge_device *bridge_device;
list_for_each_entry(bridge_device, &bridge->bridges_list, list)
if (bridge_device->dev == br_dev)
return bridge_device;
return NULL;
}
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
{
struct device *dev = bridge->mlxsw_sp->bus_info->dev;
struct mlxsw_sp_bridge_device *bridge_device;
bool vlan_enabled = br_vlan_enabled(br_dev);
if (vlan_enabled && bridge->vlan_enabled_exists) {
dev_err(dev, "Only one VLAN-aware bridge is supported\n");
return ERR_PTR(-EINVAL);
}
bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
if (!bridge_device)
return ERR_PTR(-ENOMEM);
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
INIT_LIST_HEAD(&bridge_device->ports_list);
if (vlan_enabled) {
bridge->vlan_enabled_exists = true;
bridge_device->ops = bridge->bridge_8021q_ops;
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
list_add(&bridge_device->list, &bridge->bridges_list);
return bridge_device;
}
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
u16 fid = vid;
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
kfree(bridge_device);
}
fid = f ? f->fid : fid;
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
{
struct mlxsw_sp_bridge_device *bridge_device;
if (!fid)
fid = mlxsw_sp_port->pvid;
bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
if (bridge_device)
return bridge_device;
return fid;
return mlxsw_sp_bridge_device_create(bridge, br_dev);
}
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port)
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *fid;
u16 vid;
if (list_empty(&bridge_device->ports_list))
mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}
if (netif_is_bridge_master(dev)) {
fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
dev);
if (fid) {
mlxsw_sp_vport =
mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
fid->fid);
WARN_ON(!mlxsw_sp_vport);
return mlxsw_sp_vport;
static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
const struct net_device *brport_dev)
{
struct mlxsw_sp_bridge_port *bridge_port;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
if (bridge_port->dev == brport_dev)
return bridge_port;
}
return NULL;
}
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
{
struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
struct mlxsw_sp_bridge_device *bridge_device;
if (!br_dev)
return NULL;
bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
if (!bridge_device)
return NULL;
return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *brport_dev)
{
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
if (!bridge_port)
return NULL;
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
bridge_port->lagged = mlxsw_sp_port->lagged;
if (bridge_port->lagged)
bridge_port->lag_id = mlxsw_sp_port->lag_id;
else
bridge_port->system_port = mlxsw_sp_port->local_port;
bridge_port->dev = brport_dev;
bridge_port->bridge_device = bridge_device;
bridge_port->stp_state = BR_STATE_DISABLED;
bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
bridge_port->ref_count = 1;
return bridge_port;
}
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
list_del(&bridge_port->list);
WARN_ON(!list_empty(&bridge_port->vlans_list));
kfree(bridge_port);
}
static bool
mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
bridge_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
/* In case ports were pulled from out of a bridged LAG, then
* it's possible the reference count isn't zero, yet the bridge
* port should be destroyed, as it's no longer an upper of ours.
*/
if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
return true;
else if (bridge_port->ref_count == 0)
return true;
else
return false;
}
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
{
struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
int err;
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
bridge_port->ref_count++;
return bridge_port;
}
if (!is_vlan_dev(dev))
return mlxsw_sp_port;
bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
if (IS_ERR(bridge_device))
return ERR_CAST(bridge_device);
vid = vlan_dev_vlan_id(dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
WARN_ON(!mlxsw_sp_vport);
bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
if (!bridge_port) {
err = -ENOMEM;
goto err_bridge_port_create;
}
return bridge_port;
return mlxsw_sp_vport;
err_bridge_port_create:
mlxsw_sp_bridge_device_put(bridge, bridge_device);
return ERR_PTR(err);
}
static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_port *bridge_port)
{
struct mlxsw_sp_bridge_device *bridge_device;
bridge_port->ref_count--;
if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
mlxsw_sp_bridge_device_put(bridge, bridge_device);
}
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
const struct mlxsw_sp_bridge_device *
bridge_device,
u16 vid)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
list) {
if (!mlxsw_sp_port_vlan->bridge_port)
continue;
if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
bridge_device)
continue;
if (bridge_device->vlan_enabled &&
mlxsw_sp_port_vlan->vid != vid)
continue;
return mlxsw_sp_port_vlan;
}
return NULL;
}
static struct mlxsw_sp_port_vlan*
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
list) {
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
if (fid && fid->fid == fid_index)
return mlxsw_sp_port_vlan;
}
return NULL;
}
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
u16 vid)
{
struct mlxsw_sp_bridge_vlan *bridge_vlan;
list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
if (bridge_vlan->vid == vid)
return bridge_vlan;
}
return NULL;
}
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
struct mlxsw_sp_bridge_vlan *bridge_vlan;
bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
if (!bridge_vlan)
return NULL;
INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
bridge_vlan->vid = vid;
list_add(&bridge_vlan->list, &bridge_port->vlans_list);
return bridge_vlan;
}
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
list_del(&bridge_vlan->list);
WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
kfree(bridge_vlan);
}
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
struct mlxsw_sp_bridge_vlan *bridge_vlan;
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
if (bridge_vlan)
return bridge_vlan;
return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
}
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
if (list_empty(&bridge_vlan->port_vlan_list))
mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}
static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
struct net_device *dev,
unsigned long *brport_flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
if (WARN_ON(!bridge_port))
return;
memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
}
static int mlxsw_sp_port_attr_get(struct net_device *dev,
@@ -123,10 +443,6 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
if (!mlxsw_sp_port)
return -EINVAL;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
@@ -134,10 +450,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
attr->u.ppid.id_len);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags =
(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
&attr->u.brport_flags);
break;
default:
return -EOPNOTSUPP;
@@ -146,237 +460,213 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
return 0;
}
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
u8 state)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
bridge_vlan_node) {
if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
continue;
return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
bridge_vlan->vid, state);
}
return 0;
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
u8 state)
{
u16 vid;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, state);
if (err)
return err;
mlxsw_sp_port->stp_state = state;
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
return 0;
}
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, state);
list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
bridge_vlan, state);
if (err)
return err;
goto err_port_bridge_vlan_stp_set;
}
mlxsw_sp_port->stp_state = state;
bridge_port->stp_state = state;
return 0;
err_port_bridge_vlan_stp_set:
list_for_each_entry_continue_reverse(bridge_vlan,
&bridge_port->vlans_list, list)
mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
bridge_port->stp_state);
return err;
}
static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 idx_begin, u16 idx_end,
static int mlxsw_sp_port_fid_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_table table,
bool set)
bool member)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
enum mlxsw_flood_table_type table_type;
u16 range = idx_end - idx_begin + 1;
u16 flood_index = fid->fid;
char *sftr_pl;
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
else
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
if (mlxsw_sp_fid_is_vfid(fid->fid)) {
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
flood_index = mlxsw_sp_fid_to_vfid(fid->fid);
}
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
table_type, range, local_port, set);
mlxsw_reg_sftr_pack(sftr_pl, table, flood_index, table_type, 1,
local_port, member);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
kfree(sftr_pl);
return err;
}
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 idx_begin, u16 idx_end, bool uc_set,
bool bc_set, bool mc_set)
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
enum mlxsw_sp_flood_table table,
bool member)
{
int err;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
MLXSW_SP_FLOOD_TABLE_UC, uc_set);
if (err)
return err;
err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
MLXSW_SP_FLOOD_TABLE_BC, bc_set);
if (err)
goto err_flood_bm_set;
list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
bridge_vlan_node) {
if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
continue;
return mlxsw_sp_port_fid_flood_set(mlxsw_sp_port,
mlxsw_sp_port_vlan->fid,
table, member);
}
err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
MLXSW_SP_FLOOD_TABLE_MC, mc_set);
if (err)
goto err_flood_mc_set;
return 0;
err_flood_mc_set:
__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
err_flood_bm_set:
__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
return err;
}
static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
enum mlxsw_sp_flood_table table,
bool set)
bool member)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, last_visited_vid;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
u16 vfid = mlxsw_sp_fid_to_vfid(fid);
return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
vfid, table, set);
}
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
table, set);
if (err) {
last_visited_vid = vid;
goto err_port_flood_set;
}
list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
bridge_vlan, table,
member);
if (err)
goto err_port_bridge_vlan_flood_set;
}
return 0;
err_port_flood_set:
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
!set);
netdev_err(dev, "Failed to configure unicast flooding\n");
return err;
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
bool mc_disabled)
{
int set;
int err = 0;
if (switchdev_trans_ph_prepare(trans))
return 0;
if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
set = mc_disabled ?
mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
MLXSW_SP_FLOOD_TABLE_MC,
set);
}
if (!err)
mlxsw_sp_port->mc_disabled = mc_disabled;
err_port_bridge_vlan_flood_set:
list_for_each_entry_continue_reverse(bridge_vlan,
&bridge_port->vlans_list, list)
mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
table, !member);
return err;
}
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
bool set)
{
bool mc_set = set;
u16 vfid;
/* In case of vFIDs, index into the flooding table is relative to
* the start of the vFIDs range.
*/
vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 vid = bridge_vlan->vid;
if (set)
mc_set = mlxsw_sp_vport->mc_disabled ?
mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
bridge_vlan_node) {
if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
continue;
return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
}
return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
mc_set);
return 0;
}
static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
bool set)
{
u16 vid;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
}
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
bridge_vlan, set);
if (err)
goto err_port_vid_learning_set;
goto err_port_bridge_vlan_learning_set;
}
return 0;
err_port_vid_learning_set:
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, !set);
err_port_bridge_vlan_learning_set:
list_for_each_entry_continue_reverse(bridge_vlan,
&bridge_port->vlans_list, list)
mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
bridge_vlan, !set);
return err;
}
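The per-bridge-port flood-table and learning setters above share one unwind pattern: walk the bridge port's VLAN list applying the new state and, on the first failure, walk the already-visited entries in reverse (list_for_each_entry_continue_reverse) to restore the old state. A minimal standalone sketch of that pattern, with generic names rather than the driver's structures:

#include <stdio.h>

/* Stand-in for a hardware write; fails on one item to exercise the unwind. */
static int hw_write(int item, int value)
{
	return item == 2 ? -1 : 0;
}

static int items_set(int *state, int n, int value, int old)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = hw_write(i, value);
		if (err)
			goto err_write;
		state[i] = value;
	}
	return 0;

err_write:
	/* Roll back only the entries that were already updated. */
	while (--i >= 0) {
		hw_write(i, old);
		state[i] = old;
	}
	return err;
}

int main(void)
{
	int state[4] = {0, 0, 0, 0};

	printf("items_set: %d\n", items_set(state, 4, 1, 0));
	return 0;
}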
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
unsigned long brport_flags)
{
unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
struct mlxsw_sp_bridge_port *bridge_port;
int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
if ((uc_flood ^ brport_flags) & BR_FLOOD) {
err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TABLE_UC,
!mlxsw_sp_port->uc_flood);
brport_flags & BR_FLOOD);
if (err)
return err;
}
if ((learning ^ brport_flags) & BR_LEARNING) {
err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
!mlxsw_sp_port->learning);
err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
brport_flags & BR_LEARNING);
if (err)
goto err_port_learning_set;
}
return err;
mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
return 0;
err_port_learning_set:
if ((uc_flood ^ brport_flags) & BR_FLOOD)
mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
MLXSW_SP_FLOOD_TABLE_UC,
mlxsw_sp_port->uc_flood);
return err;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
......@@ -417,29 +707,77 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
/* SWITCHDEV_TRANS_PREPARE phase */
if ((!vlan_enabled) &&
(mlxsw_sp->bridge->master_bridge.dev == orig_dev)) {
netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
if (!switchdev_trans_ph_prepare(trans))
return 0;
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
}
if (bridge_device->vlan_enabled == vlan_enabled)
return 0;
netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
return -EINVAL;
}
static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_port_mc_router)
{
struct mlxsw_sp_bridge_port *bridge_port;
if (switchdev_trans_ph_prepare(trans))
return 0;
mlxsw_sp_port->mc_router = is_port_mc_router;
if (!mlxsw_sp_port->mc_disabled)
return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
if (!bridge_port->bridge_device->multicast_enabled)
return 0;
return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TABLE_MC,
is_port_mc_router);
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
bool mc_disabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_device)
return 0;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
enum mlxsw_sp_flood_table table = MLXSW_SP_FLOOD_TABLE_MC;
bool member = mc_disabled ? true : bridge_port->mrouter;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
bridge_port, table,
member);
if (err)
return err;
}
bridge_device->multicast_enabled = !mc_disabled;
return 0;
}
......@@ -449,19 +787,17 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
struct switchdev_trans *trans)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
if (!mlxsw_sp_port)
return -EINVAL;
int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
......@@ -475,10 +811,12 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
attr->orig_dev,
attr->u.mc_disabled);
break;
default:
......@@ -489,178 +827,337 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
{
return valid ? MLXSW_REG_SFMR_OP_CREATE_FID :
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, bool valid)
{
u16 fid_offset = fid_index < MLXSW_SP_VFID_BASE ? fid_index : 0;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
fid_offset);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
char svfa_pl[MLXSW_REG_SVFA_LEN];
mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, fid_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
struct mlxsw_sp_fid *f;
struct mlxsw_sp_fid *fid;
int err;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
err = mlxsw_sp_fid_op(mlxsw_sp, fid_index, true);
if (err)
return ERR_PTR(err);
err = mlxsw_sp_fid_map(mlxsw_sp, fid_index, true);
if (err)
goto err_fid_map;
fid = kzalloc(sizeof(*fid), GFP_KERNEL);
if (!fid) {
err = -ENOMEM;
goto err_allocate_fid;
}
f->fid = fid;
fid->fid = fid_index;
fid->ref_count = 1;
list_add(&fid->list, &mlxsw_sp->fids);
return f;
return fid;
err_allocate_fid:
mlxsw_sp_fid_map(mlxsw_sp, fid_index, false);
err_fid_map:
mlxsw_sp_fid_op(mlxsw_sp, fid_index, false);
return ERR_PTR(err);
}
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid *f;
u16 fid_index = fid->fid;
list_del(&fid->list);
if (fid->rif)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, fid->rif);
kfree(fid);
mlxsw_sp_fid_map(mlxsw_sp, fid_index, false);
mlxsw_sp_fid_op(mlxsw_sp, fid_index, false);
}
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev)
{
u16 vfid_index, fid_index;
struct mlxsw_sp_fid *fid;
int err;
err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
if (err)
return ERR_PTR(err);
vfid_index = find_first_zero_bit(mlxsw_sp->vfids.mapped,
MLXSW_SP_VFID_MAX);
if (vfid_index == MLXSW_SP_VFID_MAX)
return ERR_PTR(-ENOBUFS);
/* Although all the ports that are members of the FID might be
 * using a {Port, VID} to FID mapping, we create a global
 * VID-to-FID mapping. This allows a port to transition to VLAN
 * mode, knowing the global mapping exists.
 */
err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
fid_index = mlxsw_sp_vfid_to_fid(vfid_index);
err = mlxsw_sp_fid_op(mlxsw_sp, fid_index, true);
if (err)
goto err_fid_map;
return ERR_PTR(err);
f = mlxsw_sp_fid_alloc(fid);
if (!f) {
fid = kzalloc(sizeof(*fid), GFP_KERNEL);
if (!fid) {
err = -ENOMEM;
goto err_allocate_fid;
}
list_add(&f->list, &mlxsw_sp->fids);
fid->fid = fid_index;
fid->ref_count = 1;
fid->dev = dev;
list_add(&fid->list, &mlxsw_sp->vfids.list);
__set_bit(vfid_index, mlxsw_sp->vfids.mapped);
return f;
return fid;
err_allocate_fid:
mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
mlxsw_sp_fid_op(mlxsw_sp, fid, false);
mlxsw_sp_fid_op(mlxsw_sp, fid_index, false);
return ERR_PTR(err);
}
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
u16 vfid_index = mlxsw_sp_fid_to_vfid(fid->fid);
u16 fid_index = fid->fid;
__clear_bit(vfid_index, mlxsw_sp->vfids.mapped);
list_del(&fid->list);
if (fid->rif)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, fid->rif);
kfree(fid);
mlxsw_sp_fid_op(mlxsw_sp, fid_index, false);
}
static struct mlxsw_sp_fid *__mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
struct mlxsw_sp_fid *fid;
fid = mlxsw_sp_fid_find(mlxsw_sp, fid_index);
if (fid) {
fid->ref_count++;
return fid;
}
return mlxsw_sp_fid_create(mlxsw_sp, fid_index);
}
static struct mlxsw_sp_fid *mlxsw_sp_vfid_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev)
{
u16 fid = f->fid;
struct mlxsw_sp_fid *fid;
list_del(&f->list);
fid = mlxsw_sp_vfid_find(mlxsw_sp, dev);
if (fid) {
fid->ref_count++;
return fid;
}
if (f->rif)
mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
return mlxsw_sp_vfid_create(mlxsw_sp, dev);
}
kfree(f);
static struct mlxsw_sp_fid *
mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, u16 vid,
struct mlxsw_sp_bridge_device *bridge_device)
{
if (bridge_device->vlan_enabled)
return __mlxsw_sp_fid_get(mlxsw_sp, vid);
else
return mlxsw_sp_vfid_get(mlxsw_sp, bridge_device->dev);
}
mlxsw_sp_fid_map(mlxsw_sp, fid, false);
static void __mlxsw_sp_fid_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
if (--fid->ref_count == 0)
mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}
mlxsw_sp_fid_op(mlxsw_sp, fid, false);
static void mlxsw_sp_vfid_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
if (--fid->ref_count == 0)
mlxsw_sp_vfid_destroy(mlxsw_sp, fid);
}
static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
static void mlxsw_sp_fid_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid *f;
if (!mlxsw_sp_fid_is_vfid(fid->fid))
__mlxsw_sp_fid_put(mlxsw_sp, fid);
else
mlxsw_sp_vfid_put(mlxsw_sp, fid);
}
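The FID code above pairs a get-or-create lookup with a reference count: an existing entry just gains a reference, the first user allocates it with ref_count set to 1, and the matching put destroys the entry once the count drops back to zero. A standalone sketch of that pattern, using generic names rather than the driver's FID structures:

#include <stdlib.h>

struct entry {
	int id;
	unsigned int ref_count;
	struct entry *next;
};

static struct entry *entries;

static struct entry *entry_get(int id)
{
	struct entry *e;

	for (e = entries; e; e = e->next)
		if (e->id == id) {
			e->ref_count++;		/* existing entry: take a reference */
			return e;
		}

	e = calloc(1, sizeof(*e));		/* first user: create it */
	if (!e)
		return NULL;
	e->id = id;
	e->ref_count = 1;
	e->next = entries;
	entries = e;
	return e;
}

static void entry_put(struct entry *e)
{
	struct entry **p;

	if (--e->ref_count != 0)
		return;
	for (p = &entries; *p; p = &(*p)->next)	/* last user: unlink and free */
		if (*p == e) {
			*p = e->next;
			break;
		}
	free(e);
}

int main(void)
{
	struct entry *a = entry_get(10);	/* created, ref_count == 1 */
	struct entry *b = entry_get(10);	/* same entry, ref_count == 2 */
	int ok = a && a == b;

	entry_put(b);
	entry_put(a);				/* freed here */
	return ok ? 0 : 1;
}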
if (test_bit(fid, mlxsw_sp_port->active_vlans))
return 0;
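/* With multicast snooping disabled, every bridge port is a member of the
 * MC flood table; once snooping is enabled, only mrouter ports remain
 * members.
 */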
static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
const struct mlxsw_sp_bridge_device *bridge_device;
f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
if (!f) {
f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
if (IS_ERR(f))
return PTR_ERR(f);
}
bridge_device = bridge_port->bridge_device;
return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
}
f->ref_count++;
static int __mlxsw_sp_port_vid_fid_map(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, u16 fid_index)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
int err;
netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid_index,
vid);
if (err)
return err;
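/* First {Port, VID} to FID mapping on this port: transition it to
 * Virtual Port (VP) mode.
 */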
if (mlxsw_sp_port->nr_port_vid_map++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
goto err_port_vp_mode_trans;
}
return 0;
err_port_vp_mode_trans:
mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid_index, vid);
return err;
}
static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
static int __mlxsw_sp_port_vid_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, u16 fid_index)
{
struct mlxsw_sp_fid *f;
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
if (WARN_ON(!f))
return;
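/* Removing the last {Port, VID} mapping: transition the port back to
 * VLAN mode.
 */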
if (mlxsw_sp_port->nr_port_vid_map == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp_port->nr_port_vid_map--;
mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid_index, vid);
return 0;
}
static int mlxsw_sp_port_vid_fid_map(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, u16 fid_index)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
if (mlxsw_sp_fid_is_vfid(fid_index))
return __mlxsw_sp_port_vid_fid_map(mlxsw_sp_port, vid,
fid_index);
mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
if (mlxsw_sp_port->nr_port_vid_map == 0)
return 0;
if (--f->ref_count == 0)
mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid_index,
fid_index);
}
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
bool valid)
static int mlxsw_sp_port_vid_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, u16 fid_index)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
/* If port doesn't have vPorts, then it can use the global
* VID-to-FID mapping.
*/
if (mlxsw_sp_fid_is_vfid(fid_index))
return __mlxsw_sp_port_vid_fid_unmap(mlxsw_sp_port, vid,
fid_index);
if (mlxsw_sp_port->nr_port_vid_map == 0)
return 0;
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid_index,
fid_index);
}
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct mlxsw_sp_bridge_port *bridge_port)
{
bool mc_flood;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_fid *fid;
int err;
err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
fid = mlxsw_sp_fid_get(mlxsw_sp, vid, bridge_port->bridge_device);
if (IS_ERR(fid))
return PTR_ERR(fid);
err = mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid,
MLXSW_SP_FLOOD_TABLE_UC,
bridge_port->flags & BR_FLOOD);
if (err)
return err;
goto err_port_fid_uc_flood_set;
mc_flood = mlxsw_sp_port->mc_disabled ?
mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
err = mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid,
MLXSW_SP_FLOOD_TABLE_MC,
mlxsw_sp_mc_flood(bridge_port));
if (err)
goto err_port_fid_mc_flood_set;
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid,
mlxsw_sp_port->uc_flood, true,
mc_flood);
err = mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid,
MLXSW_SP_FLOOD_TABLE_BC, true);
if (err)
goto err_port_flood_set;
goto err_port_fid_bc_flood_set;
err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
err = mlxsw_sp_port_vid_fid_map(mlxsw_sp_port, vid, fid->fid);
if (err)
goto err_port_fid_map;
goto err_port_vid_fid_map;
mlxsw_sp_port_vlan->fid = fid;
return 0;
err_port_fid_map:
__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid, false, false, false);
err_port_flood_set:
__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
err_port_vid_fid_map:
mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_BC,
false);
err_port_fid_bc_flood_set:
mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_MC,
false);
err_port_fid_mc_flood_set:
mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_UC,
false);
err_port_fid_uc_flood_set:
mlxsw_sp_fid_put(mlxsw_sp, fid);
return err;
}
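/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap the {Port, VID} from
 * the FID, clear flooding in the BC, MC and UC tables and drop the FID
 * reference.
 */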
static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid, fid, false,
false, false);
__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
u16 vid = mlxsw_sp_port_vlan->vid;
mlxsw_sp_port_vlan->fid = NULL;
mlxsw_sp_port_vid_fid_unmap(mlxsw_sp_port, vid, fid->fid);
mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_BC,
false);
mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_MC,
false);
mlxsw_sp_port_fid_flood_set(mlxsw_sp_port, fid, MLXSW_SP_FLOOD_TABLE_UC,
false);
mlxsw_sp_fid_put(mlxsw_sp, fid);
}
static u16
......@@ -675,52 +1172,124 @@ mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port->pvid;
}
static int mlxsw_sp_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool is_untagged, bool is_pvid)
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct mlxsw_sp_bridge_port *bridge_port)
{
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
u16 old_pvid = mlxsw_sp_port->pvid;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
u16 vid = mlxsw_sp_port_vlan->vid;
int err;
err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid);
if (err)
return err;
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
is_untagged);
if (err)
goto err_port_vlan_set;
/* No need to continue if only VLAN flags were changed */
if (mlxsw_sp_port_vlan->bridge_port)
return 0;
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
if (err)
goto err_port_pvid_set;
return err;
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
mlxsw_sp_port->learning);
bridge_port->flags & BR_LEARNING);
if (err)
goto err_port_vid_learning_set;
err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
mlxsw_sp_port->stp_state);
bridge_port->stp_state);
if (err)
goto err_port_vid_stp_set;
if (is_untagged)
__set_bit(vid, mlxsw_sp_port->untagged_vlans);
else
__clear_bit(vid, mlxsw_sp_port->untagged_vlans);
__set_bit(vid, mlxsw_sp_port->active_vlans);
bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
if (!bridge_vlan) {
err = -ENOMEM;
goto err_bridge_vlan_get;
}
list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
&bridge_vlan->port_vlan_list);
mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
bridge_port->dev);
mlxsw_sp_port_vlan->bridge_port = bridge_port;
return 0;
err_bridge_vlan_get:
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
return err;
}
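/* Reverse of mlxsw_sp_port_vlan_bridge_join(): unlink the {Port, VID}
 * from the bridge VLAN, disable STP and learning on the VID, flush the
 * FDB if this was the last {Port, VID} member, leave the FID and release
 * the bridge port reference.
 */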
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
bool last;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
last = list_is_singular(&bridge_vlan->port_vlan_list);
list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
mlxsw_sp_bridge_vlan_put(bridge_vlan);
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
if (last)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port, fid->fid);
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
mlxsw_sp_port_vlan->bridge_port = NULL;
}
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
u16 vid, bool is_untagged, bool is_pvid)
{
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
u16 old_pvid = mlxsw_sp_port->pvid;
int err;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
if (IS_ERR(mlxsw_sp_port_vlan))
return PTR_ERR(mlxsw_sp_port_vlan);
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
is_untagged);
if (err)
goto err_port_vlan_set;
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
if (err)
goto err_port_pvid_set;
err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
if (err)
goto err_port_vlan_bridge_join;
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
bridge_vlan->egress_untagged = is_untagged;
bridge_vlan->pvid = is_pvid;
return 0;
err_port_vlan_bridge_join:
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return err;
}
......@@ -730,15 +1299,26 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
if (switchdev_trans_ph_prepare(trans))
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
int err;
err = mlxsw_sp_port_vlan_add(mlxsw_sp_port, vid, flag_untagged,
err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
vid, flag_untagged,
flag_pvid);
if (err)
return err;
......@@ -747,6 +1327,29 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
u16 fid_index)
{
bool lagged = bridge_port->lagged;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
u16 system_port;
system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
......@@ -822,24 +1425,39 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
u16 lag_vid = 0;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = fdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
u16 fid_index, vid;
if (switchdev_trans_ph_prepare(trans))
return 0;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
}
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
fdb->vid);
if (!mlxsw_sp_port_vlan)
return 0;
fid_index = mlxsw_sp_port_vlan->fid->fid;
vid = mlxsw_sp_port_vlan->vid;
if (!mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
mlxsw_sp_port->local_port,
fdb->addr, fid, true, false);
fdb->addr, fid_index, true,
false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
mlxsw_sp_port->lag_id,
fdb->addr, fid, lag_vid,
fdb->addr, fid_index, vid,
true, false);
}
......@@ -939,17 +1557,34 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
u16 fid_index;
int err = 0;
if (switchdev_trans_ph_prepare(trans))
return 0;
mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
fid_index = mlxsw_sp_port_vlan->fid->fid;
mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
......@@ -965,8 +1600,8 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
if (mid->ref_count == 1) {
err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
true);
err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
mid->mid, true);
if (err) {
netdev_err(dev, "Unable to set MC SFD\n");
goto err_out;
......@@ -987,15 +1622,8 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
if (!mlxsw_sp_port)
return -EINVAL;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
return 0;
err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
......@@ -1018,57 +1646,78 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
return err;
}
static void mlxsw_sp_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
__clear_bit(vid, mlxsw_sp_port->active_vlans);
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
mlxsw_sp_port_vlan_del(mlxsw_sp_port, vid);
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
}
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
u16 vid;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
mlxsw_sp_port_vlan_del(mlxsw_sp_port, vid);
return 0;
}
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb)
{
u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
u16 lag_vid = 0;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = fdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
u16 fid_index, vid;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
}
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
fdb->vid);
if (!mlxsw_sp_port_vlan)
return 0;
fid_index = mlxsw_sp_port_vlan->fid->fid;
vid = mlxsw_sp_port_vlan->vid;
if (!mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
mlxsw_sp_port->local_port,
fdb->addr, fid,
false, false);
fdb->addr, fid_index, false,
false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
mlxsw_sp_port->lag_id,
fdb->addr, fid, lag_vid,
fdb->addr, fid_index, vid,
false, false);
}
......@@ -1076,13 +1725,30 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
u16 fid_index;
u16 mid_idx;
int err = 0;
mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device,
mdb->vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
fid_index = mlxsw_sp_port_vlan->fid->fid;
mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
......@@ -1094,8 +1760,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
mid_idx = mid->mid;
if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
false);
err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
mid_idx, false);
if (err)
netdev_err(dev, "Unable to remove MC SFD\n");
}
......@@ -1109,15 +1775,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
if (!mlxsw_sp_port)
return -EINVAL;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
return 0;
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
......@@ -1156,32 +1815,32 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_fdb *fdb,
switchdev_obj_dump_cb_t *cb,
struct net_device *orig_dev)
switchdev_obj_dump_cb_t *cb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *tmp;
struct mlxsw_sp_fid *f;
u16 vport_fid;
char *sfd_pl;
struct net_device *orig_dev = fdb->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
u16 lag_id, fid_index;
char mac[ETH_ALEN];
u16 fid;
u8 local_port;
u16 lag_id;
u8 num_rec;
int stored_err = 0;
int i;
char *sfd_pl;
u8 num_rec;
int err;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
return 0;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl)
return -ENOMEM;
f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
vport_fid = f ? f->fid : 0;
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do {
struct mlxsw_sp_port *tmp;
u8 local_port;
int i;
mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
......@@ -1198,48 +1857,44 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
for (i = 0; i < num_rec; i++) {
switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
case MLXSW_REG_SFD_REC_TYPE_UNICAST:
mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac,
&fid_index,
&local_port);
if (local_port == mlxsw_sp_port->local_port) {
if (vport_fid && vport_fid == fid)
fdb->vid = 0;
else if (!vport_fid &&
!mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid;
else
if (bridge_port->lagged)
continue;
if (bridge_port->system_port != local_port)
continue;
if (bridge_port->bridge_device->vlan_enabled)
fdb->vid = fid_index;
else
fdb->vid = 0;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj);
if (err)
stored_err = err;
}
break;
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
mac, &fid, &lag_id);
mac, &fid_index,
&lag_id);
if (!bridge_port->lagged)
continue;
if (bridge_port->lag_id != lag_id)
continue;
tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
if (tmp && tmp->local_port ==
mlxsw_sp_port->local_port) {
/* LAG records can only point to LAG
* devices or VLAN devices on top.
*/
if (!netif_is_lag_master(orig_dev) &&
!is_vlan_dev(orig_dev))
if (tmp->local_port !=
mlxsw_sp_port->local_port)
continue;
if (vport_fid && vport_fid == fid)
fdb->vid = 0;
else if (!vport_fid &&
!mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid;
if (bridge_port->bridge_device->vlan_enabled)
fdb->vid = fid_index;
else
continue;
fdb->vid = 0;
ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj);
if (err)
stored_err = err;
}
break;
}
}
......@@ -1254,28 +1909,32 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_vlan *vlan,
switchdev_obj_dump_cb_t *cb)
{
u16 vid;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err = 0;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
vlan->flags = 0;
vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
return cb(&vlan->obj);
}
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
vlan->flags = 0;
if (vid == mlxsw_sp_port->pvid)
if (bridge_vlan->pvid)
vlan->flags |= BRIDGE_VLAN_INFO_PVID;
if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
if (bridge_vlan->egress_untagged)
vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
vlan->vid_begin = vid;
vlan->vid_end = vid;
vlan->vid_begin = bridge_vlan->vid;
vlan->vid_end = bridge_vlan->vid;
err = cb(&vlan->obj);
if (err)
break;
}
return err;
}
......@@ -1286,10 +1945,6 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
if (!mlxsw_sp_port)
return -EINVAL;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
......@@ -1297,8 +1952,7 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj), cb,
obj->orig_dev);
SWITCHDEV_OBJ_PORT_FDB(obj), cb);
break;
default:
err = -EOPNOTSUPP;
......@@ -1316,6 +1970,154 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
if (is_vlan_dev(bridge_port->dev))
return -EINVAL;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
/* Let VLAN-aware bridge take care of its own VLANs */
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return 0;
}
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
/* Make sure untagged frames are allowed to ingress */
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
.port_join = mlxsw_sp_bridge_8021q_port_join,
.port_leave = mlxsw_sp_bridge_8021q_port_leave,
};
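/* Returns true if any {Port, VID} on the port is already enslaved to
 * br_dev; used to reject bridging two VLAN uppers of the same port.
 */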
static bool
mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
const struct net_device *br_dev)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
list) {
if (mlxsw_sp_port_vlan->bridge_port &&
mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
br_dev)
return true;
}
return false;
}
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_fid *fid;
u16 vid;
if (!is_vlan_dev(bridge_port->dev))
return -EINVAL;
vid = vlan_dev_vlan_id(bridge_port->dev);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
fid = mlxsw_sp_port_vlan->fid;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
return -EINVAL;
}
/* Port is no longer usable as a router interface */
if (fid)
fid->leave(mlxsw_sp_port_vlan);
return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 vid = vlan_dev_vlan_id(bridge_port->dev);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.port_join = mlxsw_sp_bridge_8021d_port_join,
.port_leave = mlxsw_sp_bridge_8021d_port_leave,
};
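VLAN-aware (802.1Q) and VLAN-unaware (802.1D) bridges are handled through per-type ops structures, so the generic join/leave paths below dispatch via bridge_device->ops instead of checking the bridge type inline. A minimal sketch of that dispatch shape, with illustrative names only:

#include <stdio.h>

struct bridge_device;

struct bridge_ops {
	int (*port_join)(struct bridge_device *bridge, int port);
	void (*port_leave)(struct bridge_device *bridge, int port);
};

struct bridge_device {
	const struct bridge_ops *ops;	/* selected when the bridge is created */
};

static int vlan_aware_join(struct bridge_device *bridge, int port)
{
	printf("802.1Q join, port %d\n", port);
	return 0;
}

static void vlan_aware_leave(struct bridge_device *bridge, int port)
{
	printf("802.1Q leave, port %d\n", port);
}

static const struct bridge_ops bridge_8021q_ops = {
	.port_join = vlan_aware_join,
	.port_leave = vlan_aware_leave,
};

static int bridge_port_join(struct bridge_device *bridge, int port)
{
	return bridge->ops->port_join(bridge, port);	/* type-specific behaviour */
}

int main(void)
{
	struct bridge_device bridge = { .ops = &bridge_8021q_ops };

	return bridge_port_join(&bridge, 1);
}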
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
int err;
bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
if (IS_ERR(bridge_port))
return PTR_ERR(bridge_port);
bridge_device = bridge_port->bridge_device;
err = bridge_device->ops->port_join(bridge_device, bridge_port,
mlxsw_sp_port);
if (err)
goto err_port_join;
return 0;
err_port_join:
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
return err;
}
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return;
bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
if (!bridge_port)
return;
bridge_device->ops->port_leave(bridge_device, bridge_port,
mlxsw_sp_port);
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
char *mac, u16 vid,
struct net_device *dev)
......@@ -1335,6 +2137,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
char mac[ETH_ALEN];
u8 local_port;
......@@ -1349,22 +2154,21 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
if (mlxsw_sp_fid_is_vfid(fid)) {
struct mlxsw_sp_port *mlxsw_sp_vport;
mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
goto just_remove;
}
vid = 0;
/* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport;
} else {
vid = fid;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
if (!bridge_port) {
netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
goto just_remove;
}
bridge_device = bridge_port->bridge_device;
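/* VLAN-unaware bridges see their FDB entries with VID 0 */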
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
......@@ -1375,8 +2179,8 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
if (!do_notification)
return;
mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
adding, mac, vid, mlxsw_sp_port->dev);
mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC,
adding, mac, vid, bridge_port->dev);
return;
just_remove:
......@@ -1389,8 +2193,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
char mac[ETH_ALEN];
u16 lag_vid = 0;
u16 lag_id;
......@@ -1405,26 +2211,22 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
if (mlxsw_sp_fid_is_vfid(fid)) {
struct mlxsw_sp_port *mlxsw_sp_vport;
mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
goto just_remove;
}
lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
dev = mlxsw_sp_vport->dev;
vid = 0;
/* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport;
} else {
dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
vid = fid;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
if (!bridge_port) {
netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
goto just_remove;
}
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
lag_vid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
......@@ -1435,8 +2237,8 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
if (!do_notification)
return;
mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
vid, dev);
mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC,
adding, mac, vid, bridge_port->dev);
return;
just_remove:
......@@ -1540,8 +2342,12 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->bridge = bridge;
bridge->mlxsw_sp = mlxsw_sp;
INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
return mlxsw_sp_fdb_init(mlxsw_sp);
}
......@@ -1549,6 +2355,7 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge);
}
......