Commit 631581bf authored by David S. Miller

Merge branch 'mlxsw-Preparations-for-restructuring'

Jiri Pirko says:

====================
mlxsw: Preparations for restructuring

This patchset doesn't introduce any functional changes; it is merely meant
to make the code base more receptive to the upcoming restructuring.

The first six patches mainly shuffle code in order to reduce the scope of
structs that shouldn't be defined in the main driver header. Most of them
will later be expanded, so it makes sense to place them correctly now.

The last patches mostly simplify bridge-related functions, so that they
can be more easily modified later on.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 29db3984 45a4a16c
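The recurring pattern in these patches is to turn structs that used to live in the shared spectrum.h header (struct mlxsw_sp_sb, struct mlxsw_sp_router, the new struct mlxsw_sp_bridge) into opaque types that are allocated by the file owning them and reached only through a pointer and small helpers. A minimal standalone sketch of that pattern is shown below; the names (sb_private, sb_create, sb_cells_bytes) are invented for illustration and are not mlxsw code.

```c
/* Minimal standalone sketch (not mlxsw code): a struct whose layout is
 * private to one .c file. Callers hold only a pointer and go through
 * helpers, mirroring how struct mlxsw_sp_sb is handled in this series.
 */
#include <stdio.h>
#include <stdlib.h>

struct sb_private;                      /* opaque forward declaration, as in the header */

struct sb_private {                     /* full definition stays in the owning .c file */
        unsigned int cell_size;
};

static struct sb_private *sb_create(unsigned int cell_size)
{
        struct sb_private *sb = calloc(1, sizeof(*sb));

        if (sb)
                sb->cell_size = cell_size;
        return sb;
}

/* Helper replaces direct field access from other translation units. */
static unsigned int sb_cells_bytes(const struct sb_private *sb, unsigned int cells)
{
        return sb->cell_size * cells;
}

int main(void)
{
        struct sb_private *sb = sb_create(96); /* 96 is an arbitrary example value */

        if (!sb)
                return 1;
        printf("10 cells = %u bytes\n", sb_cells_bytes(sb, 10));
        free(sb);
        return 0;
}
```

Keeping the definition private means other translation units can no longer touch the fields directly, which is what allows the later restructuring to reshape these objects without editing every user.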
@@ -210,6 +210,41 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
 	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 }
 
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+			      u8 state)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	enum mlxsw_reg_spms_state spms_state;
+	char *spms_pl;
+	int err;
+
+	switch (state) {
+	case BR_STATE_FORWARDING:
+		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+		break;
+	case BR_STATE_LEARNING:
+		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+		break;
+	case BR_STATE_LISTENING: /* fall-through */
+	case BR_STATE_DISABLED: /* fall-through */
+	case BR_STATE_BLOCKING:
+		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+		break;
+	default:
+		BUG();
+	}
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
 {
 	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
@@ -631,8 +666,7 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
 }
 
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
-				     u16 vid_begin, u16 vid_end,
-				     bool learn_enable)
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+				   bool learn_enable)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -642,18 +676,56 @@ int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
 	if (!spvmlr_pl)
 		return -ENOMEM;
-	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
-			      vid_end, learn_enable);
+	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+			      learn_enable);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
 	kfree(spvmlr_pl);
 	return err;
 }
 
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
-					  u16 vid, bool learn_enable)
+static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				    u16 vid)
 {
-	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
-						learn_enable);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					    bool allow)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char spaft_pl[MLXSW_REG_SPAFT_LEN];
+
+	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
+}
+
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+	int err;
+
+	if (!vid) {
+		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
+		if (err)
+			return err;
+	} else {
+		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+		if (err)
+			return err;
+		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
+		if (err)
+			goto err_port_allow_untagged_set;
+	}
+
+	mlxsw_sp_port->pvid = vid;
+	return 0;
+
+err_port_allow_untagged_set:
+	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+	return err;
 }
 
 static int
@@ -2547,6 +2619,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_dcb_init;
 	}
 
+	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set non-virtual mode\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_vp_mode_set;
+	}
+
 	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
@@ -2574,6 +2653,7 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
 err_port_pvid_vport_create:
+err_port_vp_mode_set:
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 err_port_dcb_init:
 err_port_ets_init:
@@ -3312,7 +3392,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->bus_info = mlxsw_bus_info;
 	INIT_LIST_HEAD(&mlxsw_sp->fids);
 	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
-	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
 
 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
 	if (err) {
@@ -3659,21 +3738,26 @@ static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
 					 struct net_device *br_dev)
 {
-	return !mlxsw_sp->master_bridge.dev ||
-	       mlxsw_sp->master_bridge.dev == br_dev;
+	struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);
+
+	return !master_bridge->dev || master_bridge->dev == br_dev;
 }
 
 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
 				       struct net_device *br_dev)
 {
-	mlxsw_sp->master_bridge.dev = br_dev;
-	mlxsw_sp->master_bridge.ref_count++;
+	struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);
+
+	master_bridge->dev = br_dev;
+	master_bridge->ref_count++;
 }
 
 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
 {
-	if (--mlxsw_sp->master_bridge.ref_count == 0) {
-		mlxsw_sp->master_bridge.dev = NULL;
+	struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);
+
+	if (--master_bridge->ref_count == 0) {
+		master_bridge->dev = NULL;
 		/* It's possible upper VLAN devices are still holding
 		 * references to underlying FIDs. Drop the reference
 		 * and release the resources if it was the last one.
@@ -4272,7 +4356,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
 		if (!is_vlan_dev(upper_dev))
 			return -EINVAL;
 		if (is_vlan_dev(upper_dev) &&
-		    br_dev != mlxsw_sp->master_bridge.dev)
+		    br_dev != mlxsw_sp_master_bridge(mlxsw_sp)->dev)
 			return -EINVAL;
 		break;
 	case NETDEV_CHANGEUPPER:
......
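In the mlxsw_sp_port_pvid_set() added above, VID 0 means "no PVID": untagged traffic is simply disallowed via SPAFT, while a non-zero VID is programmed through SPVID first and untagged traffic is allowed afterwards, rolling back to the previous PVID if the second write fails. A standalone sketch of that rollback shape follows; port_state, hw_set_pvid() and hw_allow_untagged() are hypothetical stand-ins for the register writes, not driver functions.

```c
/* Standalone sketch of the pvid_set rollback shape; the hw_* helpers are
 * hypothetical stand-ins for the SPVID/SPAFT register writes. */
#include <stdbool.h>
#include <stdio.h>

struct port_state {
        unsigned short pvid;            /* last value accepted by "hardware" */
        bool allow_untagged;
};

static int hw_set_pvid(struct port_state *p, unsigned short vid)
{
        p->pvid = vid;                  /* pretend the register write succeeded */
        return 0;
}

static int hw_allow_untagged(struct port_state *p, bool allow)
{
        p->allow_untagged = allow;
        return 0;
}

static int port_pvid_set(struct port_state *p, unsigned short vid)
{
        unsigned short old_pvid = p->pvid;
        int err;

        if (!vid)
                return hw_allow_untagged(p, false); /* VID 0: just drop untagged */

        err = hw_set_pvid(p, vid);
        if (err)
                return err;
        err = hw_allow_untagged(p, true);
        if (err)
                hw_set_pvid(p, old_pvid);           /* roll back to the previous PVID */
        return err;
}

int main(void)
{
        struct port_state p = { .pvid = 1, .allow_untagged = true };

        port_pvid_set(&p, 10);
        printf("pvid=%hu untagged=%d\n", p.pvid, (int)p.allow_untagged);
        return 0;
}
```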
@@ -110,70 +110,6 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
 	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
 }
 
-struct mlxsw_sp_sb_pr {
-	enum mlxsw_reg_sbpr_mode mode;
-	u32 size;
-};
-
-struct mlxsw_cp_sb_occ {
-	u32 cur;
-	u32 max;
-};
-
-struct mlxsw_sp_sb_cm {
-	u32 min_buff;
-	u32 max_buff;
-	u8 pool;
-	struct mlxsw_cp_sb_occ occ;
-};
-
-struct mlxsw_sp_sb_pm {
-	u32 min_buff;
-	u32 max_buff;
-	struct mlxsw_cp_sb_occ occ;
-};
-
-#define MLXSW_SP_SB_POOL_COUNT	4
-#define MLXSW_SP_SB_TC_COUNT	8
-
-struct mlxsw_sp_sb_port {
-	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
-	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
-};
-
-struct mlxsw_sp_sb {
-	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
-	struct mlxsw_sp_sb_port *ports;
-	u32 cell_size;
-};
-
-#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
-
-struct mlxsw_sp_prefix_usage {
-	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
-};
-
-enum mlxsw_sp_l3proto {
-	MLXSW_SP_L3_PROTO_IPV4,
-	MLXSW_SP_L3_PROTO_IPV6,
-};
-
-struct mlxsw_sp_lpm_tree {
-	u8 id; /* tree ID */
-	unsigned int ref_count;
-	enum mlxsw_sp_l3proto proto;
-	struct mlxsw_sp_prefix_usage prefix_usage;
-};
-
-struct mlxsw_sp_fib;
-
-struct mlxsw_sp_vr {
-	u16 id; /* virtual router ID */
-	u32 tb_id; /* kernel fib table id */
-	unsigned int rif_count;
-	struct mlxsw_sp_fib *fib4;
-};
-
 enum mlxsw_sp_span_type {
 	MLXSW_SP_SPAN_EGRESS,
 	MLXSW_SP_SPAN_INGRESS
@@ -212,25 +148,9 @@ struct mlxsw_sp_port_mall_tc_entry {
 	};
 };
 
-struct mlxsw_sp_router {
-	struct mlxsw_sp_vr *vrs;
-	struct rhashtable neigh_ht;
-	struct rhashtable nexthop_group_ht;
-	struct rhashtable nexthop_ht;
-	struct {
-		struct mlxsw_sp_lpm_tree *trees;
-		unsigned int tree_count;
-	} lpm;
-	struct {
-		struct delayed_work dw;
-		unsigned long interval;	/* ms */
-	} neighs_update;
-	struct delayed_work nexthop_probe_dw;
-#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
-	struct list_head nexthop_neighs_list;
-	bool aborted;
-};
+struct mlxsw_sp_sb;
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_router;
 
 struct mlxsw_sp_acl;
 struct mlxsw_sp_counter_pool;
@@ -239,30 +159,16 @@ struct mlxsw_sp {
 		struct list_head list;
 		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
 	} vfids;
-	struct {
-		struct list_head list;
-		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
-	} br_mids;
 	struct list_head fids;	/* VLAN-aware bridge FIDs */
-	struct mlxsw_sp_rif **rifs;
 	struct mlxsw_sp_port **ports;
 	struct mlxsw_core *core;
 	const struct mlxsw_bus_info *bus_info;
 	unsigned char base_mac[ETH_ALEN];
-	struct {
-		struct delayed_work dw;
-#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
-		unsigned int interval; /* ms */
-	} fdb_notify;
-#define MLXSW_SP_MIN_AGEING_TIME 10
-#define MLXSW_SP_MAX_AGEING_TIME 1000000
-#define MLXSW_SP_DEFAULT_AGEING_TIME 300
-	u32 ageing_time;
-	struct mlxsw_sp_upper master_bridge;
 	struct mlxsw_sp_upper *lags;
 	u8 *port_to_module;
-	struct mlxsw_sp_sb sb;
-	struct mlxsw_sp_router router;
+	struct mlxsw_sp_sb *sb;
+	struct mlxsw_sp_bridge *bridge;
+	struct mlxsw_sp_router *router;
 	struct mlxsw_sp_acl *acl;
 	struct {
 		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
@@ -273,7 +179,6 @@ struct mlxsw_sp {
 		struct mlxsw_sp_span_entry *entries;
 		int entries_count;
 	} span;
-	struct notifier_block fib_nb;
 };
 
 static inline struct mlxsw_sp_upper *
@@ -282,18 +187,6 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
 	return &mlxsw_sp->lags[lag_id];
 }
 
-static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
-				       u32 cells)
-{
-	return mlxsw_sp->sb.cell_size * cells;
-}
-
-static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
-				       u32 bytes)
-{
-	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
-}
-
 struct mlxsw_sp_port_pcpu_stats {
 	u64 rx_packets;
 	u64 rx_bytes;
@@ -515,7 +408,10 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
 				     unsigned int sb_index, u16 tc_index,
 				     enum devlink_sb_pool_type pool_type,
 				     u32 *p_cur, u32 *p_max);
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
+struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp);
 
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -529,7 +425,6 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
 			     bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
 			bool adding);
@@ -546,9 +441,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				  enum mlxsw_reg_qeec_hr hr, u8 index,
 				  u8 next_index, u32 maxrate);
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
-				     u16 vid_begin, u16 vid_end,
-				     bool learn_enable);
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+			      u8 state);
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+				   bool learn_enable);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
 
 #ifdef CONFIG_MLXSW_SPECTRUM_DCB
......
@@ -43,25 +43,72 @@
 #include "port.h"
 #include "reg.h"
 
+struct mlxsw_sp_sb_pr {
+	enum mlxsw_reg_sbpr_mode mode;
+	u32 size;
+};
+
+struct mlxsw_cp_sb_occ {
+	u32 cur;
+	u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+	u32 min_buff;
+	u32 max_buff;
+	u8 pool;
+	struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+	u32 min_buff;
+	u32 max_buff;
+	struct mlxsw_cp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT	4
+#define MLXSW_SP_SB_TC_COUNT	8
+
+struct mlxsw_sp_sb_port {
+	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
+struct mlxsw_sp_sb {
+	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+	struct mlxsw_sp_sb_port *ports;
+	u32 cell_size;
+};
+
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
+{
+	return mlxsw_sp->sb->cell_size * cells;
+}
+
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
+{
+	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
+}
+
 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
 						 u8 pool,
 						 enum mlxsw_reg_sbxx_dir dir)
 {
-	return &mlxsw_sp->sb.prs[dir][pool];
+	return &mlxsw_sp->sb->prs[dir][pool];
 }
 
 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
 						 u8 local_port, u8 pg_buff,
 						 enum mlxsw_reg_sbxx_dir dir)
 {
-	return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+	return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
 }
 
 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
 						 u8 local_port, u8 pool,
 						 enum mlxsw_reg_sbxx_dir dir)
 {
-	return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+	return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
 }
 
 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -215,16 +262,17 @@ static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
 {
 	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
 
-	mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
-				     GFP_KERNEL);
-	if (!mlxsw_sp->sb.ports)
+	mlxsw_sp->sb->ports = kcalloc(max_ports,
+				      sizeof(struct mlxsw_sp_sb_port),
+				      GFP_KERNEL);
+	if (!mlxsw_sp->sb->ports)
 		return -ENOMEM;
 	return 0;
 }
 
 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
 {
-	kfree(mlxsw_sp->sb.ports);
+	kfree(mlxsw_sp->sb->ports);
 }
 
 #define MLXSW_SP_SB_PR_INGRESS_SIZE	12440000
@@ -551,15 +599,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
 		return -EIO;
-	mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
 		return -EIO;
 	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
 
+	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
+	if (!mlxsw_sp->sb)
+		return -ENOMEM;
+	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
 	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
 	if (err)
-		return err;
+		goto err_sb_ports_init;
 	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
 	if (err)
 		goto err_sb_prs_init;
@@ -584,6 +636,8 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 err_sb_cpu_port_sb_cms_init:
 err_sb_prs_init:
 	mlxsw_sp_sb_ports_fini(mlxsw_sp);
+err_sb_ports_init:
+	kfree(mlxsw_sp->sb);
 	return err;
 }
 
@@ -591,6 +645,7 @@ void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
 	mlxsw_sp_sb_ports_fini(mlxsw_sp);
+	kfree(mlxsw_sp->sb);
 }
 
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
......
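With struct mlxsw_sp_sb now private to this file, the cell converters moved here become ordinary exported functions operating on mlxsw_sp->sb->cell_size, which is read from the CELL_SIZE resource during mlxsw_sp_buffers_init(); byte counts round up to whole cells. A self-contained illustration is below; the 96-byte cell size is only an example value, not something taken from this patch.

```c
/* Standalone illustration of the bytes<->cells conversion above, using a
 * hypothetical 96-byte cell size (the real value comes from the CELL_SIZE
 * resource queried at init time). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int cells_bytes(unsigned int cell_size, unsigned int cells)
{
        return cell_size * cells;
}

static unsigned int bytes_cells(unsigned int cell_size, unsigned int bytes)
{
        return DIV_ROUND_UP(bytes, cell_size);  /* partial cells round up */
}

int main(void)
{
        unsigned int cell_size = 96;

        printf("100 bytes -> %u cells\n", bytes_cells(cell_size, 100)); /* 2 */
        printf("2 cells -> %u bytes\n", cells_bytes(cell_size, 2));     /* 192 */
        return 0;
}
```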
@@ -241,10 +241,11 @@ mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
 		return err;
 	j = 0;
 	for (; i < rif_count; i++) {
-		if (!mlxsw_sp->rifs[i])
+		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+		if (!rif)
 			continue;
-		err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
-					      mlxsw_sp->rifs[i],
+		err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
 					      counters_enabled);
 		if (err)
 			goto err_entry_get;
@@ -281,15 +282,15 @@ static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable)
 	rtnl_lock();
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
-		if (!mlxsw_sp->rifs[i])
+		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+		if (!rif)
 			continue;
 		if (enable)
-			mlxsw_sp_rif_counter_alloc(mlxsw_sp,
-						   mlxsw_sp->rifs[i],
+			mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 						   MLXSW_SP_RIF_COUNTER_EGRESS);
 		else
-			mlxsw_sp_rif_counter_free(mlxsw_sp,
-						  mlxsw_sp->rifs[i],
+			mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
 						  MLXSW_SP_RIF_COUNTER_EGRESS);
 	}
 	rtnl_unlock();
......
@@ -42,6 +42,8 @@ enum mlxsw_sp_rif_counter_dir {
 	MLXSW_SP_RIF_COUNTER_EGRESS,
 };
 
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+					   u16 rif_index);
 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
......