Commit 8693115a authored by Parav Pandit, committed by Saeed Mahameed

{IB,net}/mlx5: Constify rep ops function pointers

Currently, for every representor type and for every single vport, a copy of
the representor function pointers is stored, even though these pointers do
not change from one vport to another.

Additionally, the priv data entry for the rep is not passed in during
registration, yet it is copied anyway. It is actually used (set and cleared)
by the user of the reps.

As we want to scale the number of vports, and in order to simplify the code
and split constants from data:

1. Rename mlx5_eswitch_rep_if to mlx5_eswitch_rep_ops so that its _ops
   suffix matches other standard netdev and ibdev ops.
2. Constify the IB and Ethernet rep ops structures.
3. Instead of storing a copy of all rep function pointers in every vport,
   store a single copy per eswitch rep type.
4. Split the function pointers and the data into mlx5_eswitch_rep_ops and
   mlx5_eswitch_rep_data.
Signed-off-by: Parav Pandit <parav@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent c94ff748
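
For orientation before the hunks, here is a minimal sketch of the split this patch introduces, condensed from the eswitch header and registration hunks below (only the members and functions touched by the patch are shown):

    /* Constant callbacks: one table per rep type, shared by every vport. */
    struct mlx5_eswitch_rep_ops {
    	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
    	void (*unload)(struct mlx5_eswitch_rep *rep);
    	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
    };

    /* Mutable state: still kept per vport and per rep type. */
    struct mlx5_eswitch_rep_data {
    	void *priv;
    	atomic_t state;
    };

    /* A user such as the IB rep now registers one const ops table ... */
    static const struct mlx5_eswitch_rep_ops rep_ops = {
    	.load = mlx5_ib_vport_rep_load,
    	.unload = mlx5_ib_vport_rep_unload,
    	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
    };

    void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
    {
    	struct mlx5_eswitch *esw = mdev->priv.eswitch;

    	/* ... and the eswitch stores only one pointer per rep type
    	 * (esw->offloads.rep_ops[rep_type] = ops) instead of copying
    	 * the callbacks into every vport rep.
    	 */
    	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
    }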
@@ -60,7 +60,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	if (!__mlx5_ib_add(ibdev, profile))
 		return -EINVAL;
 
-	rep->rep_if[REP_IB].priv = ibdev;
+	rep->rep_data[REP_IB].priv = ibdev;
 
 	return 0;
 }
@@ -70,13 +70,13 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
 	struct mlx5_ib_dev *dev;
 
-	if (!rep->rep_if[REP_IB].priv ||
+	if (!rep->rep_data[REP_IB].priv ||
 	    rep->vport != MLX5_VPORT_UPLINK)
 		return;
 
 	dev = mlx5_ib_rep_to_dev(rep);
 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-	rep->rep_if[REP_IB].priv = NULL;
+	rep->rep_data[REP_IB].priv = NULL;
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -84,16 +84,17 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return mlx5_ib_rep_to_dev(rep);
 }
 
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5_ib_vport_rep_load,
+	.unload = mlx5_ib_vport_rep_unload,
+	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
+
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-
-	rep_if.load = mlx5_ib_vport_rep_load;
-	rep_if.unload = mlx5_ib_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
 
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
 }
 
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
...
@@ -72,6 +72,6 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 static inline
 struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 {
-	return rep->rep_if[REP_IB].priv;
+	return rep->rep_data[REP_IB].priv;
 }
 #endif /* __MLX5_IB_REP_H__ */
@@ -1752,7 +1752,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	}
 
 	rpriv->netdev = netdev;
-	rep->rep_if[REP_ETH].priv = rpriv;
+	rep->rep_data[REP_ETH].priv = rpriv;
 	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
 	if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1826,16 +1826,17 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return rpriv->netdev;
 }
 
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5e_vport_rep_load,
+	.unload = mlx5e_vport_rep_unload,
+	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
+};
+
 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-
-	rep_if.load = mlx5e_vport_rep_load;
-	rep_if.unload = mlx5e_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
 
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
 }
 
 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
...
@@ -91,7 +91,7 @@ struct mlx5e_rep_priv {
 static inline
 struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
 {
-	return rep->rep_if[REP_ETH].priv;
+	return rep->rep_data[REP_ETH].priv;
 }
 
 struct mlx5e_neigh {
...
@@ -173,6 +173,7 @@ struct mlx5_esw_offload {
 	struct mutex peer_mutex;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
 	u8 inline_mode;
 	u64 num_flows;
 	u8 encap;
...
@@ -332,7 +332,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
+		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -1276,7 +1276,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 		ether_addr_copy(rep->hw_id, hw_id);
 
 		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
-			atomic_set(&rep->rep_if[rep_type].state,
+			atomic_set(&rep->rep_data[rep_type].state,
 				   REP_UNREGISTERED);
 	}
 
@@ -1286,9 +1286,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
 				      struct mlx5_eswitch_rep *rep, u8 rep_type)
 {
-	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
 			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
-		rep->rep_if[rep_type].unload(rep);
+		esw->offloads.rep_ops[rep_type]->unload(rep);
 }
 
 static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1349,11 +1349,11 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
 {
 	int err = 0;
 
-	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
 			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
-		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
 		if (err)
-			atomic_set(&rep->rep_if[rep_type].state,
+			atomic_set(&rep->rep_data[rep_type].state,
 				   REP_REGISTERED);
 	}
 
@@ -2216,21 +2216,17 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 }
 
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *__rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type)
 {
-	struct mlx5_eswitch_rep_if *rep_if;
+	struct mlx5_eswitch_rep_data *rep_data;
 	struct mlx5_eswitch_rep *rep;
 	int i;
 
+	esw->offloads.rep_ops[rep_type] = ops;
 	mlx5_esw_for_all_reps(esw, i, rep) {
-		rep_if = &rep->rep_if[rep_type];
-		rep_if->load = __rep_if->load;
-		rep_if->unload = __rep_if->unload;
-		rep_if->get_proto_dev = __rep_if->get_proto_dev;
-		rep_if->priv = __rep_if->priv;
-
-		atomic_set(&rep_if->state, REP_REGISTERED);
+		rep_data = &rep->rep_data[rep_type];
+		atomic_set(&rep_data->state, REP_REGISTERED);
 	}
 }
 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2245,7 +2241,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
 	__unload_reps_all_vport(esw, max_vf, rep_type);
 
 	mlx5_esw_for_all_reps(esw, i, rep)
-		atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
+		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
 }
 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -2254,7 +2250,7 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 	struct mlx5_eswitch_rep *rep;
 
 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
-	return rep->rep_if[rep_type].priv;
+	return rep->rep_data[rep_type].priv;
 }
 
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -2265,9 +2261,9 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
 	rep = mlx5_eswitch_get_rep(esw, vport);
 
-	if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
-	    rep->rep_if[rep_type].get_proto_dev)
-		return rep->rep_if[rep_type].get_proto_dev(rep);
+	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
+		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
 	return NULL;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
...
@@ -29,17 +29,19 @@ enum {
 };
 
 struct mlx5_eswitch_rep;
-struct mlx5_eswitch_rep_if {
-	int (*load)(struct mlx5_core_dev *dev,
-		    struct mlx5_eswitch_rep *rep);
+struct mlx5_eswitch_rep_ops {
+	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
 	void (*unload)(struct mlx5_eswitch_rep *rep);
 	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+};
+
+struct mlx5_eswitch_rep_data {
 	void *priv;
 	atomic_t state;
 };
 
 struct mlx5_eswitch_rep {
-	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
+	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
 	u16 vport;
 	u8 hw_id[ETH_ALEN];
 	u16 vlan;
@@ -47,7 +49,7 @@ struct mlx5_eswitch_rep {
 };
 
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type);
 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
...