Commit 84562f99 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: prepare representor for SF support

Michal Swiatkowski says:

This is a series to prepare the port representor code for also supporting
subfunctions. We need correct devlink locking and the ability to update
the parent VSI after the port representor is created.

Refactor how the devlink lock is taken to suit the subfunction use case.

VSI configuration needs to be done after the port representor is created.
The port representor only needs an allocated VSI; it doesn't need to be
configured beforehand.

The VSI needs to be reconfigured when the update function is called.
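
For illustration, a minimal sketch of the resulting flow, condensed from
the diff below (not a literal excerpt; error handling omitted):

	/* ice_eswitch_attach() now takes the devlink lock around
	 * representor creation; the VSI is configured from within
	 * ice_repr_add_vf() via ice_eswitch_cfg_vsi().
	 */
	devl_lock(devlink);
	repr = ice_repr_add_vf(vf);
	devl_unlock(devlink);

	/* Later, e.g. after a VF reset, the parent VSI is reconfigured
	 * and the representor ID is refreshed in place.
	 */
	ice_eswitch_update_repr(&vf->repr_id, vsi);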

The code for this patchset was split out of a larger (too big) patchset [1].

[1] https://lore.kernel.org/netdev/20240213072724.77275-1-michal.swiatkowski@linux.intel.com/
---
Originally from https://lore.kernel.org/netdev/20240605-next-2024-06-03-intel-next-batch-v2-0-39c23963fa78@intel.com/
Changes:
- delete ice_repr_get_by_vsi() from header
- rephrase commit message in moving devlink locking
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 185d7211 fff5cca3
@@ -794,10 +794,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
 	tc_node = pi->root->children[0];
 	mutex_lock(&pi->sched_lock);
-	devl_lock(devlink);
 	for (i = 0; i < tc_node->num_children; i++)
 		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
-	devl_unlock(devlink);
 	mutex_unlock(&pi->sched_lock);
 	return 0;
@@ -407,7 +407,7 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
 	devlink_port_attrs_set(devlink_port, &attrs);
 	devlink = priv_to_devlink(pf);
-	err = devlink_port_register(devlink, devlink_port, vsi->idx);
+	err = devl_port_register(devlink, devlink_port, vsi->idx);
 	if (err) {
 		dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
 			vf->vf_id, err);
@@ -426,5 +426,5 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
 void ice_devlink_destroy_vf_port(struct ice_vf *vf)
 {
 	devl_rate_leaf_destroy(&vf->devlink_port);
-	devlink_port_unregister(&vf->devlink_port);
+	devl_port_unregister(&vf->devlink_port);
 }
@@ -117,17 +117,10 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
 	struct ice_vsi *vsi = repr->src_vsi;
 	struct metadata_dst *dst;
-	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
 	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
 				       GFP_KERNEL);
 	if (!repr->dst)
-		goto err_add_mac_fltr;
-	if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
-		goto err_dst_free;
-	if (ice_vsi_add_vlan_zero(vsi))
-		goto err_update_security;
+		return -ENOMEM;
 	netif_keep_dst(uplink_vsi->netdev);
@@ -136,16 +129,48 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
 	dst->u.port_info.lower_dev = uplink_vsi->netdev;
 	return 0;
+}
-err_update_security:
+/**
+ * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+	int err;
+	ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+	err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+	if (err)
+		goto err_update_security;
+	err = ice_vsi_add_vlan_zero(vsi);
+	if (err)
+		goto err_vlan_zero;
+	return 0;
+err_vlan_zero:
 	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-err_dst_free:
-	metadata_dst_free(repr->dst);
-	repr->dst = NULL;
-err_add_mac_fltr:
-	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
+err_update_security:
+	ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
-	return -ENODEV;
+	return err;
 }
+/**
+ * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ */
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+	ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
+}
 /**
@@ -153,16 +178,16 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
  * @repr_id: representor ID
  * @vsi: VSI for which port representor is configured
  */
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_repr *repr;
-	int ret;
+	int err;
 	if (!ice_is_switchdev_running(pf))
 		return;
-	repr = xa_load(&pf->eswitch.reprs, repr_id);
+	repr = xa_load(&pf->eswitch.reprs, *repr_id);
 	if (!repr)
 		return;
@@ -172,12 +197,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
 	if (repr->br_port)
 		repr->br_port->vsi = vsi;
-	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
-	if (ret) {
-		ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
-					       ICE_FWD_TO_VSI);
+	err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
+	if (err)
 		dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
 			repr->id);
+	/* The VSI number is different, reload the PR with new id */
+	if (repr->id != vsi->vsi_num) {
+		xa_erase(&pf->eswitch.reprs, repr->id);
+		repr->id = vsi->vsi_num;
+		if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
+			dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
+				repr->id);
+		*repr_id = repr->id;
 	}
 }
@@ -423,6 +455,7 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
 int
 ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 {
+	struct devlink *devlink = priv_to_devlink(pf);
 	struct ice_repr *repr;
 	int err;
@@ -437,7 +470,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 	ice_eswitch_stop_reprs(pf);
+	devl_lock(devlink);
 	repr = ice_repr_add_vf(vf);
+	devl_unlock(devlink);
 	if (IS_ERR(repr)) {
 		err = PTR_ERR(repr);
 		goto err_create_repr;
@@ -460,7 +495,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 err_xa_alloc:
 	ice_eswitch_release_repr(pf, repr);
 err_setup_repr:
+	devl_lock(devlink);
 	ice_repr_rem_vf(repr);
+	devl_unlock(devlink);
 err_create_repr:
 	if (xa_empty(&pf->eswitch.reprs))
 		ice_eswitch_disable_switchdev(pf);
@@ -484,6 +521,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
 		ice_eswitch_disable_switchdev(pf);
 	ice_eswitch_release_repr(pf, repr);
+	devl_lock(devlink);
 	ice_repr_rem_vf(repr);
 	if (xa_empty(&pf->eswitch.reprs)) {
@@ -491,12 +529,11 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
 		 * no point in keeping the nodes
 		 */
 		ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
-		devl_lock(devlink);
 		devl_rate_nodes_destroy(devlink);
-		devl_unlock(devlink);
 	} else {
 		ice_eswitch_start_reprs(pf);
 	}
+	devl_unlock(devlink);
 }
 /**
@@ -18,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 		     struct netlink_ext_ack *extack);
 bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi);
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -28,6 +28,9 @@ netdev_tx_t
 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
 					  union ice_32b_rx_flex_desc *rx_desc);
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
 #else /* CONFIG_ICE_SWITCHDEV */
 static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
@@ -44,7 +47,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
 			   struct ice_tx_offload_params *off) { }
 static inline void
-ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
+ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
 static inline int ice_eswitch_configure(struct ice_pf *pf)
 {
@@ -85,5 +88,12 @@ ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
 {
 	return rx_ring->netdev;
 }
+static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+	return -EOPNOTSUPP;
+}
+static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { }
 #endif /* CONFIG_ICE_SWITCHDEV */
 #endif /* _ICE_ESWITCH_H_ */
@@ -896,7 +896,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
 	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
 		vsi->back->br_port = NULL;
 	} else {
-		struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+		struct ice_repr *repr =
+			ice_repr_get(vsi->back, br_port->repr_id);
 		if (repr)
 			repr->br_port = NULL;
@@ -937,6 +938,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
 	br_port->vsi = repr->src_vsi;
 	br_port->vsi_idx = br_port->vsi->idx;
 	br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+	br_port->repr_id = repr->id;
 	repr->br_port = br_port;
 	err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
@@ -46,6 +46,7 @@ struct ice_esw_br_port {
 	enum ice_esw_br_port_type type;
 	u16 vsi_idx;
 	u16 pvid;
+	u32 repr_id;
 	struct xarray vlans;
 };
@@ -285,9 +285,7 @@ ice_repr_reg_netdev(struct net_device *netdev)
 static void ice_repr_remove_node(struct devlink_port *devlink_port)
 {
-	devl_lock(devlink_port->devlink);
 	devl_rate_leaf_destroy(devlink_port);
-	devl_unlock(devlink_port->devlink);
 }
 /**
@@ -308,6 +306,7 @@ static void ice_repr_rem(struct ice_repr *repr)
 void ice_repr_rem_vf(struct ice_repr *repr)
 {
 	ice_repr_remove_node(&repr->vf->devlink_port);
+	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
 	unregister_netdev(repr->netdev);
 	ice_devlink_destroy_vf_port(repr->vf);
 	ice_virtchnl_set_dflt_ops(repr->vf);
@@ -403,11 +402,17 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
 	if (err)
 		goto err_netdev;
+	err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
+	if (err)
+		goto err_cfg_vsi;
 	ice_virtchnl_set_repr_ops(vf);
 	ice_repr_set_tx_topology(vf->pf);
 	return repr;
+err_cfg_vsi:
+	unregister_netdev(repr->netdev);
 err_netdev:
 	ice_repr_rem(repr);
 err_repr_add:
@@ -415,12 +420,9 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
 	return ERR_PTR(err);
 }
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
 {
-	if (!vsi->vf)
-		return NULL;
-	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+	return xa_load(&pf->eswitch.reprs, id);
 }
 /**
@@ -35,9 +35,8 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
 struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
 bool ice_is_port_repr_netdev(const struct net_device *netdev);
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
 void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
 			   int xmit_status);
 void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id);
 #endif
@@ -948,7 +948,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 		goto out_unlock;
 	}
-	ice_eswitch_update_repr(vf->repr_id, vsi);
+	ice_eswitch_update_repr(&vf->repr_id, vsi);
 	/* if the VF has been reset allow it to come up again */
 	ice_mbx_clear_malvf(&vf->mbx_info);