Commit 604283e9 authored by Michal Swiatkowski, committed by Tony Nguyen

ice: make representor code generic

Representor code needs to be independent of the specific device type it
represents, which in this case is a VF. Split it into generic add/remove
representor functions and VF-specific add/remove wrappers built on top of
them. New device types will follow the same scheme.
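
For illustration only (not part of this patch): a wrapper for a hypothetical
future device type would follow the same pattern as ice_repr_add_vf() /
ice_repr_rem_vf(), delegating the common work to the generic ice_repr_add()
and ice_repr_rem() helpers introduced here. Everything named "foo" below is
invented for the sketch; only ice_repr_add(), ice_repr_rem(), struct ice_repr
and the eswitch.reprs xarray come from the patch.

/* Hypothetical device-type wrapper; "foo" names are made up. */
struct ice_foo_port {				/* hypothetical */
	struct ice_vsi *vsi;
	u8 hw_addr[ETH_ALEN];
	unsigned long repr_id;
};

static int ice_repr_add_foo(struct ice_foo_port *foo)
{
	struct ice_repr *repr;

	/* generic part: allocate repr, netdev, q_vector, xarray ID */
	repr = ice_repr_add(foo->vsi->back, foo->vsi, foo->hw_addr);
	if (IS_ERR(repr))
		return PTR_ERR(repr);

	/* device-specific setup (netdev registration, ops, ...) goes here */
	foo->repr_id = repr->id;
	return 0;
}

static void ice_repr_rem_foo(struct ice_foo_port *foo)
{
	struct ice_repr *repr = xa_load(&foo->vsi->back->eswitch.reprs,
					foo->repr_id);

	if (!repr)
		return;

	unregister_netdev(repr->netdev);
	ice_repr_rem(&foo->vsi->back->eswitch.reprs, repr);
}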

The bridge offload code needs to look up a representor pointer based on a
VSI. Implement a helper function to achieve that.
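
As an illustrative sketch (not part of the patch): a caller that previously
chased vsi->vf->repr can now resolve the representor through the VSI, the way
the bridge deinit path does after this change, and ice_eswitch_update_repr()
is keyed by the representor ID stored in the VF. The caller functions shown
here are hypothetical; ice_repr_get_by_vsi() and the repr_id member are real.

/* Illustrative only: hypothetical callers of the new helpers. */
static void example_detach_br_port(struct ice_vsi *vsi)
{
	struct ice_repr *repr = ice_repr_get_by_vsi(vsi);

	if (repr)
		repr->br_port = NULL;
}

static void example_refresh_repr(struct ice_vf *vf, struct ice_vsi *vsi)
{
	/* representors are now looked up by ID, not via a vf->repr pointer */
	ice_eswitch_update_repr(vf->repr_id, vsi);
}
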
Reviewed-by: Piotr Raczynski <piotr.raczynski@intel.com>
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent e4c46abc
@@ -285,17 +285,22 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 /**
  * ice_eswitch_update_repr - reconfigure port representor
- * @repr: pointer to repr struct
+ * @repr_id: representor ID
  * @vsi: VSI for which port representor is configured
  */
-void ice_eswitch_update_repr(struct ice_repr *repr, struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
+	struct ice_repr *repr;
 	int ret;
 
 	if (!ice_is_switchdev_running(pf))
 		return;
 
+	repr = xa_load(&pf->eswitch.reprs, repr_id);
+	if (!repr)
+		return;
+
 	repr->src_vsi = vsi;
 	repr->dst->u.port_info.port_id = vsi->vsi_num;
...
@@ -17,7 +17,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 			  struct netlink_ext_ack *extack);
 bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(struct ice_repr *repr, struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -35,7 +35,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
 			   struct ice_tx_offload_params *off) { }
 static inline void
-ice_eswitch_update_repr(struct ice_repr *repr, struct ice_vsi *vsi) { }
+ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
 static inline int ice_eswitch_configure(struct ice_pf *pf)
 {
...
@@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
 		ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
 	}
 
-	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+	if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
 		vsi->back->br_port = NULL;
-	else if (vsi->vf && vsi->vf->repr)
-		vsi->vf->repr->br_port = NULL;
+	} else {
+		struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+
+		if (repr)
+			repr->br_port = NULL;
+	}
 
 	xa_erase(&bridge->ports, br_port->vsi_idx);
 	ice_eswitch_br_port_vlans_flush(br_port);
...
@@ -519,16 +519,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
 {
 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
 	struct ice_pf *pf = q_vector->vsi->back;
-	struct ice_vf *vf;
-	unsigned int bkt;
+	struct ice_repr *repr;
+	unsigned long id;
 
 	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
 		return IRQ_HANDLED;
 
-	rcu_read_lock();
-	ice_for_each_vf_rcu(pf, bkt, vf)
-		napi_schedule(&vf->repr->q_vector->napi);
-	rcu_read_unlock();
+	xa_for_each(&pf->eswitch.reprs, id, repr)
+		napi_schedule(&repr->q_vector->napi);
 
 	return IRQ_HANDLED;
 }
...
@@ -14,7 +14,7 @@
  */
 static int ice_repr_get_sw_port_id(struct ice_repr *repr)
 {
-	return repr->vf->pf->hw.port_info->lport;
+	return repr->src_vsi->back->hw.port_info->lport;
 }
 
 /**
@@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
 		return -EOPNOTSUPP;
 
 	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
-		       repr->vf->vf_id);
+		       repr->id);
 	if (res <= 0)
 		return -EOPNOTSUPP;
 	return 0;
@@ -279,24 +279,72 @@ ice_repr_reg_netdev(struct net_device *netdev)
 }
 
 /**
- * ice_repr_add - add representor for VF
- * @vf: pointer to VF structure
+ * ice_repr_rem - remove representor from VF
+ * @reprs: xarray storing representors
+ * @repr: pointer to representor structure
  */
-static int ice_repr_add(struct ice_vf *vf)
+static void ice_repr_rem(struct xarray *reprs, struct ice_repr *repr)
+{
+	xa_erase(reprs, repr->id);
+	kfree(repr->q_vector);
+	free_netdev(repr->netdev);
+	kfree(repr);
+}
+
+static void ice_repr_rem_vf(struct ice_vf *vf)
+{
+	struct ice_repr *repr = xa_load(&vf->pf->eswitch.reprs, vf->repr_id);
+
+	if (!repr)
+		return;
+
+	unregister_netdev(repr->netdev);
+	ice_repr_rem(&vf->pf->eswitch.reprs, repr);
+	ice_devlink_destroy_vf_port(vf);
+	ice_virtchnl_set_dflt_ops(vf);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+	struct devlink *devlink;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf)
+		ice_repr_rem_vf(vf);
+
+	/* since all port representors are destroyed, there is
+	 * no point in keeping the nodes
+	 */
+	devlink = priv_to_devlink(pf);
+	devl_lock(devlink);
+	devl_rate_nodes_destroy(devlink);
+	devl_unlock(devlink);
+}
+
+/**
+ * ice_repr_add - add representor for generic VSI
+ * @pf: pointer to PF structure
+ * @src_vsi: pointer to VSI structure of device to represent
+ * @parent_mac: device MAC address
+ */
+static struct ice_repr *
+ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
 {
 	struct ice_q_vector *q_vector;
 	struct ice_netdev_priv *np;
 	struct ice_repr *repr;
-	struct ice_vsi *vsi;
 	int err;
 
-	vsi = ice_get_vf_vsi(vf);
-	if (!vsi)
-		return -EINVAL;
-
 	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
 	if (!repr)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
 	if (!repr->netdev) {
@@ -304,10 +352,7 @@ static int ice_repr_add(struct ice_vf *vf)
 		goto err_alloc;
 	}
 
-	repr->src_vsi = vsi;
-	repr->vf = vf;
-	repr->q_id = vf->vf_id;
-	vf->repr = repr;
+	repr->src_vsi = src_vsi;
 
 	np = netdev_priv(repr->netdev);
 	np->repr = repr;
@@ -318,14 +363,47 @@ static int ice_repr_add(struct ice_vf *vf)
 	}
 	repr->q_vector = q_vector;
 
-	err = xa_alloc(&vf->pf->eswitch.reprs, &repr->id, repr,
-		       xa_limit_32b, GFP_KERNEL);
+	err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
+		       XA_LIMIT(1, INT_MAX), GFP_KERNEL);
 	if (err)
 		goto err_xa_alloc;
 
+	repr->q_id = repr->id;
+	ether_addr_copy(repr->parent_mac, parent_mac);
+
+	return repr;
+
+err_xa_alloc:
+	kfree(repr->q_vector);
+err_alloc_q_vector:
+	free_netdev(repr->netdev);
+err_alloc:
+	kfree(repr);
+	return ERR_PTR(err);
+}
+
+static int ice_repr_add_vf(struct ice_vf *vf)
+{
+	struct ice_repr *repr;
+	struct ice_vsi *vsi;
+	int err;
+
+	vsi = ice_get_vf_vsi(vf);
+	if (!vsi)
+		return -EINVAL;
+
 	err = ice_devlink_create_vf_port(vf);
 	if (err)
-		goto err_devlink;
+		return err;
+
+	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
+	if (IS_ERR(repr)) {
+		err = PTR_ERR(repr);
+		goto err_repr_add;
+	}
+
+	vf->repr_id = repr->id;
+	repr->vf = vf;
 
 	repr->netdev->min_mtu = ETH_MIN_MTU;
 	repr->netdev->max_mtu = ICE_MAX_MTU;
@@ -336,73 +414,17 @@ static int ice_repr_add(struct ice_vf *vf)
 	if (err)
 		goto err_netdev;
 
-	ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
-
 	ice_virtchnl_set_repr_ops(vf);
 
 	return 0;
 
 err_netdev:
+	ice_repr_rem(&vf->pf->eswitch.reprs, repr);
+err_repr_add:
 	ice_devlink_destroy_vf_port(vf);
-err_devlink:
-	xa_erase(&vf->pf->eswitch.reprs, repr->id);
-err_xa_alloc:
-	kfree(repr->q_vector);
-	vf->repr->q_vector = NULL;
-err_alloc_q_vector:
-	free_netdev(repr->netdev);
-	repr->netdev = NULL;
-err_alloc:
-	kfree(repr);
-	vf->repr = NULL;
 	return err;
 }
 
-/**
- * ice_repr_rem - remove representor from VF
- * @vf: pointer to VF structure
- */
-static void ice_repr_rem(struct ice_vf *vf)
-{
-	struct ice_repr *repr = vf->repr;
-
-	if (!repr)
-		return;
-
-	kfree(repr->q_vector);
-	unregister_netdev(repr->netdev);
-	ice_devlink_destroy_vf_port(vf);
-	xa_erase(&vf->pf->eswitch.reprs, repr->id);
-	free_netdev(repr->netdev);
-	kfree(repr);
-	vf->repr = NULL;
-
-	ice_virtchnl_set_dflt_ops(vf);
-}
-
-/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
-	struct devlink *devlink;
-	struct ice_vf *vf;
-	unsigned int bkt;
-
-	lockdep_assert_held(&pf->vfs.table_lock);
-
-	ice_for_each_vf(pf, bkt, vf)
-		ice_repr_rem(vf);
-
-	/* since all port representors are destroyed, there is
-	 * no point in keeping the nodes
-	 */
-	devlink = priv_to_devlink(pf);
-	devl_lock(devlink);
-	devl_rate_nodes_destroy(devlink);
-	devl_unlock(devlink);
-}
-
 /**
  * ice_repr_add_for_all_vfs - add port representor for all VFs
  * @pf: pointer to PF structure
@@ -417,7 +439,7 @@ int ice_repr_add_for_all_vfs(struct ice_pf *pf)
 	lockdep_assert_held(&pf->vfs.table_lock);
 
 	ice_for_each_vf(pf, bkt, vf) {
-		err = ice_repr_add(vf);
+		err = ice_repr_add_vf(vf);
 		if (err)
 			goto err;
 	}
@@ -437,6 +459,14 @@ int ice_repr_add_for_all_vfs(struct ice_pf *pf)
 	return err;
 }
 
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+{
+	if (!vsi->vf)
+		return NULL;
+
+	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+}
+
 /**
  * ice_repr_start_tx_queues - start Tx queues of port representor
  * @repr: pointer to repr structure
...
@@ -32,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
 struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
 bool ice_is_port_repr_netdev(const struct net_device *netdev);
 
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
+
 #endif
@@ -928,7 +928,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 		goto out_unlock;
 	}
 
-	ice_eswitch_update_repr(vf->repr, vsi);
+	ice_eswitch_update_repr(vf->repr_id, vsi);
 
 	/* if the VF has been reset allow it to come up again */
 	ice_mbx_clear_malvf(&vf->mbx_info);
...
@@ -130,7 +130,7 @@
 	struct ice_mdd_vf_events mdd_tx_events;
 	DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
 
-	struct ice_repr *repr;
+	unsigned long repr_id;
 
 	const struct ice_virtchnl_ops *virtchnl_ops;
 	const struct ice_vf_ops *vf_ops;
...