Commit 1a1c40df authored by Grzegorz Nitka's avatar Grzegorz Nitka Committed by Tony Nguyen

ice: set and release switchdev environment

The switchdev environment has to be set up when the user creates VFs
and the eswitch mode is switchdev. It is released when the user
deletes all VFs.

Data path in this implementation is based on control plane VSI.
This VSI is used to pass traffic from port representors to
corresponding VFs and vice versa. Default TX rule has to be
added to forward packet to control plane VSI. This will redirect
packets from VFs which don't match other rules to control plane
VSI.

On RX side default rule is added on uplink VSI to receive all
traffic that doesn't match other rules. When setting switchdev
environment all other rules from VFs should be removed. Packet to
VFs will be forwarded by control plane VSI.

As a VF without any MAC rules can't send any packets because of the
antispoof mechanism, VSI antispoofing should be turned off on each VF.

To send packet from representor to correct VSI, destination VSI
field in TX descriptor will have to be filled. Allow that by
setting destination override bit in control plane VSI security config.

Packets from VFs will be received on the control plane VSI. The driver
decides which netdev to forward each packet to; the decision is
made based on the src_vsi field from the descriptor. There is a target
netdev list in the control plane VSI struct which maps src_vsi
numbers to netdevs.
Co-developed-by: default avatarMichal Swiatkowski <michal.swiatkowski@linux.intel.com>
Signed-off-by: default avatarMichal Swiatkowski <michal.swiatkowski@linux.intel.com>
Signed-off-by: default avatarGrzegorz Nitka <grzegorz.nitka@intel.com>
Tested-by: default avatarSandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: default avatarTony Nguyen <anthony.l.nguyen@intel.com>
parent bd676b29
...@@ -351,6 +351,8 @@ struct ice_vsi { ...@@ -351,6 +351,8 @@ struct ice_vsi {
u16 num_xdp_txq; /* Used XDP queues */ u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
struct net_device **target_netdevs;
/* setup back reference, to which aggregator node this VSI /* setup back reference, to which aggregator node this VSI
* corresponds to * corresponds to
*/ */
...@@ -410,6 +412,12 @@ enum ice_pf_flags { ...@@ -410,6 +412,12 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */ ICE_PF_FLAGS_NBITS /* must be last */
}; };
/* switchdev (eswitch) state kept per PF */
struct ice_switchdev_info {
	struct ice_vsi *control_vsi;	/* control plane VSI passing traffic between reprs and VFs */
	struct ice_vsi *uplink_vsi;	/* main (uplink) VSI; default Rx rule target */
	bool is_running;		/* true while the switchdev environment is configured */
};
struct ice_agg_node { struct ice_agg_node {
u32 agg_id; u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE 64 #define ICE_MAX_VSIS_IN_AGG_NODE 64
...@@ -508,6 +516,8 @@ struct ice_pf { ...@@ -508,6 +516,8 @@ struct ice_pf {
struct ice_link_default_override_tlv link_dflt_override; struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */ struct ice_lag *lag; /* Link Aggregation information */
struct ice_switchdev_info switchdev;
#define ICE_INVALID_AGG_NODE_ID 0 #define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1 #define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32 #define ICE_MAX_PF_AGG_NODES 32
...@@ -617,6 +627,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) ...@@ -617,6 +627,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
return pf->vsi[pf->ctrl_vsi_idx]; return pf->vsi[pf->ctrl_vsi_idx];
} }
/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 *
 * The flag is set in ice_eswitch_configure() and cleared in
 * ice_eswitch_release().
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->switchdev.is_running;
}
/** /**
* ice_set_sriov_cap - enable SRIOV in PF flags * ice_set_sriov_cap - enable SRIOV in PF flags
* @pf: PF struct * @pf: PF struct
...@@ -645,6 +667,7 @@ bool netif_is_ice(struct net_device *dev); ...@@ -645,6 +667,7 @@ bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi); int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev); void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_txq_count(struct ice_pf *pf);
......
...@@ -2,9 +2,371 @@ ...@@ -2,9 +2,371 @@
/* Copyright (C) 2019-2021, Intel Corporation. */ /* Copyright (C) 2019-2021, Intel Corporation. */
#include "ice.h" #include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h" #include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h" #include "ice_devlink.h"
/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 *
 * Returns 0 on success, -ENODEV on failure (with any partial
 * configuration rolled back).
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_port_info *pi = pf->hw.port_info;
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);

	/* drop the uplink's own filters; unmatched traffic must hit the
	 * default rules installed below (restored again on any error)
	 */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
		goto err_def_rx;

	/* default Rx rule: unmatched traffic is received on the uplink VSI */
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	/* default Tx rule: forward unmatched VF packets to the control VSI */
	if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
		goto err_def_tx;

	/* allow destination-VSI override in Tx descriptors on both VSIs */
	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
					    ICE_FLTR_TX,
					    ICE_SINGLE_ACT_LB_ENABLE))
		goto err_update_action;

	return 0;

	/* unwind strictly in reverse order of setup */
err_update_action:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	/* restore the uplink MAC/broadcast filters removed above */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}
/**
 * ice_eswitch_remap_ring - reconfigure ring of switchdev ctrl VSI
 * @ring: ring to attach
 * @q_vector: q_vector that will serve this ring
 * @netdev: net device this ring will be associated with
 */
static void
ice_eswitch_remap_ring(struct ice_ring *ring, struct ice_q_vector *q_vector,
		       struct net_device *netdev)
{
	ring->netdev = netdev;
	ring->q_vector = q_vector;
	ring->next = NULL;
}
/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev the number of allocated Tx and Rx rings is equal. Each
 * port representor gets exactly one Tx/Rx ring pair, so the number of
 * ring pairs equals the number of VFs. This function attaches every
 * ring pair to the q_vector and netdev of its representor.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q;

	ice_for_each_txq(vsi, q) {
		struct ice_ring *tx = vsi->tx_rings[q];
		struct ice_ring *rx = vsi->rx_rings[q];
		struct ice_repr *repr = pf->vf[q].repr;
		struct ice_q_vector *vec = repr->q_vector;

		vec->vsi = vsi;
		vec->reg_idx = vsi->q_vectors[0]->reg_idx;

		vec->num_ring_tx = 1;
		vec->tx.ring = tx;
		ice_eswitch_remap_ring(tx, vec, repr->netdev);
		/* from the OS stack's perspective each representor netdev
		 * has only one queue, so index it as 0
		 */
		tx->q_index = 0;

		vec->num_ring_rx = 1;
		vec->rx.ring = rx;
		ice_eswitch_remap_ring(rx, vec, repr->netdev);
	}
}
/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 *
 * Returns 0 on success, -ENODEV on failure; on failure every VF that was
 * already configured is rolled back to its previous state.
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		/* VF traffic goes through the control VSI in switchdev mode,
		 * so drop the VF's own filters (re-added on every error path)
		 */
		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		/* without its MAC rules the VF could not transmit at all
		 * due to antispoof, so turn antispoof off for its VSI
		 */
		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			goto err;
		}

		if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		/* track the highest VSI number to size target_netdevs[] */
		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

	/* lookup table indexed by src_vsi number, used to pick the target
	 * representor netdev for packets received on the control VSI
	 */
	kfree(ctrl_vsi->target_netdevs);

	ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
					   sizeof(*ctrl_vsi->target_netdevs),
					   GFP_KERNEL);
	if (!ctrl_vsi->target_netdevs)
		goto err;

	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	/* unwind all VFs set up before the one that failed (the failing VF
	 * was already cleaned up inline above)
	 */
	for (i = i - 1; i >= 0; i--) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
	}

	return -ENODEV;
}
/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;

	/* free the src_vsi -> netdev lookup table */
	kfree(ctrl_vsi->target_netdevs);

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		/* revert ice_eswitch_setup_reprs(): re-enable antispoof,
		 * free the metadata dst, restore the VF MAC filters and
		 * remove the representor NAPI context
		 */
		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}
/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	/* mirror of ice_eswitch_setup_env(): drop the dest-VSI override on
	 * both VSIs, remove the default Tx and Rx rules and give the uplink
	 * its MAC/broadcast filters back
	 */
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 *
 * NOTE(review): currently a stub that always returns NULL, which makes
 * ice_eswitch_enable_switchdev() fail with -ENODEV; presumably the real
 * control VSI allocation lands in a follow-up change — confirm.
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return NULL;
}
/**
* ice_eswitch_napi_enable - enable NAPI for all port representors
* @pf: pointer to PF structure
*/
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
int i;
ice_for_each_vf(pf, i)
napi_enable(&pf->vf[i].repr->q_vector->napi);
}
/**
* ice_eswitch_napi_disable - disable NAPI for all port representors
* @pf: pointer to PF structure
*/
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
int i;
ice_for_each_vf(pf, i)
napi_disable(&pf->vf[i].repr->q_vector->napi);
}
/**
 * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
 * @vsi: VSI to setup rxdid on
 * @rxdid: flex descriptor id
 */
static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
{
	struct ice_hw *hw = &vsi->back->hw;
	int q;

	/* program the requested flex descriptor id on every Rx queue */
	ice_for_each_rxq(vsi, q) {
		u16 pf_q = vsi->rxq_map[vsi->rx_rings[q]->q_index];

		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	}
}
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Returns 0 on success, -ENODEV on any failure.
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_setup_reprs:
	/* NOTE(review): if ice_vsi_open() fails, the per-VF state created by
	 * ice_eswitch_setup_reprs() (metadata dsts, NAPI contexts, antispoof
	 * off) is not rolled back here — verify whether a release_reprs call
	 * is needed on this path
	 */
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	/* teardown mirrors ice_eswitch_enable_switchdev() in reverse */
	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}
/** /**
* ice_eswitch_mode_set - set new eswitch mode * ice_eswitch_mode_set - set new eswitch mode
* @devlink: pointer to devlink structure * @devlink: pointer to devlink structure
...@@ -60,3 +422,35 @@ int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) ...@@ -60,3 +422,35 @@ int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
*mode = pf->eswitch_mode; *mode = pf->eswitch_mode;
return 0; return 0;
} }
/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 *
 * No-op in legacy mode; otherwise tears down the switchdev environment
 * and marks it as not running.
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode != DEVLINK_ESWITCH_MODE_LEGACY) {
		ice_eswitch_disable_switchdev(pf);
		pf->switchdev.is_running = false;
	}
}
/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 *
 * Sets up the switchdev environment unless the eswitch is in legacy mode
 * or switchdev is already running. Returns 0 on success or when nothing
 * needs to be done, a negative error code otherwise.
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int err;

	if (pf->switchdev.is_running ||
	    pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return 0;

	err = ice_eswitch_enable_switchdev(pf);
	if (err)
		return err;

	pf->switchdev.is_running = true;
	return 0;
}
...@@ -7,14 +7,23 @@ ...@@ -7,14 +7,23 @@
#include <net/devlink.h> #include <net/devlink.h>
#ifdef CONFIG_ICE_SWITCHDEV #ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_release(struct ice_pf *pf);
int ice_eswitch_configure(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode, ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack); struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
#else /* CONFIG_ICE_SWITCHDEV */ #else /* CONFIG_ICE_SWITCHDEV */
static inline int static inline void ice_eswitch_release(struct ice_pf *pf) { }
ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
return -EOPNOTSUPP;
}
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{ {
return DEVLINK_ESWITCH_MODE_LEGACY; return DEVLINK_ESWITCH_MODE_LEGACY;
} }
...@@ -26,8 +35,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, ...@@ -26,8 +35,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static inline bool static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{ {
return false; return false;
} }
......
...@@ -46,7 +46,6 @@ static DEFINE_IDA(ice_aux_ida); ...@@ -46,7 +46,6 @@ static DEFINE_IDA(ice_aux_ida);
static struct workqueue_struct *ice_wq; static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops; static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
...@@ -6159,7 +6158,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi) ...@@ -6159,7 +6158,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
* *
* Returns 0 on success, negative value on error * Returns 0 on success, negative value on error
*/ */
static int ice_vsi_open(struct ice_vsi *vsi) int ice_vsi_open(struct ice_vsi *vsi)
{ {
char int_name[ICE_INT_NAME_STR_LEN]; char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
...@@ -6184,6 +6183,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) ...@@ -6184,6 +6183,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
if (err) if (err)
goto err_setup_rx; goto err_setup_rx;
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */ /* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err) if (err)
...@@ -6192,6 +6192,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) ...@@ -6192,6 +6192,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
if (err) if (err)
goto err_set_qs; goto err_set_qs;
}
err = ice_up_complete(vsi); err = ice_up_complete(vsi);
if (err) if (err)
......
...@@ -265,3 +265,15 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf) ...@@ -265,3 +265,15 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
ice_vc_set_dflt_vf_ops(&vf->vc_ops); ice_vc_set_dflt_vf_ops(&vf->vc_ops);
} }
} }
/**
 * ice_repr_set_traffic_vsi - set traffic VSI for port representor
 * @repr: representor on which the VSI will be set
 * @vsi: pointer to VSI that will be used by the port representor to pass traffic
 */
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
{
	struct ice_netdev_priv *priv = netdev_priv(repr->netdev);

	priv->vsi = vsi;
}
...@@ -18,6 +18,8 @@ struct ice_repr { ...@@ -18,6 +18,8 @@ struct ice_repr {
int ice_repr_add_for_all_vfs(struct ice_pf *pf); int ice_repr_add_for_all_vfs(struct ice_pf *pf);
void ice_repr_rem_from_all_vfs(struct ice_pf *pf); void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
bool ice_is_port_repr_netdev(struct net_device *netdev); bool ice_is_port_repr_netdev(struct net_device *netdev);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment