Commit dedb0809 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-10-20

Sudheer Mogilappagari says:

This series introduces initial support for Application Device Queues (ADQ)
in the ice driver. ADQ provides traffic isolation for application flows in
hardware and the ability to steer traffic to a given traffic class, which
helps align NIC queues with application threads.

Traffic classes are configured using the mqprio framework of the tc
command and are mapped to HW channels (VSIs) in the driver. The queue set
of each traffic class is managed by the corresponding VSI. Each traffic
class can be configured with bandwidth rate limits, which are offloaded to
the hardware through the mqprio framework by specifying the mode option as
'channel' and the shaper option as 'bw_rlimit'.
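
For example, per-channel rate limits can be requested at qdisc creation
time via the 'bw_rlimit' shaper. A sketch (the queue counts and rates here
are illustrative, not taken from this series):

$ tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
        queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit \
        min_rate 1Gbit 2Gbit max_rate 3Gbit 4Gbit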

Next, application flows can be steered into a given traffic class using
the "tc filter" command. The option "skip_sw hw_tc x" requests hardware
offload of the filter and steers the matching traffic into the specified
TC. Non-matching traffic flows through TC0.

When the channel configuration is removed, the queue configuration is
reset to the default and the filters configured on individual traffic
classes are deleted.

example:
$ ethtool -K eth0 hw-tc-offload on

Configure 3 traffic classes and map priorities 0, 1 and 2 to TC0, TC1 and
TC2 respectively. TC0 has 2 queues starting at offset 0, TC1 has 8 queues
starting at offset 2, and TC2 has 4 queues starting at offset 10. Enable
hardware offload of the channels.

$ tc qdisc add dev eth0 root mqprio num_tc 3 map 0 1 2 queues \
        2@0 8@2 4@10 hw 1 mode channel

$ tc qdisc show dev eth0
qdisc mqprio 8001: root  tc 3 map 0 1 2 0 0 0 0 0 0 0 0 0 0 0 0 0
             queues:(0:1) (2:9) (10:13)
             mode:channel

Configure two filters that match on destination IP address and destination
TCP port, redirecting the matched traffic to TC1 and TC2.
$ tc qdisc add dev eth0 clsact

$ tc filter add dev eth0 protocol ip ingress prio 1 flower \
  dst_ip 192.168.1.1/32 ip_proto tcp dst_port 80 \
  skip_sw hw_tc 1
$ tc filter add dev eth0 protocol ip ingress prio 1 flower \
  dst_ip 192.168.1.1/32 ip_proto tcp dst_port 5001 \
  skip_sw hw_tc 2

$ tc filter show dev eth0 ingress

Delete the traffic class configuration:
$ sudo tc qdisc del dev eth0 root
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7d4f4d14 9fea7498
......@@ -38,6 +38,10 @@
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
......@@ -55,6 +59,7 @@
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
......@@ -104,6 +109,10 @@
#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
#define ICE_CHNL_START_TC 1
#define ICE_CHNL_MAX_TC 16
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
......@@ -121,6 +130,13 @@
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT 500
/* User can specify BW in Kbit/Mbit/Gbit and the OS converts it to bytes;
* use this divisor to convert the user-specified BW limit into Kbps
*/
#define ICE_BW_KBPS_DIVISOR 125
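To make the conversion concrete (an illustrative sketch, not part of this
diff; div_u64 comes from linux/math64.h): the stack passes mqprio rates in
bytes per second, and 1 Kbit/s equals 125 bytes/s, hence the divisor:

/* e.g. a 1 Gbit/s user rate arrives from the stack as 125,000,000 B/s */
u64 rate_bytes = 125000000ULL;
u64 bw_kbps = div_u64(rate_bytes, ICE_BW_KBPS_DIVISOR); /* 1,000,000 Kbps */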
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
......@@ -145,6 +161,9 @@
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
#define ice_for_each_chnl_tc(i) \
for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
......@@ -172,6 +191,21 @@ enum ice_feature {
DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
struct ice_channel {
struct list_head list; /* node in the VSI's ch_list */
u8 type;
u16 sw_id;
u16 base_q; /* channel's base queue index, relative to the PF VSI */
u16 num_rxq;
u16 num_txq;
u16 vsi_num; /* HW VSI number of the channel VSI */
u8 ena_tc;
struct ice_aqc_vsi_props info;
u64 max_tx_rate;
u64 min_tx_rate;
struct ice_vsi *ch_vsi; /* back-pointer to the channel's VSI */
};
struct ice_txq_meta {
u32 q_teid; /* Tx-scheduler element identifier */
u16 q_id; /* Entry in VSI's txq_map bitmap */
......@@ -189,7 +223,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
u8 ena_tc; /* Tx map */
u16 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
......@@ -361,6 +395,35 @@ struct ice_vsi {
struct net_device **target_netdevs;
struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
/* Channel Specific Fields */
struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
u16 cnt_q_avail;
u16 next_base_q; /* next queue to be used for channel setup */
struct list_head ch_list;
u16 num_chnl_rxq;
u16 num_chnl_txq;
u16 ch_rss_size;
u16 num_chnl_fltr;
/* store away the RSS size info before configuring ADQ channels so that
* it can be used after tc-qdisc delete to restore the RSS settings to
* what they were before
*/
u16 orig_rss_size;
/* this keeps track of all enabled TCs, with and without DCB
* and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
* information
*/
u8 all_numtc;
u16 all_enatc;
/* store away TC info, to be used for rebuild logic */
u8 old_numtc;
u16 old_ena_tc;
struct ice_channel *ch;
/* back reference indicating which aggregator node this VSI
* corresponds to
*/
......@@ -389,6 +452,8 @@ struct ice_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
......@@ -407,6 +472,7 @@ enum ice_pf_flags {
ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
......@@ -519,7 +585,10 @@ struct ice_pf {
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
* action is "hw_tc <tc_num>")
*/
u16 num_dmac_chnl_fltrs;
struct hlist_head tc_flower_fltr_list;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
......@@ -543,6 +612,17 @@ struct ice_netdev_priv {
struct ice_repr *repr;
};
/**
* ice_vector_ch_enabled
* @qv: pointer to q_vector
*
* This function returns true if the vector is channel-enabled, otherwise false
*/
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
return !!qv->ch; /* Enable it to run with TC */
}
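A hypothetical caller (illustrative only; example_poll_hint is not part of
this series) would use the helper to tell channel vectors apart:

static void example_poll_hint(struct ice_q_vector *qv)
{
	/* a channel vector carries a back-pointer to its ice_channel */
	if (ice_vector_ch_enabled(qv))
		pr_debug("vector belongs to channel VSI %u\n", qv->ch->vsi_num);
}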
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
......@@ -704,6 +784,30 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
/**
* ice_is_adq_active - any active ADQs
* @pf: pointer to PF
*
* This function returns true if any ADQs are configured, which is
* determined by looking at the VSI type (which should be VSI_PF), numtc,
* and the TC_MQPRIO flag; otherwise it returns false
*/
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
struct ice_vsi *vsi;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return false;
/* is ADQ configured */
if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
return true;
return false;
}
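Callers use this as a guard against reconfiguration while ADQ is active; a
sketch mirroring the ethtool hooks changed later in this series:

if (ice_is_adq_active(pf)) {
	netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
	return -EOPNOTSUPP;
}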
bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
......
......@@ -213,6 +213,9 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
if (ring->ch)
return ring->q_index - ring->ch->base_q;
/* The idea here is that we subtract the queue offset of the TC that the
* ring belongs to from the ring's absolute queue index, and as a result
* we get the queue's index within that TC.
......@@ -300,7 +303,10 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_LB:
case ICE_VSI_CTRL:
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
if (ring->ch)
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
else
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
......@@ -315,7 +321,10 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
}
/* make sure the context is associated with the right VSI */
tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
if (ring->ch)
tlan_ctx->src_vsi = ring->ch->vsi_num;
else
tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
/* Restrict Tx timestamps to the PF VSI */
switch (vsi->type) {
......@@ -747,6 +756,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
......@@ -785,8 +795,14 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
}
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (ch)
status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
ring->q_handle, 1, qg_buf, buf_len,
NULL);
else
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
ring->q_handle, 1, qg_buf, buf_len,
NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
ice_stat_str(status));
......@@ -967,6 +983,7 @@ void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_channel *ch = ring->ch;
u8 tc;
if (IS_ENABLED(CONFIG_DCB))
......@@ -977,6 +994,11 @@ ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
txq_meta->q_id = ring->reg_idx;
txq_meta->q_teid = ring->txq_teid;
txq_meta->q_handle = ring->q_handle;
txq_meta->vsi_idx = vsi->idx;
txq_meta->tc = tc;
if (ch) {
txq_meta->vsi_idx = ch->ch_vsi->idx;
txq_meta->tc = 0;
} else {
txq_meta->vsi_idx = vsi->idx;
txq_meta->tc = tc;
}
}
......@@ -4,53 +4,11 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
......@@ -178,6 +136,67 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
return ret;
}
/**
* ice_get_first_droptc - returns the TC number of the first drop TC
* @vsi: used to find the first drop TC
*
* This function returns the first drop TC. When DCB is enabled, the first
* drop TC is derived from the enabled-TC and PFC-enabled bitmaps;
* otherwise it returns 0, since TC0 is the only TC without DCB
*/
static u8 ice_get_first_droptc(struct ice_vsi *vsi)
{
struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
struct device *dev = ice_pf_to_dev(vsi->back);
u8 num_tc, ena_tc_map, pfc_ena_map;
u8 i;
num_tc = ice_dcb_get_num_tc(cfg);
/* get bitmap of enabled TCs */
ena_tc_map = ice_dcb_get_ena_tc(cfg);
/* get bitmap of PFC enabled TCs */
pfc_ena_map = cfg->pfc.pfcena;
/* get first TC that is not PFC enabled */
for (i = 0; i < num_tc; i++) {
if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) {
dev_dbg(dev, "first drop tc = %d\n", i);
return i;
}
}
dev_dbg(dev, "first drop tc = 0\n");
return 0;
}
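For example (illustrative values): with num_tc = 3, ena_tc_map = 0x7 and
pfc_ena_map = 0x3, TC0 and TC1 are PFC-enabled, so the loop above returns
2 as the first drop TC.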
/**
* ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration
* @vsi: pointer to the VSI instance
*/
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
{
struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
switch (vsi->type) {
case ICE_VSI_PF:
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
case ICE_VSI_CTRL:
case ICE_VSI_LB:
default:
vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
vsi->tc_cfg.numtc = 1;
}
}
/**
* ice_dcb_get_tc - Get the TC associated with the queue
* @vsi: ptr to the VSI
......@@ -218,11 +237,68 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
qoffset = vsi->tc_cfg.tc_info[n].qoffset;
qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
for (i = qoffset; i < (qoffset + qcount); i++) {
tx_ring = vsi->tx_rings[i];
rx_ring = vsi->rx_rings[i];
tx_ring->dcb_tc = n;
rx_ring->dcb_tc = n;
for (i = qoffset; i < (qoffset + qcount); i++)
vsi->tx_rings[i]->dcb_tc = n;
qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
for (i = qoffset; i < (qoffset + qcount); i++)
vsi->rx_rings[i]->dcb_tc = n;
}
/* applicable only if "all_enatc" is set, which is set by the
* setup_tc method as part of configuring channels
*/
if (vsi->all_enatc) {
u8 first_droptc = ice_get_first_droptc(vsi);
/* When DCB is configured, TC for ADQ queues (which are really
* PF queues) should be the first drop TC of the main VSI
*/
ice_for_each_chnl_tc(n) {
if (!(vsi->all_enatc & BIT(n)))
break;
qoffset = vsi->mqprio_qopt.qopt.offset[n];
qcount = vsi->mqprio_qopt.qopt.count[n];
for (i = qoffset; i < (qoffset + qcount); i++) {
vsi->tx_rings[i]->dcb_tc = first_droptc;
vsi->rx_rings[i]->dcb_tc = first_droptc;
}
}
}
}
/**
* ice_dcb_ena_dis_vsi - enable/disable certain VSIs for DCB config/reconfig
* @pf: pointer to the PF instance
* @ena: true to enable VSIs, false to disable
* @locked: true if caller holds RTNL lock, false otherwise
*
* Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
* and CHNL need to be brought down. Following completion of DCB configuration
* the VSIs that were downed need to be brought up again. This helper function
* does both.
*/
static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
{
int i;
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
if (!vsi)
continue;
switch (vsi->type) {
case ICE_VSI_CHNL:
case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
if (ena)
ice_ena_vsi(vsi, locked);
else
ice_dis_vsi(vsi, locked);
break;
default:
continue;
}
}
}
......@@ -331,7 +407,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
*/
if (!locked)
rtnl_lock();
ice_dis_vsi(pf_vsi, true);
/* disable VSIs affected by DCB changes */
ice_dcb_ena_dis_vsi(pf, false, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
......@@ -359,7 +437,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
ice_ena_vsi(pf_vsi, true);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
if (!locked)
rtnl_unlock();
free_cfg:
......@@ -674,6 +753,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ice_dcb_noncontig_cfg(pf);
}
} else if (vsi->type == ICE_VSI_CHNL) {
tc_map = BIT(ice_get_first_droptc(vsi));
} else {
tc_map = ICE_DFLT_TRAFFIC_CLASS;
}
......@@ -684,10 +765,11 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
vsi->idx);
continue;
}
/* no need to proceed with remaining cfg if it is switchdev
* VSI
/* no need to proceed with remaining cfg if it is CHNL
* or switchdev VSI
*/
if (vsi->type == ICE_VSI_SWITCHDEV_CTRL)
if (vsi->type == ICE_VSI_CHNL ||
vsi->type == ICE_VSI_SWITCHDEV_CTRL)
continue;
ice_vsi_map_rings_to_vectors(vsi);
......@@ -862,7 +944,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
struct ice_vsi *pf_vsi;
u8 mib_type;
int ret;
......@@ -938,14 +1019,9 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_dbg(dev, "PF VSI doesn't exist\n");
goto out;
}
rtnl_lock();
ice_dis_vsi(pf_vsi, true);
/* disable VSIs affected by DCB changes */
ice_dcb_ena_dis_vsi(pf, false, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
......@@ -956,7 +1032,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
ice_ena_vsi(pf_vsi, true);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
unlock_rtnl:
rtnl_unlock();
out:
......
......@@ -16,7 +16,6 @@
void ice_dcb_rebuild(struct ice_pf *pf);
int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
......@@ -34,8 +33,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
/**
* ice_find_q_in_range
* @low: start of queue range for a TC i.e. offset of TC
......@@ -69,6 +66,12 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
#else
static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
{
vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
vsi->tc_cfg.numtc = 1;
}
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
......@@ -130,7 +133,6 @@ static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
......@@ -329,7 +329,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID);
return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
}
/**
......
......@@ -3194,6 +3194,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EIO;
}
if (ice_is_adq_active(pf)) {
netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
return -EOPNOTSUPP;
}
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
......@@ -3404,6 +3409,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
if (ice_is_adq_active(pf)) {
netdev_err(dev, "Cannot set channels with ADQ configured.\n");
return -EOPNOTSUPP;
}
if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
return -EOPNOTSUPP;
......
......@@ -51,13 +51,18 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
#ifdef CONFIG_DCB
void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
#endif /* CONFIG_DCB */
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type vsi_type, u16 vf_id);
enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
......@@ -119,6 +124,7 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi);
int ice_get_link_speed_mbps(struct ice_vsi *vsi);
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *));
......
......@@ -2998,6 +2998,43 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
}
}
/**
* ice_sched_save_vsi_bw - save VSI node's BW information
* @pi: port information structure
* @vsi_handle: sw VSI handle
* @tc: traffic class
* @rl_type: rate limit type min, max, or shared
* @bw: bandwidth in Kbps - Kilo bits per sec
*
* Save the BW information of a VSI-type node for post-replay use.
*/
static int
ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
struct ice_vsi_ctx *vsi_ctx;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return -EINVAL;
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
if (!vsi_ctx)
return -EINVAL;
switch (rl_type) {
case ICE_MIN_BW:
ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
break;
case ICE_MAX_BW:
ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
break;
case ICE_SHARED_BW:
ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
break;
default:
return -EINVAL;
}
return 0;
}
/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
* @hw: pointer to the HW struct
......@@ -3875,9 +3912,17 @@ enum ice_status
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
return ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
tc, rl_type, bw);
int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
tc, rl_type, bw);
if (!status) {
mutex_lock(&pi->sched_lock);
status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
mutex_unlock(&pi->sched_lock);
}
return status;
}
/**
......@@ -3894,10 +3939,19 @@ enum ice_status
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
return ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
tc, rl_type,
ICE_SCHED_DFLT_BW);
int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
tc, rl_type,
ICE_SCHED_DFLT_BW);
if (!status) {
mutex_lock(&pi->sched_lock);
status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
ICE_SCHED_DFLT_BW);
mutex_unlock(&pi->sched_lock);
}
return status;
}
/**
......
......@@ -58,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
/* bw_t_info saves aggregator BW information */
struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
/* save aggregator TC bitmap */
DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
......
......@@ -2272,6 +2272,125 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
return status;
}
/**
* ice_mac_fltr_exist - does this MAC filter exist for given VSI
* @hw: pointer to the hardware structure
* @mac: MAC address to be checked (for MAC filter)
* @vsi_handle: check MAC filter for this VSI
*/
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
{
struct ice_fltr_mgmt_list_entry *entry;
struct list_head *rule_head;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
u16 hw_vsi_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
return false;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
sw = hw->switch_info;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
if (!rule_head)
return false;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
mutex_lock(rule_lock);
list_for_each_entry(entry, rule_head, list_entry) {
struct ice_fltr_info *f_info = &entry->fltr_info;
u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
if (is_zero_ether_addr(mac_addr))
continue;
if (f_info->flag != ICE_FLTR_TX ||
f_info->src_id != ICE_SRC_ID_VSI ||
f_info->lkup_type != ICE_SW_LKUP_MAC ||
f_info->fltr_act != ICE_FWD_TO_VSI ||
hw_vsi_id != f_info->fwd_id.hw_vsi_id)
continue;
if (ether_addr_equal(mac, mac_addr)) {
mutex_unlock(rule_lock);
return true;
}
}
mutex_unlock(rule_lock);
return false;
}
/**
* ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
* @hw: pointer to the hardware structure
* @vlan_id: VLAN ID
* @vsi_handle: check VLAN filter for this VSI
*/
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
struct ice_fltr_mgmt_list_entry *entry;
struct list_head *rule_head;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
u16 hw_vsi_id;
if (vlan_id > ICE_MAX_VLAN_ID)
return false;
if (!ice_is_vsi_valid(hw, vsi_handle))
return false;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
sw = hw->switch_info;
rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
if (!rule_head)
return false;
rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
mutex_lock(rule_lock);
list_for_each_entry(entry, rule_head, list_entry) {
struct ice_fltr_info *f_info = &entry->fltr_info;
u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
struct ice_vsi_list_map_info *map_info;
if (entry_vlan_id > ICE_MAX_VLAN_ID)
continue;
if (f_info->flag != ICE_FLTR_TX ||
f_info->src_id != ICE_SRC_ID_VSI ||
f_info->lkup_type != ICE_SW_LKUP_VLAN)
continue;
/* The only allowed filter actions are FWD_TO_VSI/_VSI_LIST */
if (f_info->fltr_act != ICE_FWD_TO_VSI &&
f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
continue;
if (f_info->fltr_act == ICE_FWD_TO_VSI) {
if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
continue;
} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
/* If filter_action is FWD_TO_VSI_LIST, make sure
* that VSI being checked is part of VSI list
*/
if (entry->vsi_count == 1 &&
entry->vsi_list_info) {
map_info = entry->vsi_list_info;
if (!test_bit(vsi_handle, map_info->vsi_map))
continue;
}
}
if (vlan_id == entry_vlan_id) {
mutex_unlock(rule_lock);
return true;
}
}
mutex_unlock(rule_lock);
return false;
}
/**
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
......
......@@ -335,6 +335,8 @@ enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
......
......@@ -303,6 +303,136 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return ret;
}
/**
* ice_add_tc_flower_adv_fltr - add appropriate filter rules
* @vsi: Pointer to VSI
* @tc_fltr: Pointer to TC flower filter structure
*
* This function adds filter rules based on the filter parameters, using
* the advanced recipes supported by the OS package.
*/
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 flags = tc_fltr->flags;
struct ice_vsi *ch_vsi;
struct device *dev;
u16 lkups_cnt = 0;
u16 l4_proto = 0;
int ret = 0;
u16 i = 0;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
return -EOPNOTSUPP;
}
if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
/* get the channel (aka ADQ VSI) */
if (tc_fltr->dest_vsi)
ch_vsi = tc_fltr->dest_vsi;
else
ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
if (i != lkups_cnt) {
ret = -EINVAL;
goto exit;
}
rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
if (!ch_vsi) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
ret = -EINVAL;
goto exit;
}
rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
rule_info.sw_act.vsi_handle = ch_vsi->idx;
rule_info.priority = 7;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
tc_fltr->action.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt);
} else {
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.rx = false;
}
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = tc_fltr->cookie;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
ret = -EIO;
goto exit;
}
/* store the output params, which are needed later for removing
* advanced switch filter
*/
tc_fltr->rid = rule_added.rid;
tc_fltr->rule_id = rule_added.rule_id;
if (tc_fltr->action.tc_class > 0 && ch_vsi) {
/* For PF ADQ, the VSI type is set to ICE_VSI_CHNL, but
* for a PF ADQ filter it is not yet set in tc_fltr,
* hence store the dest_vsi ptr in tc_fltr
*/
if (ch_vsi->type == ICE_VSI_CHNL)
tc_fltr->dest_vsi = ch_vsi;
/* keep track of advanced switch filter for
* destination VSI (channel VSI)
*/
ch_vsi->num_chnl_fltr++;
/* in this case, dest_id is VSI handle (sw handle) */
tc_fltr->dest_id = rule_added.vsi_handle;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
(flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
pf->num_dmac_chnl_fltrs++;
}
dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
lkups_cnt, flags,
tc_fltr->action.tc_class, rule_added.rid,
rule_added.rule_id, rule_added.vsi_handle);
exit:
kfree(list);
return ret;
}
/**
* ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
* @match: Pointer to flow match structure
......@@ -561,10 +691,13 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
return -EOPNOTSUPP;
if (ice_is_eswitch_mode_switchdev(vsi->back))
return ice_eswitch_add_tc_fltr(vsi, fltr);
return -EOPNOTSUPP;
return ice_add_tc_flower_adv_fltr(vsi, fltr);
}
/**
......@@ -581,6 +714,7 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *fltr)
{
int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
struct ice_vsi *main_vsi;
if (tc < 0) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
......@@ -591,13 +725,69 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
return -EINVAL;
}
if (!(vsi->tc_cfg.ena_tc & BIT(tc))) {
if (!(vsi->all_enatc & BIT(tc))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of nonexistent destination");
return -EINVAL;
}
/* Redirect to a TC class or Queue Group */
fltr->action.fltr_act = ICE_FWD_TO_QGRP;
main_vsi = ice_get_main_vsi(vsi->back);
if (!main_vsi || !main_vsi->netdev) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because of invalid netdevice");
return -EINVAL;
}
if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
(fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_SRC_MAC))) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
return -EOPNOTSUPP;
}
/* For ADQ, the filter must include the dest MAC address; otherwise
* unwanted packets with unrelated MAC addresses get delivered to ADQ
* VSIs as long as the remaining filter criteria (such as dest IP address
* and dest/src L4 port) are satisfied. The following code handles:
* 1. For non-tunnel, if the user specifies MAC addresses, use them
* (meaning this code does nothing)
* 2. For non-tunnel, if the user didn't specify a MAC address, add an
* implicit dest MAC equal to the lower netdev's active unicast MAC
* address
*/
if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
main_vsi->netdev->dev_addr);
eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
}
/* validate the specified dest MAC address; make sure it either belongs
* to the lower netdev or to one of its MACVLANs. MACVLAN MAC addresses
* are added as unicast MAC filters destined to the main VSI.
*/
if (!ice_mac_fltr_exist(&main_vsi->back->hw,
fltr->outer_headers.l2_key.dst_mac,
main_vsi->idx)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because legacy MAC filter for specified destination doesn't exist");
return -EINVAL;
}
/* Make sure VLAN is already added to main VSI, before allowing ADQ to
* add a VLAN based filter such as MAC + VLAN + L4 port.
*/
if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
main_vsi->idx)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
return -EINVAL;
}
}
fltr->action.fltr_act = ICE_FWD_TO_VSI;
fltr->action.tc_class = tc;
return 0;
......@@ -639,8 +829,8 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
/* Drop action */
if (act->id == FLOW_ACTION_DROP) {
fltr->action.fltr_act = ICE_DROP_PACKET;
return 0;
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
return -EINVAL;
}
fltr->action.fltr_act = ICE_FWD_TO_VSI;
}
......@@ -673,6 +863,20 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EIO;
}
/* update advanced switch filter count for destination
* VSI if filter destination was VSI
*/
if (fltr->dest_vsi) {
if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
fltr->dest_vsi->num_chnl_fltr--;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
(fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
pf->num_dmac_chnl_fltrs--;
}
}
return 0;
}
......@@ -811,7 +1015,8 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
/* find filter */
fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
if (!fltr) {
if (hlist_empty(&pf->tc_flower_fltr_list))
if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
hlist_empty(&pf->tc_flower_fltr_list))
return 0;
NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
......
......@@ -120,6 +120,28 @@ struct ice_tc_flower_fltr {
struct netlink_ext_ack *extack;
};
/**
* ice_is_chnl_fltr - is this a valid channel filter
* @f: Pointer to tc-flower filter
*
* The criterion for determining whether a given filter is a valid channel
* filter is based on its "destination": if the destination is hw_tc (aka
* tc_class) and it is non-zero, then it is a valid channel (aka ADQ) filter
*/
static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
{
return !!f->action.tc_class;
}
/**
* ice_chnl_dmac_fltr_cnt - DMAC based CHNL filter count
* @pf: Pointer to PF
*/
static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
{
return pf->num_dmac_chnl_fltrs;
}
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *cls_flower);
......
......@@ -287,6 +287,7 @@ struct ice_rx_ring {
struct rcu_head rcu; /* to avoid race on free */
/* CL4 - 3rd cacheline starts here */
struct ice_channel *ch;
struct bpf_prog *xdp_prog;
struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
......@@ -328,6 +329,7 @@ struct ice_tx_ring {
/* CL3 - 3rd cacheline starts here */
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
struct ice_ptp_tx *tx_tstamps;
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
......@@ -352,6 +354,11 @@ static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
return !!ring->ch;
}
static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
......
......@@ -138,6 +138,7 @@ enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
ICE_VSI_SWITCHDEV_CTRL = 7,
};
......@@ -570,6 +571,8 @@ struct ice_sched_vsi_info {
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
/* bw_t_info saves VSI BW information */
struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
......
......@@ -832,7 +832,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
......@@ -859,7 +859,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
......