Commit 9904e1ee authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-04-22

This series contains updates to the virtchnl header file and to the ice
and iavf drivers.

Vignesh adds support to the ice driver to warn about potentially
malicious VFs: those that are overflowing the mailbox.

Michal adds support in the ice driver for an allowlist/denylist of VF
commands based on supported capabilities.

Brett adds support for iavf UDP segmentation offload by adding the
capability bit to virtchnl, advertising support in the ice driver, and
enabling it in the iavf driver. He also adds a helper function to the
ice driver for getting a VF's VSI.

Colin Ian King removes an unneeded pointer assignment.

Qi enables the ice driver to support virtchnl requests from the iavf
driver to configure its own RSS input set. This includes adding new
capability bits, structures, and commands to the virtchnl header file.

Haiyue enables configuring the RSS flow hash via ethtool for the TCP,
UDP, and SCTP protocols in the iavf driver, as in the ethtool example
that follows the commit metadata below.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3197a98c e41985f0
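
The advanced RSS path added by this series is driven through the standard
ethtool rx-flow-hash interface. A minimal illustration, assuming a VF netdev
named ens1f0v0 (the interface name is hypothetical):

	# hash TCP/IPv4 flows on source IP, destination IP and both L4 ports
	ethtool -N ens1f0v0 rx-flow-hash tcp4 sdfn
	# query the currently configured hash fields for TCP/IPv4
	ethtool -n ens1f0v0 rx-flow-hash tcp4

The set path corresponds to ETHTOOL_SRXFH and iavf_set_adv_rss_hash_opt() in
the diff below; the query path corresponds to ETHTOOL_GRXFH and
iavf_get_adv_rss_hash_opt().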
......@@ -12,4 +12,5 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o \
iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
......@@ -38,6 +38,7 @@
#include <linux/avf/virtchnl.h>
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
......@@ -303,6 +304,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
/* OS defined structs */
struct net_device *netdev;
......@@ -345,6 +348,8 @@ struct iavf_adapter {
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_FDIR_PF)
#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
......@@ -372,6 +377,9 @@ struct iavf_adapter {
u16 fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
struct list_head adv_rss_list_head;
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
......@@ -444,6 +452,8 @@ void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
#endif /* _IAVF_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Intel Corporation. */
/* advanced RSS configuration ethtool support for iavf */
#include "iavf.h"
/**
* iavf_fill_adv_rss_ip4_hdr - fill the IPv4 RSS protocol header
* @hdr: the virtchnl message protocol header data structure
* @hash_flds: the RSS configuration protocol hash fields
*/
static void
iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
{
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_SA)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_DA)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
}
/**
* iavf_fill_adv_rss_ip6_hdr - fill the IPv6 RSS protocol header
* @hdr: the virtchnl message protocol header data structure
* @hash_flds: the RSS configuration protocol hash fields
*/
static void
iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
{
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_SA)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_DA)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
}
/**
* iavf_fill_adv_rss_tcp_hdr - fill the TCP RSS protocol header
* @hdr: the virtchnl message protocol header data structure
* @hash_flds: the RSS configuration protocol hash fields
*/
static void
iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
{
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
}
/**
* iavf_fill_adv_rss_udp_hdr - fill the UDP RSS protocol header
* @hdr: the virtchnl message protocol header data structure
* @hash_flds: the RSS configuration protocol hash fields
*/
static void
iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
{
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
}
/**
* iavf_fill_adv_rss_sctp_hdr - fill the SCTP RSS protocol header
* @hdr: the virtchnl message protocol header data structure
* @hash_flds: the RSS configuration protocol hash fields
*/
static void
iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
{
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
}
/**
* iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
* @hash_flds: the RSS configuration protocol hash fields
*
* Returns 0 if the RSS configuration virtchnl message is filled successfully
*/
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
u32 packet_hdrs, u64 hash_flds)
{
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
proto_hdrs->tunnel_level = 0; /* always outer layer */
hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) {
case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
break;
case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
break;
default:
return -EINVAL;
}
hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) {
case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
break;
case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
break;
case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
break;
default:
return -EINVAL;
}
return 0;
}
/**
* iavf_find_adv_rss_cfg_by_hdrs - find RSS configuration with header type
* @adapter: pointer to the VF adapter structure
* @packet_hdrs: protocol header type to find.
*
* Returns pointer to the advanced RSS configuration if found, otherwise NULL
*/
struct iavf_adv_rss *
iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs)
{
struct iavf_adv_rss *rss;
list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
if (rss->packet_hdrs == packet_hdrs)
return rss;
return NULL;
}
/**
* iavf_print_adv_rss_cfg
* @adapter: pointer to the VF adapter structure
* @rss: pointer to the advanced RSS configuration to print
* @action: the string description about how to handle the RSS
* @result: the string description about the virtchnl result
*
* Print the advanced RSS configuration
**/
void
iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
const char *action, const char *result)
{
u32 packet_hdrs = rss->packet_hdrs;
u64 hash_flds = rss->hash_flds;
static char hash_opt[300];
const char *proto;
if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_TCP)
proto = "TCP";
else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_UDP)
proto = "UDP";
else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
proto = "SCTP";
else
return;
memset(hash_opt, 0, sizeof(hash_opt));
strcat(hash_opt, proto);
if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4)
strcat(hash_opt, "v4 ");
else
strcat(hash_opt, "v6 ");
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
strcat(hash_opt, "IP SA,");
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
strcat(hash_opt, "IP DA,");
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
strcat(hash_opt, "src port,");
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
strcat(hash_opt, "dst port,");
if (!action)
action = "";
if (!result)
result = "";
dev_info(&adapter->pdev->dev, "%s %s %s\n", action, hash_opt, result);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021, Intel Corporation. */
#ifndef _IAVF_ADV_RSS_H_
#define _IAVF_ADV_RSS_H_
struct iavf_adapter;
/* State of advanced RSS configuration */
enum iavf_adv_rss_state_t {
IAVF_ADV_RSS_ADD_REQUEST, /* User requests to add RSS */
IAVF_ADV_RSS_ADD_PENDING, /* RSS pending add by the PF */
IAVF_ADV_RSS_DEL_REQUEST, /* Driver requests to delete RSS */
IAVF_ADV_RSS_DEL_PENDING, /* RSS pending delete by the PF */
IAVF_ADV_RSS_ACTIVE, /* RSS configuration is active */
};
enum iavf_adv_rss_flow_seg_hdr {
IAVF_ADV_RSS_FLOW_SEG_HDR_NONE = 0x00000000,
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 = 0x00000001,
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6 = 0x00000002,
IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004,
IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008,
IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010,
};
#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \
(IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 | \
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6)
#define IAVF_ADV_RSS_FLOW_SEG_HDR_L4 \
(IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | \
IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \
IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
enum iavf_adv_rss_flow_field {
/* L3 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA,
IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA,
IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA,
IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA,
/* L4 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT,
IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT,
/* The total number of enums must not exceed 64 */
IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX
};
#define IAVF_ADV_RSS_HASH_INVALID 0
#define IAVF_ADV_RSS_HASH_FLD_IPV4_SA \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA)
#define IAVF_ADV_RSS_HASH_FLD_IPV6_SA \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA)
#define IAVF_ADV_RSS_HASH_FLD_IPV4_DA \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA)
#define IAVF_ADV_RSS_HASH_FLD_IPV6_DA \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA)
#define IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT)
#define IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT)
#define IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT)
#define IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT)
#define IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT)
/* bookkeeping of advanced RSS configuration */
struct iavf_adv_rss {
enum iavf_adv_rss_state_t state;
struct list_head list;
u32 packet_hdrs;
u64 hash_flds;
struct virtchnl_rss_cfg cfg_msg;
};
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
u32 packet_hdrs, u64 hash_flds);
struct iavf_adv_rss *
iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
void
iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
const char *action, const char *result);
#endif /* _IAVF_ADV_RSS_H_ */
......@@ -1418,6 +1418,252 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return err;
}
/**
* iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
* @cmd: ethtool rxnfc command
*
* This function parses the rxnfc command and returns intended
* header types for RSS configuration
*/
static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
{
u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
break;
case UDP_V4_FLOW:
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
break;
case SCTP_V4_FLOW:
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
break;
case TCP_V6_FLOW:
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
break;
case UDP_V6_FLOW:
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
break;
case SCTP_V6_FLOW:
hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
break;
default:
break;
}
return hdrs;
}
/**
* iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
* @cmd: ethtool rxnfc command
*
* This function parses the rxnfc command and returns intended hash fields for
* RSS configuration
*/
static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
switch (cmd->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
if (cmd->data & RXH_IP_SRC)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
if (cmd->data & RXH_IP_DST)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
if (cmd->data & RXH_IP_SRC)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
if (cmd->data & RXH_IP_DST)
hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
break;
default:
break;
}
}
if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
switch (cmd->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
if (cmd->data & RXH_L4_B_0_1)
hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
if (cmd->data & RXH_L4_B_2_3)
hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
if (cmd->data & RXH_L4_B_0_1)
hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
if (cmd->data & RXH_L4_B_2_3)
hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
if (cmd->data & RXH_L4_B_0_1)
hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
if (cmd->data & RXH_L4_B_2_3)
hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
break;
default:
break;
}
}
return hfld;
}
/**
* iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
* @adapter: pointer to the VF adapter structure
* @cmd: ethtool rxnfc command
*
* Returns Success if the flow input set is supported.
*/
static int
iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
int count = 50, err = 0;
u64 hash_flds;
u32 hdrs;
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
hdrs = iavf_adv_rss_parse_hdrs(cmd);
if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
return -EINVAL;
hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
return -EINVAL;
rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
if (!rss_new)
return -ENOMEM;
if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
kfree(rss_new);
return -EINVAL;
}
while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section)) {
if (--count == 0) {
kfree(rss_new);
return -EINVAL;
}
udelay(1);
}
spin_lock_bh(&adapter->adv_rss_lock);
rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss_old) {
if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
err = -EBUSY;
} else if (rss_old->hash_flds != hash_flds) {
rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_old->hash_flds = hash_flds;
memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
sizeof(rss_new->cfg_msg));
adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
} else {
err = -EEXIST;
}
} else {
rss_new_add = true;
rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_new->packet_hdrs = hdrs;
rss_new->hash_flds = hash_flds;
list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
if (!rss_new_add)
kfree(rss_new);
return err;
}
/**
* iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
* @adapter: pointer to the VF adapter structure
* @cmd: ethtool rxnfc command
*
* Returns Success if the flow input set is supported.
*/
static int
iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct iavf_adv_rss *rss;
u64 hash_flds;
u32 hdrs;
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
cmd->data = 0;
hdrs = iavf_adv_rss_parse_hdrs(cmd);
if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
return -EINVAL;
spin_lock_bh(&adapter->adv_rss_lock);
rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss)
hash_flds = rss->hash_flds;
else
hash_flds = IAVF_ADV_RSS_HASH_INVALID;
spin_unlock_bh(&adapter->adv_rss_lock);
if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
return -EINVAL;
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
cmd->data |= (u64)RXH_IP_SRC;
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
cmd->data |= (u64)RXH_IP_DST;
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
cmd->data |= (u64)RXH_L4_B_0_1;
if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
cmd->data |= (u64)RXH_L4_B_2_3;
return 0;
}
/**
* iavf_set_rxnfc - command to set Rx flow rules.
* @netdev: network interface device structure
......@@ -1437,6 +1683,9 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = iavf_del_fdir_ethtool(adapter, cmd);
break;
case ETHTOOL_SRXFH:
ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
break;
default:
break;
}
......@@ -1477,8 +1726,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
case ETHTOOL_GRXFH:
netdev_info(netdev,
"RSS hash info is not available to vf, use pf.\n");
ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
break;
default:
break;
......
......@@ -962,6 +962,7 @@ void iavf_down(struct iavf_adapter *adapter)
struct iavf_cloud_filter *cf;
struct iavf_fdir_fltr *fdir;
struct iavf_mac_filter *f;
struct iavf_adv_rss *rss;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
......@@ -1004,6 +1005,12 @@ void iavf_down(struct iavf_adapter *adapter)
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
/* remove all advanced RSS configurations */
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
rss->state = IAVF_ADV_RSS_DEL_REQUEST;
spin_unlock_bh(&adapter->adv_rss_lock);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __IAVF_RESETTING) {
/* cancel any current operation */
......@@ -1016,6 +1023,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
......@@ -1646,6 +1654,14 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_del_fdir_filter(adapter);
return IAVF_SUCCESS;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
iavf_add_adv_rss_cfg(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
iavf_del_adv_rss_cfg(adapter);
return 0;
}
return -EAGAIN;
}
......@@ -3542,6 +3558,8 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
hw_features |= NETIF_F_HW_TC;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= hw_features;
......@@ -3756,11 +3774,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->mac_vlan_list_lock);
spin_lock_init(&adapter->cloud_filter_list_lock);
spin_lock_init(&adapter->fdir_fltr_lock);
spin_lock_init(&adapter->adv_rss_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_LIST_HEAD(&adapter->cloud_filter_list);
INIT_LIST_HEAD(&adapter->fdir_list_head);
INIT_LIST_HEAD(&adapter->adv_rss_list_head);
INIT_WORK(&adapter->reset_task, iavf_reset_task);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
......@@ -3866,6 +3886,7 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
......@@ -3953,6 +3974,14 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
list) {
list_del(&rss->list);
kfree(rss);
}
spin_unlock_bh(&adapter->adv_rss_lock);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
......
......@@ -1905,13 +1905,20 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
*hdr_len = (u8)sizeof(l4.udp) + l4_offset;
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
*hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
}
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
......
......@@ -140,7 +140,9 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
......@@ -1293,6 +1295,105 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
}
/**
* iavf_add_adv_rss_cfg
* @adapter: the VF adapter structure
*
* Request that the PF add RSS configuration as specified
* by the user via ethtool.
**/
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
{
struct virtchnl_rss_cfg *rss_cfg;
struct iavf_adv_rss *rss;
bool process_rss = false;
int len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
adapter->current_op);
return;
}
len = sizeof(struct virtchnl_rss_cfg);
rss_cfg = kzalloc(len, GFP_KERNEL);
if (!rss_cfg)
return;
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
process_rss = true;
rss->state = IAVF_ADV_RSS_ADD_PENDING;
memcpy(rss_cfg, &rss->cfg_msg, len);
iavf_print_adv_rss_cfg(adapter, rss,
"Input set change for",
"is pending");
break;
}
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (process_rss) {
adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
(u8 *)rss_cfg, len);
} else {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
}
kfree(rss_cfg);
}
/**
* iavf_del_adv_rss_cfg
* @adapter: the VF adapter structure
*
* Request that the PF delete RSS configuration as specified
* by the user via ethtool.
**/
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
{
struct virtchnl_rss_cfg *rss_cfg;
struct iavf_adv_rss *rss;
bool process_rss = false;
int len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
adapter->current_op);
return;
}
len = sizeof(struct virtchnl_rss_cfg);
rss_cfg = kzalloc(len, GFP_KERNEL);
if (!rss_cfg)
return;
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
process_rss = true;
rss->state = IAVF_ADV_RSS_DEL_PENDING;
memcpy(rss_cfg, &rss->cfg_msg, len);
break;
}
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (process_rss) {
adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
(u8 *)rss_cfg, len);
} else {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
}
kfree(rss_cfg);
}
/**
* iavf_request_reset
* @adapter: adapter structure
......@@ -1493,6 +1594,40 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
break;
case VIRTCHNL_OP_ADD_RSS_CFG: {
struct iavf_adv_rss *rss, *rss_tmp;
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry_safe(rss, rss_tmp,
&adapter->adv_rss_list_head,
list) {
if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
iavf_print_adv_rss_cfg(adapter, rss,
"Failed to change the input set for",
NULL);
list_del(&rss->list);
kfree(rss);
}
}
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
case VIRTCHNL_OP_DEL_RSS_CFG: {
struct iavf_adv_rss *rss;
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry(rss, &adapter->adv_rss_list_head,
list) {
if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
rss->state = IAVF_ADV_RSS_ACTIVE;
dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
iavf_stat_str(&adapter->hw,
v_retval));
}
}
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
......@@ -1682,6 +1817,35 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
break;
case VIRTCHNL_OP_ADD_RSS_CFG: {
struct iavf_adv_rss *rss;
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
iavf_print_adv_rss_cfg(adapter, rss,
"Input set change for",
"successful");
rss->state = IAVF_ADV_RSS_ACTIVE;
}
}
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
case VIRTCHNL_OP_DEL_RSS_CFG: {
struct iavf_adv_rss *rss, *rss_tmp;
spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry_safe(rss, rss_tmp,
&adapter->adv_rss_list_head, list) {
if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
list_del(&rss->list);
kfree(rss);
}
}
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
......
......@@ -26,6 +26,7 @@ ice-y := ice_main.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
......
......@@ -426,6 +426,7 @@ struct ice_pf {
u16 num_msix_per_vf;
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
......
......@@ -2149,6 +2149,94 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
return status;
}
/**
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
* @segs_cnt: packet segment count
*
* Assumption: lock has already been acquired for RSS list
*/
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
enum ice_status status;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return ICE_ERR_NO_MEMORY;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
addl_hdrs);
if (status)
goto out;
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST;
goto out;
}
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
if (status)
goto out;
/* Remove RSS configuration from VSI context before deleting
* the flow profile.
*/
ice_rem_rss_list(hw, vsi_handle, prof);
if (bitmap_empty(prof->vsis, ICE_MAX_VSI))
status = ice_flow_rem_prof(hw, blk, prof->id);
out:
kfree(segs);
return status;
}
/**
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
*
* This function will look up the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
* that profile and find the entry associated with the input VSI to be
* removed. Calls are made to underlying flow APIs, which will in
* turn build or update buffers for the RSS XLT1 section.
*/
enum ice_status __maybe_unused
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
enum ice_status status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
if (!status)
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS);
mutex_unlock(&hw->rss_locks);
return status;
}
/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
* As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
* convert its values to their appropriate flow L3, L4 values.
......
......@@ -8,6 +8,9 @@
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_ETH \
(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
......@@ -406,5 +409,8 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
#endif /* _ICE_FLOW_H_ */
......@@ -1193,6 +1193,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qtype = "Mailbox";
/* we are going to try to detect a malicious VF, so set the
* state to begin detection
*/
hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
break;
default:
dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
......@@ -1274,7 +1278,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
if (!ice_is_malicious_vf(pf, &event, i, pending))
ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
......
......@@ -4,7 +4,14 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
#include "ice_common.h"
#include "ice_type.h"
#include "ice_controlq.h"
/* Defining the mailbox message threshold as 63 asynchronous
* pending messages. Normal VF functionality does not require
* sending more than 63 asynchronous pending messages.
*/
#define ICE_ASYNC_VF_MSG_THRESHOLD 63
#ifdef CONFIG_PCI_IOV
enum ice_status
......@@ -12,6 +19,17 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
enum ice_status
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
u16 vf_id, bool *is_mal_vf);
enum ice_status
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id);
enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
void ice_mbx_deinit_snapshot(struct ice_hw *hw);
enum ice_status
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf);
#else /* CONFIG_PCI_IOV */
static inline enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
......
......@@ -630,6 +630,80 @@ struct ice_fw_log_cfg {
struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
};
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
* 1. ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot
* within the mailbox buffer.
* 2. ICE_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snapshot
* 3. ICE_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the
* mailbox and mark any VFs sending more messages than the threshold limit set.
* 4. ICE_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to 0xFFFFFFFF.
*/
enum ice_mbx_snapshot_state {
ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0,
ICE_MAL_VF_DETECT_STATE_TRAVERSE,
ICE_MAL_VF_DETECT_STATE_DETECT,
ICE_MAL_VF_DETECT_STATE_INVALID = 0xFFFFFFFF,
};
/* Structure to hold information of the static snapshot and the mailbox
* buffer data used to generate and track the snapshot.
* 1. state: the state of the mailbox snapshot in the malicious VF
* detection state handler ice_mbx_vf_state_handler()
* 2. head: head of the mailbox snapshot in a circular mailbox buffer
* 3. tail: tail of the mailbox snapshot in a circular mailbox buffer
* 4. num_iterations: number of messages traversed in circular mailbox buffer
* 5. num_msg_proc: number of messages processed in mailbox
* 6. num_pending_arq: number of pending asynchronous messages
* 7. max_num_msgs_mbx: maximum messages in mailbox for currently
* serviced work item or interrupt.
*/
struct ice_mbx_snap_buffer_data {
enum ice_mbx_snapshot_state state;
u32 head;
u32 tail;
u32 num_iterations;
u16 num_msg_proc;
u16 num_pending_arq;
u16 max_num_msgs_mbx;
};
/* Structure to track messages sent by VFs on mailbox:
* 1. vf_cntr: a counter array of VFs to track the number of
* asynchronous messages sent by each VF
* 2. vfcntr_len: number of entries in VF counter array
*/
struct ice_mbx_vf_counter {
u32 *vf_cntr;
u32 vfcntr_len;
};
/* Structure to hold data relevant to the captured static snapshot
* of the PF-VF mailbox.
*/
struct ice_mbx_snapshot {
struct ice_mbx_snap_buffer_data mbx_buf;
struct ice_mbx_vf_counter mbx_vf;
};
/* Structure to hold data to be used for capturing or updating a
* static snapshot.
* 1. num_msg_proc: number of messages processed in mailbox
* 2. num_pending_arq: number of pending asynchronous messages
* 3. max_num_msgs_mbx: maximum messages in mailbox for currently
* serviced work item or interrupt.
* 4. async_watermark_val: An upper threshold set by caller to determine
* if the pending arq count is large enough to assume that there is
* the possibility of a malicious VF.
*/
struct ice_mbx_data {
u16 num_msg_proc;
u16 num_pending_arq;
u16 max_num_msgs_mbx;
u16 async_watermark_val;
};
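
For orientation, here is a minimal sketch of how a PF-side caller might feed
these structures into the detection handler. The in-tree call sites live in
the ice hunks not shown here; the helper name below is illustrative, and using
pf->hw.mailboxq.num_rq_entries and ICE_ASYNC_VF_MSG_THRESHOLD as the watermark
is an assumption made for the example:

static void example_check_malicious_vf(struct ice_pf *pf, u16 vf_id,
				       u16 num_msg_proc, u16 num_msg_pending)
{
	struct ice_mbx_data mbx_data = {};
	bool report_malvf = false;
	bool is_mal_vf = false;

	/* describe the current mailbox work item to the state machine */
	mbx_data.num_msg_proc = num_msg_proc;
	mbx_data.num_pending_arq = num_msg_pending;
	mbx_data.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
	mbx_data.async_watermark_val = ICE_ASYNC_VF_MSG_THRESHOLD;

	/* advance the snapshot through NEW_SNAPSHOT/TRAVERSE/DETECT */
	if (ice_mbx_vf_state_handler(&pf->hw, &mbx_data, vf_id, &is_mal_vf))
		return;

	/* record the VF in pf->malvfs and only warn the first time */
	if (is_mal_vf &&
	    !ice_mbx_report_malvf(&pf->hw, pf->malvfs, ICE_MAX_VF_COUNT,
				  vf_id, &report_malvf) && report_malvf)
		dev_warn(ice_pf_to_dev(pf),
			 "VF %u may be flooding the PF-VF mailbox\n", vf_id);
}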
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
......@@ -761,6 +835,7 @@ struct ice_hw {
DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
#include "ice_virtchnl_allowlist.h"
/* The purpose of this file is to share functionality for allowlisting or
* denylisting opcodes used in PF <-> VF communication. Groups of opcodes:
* - default -> should always be allowed after creating a VF,
* default_allowlist_opcodes
* - opcodes needed by the VF to work correctly, but not associated with caps ->
* should be allowed after successful VF resource allocation,
* working_allowlist_opcodes
* - opcodes needed by the VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
* - VIRTCHNL_VF_OFFLOAD_RSS_AQ
* - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
* - VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2
* - VIRTCHNL_VF_OFFLOAD_ENCAP
* - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM
* - VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM
* - VIRTCHNL_VF_OFFLOAD_USO
*/
/* default opcodes to communicate with VF */
static const u32 default_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_VF_RESOURCES, VIRTCHNL_OP_VERSION, VIRTCHNL_OP_RESET_VF,
};
/* opcodes supported after successful VIRTCHNL_OP_GET_VF_RESOURCES */
static const u32 working_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_TX_QUEUE, VIRTCHNL_OP_CONFIG_RX_QUEUE,
VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_OP_CONFIG_IRQ_MAP,
VIRTCHNL_OP_ENABLE_QUEUES, VIRTCHNL_OP_DISABLE_QUEUES,
VIRTCHNL_OP_GET_STATS, VIRTCHNL_OP_EVENT,
};
/* VIRTCHNL_VF_OFFLOAD_L2 */
static const u32 l2_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_ETH_ADDR, VIRTCHNL_OP_DEL_ETH_ADDR,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
};
/* VIRTCHNL_VF_OFFLOAD_REQ_QUEUES */
static const u32 req_queues_allowlist_opcodes[] = {
VIRTCHNL_OP_REQUEST_QUEUES,
};
/* VIRTCHNL_VF_OFFLOAD_VLAN */
static const u32 vlan_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_OP_DEL_VLAN,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
};
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
};
/* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */
static const u32 adv_rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG,
};
/* VIRTCHNL_VF_OFFLOAD_FDIR_PF */
static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
struct allowlist_opcode_info {
const u32 *opcodes;
size_t size;
};
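/* BIT_INDEX() converts a single-bit capability flag into its bit position:
* for a power-of-two flag, (caps) - 1 sets every lower bit, so its Hamming
* weight (HWEIGHT) equals the index of the original bit. For example,
* VIRTCHNL_VF_OFFLOAD_FDIR_PF (0x10000000) maps to index 28. This lets the
* allowlist table below be indexed directly by capability bit.
*/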
#define BIT_INDEX(caps) (HWEIGHT((caps) - 1))
#define ALLOW_ITEM(caps, list) \
[BIT_INDEX(caps)] = { \
.opcodes = list, \
.size = ARRAY_SIZE(list) \
}
static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_L2, l2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
};
/**
* ice_vc_is_opcode_allowed - check if this opcode is allowed on this VF
* @vf: pointer to VF structure
* @opcode: virtchnl opcode
*
* Return true if message is allowed on this VF
*/
bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode)
{
if (opcode >= VIRTCHNL_OP_MAX)
return false;
return test_bit(opcode, vf->opcodes_allowlist);
}
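
A hedged sketch of how the PF's virtchnl dispatcher might consult this check
before processing a VF message; the real call site is in the ice_virtchnl_pf.c
hunk not shown here, and the exact ice_vc_send_msg_to_vf() signature is an
assumption:

	/* drop messages whose opcode the VF has not been allowlisted for */
	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
		ice_vc_send_msg_to_vf(vf, v_opcode,
				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
				      NULL, 0);
		return;
	}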
/**
* ice_vc_allowlist_opcodes - allowlist selected opcodes
* @vf: pointer to VF structure
* @opcodes: array of opcodes to allowlist
* @size: size of opcodes array
*
* Function should be called to allowlist opcodes on VF.
*/
static void
ice_vc_allowlist_opcodes(struct ice_vf *vf, const u32 *opcodes, size_t size)
{
unsigned int i;
for (i = 0; i < size; i++)
set_bit(opcodes[i], vf->opcodes_allowlist);
}
/**
* ice_vc_clear_allowlist - clear all allowlist opcodes
* @vf: pointer to VF structure
*/
static void ice_vc_clear_allowlist(struct ice_vf *vf)
{
bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX);
}
/**
* ice_vc_set_default_allowlist - allowlist default opcodes for VF
* @vf: pointer to VF structure
*/
void ice_vc_set_default_allowlist(struct ice_vf *vf)
{
ice_vc_clear_allowlist(vf);
ice_vc_allowlist_opcodes(vf, default_allowlist_opcodes,
ARRAY_SIZE(default_allowlist_opcodes));
}
/**
* ice_vc_set_working_allowlist - allowlist opcodes needed by the VF to work
* @vf: pointer to VF structure
*
* Allowlist opcodes that aren't associated with specific caps, but
* are needed by the VF to work.
*/
void ice_vc_set_working_allowlist(struct ice_vf *vf)
{
ice_vc_allowlist_opcodes(vf, working_allowlist_opcodes,
ARRAY_SIZE(working_allowlist_opcodes));
}
/**
* ice_vc_set_caps_allowlist - allowlist VF opcodes according to caps
* @vf: pointer to VF structure
*/
void ice_vc_set_caps_allowlist(struct ice_vf *vf)
{
unsigned long caps = vf->driver_caps;
unsigned int i;
for_each_set_bit(i, &caps, ARRAY_SIZE(allowlist_opcodes))
ice_vc_allowlist_opcodes(vf, allowlist_opcodes[i].opcodes,
allowlist_opcodes[i].size);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021, Intel Corporation. */
#ifndef _ICE_VIRTCHNL_ALLOWLIST_H_
#define _ICE_VIRTCHNL_ALLOWLIST_H_
#include "ice.h"
bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode);
void ice_vc_set_default_allowlist(struct ice_vf *vf);
void ice_vc_set_working_allowlist(struct ice_vf *vf);
void ice_vc_set_caps_allowlist(struct ice_vf *vf);
#endif /* _ICE_VIRTCHNL_ALLOWLIST_H_ */
......@@ -103,6 +103,7 @@ struct ice_vf {
u16 num_vf_qs; /* num of queue configured per VF */
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
};
#ifdef CONFIG_PCI_IOV
......@@ -119,6 +120,9 @@ void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
u16 num_msg_proc, u16 num_msg_pending);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
......@@ -158,6 +162,15 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
struct ice_rq_event_info __always_unused *event,
u16 __always_unused num_msg_proc,
u16 __always_unused num_msg_pending)
{
return false;
}
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
bool __always_unused is_vflr)
......
......@@ -136,9 +136,12 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
/* opcode 34 - 46 are reserved */
/* opcode 34 - 44 are reserved */
VIRTCHNL_OP_ADD_RSS_CFG = 45,
VIRTCHNL_OP_DEL_RSS_CFG = 46,
VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
VIRTCHNL_OP_MAX,
};
/* These macros are used to generate compilation errors if a structure/union
......@@ -250,6 +253,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000
/* Define below the capability flags that are not offloads */
......@@ -675,6 +680,14 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
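/* Each protocol header type owns a block of 2^PROTO_HDR_SHIFT (32) field
* identifiers; PROTO_HDR_FIELD_START() returns the first field ID in the
* block for a given header type, and individual fields are offsets from it.
*/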
......@@ -830,6 +843,14 @@ struct virtchnl_proto_hdrs {
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
struct virtchnl_rss_cfg {
struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
u8 reserved[128]; /* reserve for future */
};
VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
/* action configuration for FDIR */
struct virtchnl_filter_action {
enum virtchnl_action type;
......@@ -1098,6 +1119,10 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
case VIRTCHNL_OP_ADD_RSS_CFG:
case VIRTCHNL_OP_DEL_RSS_CFG:
valid_len = sizeof(struct virtchnl_rss_cfg);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_add);
break;
......