Commit 14a1d246 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-01-25

This series contains updates to the ice driver to add support for RSS.

Henry and Tony enable the driver to write the filtering hardware tables
so that RSS rules can be changed, and introduce and initialize the
structures for storing the configuration.  They follow that up by
creating an extraction sequence, based on the packet header protocols,
to be programmed.  Next, the TCAM entry is stored with the profile data
and VSI group in the respective software structures.

Md Fahad sets up the configuration to support RSS tables for the virtual
function (VF) driver (iavf), and adds support for the flow types TCP4,
TCP6, UDP4, UDP6, SCTP4 and SCTP6 for RSS via ethtool.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4d8773b6 18a8d358
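
Before the diff, a quick user-space illustration (editor's addition, not part of the commit) of the interface this series enables: the sketch below sets the TCP4 hash fields through the raw ETHTOOL_SRXFH ioctl, which is what "ethtool -N <ifname> rx-flow-hash tcp4 sdfn" does under the hood. It uses only standard UAPI headers; the helper name set_tcp4_rss_hash is made up for the example.

/* Editor's sketch: request RSS hashing on src/dst IPv4 address and
 * src/dst TCP port for TCP-over-IPv4 flows.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_tcp4_rss_hash(const char *ifname)
{
	struct ethtool_rxnfc nfc = { 0 };
	struct ifreq ifr = { 0 };
	int fd, ret;

	nfc.cmd = ETHTOOL_SRXFH;
	nfc.flow_type = TCP_V4_FLOW;
	nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* handled by ice_set_rxnfc() */
	close(fd);
	return ret;
}
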
......@@ -17,7 +17,8 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_flex_pipe.o \
ice_flow.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
......
......@@ -232,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
......@@ -1849,6 +1856,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* debug commands */
......
......@@ -4,6 +4,7 @@
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 200
......@@ -1496,6 +1497,114 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
}
/**
* ice_aq_alloc_free_res - command to allocate/free resources
* @hw: pointer to the HW struct
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
* @opc: pass in the command opcode
* @cd: pointer to command details structure or NULL
*
* Helper function to allocate/free resources using the admin queue commands
*/
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.sw_res_ctrl;
if (!buf)
return ICE_ERR_PARAM;
if (buf_size < (num_entries * sizeof(buf->elem[0])))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->num_entries = cpu_to_le16(num_entries);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
* ice_alloc_hw_res - allocate resource
* @hw: pointer to the HW struct
* @type: type of resource
* @num: number of resources to allocate
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
u16 buf_len;
buf_len = struct_size(buf, elem, num - 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
/* Prepare buffer to allocate resource. */
buf->num_elems = cpu_to_le16(num);
buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
if (btm)
buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
ice_aqc_opc_alloc_res, NULL);
if (status)
goto ice_alloc_res_exit;
memcpy(res, buf->elem, sizeof(buf->elem) * num);
ice_alloc_res_exit:
kfree(buf);
return status;
}
/**
* ice_free_hw_res - free allocated HW resource
* @hw: pointer to the HW struct
* @type: type of resource to free
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
u16 buf_len;
buf_len = struct_size(buf, elem, num - 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
buf->res_type = cpu_to_le16(type);
memcpy(buf->elem, res, sizeof(buf->elem) * num);
status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status)
ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
kfree(buf);
return status;
}
/**
* ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the HW structure
......@@ -3406,7 +3515,10 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
if (status)
return status;
}
/* Replay per VSI all RSS configurations */
status = ice_replay_rss_cfg(hw, vsi_handle);
if (status)
return status;
/* Replay per VSI all filters */
status = ice_replay_vsi_all_fltr(hw, vsi_handle);
return status;
......
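
The ice_alloc_hw_res()/ice_free_hw_res() pair added above is the generic path the RSS code uses to claim and release hardware resources such as the hash profile-builder TCAM entries defined in ice_adminq_cmd.h. A minimal caller sketch (editor's addition; ice_example_tcam_alloc is a made-up name, while the helpers and the resource type come from this patch):

#include "ice_common.h"

static enum ice_status ice_example_tcam_alloc(struct ice_hw *hw)
{
	enum ice_status status;
	u16 tcam_idx[2];

	/* Request two TCAM entries, scanning from the bottom of the
	 * resource; ice_alloc_hw_res() ORs in the DEDICATED and
	 * IGNORE_INDEX flags itself.
	 */
	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM,
				  2, true, tcam_idx);
	if (status)
		return status;

	/* ... program the entries via the flex pipe code ... */

	return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM,
			       2, tcam_idx);
}
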
......@@ -34,10 +34,18 @@ enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
......
......@@ -4,6 +4,7 @@
/* ethtool support for ice */
#include "ice.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
......@@ -2533,6 +2534,243 @@ ice_set_link_ksettings(struct net_device *netdev,
return err;
}
/**
* ice_parse_hdrs - parses headers from RSS hash input
* @nfc: ethtool rxnfc command
*
* This function parses the rxnfc command and returns intended
* header types for RSS configuration
*/
static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
{
u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
break;
case UDP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
break;
case SCTP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
break;
case TCP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
break;
case UDP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
break;
case SCTP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
break;
default:
break;
}
return hdrs;
}
#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
* @nfc: ethtool rxnfc command
*
* This function parses the rxnfc command and returns intended
* hash fields for RSS configuration
*/
static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
{
u64 hfld = ICE_HASH_INVALID;
if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST)
hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST)
hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
break;
default:
break;
}
}
if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
if (nfc->data & RXH_L4_B_0_1)
hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
if (nfc->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
if (nfc->data & RXH_L4_B_0_1)
hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
if (nfc->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
if (nfc->data & RXH_L4_B_0_1)
hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
if (nfc->data & RXH_L4_B_2_3)
hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
break;
default:
break;
}
}
return hfld;
}
/**
* ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @vsi: the VSI being configured
* @nfc: ethtool rxnfc command
*
* Returns Success if the flow input set is supported.
*/
static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
enum ice_status status;
struct device *dev;
u64 hashed_flds;
u32 hdrs;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
return -EINVAL;
}
hashed_flds = ice_parse_hash_flds(nfc);
if (hashed_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
vsi->vsi_num);
return -EINVAL;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
return -EINVAL;
}
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
return -EINVAL;
}
return 0;
}
/**
* ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
* @vsi: the VSI being configured
* @nfc: ethtool rxnfc command
*/
static void
ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
u32 hdrs;
dev = ice_pf_to_dev(pf);
nfc->data = 0;
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
return;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
return;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
return;
}
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
nfc->data |= (u64)RXH_IP_SRC;
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
nfc->data |= (u64)RXH_IP_DST;
if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
nfc->data |= (u64)RXH_L4_B_0_1;
if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
nfc->data |= (u64)RXH_L4_B_2_3;
}
/**
* ice_set_rxnfc - command to set Rx flow rules.
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
*
* Returns 0 for success and negative values for errors
*/
static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
return ice_set_rss_hash_opt(vsi, cmd);
default:
break;
}
return -EOPNOTSUPP;
}
/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
......@@ -2554,6 +2792,10 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
case ETHTOOL_GRXFH:
ice_get_rss_hash_opt(vsi, cmd);
ret = 0;
break;
default:
break;
}
......@@ -3857,6 +4099,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.set_rxnfc = ice_set_rxnfc,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
......
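
To make the new ethtool path concrete, here is a worked trace (editor's addition) for a TCP-over-IPv4 request that hashes on both addresses and both ports:

/*
 * Input:  nfc->cmd = ETHTOOL_SRXFH, nfc->flow_type = TCP_V4_FLOW,
 *         nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
 *
 * ice_parse_hdrs(nfc)      -> ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4
 * ice_parse_hash_flds(nfc) -> ICE_FLOW_HASH_FLD_IPV4_SA |
 *                             ICE_FLOW_HASH_FLD_IPV4_DA |
 *                             ICE_FLOW_HASH_FLD_TCP_SRC_PORT |
 *                             ICE_FLOW_HASH_FLD_TCP_DST_PORT
 *                             (the ICE_HASH_TCP_IPV4 value from ice_flow.h)
 *
 * ice_set_rss_hash_opt() then programs the hardware with
 * ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs), and the
 * ETHTOOL_GRXFH path reverses the mapping via ice_get_rss_cfg().
 */
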
......@@ -18,6 +18,13 @@
#define ICE_PKG_CNT 4
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
......@@ -26,4 +33,6 @@ void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
......@@ -3,6 +3,9 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
#define ICE_FV_OFFSET_INVAL 0x1FF
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
......@@ -105,37 +108,57 @@ struct ice_buf_hdr {
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
......@@ -152,6 +175,19 @@ enum ice_block {
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* package labels */
struct ice_label {
__le16 value;
......@@ -234,6 +270,13 @@ struct ice_prof_redir_section {
u8 redir_value[1];
};
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
......@@ -248,6 +291,12 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
struct ice_pkg_es {
__le16 count;
__le16 offset;
struct ice_fv_word es[1];
};
struct ice_es {
u32 sid;
u16 count;
......@@ -280,6 +329,35 @@ struct ice_ptg_ptype {
u8 ptg;
};
#define ICE_MAX_TCAM_PER_PROFILE 32
#define ICE_MAX_PTG_PER_PROFILE 32
struct ice_prof_map {
struct list_head list;
u64 profile_cookie;
u64 context;
u8 prof_id;
u8 ptg_cnt;
u8 ptg[ICE_MAX_PTG_PER_PROFILE];
};
#define ICE_INVALID_TCAM 0xFFFF
struct ice_tcam_inf {
u16 tcam_idx;
u8 ptg;
u8 prof_id;
u8 in_use;
};
struct ice_vsig_prof {
struct list_head list;
u64 profile_cookie;
u8 prof_id;
u8 tcam_count;
struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
};
struct ice_vsig_entry {
struct list_head prop_lst;
struct ice_vsig_vsi *first_vsi;
......@@ -329,6 +407,13 @@ struct ice_xlt2 {
u16 count;
};
/* Profile ID Management */
struct ice_prof_id_key {
__le16 flags;
u8 xlt1;
__le16 xlt2_cdid;
} __packed;
/* Keys are made up of two values, each one-half the size of the key.
* For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values)
*/
......@@ -371,4 +456,31 @@ struct ice_blk_info {
u8 is_list_init;
};
enum ice_chg_type {
ICE_TCAM_NONE = 0,
ICE_PTG_ES_ADD,
ICE_TCAM_ADD,
ICE_VSIG_ADD,
ICE_VSIG_REM,
ICE_VSI_MOVE,
};
struct ice_chs_chg {
struct list_head list_entry;
enum ice_chg_type type;
u8 add_ptg;
u8 add_vsig;
u8 add_tcam_idx;
u8 add_prof;
u16 ptype;
u8 ptg;
u8 prof_id;
u16 vsi;
u16 vsig;
u16 orig_vsig;
u16 tcam_idx;
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
#endif /* _ICE_FLEX_TYPE_H_ */
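
A brief orientation on the bookkeeping structures added above, written as descriptive comments (editor's reading of the definitions, not text from the commit):

/*
 * ice_prof_map   - maps a profile cookie to a hardware profile ID and the
 *                  packet-type groups (ptg[], up to ICE_MAX_PTG_PER_PROFILE)
 *                  it covers.
 * ice_vsig_prof  - per-VSI-group record of a profile, tracking the TCAM
 *                  entries (ice_tcam_inf, up to ICE_MAX_TCAM_PER_PROFILE)
 *                  programmed for it.
 * ice_chs_chg    - one pending change (see enum ice_chg_type: PTG/ES add,
 *                  TCAM add, VSIG add/remove, VSI move) collected while the
 *                  tables are being updated.
 */
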
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_IPV4 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
#define ICE_HASH_INVALID 0
#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
* tunneling or encapsulation protocols for network virtualization such as GRE,
* VxLAN, etc.
*/
enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_NONE = 0x00000000,
ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
};
enum ice_flow_field {
/* L3 */
ICE_FLOW_FIELD_IDX_IPV4_SA,
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
ICE_FLOW_FIELD_IDX_IPV6_DA,
/* L4 */
ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
ICE_AVF_FLOW_FIELD_INVALID = 0,
ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
ICE_AVF_FLOW_FIELD_IPV4_UDP,
ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
ICE_AVF_FLOW_FIELD_IPV4_TCP,
ICE_AVF_FLOW_FIELD_IPV4_SCTP,
ICE_AVF_FLOW_FIELD_IPV4_OTHER,
ICE_AVF_FLOW_FIELD_FRAG_IPV4,
/* Values 37-38 are reserved */
ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
ICE_AVF_FLOW_FIELD_IPV6_UDP,
ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
ICE_AVF_FLOW_FIELD_IPV6_TCP,
ICE_AVF_FLOW_FIELD_IPV6_SCTP,
ICE_AVF_FLOW_FIELD_IPV6_OTHER,
ICE_AVF_FLOW_FIELD_FRAG_IPV6,
ICE_AVF_FLOW_FIELD_RSVD47,
ICE_AVF_FLOW_FIELD_FCOE_OX,
ICE_AVF_FLOW_FIELD_FCOE_RX,
ICE_AVF_FLOW_FIELD_FCOE_OTHER,
/* Values 51-62 are reserved */
ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
ICE_AVF_FLOW_FIELD_MAX
};
/* Supported RSS offloads.  This macro is defined to support the
* VIRTCHNL_OP_GET_RSS_HENA_CAPS ops.  The PF driver sends the RSS hardware
* capabilities to the caller of this operation.
*/
#define ICE_DEFAULT_RSS_HENA ( \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
enum ice_flow_dir {
ICE_FLOW_RX = 0x02,
};
enum ice_flow_priority {
ICE_FLOW_PRIO_LOW,
ICE_FLOW_PRIO_NORMAL,
ICE_FLOW_PRIO_HIGH
};
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
struct ice_flow_seg_xtrct {
u8 prot_id; /* Protocol ID of extracted header field */
u16 off; /* Starting offset of the field in header in bytes */
u8 idx; /* Index of FV entry used */
u8 disp; /* Displacement of field in bits fr. FV entry's start */
};
enum ice_flow_fld_match_type {
ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
};
struct ice_flow_fld_loc {
/* Describe offsets of field information relative to the beginning of
* input buffer provided when adding flow entries.
*/
u16 val; /* Offset where the value is located */
u16 mask; /* Offset where the mask/prefix value is located */
u16 last; /* Length or offset where the upper value is located */
};
struct ice_flow_fld_info {
enum ice_flow_fld_match_type type;
/* Location where to retrieve data from an input buffer */
struct ice_flow_fld_loc src;
/* Location where to put the data into the final entry buffer */
struct ice_flow_fld_loc entry;
struct ice_flow_seg_xtrct xtrct;
};
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
};
struct ice_flow_prof {
struct list_head l_entry;
u64 id;
enum ice_flow_dir dir;
u8 segs_cnt;
/* Keep track of flow entries associated with this flow profile */
struct mutex entries_lock;
struct list_head entries;
struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
/* software VSI handles referenced by this flow profile */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
};
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
u64 hashed_flds;
u32 packet_hdr;
};
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
#endif /* _ICE_FLOW_H_ */
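
The prototypes at the bottom of this header are what the ethtool handlers shown earlier call into. A hedged sketch (editor's addition; ice_example_enable_udp6_rss is a made-up name and the usage is modeled on the ethtool path, not taken from the commit) of enabling UDP-over-IPv6 hashing for a VSI directly through this API:

/* Editor's sketch: hash UDP-over-IPv6 flows on src/dst address and
 * src/dst port, the equivalent of an ethtool "udp6 sdfn" request.
 */
static enum ice_status
ice_example_enable_udp6_rss(struct ice_hw *hw, u16 vsi_handle)
{
	u32 hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP;

	/* ICE_HASH_UDP_IPV6 == ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT */
	return ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, hdrs);
}
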
......@@ -199,6 +199,14 @@ enum ice_rxdid {
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
/* Receive Descriptor MDID values that access packet flags */
enum ice_flex_mdid_pkt_flags {
ICE_RX_MDID_PKT_FLAGS_15_0 = 20,
ICE_RX_MDID_PKT_FLAGS_31_16,
ICE_RX_MDID_PKT_FLAGS_47_32,
ICE_RX_MDID_PKT_FLAGS_63_48,
};
/* Receive Descriptor MDID values */
enum ice_flex_rx_mdid {
ICE_RX_MDID_FLOW_ID_LOWER = 5,
......
This diff is collapsed.
......@@ -13,7 +13,7 @@
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 1
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_PROTOCOL_TYPE_H_
#define _ICE_PROTOCOL_TYPE_H_
/* Decoders for ice_prot_id:
* - F: First
* - I: Inner
* - L: Last
* - O: Outer
* - S: Single
*/
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
......@@ -26,6 +26,7 @@ enum ice_status {
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
......
......@@ -49,42 +49,6 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/**
* ice_aq_alloc_free_res - command to allocate/free resources
* @hw: pointer to the HW struct
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
* @opc: pass in the command opcode
* @cd: pointer to command details structure or NULL
*
* Helper function to allocate/free resources using the admin queue commands
*/
static enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.sw_res_ctrl;
if (!buf)
return ICE_ERR_PARAM;
if (buf_size < (num_entries * sizeof(buf->elem[0])))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->num_entries = cpu_to_le16(num_entries);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
......
......@@ -13,6 +13,7 @@
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
......@@ -41,6 +42,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_PKG BIT_ULL(16)
......@@ -559,6 +561,10 @@ struct ice_hw {
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
struct list_head fl_profs[ICE_BLK_COUNT];
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
......