Commit e8a8d867 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-10-31

This series contains updates to i40e, i40evf and net/sched.

Arnd Bergmann cleans up the power management code to resolve a build
warning.

Shannon Nelson fixes i40e to redistribute vectors only when we did not
get the full count that we requested.

Alex reverts a previous commit because it potentially causes a memory leak
when combined with the current page recycling scheme.

Amritha enables configuring cloud filters in i40e using the tc-flower
classifier.  The classification function of the filter is to match a
packet to a traffic class.  cls_flower is extended to offload the classid
to hardware.  Hardware traffic classes are identified using classid
values reserved in the range :ffe0 - :ffef; for example, classid
ffff:ffe1 maps to hardware traffic class 1.
The cloud filters are added for a VSI and are cleaned up when the VSI is
deleted.  Filters that match on L4 ports need enhanced admin queue
functions with big buffer support for the extended fields in cloud filter
commands.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 170b8ffa 2f4b411a
......@@ -55,6 +55,8 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
......@@ -253,6 +255,58 @@ struct i40e_fdir_filter {
u32 fd_id;
};
#define I40E_CLOUD_FIELD_OMAC 0x01
#define I40E_CLOUD_FIELD_IMAC 0x02
#define I40E_CLOUD_FIELD_IVLAN 0x04
#define I40E_CLOUD_FIELD_TEN_ID 0x08
#define I40E_CLOUD_FIELD_IIP 0x10
#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
I40E_CLOUD_FIELD_IVLAN)
#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
I40E_CLOUD_FIELD_TEN_ID)
#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
I40E_CLOUD_FIELD_IMAC | \
I40E_CLOUD_FIELD_TEN_ID)
#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
I40E_CLOUD_FIELD_IVLAN | \
I40E_CLOUD_FIELD_TEN_ID)
#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP
struct i40e_cloud_filter {
struct hlist_node cloud_node;
unsigned long cookie;
/* cloud filter input set follows */
u8 dst_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
__be16 vlan_id;
u16 seid; /* filter control */
__be16 dst_port;
__be16 src_port;
u32 tenant_id;
union {
struct {
struct in_addr dst_ip;
struct in_addr src_ip;
} v4;
struct {
struct in6_addr dst_ip6;
struct in6_addr src_ip6;
} v6;
} ip;
#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
#define src_ipv6 ip.v6.src_ip6.s6_addr32
#define dst_ipv4 ip.v4.dst_ip.s_addr
#define src_ipv4 ip.v4.src_ip.s_addr
u16 n_proto; /* Ethernet Protocol */
u8 ip_proto; /* IPPROTO value */
u8 flags;
#define I40E_CLOUD_TNL_TYPE_NONE 0xff
u8 tunnel_type;
};
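A rough illustration (hypothetical values, not part of this patch): a filter matching UDP flows to 192.168.1.1 port 4789 on a VSI could be filled in as below; i40e_parse_cls_flower() later in this series builds the same thing from tc-flower keys.
struct i40e_cloud_filter cf = {};
cf.n_proto = ETH_P_IP;				/* IPv4, host order */
cf.ip_proto = IPPROTO_UDP;			/* L4 protocol */
cf.dst_ipv4 = cpu_to_be32(0xc0a80101);		/* 192.168.1.1, via the ip.v4 macro alias */
cf.dst_port = cpu_to_be16(4789);		/* network order, like the tc key */
cf.flags = I40E_CLOUD_FILTER_FLAGS_IIP;		/* match on destination IP */
cf.seid = vsi->seid;				/* 'vsi' is hypothetical here */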
#define I40E_ETH_P_LLDP 0x88cc
#define I40E_DCB_PRIO_TYPE_STRICT 0
......@@ -420,6 +474,9 @@ struct i40e_pf {
struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_udp_bitmap;
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
......@@ -483,6 +540,8 @@ struct i40e_pf {
#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
#define I40E_FLAG_TC_MQPRIO BIT(29)
#define I40E_FLAG_FD_SB_INACTIVE BIT(30)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
......@@ -565,6 +624,8 @@ struct i40e_pf {
u16 phy_led_val;
u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
};
/**
......@@ -739,6 +800,7 @@ struct i40e_vsi {
u16 next_base_queue; /* next queue to be used for channel setup */
struct list_head ch_list;
u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
void *priv; /* client driver data reference. */
......
......@@ -790,7 +790,35 @@ struct i40e_aqc_set_switch_config {
*/
__le16 first_tag;
__le16 second_tag;
/* Next byte is split into following:
* Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
* Bit 6 : 0 : Destination Port, 1: source port
* Bit 5..4 : L4 type
* 0: rsvd
* 1: TCP
* 2: UDP
* 3: Both TCP and UDP
* Bits 3:0 Mode
* 0: default mode
* 1: L4 port only mode
* 2: non-tunneled mode
* 3: tunneled mode
*/
#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
u8 mode;
u8 rsvd5[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
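For reference, i40e_validate_and_set_switch_mode() later in this series composes the mode byte exactly along these bit definitions:
/* bit 7 valid + both TCP and UDP L4 types + non-tunneled filter mode */
u8 mode = I40E_AQ_SET_SWITCH_BIT7_VALID |
I40E_AQ_SET_SWITCH_L4_TYPE_BOTH |
I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;	/* 0x80 | 0x30 | 0x02 = 0xb2 */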
......@@ -1343,14 +1371,16 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
u8 big_buffer_flag;
#define I40E_AQC_ADD_CLOUD_CMD_BB 1
u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
......@@ -1362,6 +1392,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
struct {
u8 data[16];
} v6;
struct {
__le16 data[8];
} raw_v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
......@@ -1380,6 +1413,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
/* 0x0010 to 0x0017 is for custom filters */
#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
......@@ -1414,6 +1451,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
/* i40e_aqc_cloud_filters_element_bb is used when
* I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
*/
struct i40e_aqc_cloud_filters_element_bb {
struct i40e_aqc_cloud_filters_element_data element;
u16 general_fields[32];
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
};
I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
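As used later in this series, the custom 0x10-0x12 filter types carry the destination L4 port in FLU 0X16 word 0; a sketch of the driver-side store (filter->dst_port is __be16, the firmware wants CPU order here):
cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
be16_to_cpu(filter->dst_port);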
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
......@@ -1425,6 +1505,60 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
/* Replace filter Command 0x025F
* uses the i40e_aqc_replace_cloud_filters,
* and the generic indirect completion structure
*/
struct i40e_filter_data {
u8 filter_type;
u8 input[3];
};
I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
struct i40e_aqc_replace_cloud_filters_cmd {
u8 valid_flags;
#define I40E_AQC_REPLACE_L1_FILTER 0x0
#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
#define I40E_AQC_GET_CLOUD_FILTERS 0x2
#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
u8 old_filter_type;
u8 new_filter_type;
u8 tr_bit;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
struct i40e_aqc_replace_cloud_filters_cmd_buf {
u8 data[32];
/* Filter type INPUT codes*/
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
/* Field Vector offsets */
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
/* big FLU */
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
/* big FLU */
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
struct i40e_filter_data filters[8];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
......
......@@ -2407,13 +2407,14 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hardware structure
* @flags: bit flag values to set
* @valid_flags: which bit flags to set
* @mode: cloud filter mode
* @cmd_details: pointer to command details structure or NULL
*
* Set switch configuration bits
**/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags,
u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
......@@ -2425,6 +2426,7 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
scfg->switch_tag = cpu_to_le16(hw->switch_tag);
scfg->first_tag = cpu_to_le16(hw->first_tag);
......@@ -5434,5 +5436,194 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters to
* @filters: Buffer which contains the filters to be added
* @filter_count: number of filters contained in the buffer
*
* Set the cloud filters for a given VSI. The contents of the
* i40e_aqc_cloud_filters_element_data are filled in by the caller
* of the function.
*
**/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
enum i40e_status_code status;
u16 buff_len;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
}
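A minimal caller sketch (hypothetical 'mac' and 'vsi'; the real driver builds the element with i40e_set_cld_element() and a flag lookup table, see i40e_add_del_cloud_filter() below):
struct i40e_aqc_cloud_filters_element_data cld = {};
enum i40e_status_code status;
ether_addr_copy(cld.inner_mac, mac);		/* match on inner MAC */
cld.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IMAC);
status = i40e_aq_add_cloud_filters(&pf->hw, vsi->seid, &cld, 1);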
/**
* i40e_aq_add_cloud_filters_bb
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters to
* @filters: Buffer which contains the filters in big buffer to be added
* @filter_count: number of filters contained in the buffer
*
* Set the big buffer cloud filters for a given VSI. The contents of the
* i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
* function.
*
**/
i40e_status
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
i40e_status status;
u16 buff_len;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
for (i = 0; i < filter_count; i++) {
u16 tnl_type;
u32 ti;
tnl_type = (le16_to_cpu(filters[i].element.flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
* other tunnel types.
*/
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
ti = le32_to_cpu(filters[i].element.tenant_id);
filters[i].element.tenant_id = cpu_to_le32(ti << 8);
}
}
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
}
/**
* i40e_aq_rem_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
* @filters: Buffer which contains the filters to be removed
* @filter_count: number of filters contained in the buffer
*
* Remove the cloud filters for a given VSI. The contents of the
* i40e_aqc_cloud_filters_element_data are filled in by the caller
* of the function.
*
**/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
enum i40e_status_code status;
u16 buff_len;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
}
/**
* i40e_aq_rem_cloud_filters_bb
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
* @filters: Buffer which contains the filters in big buffer to be removed
* @filter_count: number of filters contained in the buffer
*
* Remove the big buffer cloud filters for a given VSI. The contents of the
* i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
* function.
*
**/
i40e_status
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
i40e_status status;
u16 buff_len;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
buff_len = filter_count * sizeof(*filters);
desc.datalen = cpu_to_le16(buff_len);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd->num_filters = filter_count;
cmd->seid = cpu_to_le16(seid);
cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
for (i = 0; i < filter_count; i++) {
u16 tnl_type;
u32 ti;
tnl_type = (le16_to_cpu(filters[i].element.flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
* other tunnel types.
*/
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
ti = le32_to_cpu(filters[i].element.tenant_id);
filters[i].element.tenant_id = cpu_to_le32(ti << 8);
}
}
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
}
......@@ -4343,7 +4343,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %s aq_err %s\n",
......
......@@ -69,6 +69,15 @@ static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add);
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add);
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type);
/* i40e_pci_tbl - PCI Device ID Table
*
......@@ -5480,7 +5489,11 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
**/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
enum i40e_admin_queue_err last_aq_status;
struct i40e_cloud_filter *cfilter;
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
int ret, i;
/* Reset rss size that was stored when reconfiguring rss for
......@@ -5521,6 +5534,29 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
"Failed to reset tx rate for ch->seid %u\n",
ch->seid);
/* delete cloud filters associated with this channel */
hlist_for_each_entry_safe(cfilter, node,
&pf->cloud_filter_list, cloud_node) {
if (cfilter->seid != ch->seid)
continue;
hash_del(&cfilter->cloud_node);
if (cfilter->dst_port)
ret = i40e_add_del_cloud_filter_big_buf(vsi,
cfilter,
false);
else
ret = i40e_add_del_cloud_filter(vsi, cfilter,
false);
last_aq_status = pf->hw.aq.asq_last_status;
if (ret)
dev_info(&pf->pdev->dev,
"Failed to delete cloud filter, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, last_aq_status));
kfree(cfilter);
}
/* delete VSI from FW */
ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
NULL);
......@@ -5971,6 +6007,63 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
return ch->initialized ? true : false;
}
/**
* i40e_validate_and_set_switch_mode - sets up switch mode correctly
* @vsi: ptr to VSI which has PF backing
*
* Sets up the switch mode if it needs to be changed, validating that only
* the allowed cloud filter modes are configured.
**/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
u8 mode;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
int ret;
ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
if (ret)
return -EINVAL;
if (hw->dev_caps.switch_mode) {
/* if switch mode is set, support mode2 (non-tunneled for
* cloud filter) for now
*/
u32 switch_mode = hw->dev_caps.switch_mode &
I40E_SWITCH_MODE_MASK;
if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
if (switch_mode == I40E_CLOUD_FILTER_MODE2)
return 0;
dev_err(&pf->pdev->dev,
"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
hw->dev_caps.switch_mode);
return -EINVAL;
}
}
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
/* Set L4type to both TCP and UDP support */
mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
/* Prep mode field for set_switch_config */
ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
pf->last_sw_conf_valid_flags,
mode, NULL);
if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
"couldn't set switch config bits, err %s aq_err %s\n",
i40e_stat_str(hw, ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
return ret;
}
/**
* i40e_create_queue_channel - function to create channel
* @vsi: VSI to be configured
......@@ -6100,6 +6193,7 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
int ret = 0, i;
/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
vsi->tc_seid_map[0] = vsi->seid;
for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (vsi->tc_config.enabled_tc & BIT(i)) {
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
......@@ -6130,6 +6224,7 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
i, ch->num_queue_pairs);
goto err_free;
}
vsi->tc_seid_map[i] = ch->seid;
}
}
return ret;
......@@ -6748,13 +6843,720 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
return ret;
}
/**
* i40e_set_cld_element - sets cloud filter element data
* @filter: cloud filter rule
* @cld: ptr to cloud filter element data
*
* This is helper function to copy data into cloud filter element
**/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
struct i40e_aqc_cloud_filters_element_data *cld)
{
int i, j;
u32 ipa;
memset(cld, 0, sizeof(*cld));
ether_addr_copy(cld->outer_mac, filter->dst_mac);
ether_addr_copy(cld->inner_mac, filter->src_mac);
if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
return;
if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
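/* The firmware expects the address as little-endian 16-bit words with
* the 32-bit words in reverse order, so walk dst_ipv6 from its last
* 32-bit word down to its first.
*/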
for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
i++, j += 2) {
ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
ipa = cpu_to_le32(ipa);
memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
}
} else {
ipa = be32_to_cpu(filter->dst_ipv4);
memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
}
cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
/* tenant_id is not supported by FW now, once the support is enabled
* fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
*/
if (filter->tenant_id)
return;
}
/**
* i40e_add_del_cloud_filter - Add/del cloud filter
* @vsi: pointer to VSI
* @filter: cloud filter rule
* @add: if true, add, if false, delete
*
* Add or delete a cloud filter for a specific flow spec.
* Returns 0 if the filter was successfully added or deleted.
**/
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter, bool add)
{
struct i40e_aqc_cloud_filters_element_data cld_filter;
struct i40e_pf *pf = vsi->back;
int ret;
static const u16 flag_table[128] = {
[I40E_CLOUD_FILTER_FLAGS_OMAC] =
I40E_AQC_ADD_CLOUD_FILTER_OMAC,
[I40E_CLOUD_FILTER_FLAGS_IMAC] =
I40E_AQC_ADD_CLOUD_FILTER_IMAC,
[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
[I40E_CLOUD_FILTER_FLAGS_IIP] =
I40E_AQC_ADD_CLOUD_FILTER_IIP,
};
if (filter->flags >= ARRAY_SIZE(flag_table))
return I40E_ERR_CONFIG;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter);
if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
if (filter->n_proto == ETH_P_IPV6)
cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
else
cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
if (add)
ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
&cld_filter, 1);
else
ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
&cld_filter, 1);
if (ret)
dev_dbg(&pf->pdev->dev,
"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
add ? "add" : "delete", filter->dst_port, ret,
pf->hw.aq.asq_last_status);
else
dev_info(&pf->pdev->dev,
"%s cloud filter for VSI: %d\n",
add ? "Added" : "Deleted", filter->seid);
return ret;
}
/**
* i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
* @vsi: pointer to VSI
* @filter: cloud filter rule
* @add: if true, add, if false, delete
*
* Add or delete a cloud filter for a specific flow spec using big buffer.
* Returns 0 if the filter was successfully added or deleted.
**/
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add)
{
struct i40e_aqc_cloud_filters_element_bb cld_filter;
struct i40e_pf *pf = vsi->back;
int ret;
/* Both (src/dst) valid mac_addr are not supported */
if ((is_valid_ether_addr(filter->dst_mac) &&
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
return -EINVAL;
/* Make sure the L4 port is specified, otherwise bail out; a channel
* specific cloud filter needs a non-zero 'L4 port'
*/
if (!filter->dst_port)
return -EINVAL;
/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EINVAL;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
if (is_valid_ether_addr(filter->dst_mac) ||
is_valid_ether_addr(filter->src_mac) ||
is_multicast_ether_addr(filter->dst_mac) ||
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
return -EINVAL;
/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
* and update if vlan is present or not
*/
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
if (filter->vlan_id) {
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
}
} else if (filter->dst_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
if (filter->n_proto == ETH_P_IPV6)
cld_filter.element.flags |=
cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
else
cld_filter.element.flags |=
cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
} else {
dev_err(&pf->pdev->dev,
"either mac or ip has to be valid for cloud filter\n");
return -EINVAL;
}
/* Now copy L4 port in Byte 6..7 in general fields */
cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
be16_to_cpu(filter->dst_port);
if (add) {
/* Validate current device switch mode, change if necessary */
ret = i40e_validate_and_set_switch_mode(vsi);
if (ret) {
dev_err(&pf->pdev->dev,
"failed to set switch mode, ret %d\n",
ret);
return ret;
}
ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
&cld_filter, 1);
} else {
ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
&cld_filter, 1);
}
if (ret)
dev_dbg(&pf->pdev->dev,
"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
else
dev_info(&pf->pdev->dev,
"%s cloud filter for VSI: %d, L4 port: %d\n",
add ? "add" : "delete", filter->seid,
ntohs(filter->dst_port));
return ret;
}
/**
* i40e_parse_cls_flower - Parse tc flower filters provided by kernel
* @vsi: Pointer to VSI
* @f: Pointer to struct tc_cls_flower_offload
* @filter: Pointer to cloud filter structure
*
**/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
struct tc_cls_flower_offload *f,
struct i40e_cloud_filter *filter)
{
u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
struct i40e_pf *pf = vsi->back;
u8 field_flags = 0;
if (f->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_dissector_key_keyid *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID,
f->key);
struct flow_dissector_key_keyid *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID,
f->mask);
if (mask->keyid != 0)
field_flags |= I40E_CLOUD_FIELD_TEN_ID;
filter->tenant_id = be32_to_cpu(key->keyid);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
f->key);
struct flow_dissector_key_basic *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_BASIC,
f->mask);
n_proto_key = ntohs(key->n_proto);
n_proto_mask = ntohs(mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
n_proto_mask = 0;
}
filter->n_proto = n_proto_key & n_proto_mask;
filter->ip_proto = key->ip_proto;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->key);
struct flow_dissector_key_eth_addrs *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->mask);
/* use is_broadcast and is_zero to check for all 0xff or all 0 */
if (!is_zero_ether_addr(mask->dst)) {
if (is_broadcast_ether_addr(mask->dst)) {
field_flags |= I40E_CLOUD_FIELD_OMAC;
} else {
dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
mask->dst);
return I40E_ERR_CONFIG;
}
}
if (!is_zero_ether_addr(mask->src)) {
if (is_broadcast_ether_addr(mask->src)) {
field_flags |= I40E_CLOUD_FIELD_IMAC;
} else {
dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
mask->src);
return I40E_ERR_CONFIG;
}
}
ether_addr_copy(filter->dst_mac, key->dst);
ether_addr_copy(filter->src_mac, key->src);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_dissector_key_vlan *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->key);
struct flow_dissector_key_vlan *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
if (mask->vlan_id) {
if (mask->vlan_id == VLAN_VID_MASK) {
field_flags |= I40E_CLOUD_FIELD_IVLAN;
} else {
dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
mask->vlan_id);
return I40E_ERR_CONFIG;
}
}
filter->vlan_id = cpu_to_be16(key->vlan_id);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_CONTROL,
f->key);
addr_type = key->addr_type;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->key);
struct flow_dissector_key_ipv4_addrs *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->mask);
if (mask->dst) {
if (mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
mask->dst = be32_to_cpu(mask->dst);
dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
&mask->dst);
return I40E_ERR_CONFIG;
}
}
if (mask->src) {
if (mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
mask->src = be32_to_cpu(mask->src);
dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
&mask->src);
return I40E_ERR_CONFIG;
}
}
if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
return I40E_ERR_CONFIG;
}
filter->dst_ipv4 = key->dst;
filter->src_ipv4 = key->src;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_dissector_key_ipv6_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->key);
struct flow_dissector_key_ipv6_addrs *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->mask);
/* src and dest IPV6 address should not be LOOPBACK
* (0:0:0:0:0:0:0:1), which can be represented as ::1
*/
if (ipv6_addr_loopback(&key->dst) ||
ipv6_addr_loopback(&key->src)) {
dev_err(&pf->pdev->dev,
"Bad ipv6, addr is LOOPBACK\n");
return I40E_ERR_CONFIG;
}
if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
field_flags |= I40E_CLOUD_FIELD_IIP;
memcpy(&filter->src_ipv6, &key->src.s6_addr32,
sizeof(filter->src_ipv6));
memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
sizeof(filter->dst_ipv6));
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_PORTS,
f->key);
struct flow_dissector_key_ports *mask =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_PORTS,
f->mask);
if (mask->src) {
if (mask->src == cpu_to_be16(0xffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
be16_to_cpu(mask->src));
return I40E_ERR_CONFIG;
}
}
if (mask->dst) {
if (mask->dst == cpu_to_be16(0xffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
be16_to_cpu(mask->dst));
return I40E_ERR_CONFIG;
}
}
filter->dst_port = key->dst;
filter->src_port = key->src;
switch (filter->ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
break;
default:
dev_err(&pf->pdev->dev,
"Only UDP and TCP transport are supported\n");
return -EINVAL;
}
}
filter->flags = field_flags;
return 0;
}
/**
* i40e_handle_tclass - Forward to a traffic class on the device
* @vsi: Pointer to VSI
* @tc: traffic class index on the device
* @filter: Pointer to cloud filter structure
*
**/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
struct i40e_cloud_filter *filter)
{
struct i40e_channel *ch, *ch_tmp;
/* direct to a traffic class on the same device */
if (tc == 0) {
filter->seid = vsi->seid;
return 0;
} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
if (!filter->dst_port) {
dev_err(&vsi->back->pdev->dev,
"Specify destination port to direct to traffic class that is not default\n");
return -EINVAL;
}
if (list_empty(&vsi->ch_list))
return -EINVAL;
list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
list) {
if (ch->seid == vsi->tc_seid_map[tc])
filter->seid = ch->seid;
}
return 0;
}
dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
return -EINVAL;
}
/**
* i40e_configure_clsflower - Configure tc flower filters
* @vsi: Pointer to VSI
* @cls_flower: Pointer to struct tc_cls_flower_offload
*
**/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
struct tc_cls_flower_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
struct i40e_cloud_filter *filter = NULL;
struct i40e_pf *pf = vsi->back;
int err = 0;
if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
return -EINVAL;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
if (pf->fdir_pf_active_filters ||
(!hlist_empty(&pf->fdir_filter_list))) {
dev_err(&vsi->back->pdev->dev,
"Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
return -EINVAL;
}
if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
dev_err(&vsi->back->pdev->dev,
"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
}
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
filter->cookie = cls_flower->cookie;
err = i40e_parse_cls_flower(vsi, cls_flower, filter);
if (err < 0)
goto err;
err = i40e_handle_tclass(vsi, tc, filter);
if (err < 0)
goto err;
/* Add cloud filter */
if (filter->dst_port)
err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
else
err = i40e_add_del_cloud_filter(vsi, filter, true);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to add cloud filter, err %s\n",
i40e_stat_str(&pf->hw, err));
err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
goto err;
}
/* add filter to the ordered list */
INIT_HLIST_NODE(&filter->cloud_node);
hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
pf->num_cloud_filters++;
return err;
err:
kfree(filter);
return err;
}
/**
* i40e_find_cloud_filter - Find the cloud filter in the list
* @vsi: Pointer to VSI
* @cookie: filter specific cookie
*
**/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
unsigned long *cookie)
{
struct i40e_cloud_filter *filter = NULL;
struct hlist_node *node2;
hlist_for_each_entry_safe(filter, node2,
&vsi->back->cloud_filter_list, cloud_node)
if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
return filter;
return NULL;
}
/**
* i40e_delete_clsflower - Remove tc flower filters
* @vsi: Pointer to VSI
* @cls_flower: Pointer to struct tc_cls_flower_offload
*
**/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
struct tc_cls_flower_offload *cls_flower)
{
struct i40e_cloud_filter *filter = NULL;
struct i40e_pf *pf = vsi->back;
int err = 0;
filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
if (!filter)
return -EINVAL;
hash_del(&filter->cloud_node);
if (filter->dst_port)
err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
else
err = i40e_add_del_cloud_filter(vsi, filter, false);
kfree(filter);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to delete cloud filter, err %s\n",
i40e_stat_str(&pf->hw, err));
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
}
pf->num_cloud_filters--;
if (!pf->num_cloud_filters)
if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
!(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
}
return 0;
}
/**
* i40e_setup_tc_cls_flower - flower classifier offloads
* @np: netdev private structure
* @cls_flower: Pointer to struct tc_cls_flower_offload
**/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
struct tc_cls_flower_offload *cls_flower)
{
struct i40e_vsi *vsi = np->vsi;
if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return i40e_configure_clsflower(vsi, cls_flower);
case TC_CLSFLOWER_DESTROY:
return i40e_delete_clsflower(vsi, cls_flower);
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
return -EINVAL;
}
}
static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct i40e_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return i40e_setup_tc_cls_flower(np, type_data);
default:
return -EOPNOTSUPP;
}
}
static int i40e_setup_tc_block(struct net_device *dev,
struct tc_block_offload *f)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
np, np);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_MQPRIO:
return i40e_setup_tc(netdev, type_data);
case TC_SETUP_BLOCK:
return i40e_setup_tc_block(netdev, type_data);
default:
return -EOPNOTSUPP;
}
}
/**
......@@ -6934,6 +7736,33 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
* i40e_cloud_filter_exit - Cleans up the cloud filters
* @pf: Pointer to PF
*
* This function destroys the hlist where all the cloud filters
* were saved.
**/
static void i40e_cloud_filter_exit(struct i40e_pf *pf)
{
struct i40e_cloud_filter *cfilter;
struct hlist_node *node;
hlist_for_each_entry_safe(cfilter, node,
&pf->cloud_filter_list, cloud_node) {
hlist_del(&cfilter->cloud_node);
kfree(cfilter);
}
pf->num_cloud_filters = 0;
if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
!(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
}
}
/**
* i40e_close - Disables a network interface
* @netdev: network interface device structure
......@@ -8039,7 +8868,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
* i40e_get_capabilities - get info about the HW
* @pf: the PF struct
**/
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
{
struct i40e_aqc_list_capabilities_element_resp *cap_buf;
u16 data_size;
......@@ -8054,9 +8884,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
/* this loads the data into the hw struct for us */
err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
&data_size, list_type,
NULL);
/* data loaded, buffer no longer needed */
kfree(cap_buf);
......@@ -8073,26 +8902,44 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
if (pf->hw.debug_mask & I40E_DEBUG_USER) {
if (list_type == i40e_aqc_opc_list_func_capabilities) {
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
pf->hw.pf_id, pf->hw.func_caps.num_vfs,
pf->hw.func_caps.num_msix_vectors,
pf->hw.func_caps.num_msix_vectors_vf,
pf->hw.func_caps.fd_filters_guaranteed,
pf->hw.func_caps.fd_filters_best_effort,
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
dev_info(&pf->pdev->dev,
"switch_mode=0x%04x, function_valid=0x%08x\n",
pf->hw.dev_caps.switch_mode,
pf->hw.dev_caps.valid_functions);
dev_info(&pf->pdev->dev,
"SR-IOV=%d, num_vfs for all function=%u\n",
pf->hw.dev_caps.sr_iov_1_1,
pf->hw.dev_caps.num_vfs);
dev_info(&pf->pdev->dev,
"num_vsis=%u, num_rx:%u, num_tx=%u\n",
pf->hw.dev_caps.num_vsis,
pf->hw.dev_caps.num_rx_qp,
pf->hw.dev_caps.num_tx_qp);
}
}
if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ pf->hw.func_caps.num_vfs)
if (pf->hw.revision_id == 0 &&
pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
dev_info(&pf->pdev->dev,
"got num_vsis %d, setting num_vsis to %d\n",
pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
}
}
return 0;
}
......@@ -8134,6 +8981,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
return;
}
}
......@@ -8155,6 +9003,45 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
i40e_vsi_release(vsi);
}
/**
* i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
* @vsi: PF main vsi
* @seid: seid of main or channel VSIs
*
* Rebuilds cloud filters associated with main VSI and channel VSIs if they
* existed before reset
**/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
struct i40e_cloud_filter *cfilter;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
i40e_status ret;
/* Add cloud filters back if they exist */
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
cloud_node) {
if (cfilter->seid != seid)
continue;
if (cfilter->dst_port)
ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
true);
else
ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (ret) {
dev_dbg(&pf->pdev->dev,
"Failed to rebuild cloud filter, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
}
}
return 0;
}
/**
* i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
* @vsi: PF main vsi
......@@ -8194,6 +9081,13 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
credits,
ch->seid);
}
ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
if (ret) {
dev_dbg(&vsi->back->pdev->dev,
"Failed to rebuild cloud filters for channel VSI %u\n",
ch->seid);
return ret;
}
}
return 0;
}
......@@ -8360,7 +9254,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_verify_eeprom(pf);
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (ret)
goto end_core_reset;
......@@ -8481,6 +9375,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
vsi->seid);
}
ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
if (ret)
goto end_unlock;
/* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
* for this main VSI if they exist
*/
......@@ -9346,7 +10244,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
} else if (v_actual != v_budget) {
/* If we have limited resources, we will start with no vectors
* for the special features and then allocate vectors to some
* of these features based on the policy and at the end disable
......@@ -9355,7 +10253,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vec;
dev_info(&pf->pdev->dev,
"MSI-X vector limit reached, attempting to redistribute vectors\n");
"MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
v_actual, v_budget);
/* reserve the misc vector */
vec = v_actual - 1;
......@@ -9403,6 +10302,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
(pf->num_fdsb_msix == 0)) {
dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
}
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
(pf->num_vmdq_msix == 0)) {
......@@ -9520,6 +10420,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
......@@ -9557,7 +10458,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return 0;
}
#ifdef CONFIG_PM
/**
* i40e_restore_interrupt_scheme - Restore the interrupt scheme
* @pf: private board data structure
......@@ -9606,7 +10506,6 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
return err;
}
#endif /* CONFIG_PM */
/**
* i40e_setup_misc_vector - Setup the misc vector to handle non queue events
......@@ -10262,9 +11161,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* Enable filters and mark for reset */
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
need_reset = true;
/* enable FD_SB only if there is MSI-X vector and no cloud
* filters exist
*/
if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
}
} else {
/* turn off filters, mark for reset and clear SW filter list */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
......@@ -10273,6 +11176,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
}
pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_SB_AUTO_DISABLED);
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
/* reset fd counters */
pf->fd_add_err = 0;
pf->fd_atr_cnt = 0;
......@@ -10334,6 +11239,12 @@ static int i40e_set_features(struct net_device *netdev,
else
i40e_vlan_stripping_disable(vsi);
if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
dev_err(&pf->pdev->dev,
"Offloaded tc filters active, can't turn hw_tc_offload off");
return -EINVAL;
}
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
......@@ -10853,7 +11764,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
......@@ -12158,14 +13070,16 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
*/
if ((pf->hw.pf_id == 0) &&
!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
pf->last_sw_conf_flags = flags;
}
if (pf->hw.pf_id == 0) {
u16 valid_flags;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
......@@ -12175,6 +13089,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
pf->last_sw_conf_valid_flags = valid_flags;
}
/* first time setup */
......@@ -12195,6 +13110,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
return -EAGAIN;
}
......@@ -12271,6 +13187,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
......@@ -12285,6 +13202,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
} else {
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
......@@ -12308,6 +13226,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= 1; /* save 1 queue for FD */
} else {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
}
......@@ -12611,7 +13530,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (err)
goto err_adminq_setup;
......@@ -13029,6 +13948,8 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
i40e_cloud_filter_exit(pf);
/* remove attached clients */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
ret_code = i40e_lan_del_device(pf);
......@@ -13260,6 +14181,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
/* Client close must be called explicitly here because the timer
......@@ -13285,12 +14207,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
}
}
#ifdef CONFIG_PM
/**
* i40e_suspend - PM callback for moving to D3
* @dev: generic device information structure
**/
static int __maybe_unused i40e_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct i40e_pf *pf = pci_get_drvdata(pdev);
......@@ -13328,7 +14249,7 @@ static int i40e_suspend(struct device *dev)
* i40e_resume - PM callback for waking up from D3
* @dev: generic device information structure
**/
static int __maybe_unused i40e_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct i40e_pf *pf = pci_get_drvdata(pdev);
......@@ -13360,8 +14281,6 @@ static int i40e_resume(struct device *dev)
return 0;
}
#endif /* CONFIG_PM */
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
......@@ -13377,11 +14296,9 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
#ifdef CONFIG_PM
.driver = {
.pm = &i40e_pm_ops,
},
#endif /* CONFIG_PM */
.shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
......
......@@ -190,7 +190,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags,
u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
......@@ -283,6 +283,22 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
i40e_status
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
/* i40e_common */
......
......@@ -1407,15 +1407,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
/* Hardware only fetches new descriptors in cache lines of 8,
* essentially ignoring the lower 3 bits of the tail register. We want
* to ensure our tail writes are aligned to avoid unnecessary work. We
* can't simply round down the cleaned count, since we might fail to
* allocate some buffers. What we really want is to ensure that
* next_to_used + cleaned_count produces an aligned value.
*/
cleaned_count -= (ntu + cleaned_count) & 0x7;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
......
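(The comment block and 'cleaned_count' adjustment above are the tail-alignment rounding that Alex's revert removes from i40e_alloc_rx_buffers(); the identical change to the i40evf copy appears further below. Per the cover letter, the rounding could cause a memory leak when combined with the current page recycling scheme.)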
......@@ -283,6 +283,16 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
/* Cloud filter modes:
* Mode1: Filter on L4 port only
* Mode2: Filter for non-tunneled traffic
* Mode3: Filter for tunnel traffic
*/
#define I40E_CLOUD_FILTER_MODE1 0x6
#define I40E_CLOUD_FILTER_MODE2 0x7
#define I40E_CLOUD_FILTER_MODE3 0x8
#define I40E_SWITCH_MODE_MASK 0xF
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
......
......@@ -1339,14 +1339,16 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
u8 big_buffer_flag;
#define I40E_AQC_ADD_CLOUD_CMD_BB 1
u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
......@@ -1358,6 +1360,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
struct {
u8 data[16];
} v6;
struct {
__le16 data[8];
} raw_v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
......@@ -1376,6 +1381,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
/* 0x0010 to 0x0017 is for custom filters */
#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
......@@ -1410,6 +1419,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
/* i40e_aqc_cloud_filters_element_bb is used when
* I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
*/
struct i40e_aqc_cloud_filters_element_bb {
struct i40e_aqc_cloud_filters_element_data element;
u16 general_fields[32];
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
};
I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
......@@ -1421,6 +1473,60 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
/* Replace filter Command 0x025F
* uses the i40e_aqc_replace_cloud_filters,
* and the generic indirect completion structure
*/
struct i40e_filter_data {
u8 filter_type;
u8 input[3];
};
I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
struct i40e_aqc_replace_cloud_filters_cmd {
u8 valid_flags;
#define I40E_AQC_REPLACE_L1_FILTER 0x0
#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
#define I40E_AQC_GET_CLOUD_FILTERS 0x2
#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
u8 old_filter_type;
u8 new_filter_type;
u8 tr_bit;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
struct i40e_aqc_replace_cloud_filters_cmd_buf {
u8 data[32];
/* Filter type INPUT codes*/
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
/* Field Vector offsets */
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
/* big FLU */
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
/* big FLU */
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
struct i40e_filter_data filters[8];
};
I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
......
......@@ -711,15 +711,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
/* Hardware only fetches new descriptors in cache lines of 8,
* essentially ignoring the lower 3 bits of the tail register. We want
* to ensure our tail writes are aligned to avoid unnecessary work. We
* can't simply round down the cleaned count, since we might fail to
* allocate some buffers. What we really want is to ensure that
* next_to_used + cleaned_count produces an aligned value.
*/
cleaned_count -= (ntu + cleaned_count) & 0x7;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
......
......@@ -666,6 +666,7 @@ struct tc_cls_flower_offload {
struct fl_flow_key *mask;
struct fl_flow_key *key;
struct tcf_exts *exts;
u32 classid;
};
enum tc_matchall_command {
......
......@@ -411,6 +411,13 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
return NULL;
}
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
struct Qdisc_class_common *);
......
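A worked example of tc_classid_to_hwtc() above, assuming TC_H_MIN_PRIORITY is 0xffe0 to match the :ffe0 - :ffef range quoted in the cover letter:
/* classid ffff:ffe1 from tc: TC_H_MIN() extracts the minor 0xffe1 */
u32 hwtc = 0xffe1 - 0xffe0;	/* hardware traffic class 1 */
/* valid only while hwtc < netdev_get_num_tc(dev), else -EINVAL */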
......@@ -241,6 +241,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.mask = mask;
cls_flower.key = &f->mkey;
cls_flower.exts = &f->exts;
cls_flower.classid = f->res.classid;
err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
&cls_flower, skip_sw);
......@@ -266,6 +267,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
cls_flower.exts = &f->exts;
cls_flower.classid = f->res.classid;
tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
&cls_flower, false);
......