Commit 072eff2d authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-10-03

This series contains updates to ice and virtchnl.

Yashaswini Raghuram adds a new virtchnl capability flag to support the
exchange of additional supported speeds.

Anirudh adds SR-IOV support to the ice driver.  Added code to
initialize, configure and use mailbox queues for PF and VF
communication.  Updated the VSI and queue management to handle both PF
and VF VSI types.  Added "Adaptive Virtual Function (AVF)" support to
the ice PF driver by implementing virtchnl commands.  Extended the
malicious driver detection logic to cover the VF driver as well.
Fixed the queue region size, which needs to be log base 2 of the number
of queues in the region.
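
For illustration, the region-size fix boils down to programming the
Q_REGION field with log2(queue count) rather than the count itself; a
minimal sketch using the names from the ice_fill_sw_rule() hunk below:

	/* Q_REGION takes log2(number of queues), not the queue count */
	u8 q_rgn = f_info->qgrp_size > 0 ? (u8)ilog2(f_info->qgrp_size) : 0;

	act |= ICE_SINGLE_ACT_TO_Q;
	act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
	       ICE_SINGLE_ACT_Q_INDEX_M;
	act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
	       ICE_SINGLE_ACT_Q_REGION_M;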

Brett fixes an issue that was causing switch rules to be lost by making
a call to ice_update_pkt_fwd_rule() with the necessary changes.  Fixed
how the PF and VF assign the ITR index by adding a struct member
itr_idx, which is used to dynamically program the correct ITR index.
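
A minimal sketch of how the new itr_idx might be programmed into a Tx
queue's interrupt cause register (hedged: the QINT_TQCTL() register
macro and the ring/vector variable names here are illustrative; only
the field macros appear in this series):

	struct ice_ring_container *tx_rc = &q_vector->tx;
	u32 val;

	/* route the queue's interrupt cause to this vector and ITR index */
	val = QINT_TQCTL_CAUSE_ENA_M |
	      (tx_rc->itr_idx << QINT_TQCTL_ITR_INDX_S) |
	      (msix_idx << QINT_TQCTL_MSIX_INDX_S);
	wr32(hw, QINT_TQCTL(tx_ring->reg_idx), val);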

Dave fixed a potential NULL pointer dereference by adding checks in the
filter handling.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents db3408a1 5cc6c8b3
@@ -16,3 +16,4 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
@@ -28,6 +28,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
#include "ice_type.h"
@@ -35,6 +36,8 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -46,6 +49,7 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
@@ -63,6 +67,14 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
#define ICE_MAX_VF_COUNT 256
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -133,9 +145,21 @@ enum ice_state {
__ICE_EMPR_RECV, /* set by OICR handler */
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
* checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
* be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
* __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
__ICE_STATE_NOMINAL_CHECK_BITS,
__ICE_ADMINQ_EVENT_PENDING,
__ICE_MAILBOXQ_EVENT_PENDING,
__ICE_MDD_EVENT_PENDING,
__ICE_VFLR_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
__ICE_VF_DIS,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
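
A minimal sketch of the nominal-state check that the comment above
implies (the helper name is illustrative; it relies only on the
standard linux/bitmap.h helpers):

	static bool ice_is_pf_state_nominal(struct ice_pf *pf)
	{
		DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

		/* only bits before the marker take part in the check */
		bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
		return !bitmap_intersects(pf->state, check_bits,
					  __ICE_STATE_NBITS);
	}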
@@ -181,6 +205,8 @@ struct ice_vsi {
/* Interrupt thresholds */
u16 work_lmt;
s16 vf_id; /* VF ID for SR-IOV VSIs */
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -240,6 +266,8 @@ enum ice_pf_flags {
ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -255,6 +283,12 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
......
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
#define ICE_AQC_CAPS_RSS 0x0040
#define ICE_AQC_CAPS_RXQS 0x0041
@@ -1075,6 +1077,19 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
/**
* Send to PF command (indirect 0x0801) id is only used by PF
*
* Send to VF command (indirect 0x0802) id is only used by PF
*
*/
struct ice_aqc_pf_vf_msg {
__le32 id;
u32 reserved;
__le32 addr_high;
__le32 addr_low;
};
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1332,6 +1347,7 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1429,6 +1445,10 @@ enum ice_adminq_opc {
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
ice_mbx_opc_send_msg_to_vf = 0x0802,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
ice_aqc_opc_set_rss_lut = 0x0B03,
......
@@ -1406,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VF:
if (dev_p) {
dev_p->num_vfs_exposed = number;
ice_debug(hw, ICE_DBG_INIT,
"HW caps: VFs exposed = %d\n",
dev_p->num_vfs_exposed);
} else if (func_p) {
func_p->num_allocd_vfs = number;
func_p->vf_base_id = logical_id;
ice_debug(hw, ICE_DBG_INIT,
"HW caps: VFs allocated = %d\n",
func_p->num_allocd_vfs);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: VF base_id = %d\n",
func_p->vf_base_id);
}
break;
case ICE_AQC_CAPS_VSI:
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
@@ -2265,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
* @rst_src: if called due to reset, specifies the RST source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* Disable LAN Tx queue (0x0C31)
@@ -2272,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txqs *cmd;
@@ -2281,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
-if (!qg_list)
+/* qg_list can be NULL only in VM/VF reset flow */
+if (!qg_list && !rst_src)
return ICE_ERR_PARAM;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
-desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->num_entries = num_qgrps;
cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
ICE_AQC_Q_DIS_TIMEOUT_M);
switch (rst_src) {
case ICE_VM_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
cmd->vmvf_and_timeout |=
cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
/* In this case, FW expects vmvf_num to be absolute VF id */
cmd->vmvf_and_timeout |=
cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
ICE_AQC_Q_DIS_VMVF_NUM_M);
break;
case ICE_NO_RESET:
default:
break;
}
/* If no queue group info, we are in a reset flow. Issue the AQ */
if (!qg_list)
goto do_aq;
/* set RD bit to indicate that command buffer is provided by the driver
* and it needs to be read by the firmware
*/
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
for (i = 0; i < num_qgrps; ++i) {
/* Calculate the size taken up by the queue IDs in this group */
sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -2304,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sz)
return ICE_ERR_PARAM;
do_aq:
return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
@@ -2610,13 +2667,16 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
* @num_queues: number of queues
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
* @rst_src: if called due to reset, specifies the RST source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-u32 *q_teids, struct ice_sq_cd *cd)
+u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
@@ -2625,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
/* if queue is disabled already yet the disable queue command has to be
* sent to complete the VF reset, then call ice_aq_dis_lan_txq without
* any queue information
*/
if (!num_queues && rst_src)
return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
NULL);
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -2637,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
-sizeof(qg_list), cd);
+sizeof(qg_list), rst_src, vmvf_num,
+cd);
if (status)
break;
......
@@ -7,6 +7,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
@@ -89,7 +90,8 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
-u32 *q_teids, struct ice_sq_cd *cmd_details);
+u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+struct ice_sq_cd *cmd_details);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
......
@@ -32,6 +32,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}
/**
* ice_mailbox_init_regs - Initialize Mailbox registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_sq and alloc_rq functions have already been called
*/
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->mailboxq;
/* set head and tail registers in our local struct */
cq->sq.head = PF_MBX_ATQH;
cq->sq.tail = PF_MBX_ATQT;
cq->sq.len = PF_MBX_ATQLEN;
cq->sq.bah = PF_MBX_ATQBAH;
cq->sq.bal = PF_MBX_ATQBAL;
cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
cq->rq.head = PF_MBX_ARQH;
cq->rq.tail = PF_MBX_ARQT;
cq->rq.len = PF_MBX_ARQLEN;
cq->rq.bah = PF_MBX_ARQBAH;
cq->rq.bal = PF_MBX_ARQBAL;
cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
}
/**
* ice_check_sq_alive
* @hw: pointer to the hw struct
@@ -639,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
case ICE_CTL_Q_MAILBOX:
ice_mailbox_init_regs(hw);
cq = &hw->mailboxq;
break;
default:
return ICE_ERR_PARAM;
}
@@ -696,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (ret_code)
return ret_code;
-return ice_init_check_adminq(hw);
+ret_code = ice_init_check_adminq(hw);
if (ret_code)
return ret_code;
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -714,6 +753,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
break;
default:
return;
}
@@ -736,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
......
@@ -8,6 +8,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -28,6 +29,7 @@
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
};
/* Control Queue default settings */
......
@@ -29,6 +29,22 @@
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
#define PF_MBX_ARQBAL 0x0022E380
#define PF_MBX_ARQH 0x0022E500
#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ARQLEN 0x0022E480
#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
#define PF_MBX_ARQT 0x0022E580
#define PF_MBX_ATQBAH 0x0022E180
#define PF_MBX_ATQBAL 0x0022E100
#define PF_MBX_ATQH 0x0022E280
#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN 0x0022E200
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -74,10 +90,16 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
#define PFGEN_STATE 0x00088000
#define PRTGEN_STATUS 0x000B8100
#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4))
#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4))
#define VPGEN_VFRSTAT_VFRD_M BIT(0)
#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
@@ -90,11 +112,23 @@
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
#define GLINT_RATE_INTRL_ENA_M BIT(6)
#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4))
#define GLINT_VECT2FUNC_VF_NUM_S 0
#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0)
#define GLINT_VECT2FUNC_PF_NUM_S 12
#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
#define GLINT_VECT2FUNC_IS_PF_S 16
#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_FW_CTL_ITR_INDX_S 11
#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_MBX_CTL 0x0016B280
#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_MBX_CTL_ITR_INDX_S 11
#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR 0x0016CA00
#define PFINT_OICR_ECC_ERR_M BIT(16)
#define PFINT_OICR_MAL_DETECT_M BIT(19)
@@ -102,6 +136,7 @@
#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
#define PFINT_OICR_HMC_ERR_M BIT(26)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
#define PFINT_OICR_VFLR_M BIT(29)
#define PFINT_OICR_CTL 0x0016CA80
#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_OICR_CTL_ITR_INDX_S 11
@@ -116,6 +151,12 @@
#define QINT_TQCTL_MSIX_INDX_S 0
#define QINT_TQCTL_ITR_INDX_S 11
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
#define VPINT_ALLOC_FIRST_S 0
#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0)
#define VPINT_ALLOC_LAST_S 12
#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_VALID_M BIT(31)
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define QRX_CTRL_MAX_INDEX 2047
@@ -128,6 +169,20 @@
#define QRX_TAIL_MAX_INDEX 2047
#define QRX_TAIL_TAIL_S 0
#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4))
#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0)
#define VPLAN_RX_QBASE_VFNUMQ_S 16
#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4))
#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4))
#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0)
#define VPLAN_TX_QBASE_VFNUMQ_S 16
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -164,6 +219,14 @@
#define PF_MDET_TX_PQM_VALID_M BIT(0)
#define PF_MDET_TX_TCLAN 0x000FC000
#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
#define VP_MDET_RX_VALID_M BIT(0)
#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4))
#define VP_MDET_TX_PQM_VALID_M BIT(0)
#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4))
#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
#define VP_MDET_TX_TDPU_VALID_M BIT(0)
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_M BIT(6)
#define GLNVM_GENS 0x000B6100
@@ -175,6 +238,9 @@
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
#define PF_PCI_CIAA 0x0009E580
#define PF_PCI_CIAA_VF_NUM_S 12
#define PF_PCI_CIAD 0x0009E500
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
@@ -255,5 +321,8 @@
#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#endif /* _ICE_HW_AUTOGEN_H_ */
@@ -418,6 +418,7 @@ struct ice_tlan_ctx {
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
@@ -473,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
return ice_ptype_lkup[ptype];
}
#define ICE_LINK_SPEED_UNKNOWN 0
#define ICE_LINK_SPEED_10MBPS 10
#define ICE_LINK_SPEED_100MBPS 100
#define ICE_LINK_SPEED_1000MBPS 1000
#define ICE_LINK_SPEED_2500MBPS 2500
#define ICE_LINK_SPEED_5000MBPS 5000
#define ICE_LINK_SPEED_10000MBPS 10000
#define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000
#endif /* _ICE_LAN_TX_RX_H_ */
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi);
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
......
@@ -8,7 +8,7 @@
#include "ice.h"
#include "ice_lib.h"
-#define DRV_VERSION "0.7.1-k"
+#define DRV_VERSION "0.7.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
@@ -661,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
}
ice_vc_notify_link_state(pf);
return 0;
}
@@ -711,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qtype = "Mailbox";
break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
@@ -792,6 +802,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_err(&pf->pdev->dev,
"Could not handle link event\n");
break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
@@ -850,6 +863,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
ice_flush(hw);
}
/**
* ice_clean_mailboxq_subtask - clean the MailboxQ rings
* @pf: board private structure
*/
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
return;
if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
return;
clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
if (ice_ctrlq_pending(hw, &hw->mailboxq))
__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
ice_flush(hw);
}
/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
@@ -916,6 +951,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg;
int i;
if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
return;
@@ -1005,6 +1041,51 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
}
/* see if one of the VFs needs to be reset */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
struct ice_vf *vf = &pf->vf[i];
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
}
if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
dev_info(&pf->pdev->dev,
"Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
}
}
/* re-enable MDD interrupt cause */
clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, PFINT_OICR_ENA);
@@ -1038,8 +1119,10 @@ static void ice_service_task(struct work_struct *work)
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1050,6 +1133,8 @@ static void ice_service_task(struct work_struct *work)
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -1064,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
/**
@@ -1197,6 +1286,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
PFINT_OICR_PE_CRITERR_M);
@@ -1220,6 +1310,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
u32 oicr, ena_mask;
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -1228,6 +1319,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
ena_mask &= ~PFINT_OICR_VFLR_M;
set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
@@ -1406,6 +1501,11 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
/* This enables Mailbox queue Interrupt causes */
val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
itr_gran = hw->itr_gran;
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
@@ -1775,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
struct ice_hw *hw = &pf->hw;
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
@@ -2138,6 +2247,8 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
ice_free_vfs(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -2173,6 +2284,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
.sriov_configure = ice_sriov_configure,
};
/**
@@ -2908,7 +3020,7 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
-tx_err = ice_vsi_stop_tx_rings(vsi);
+tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
@@ -3102,13 +3214,14 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
set_bit(__ICE_NEEDS_RESTART, vsi->state);
-if (vsi->netdev && netif_running(vsi->netdev) &&
-vsi->type == ICE_VSI_PF) {
+if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+if (netif_running(vsi->netdev)) {
rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
rtnl_unlock();
} else {
ice_vsi_close(vsi);
}
}
}
@@ -3120,12 +3233,16 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
{
int err = 0;
-if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
-if (vsi->netdev && netif_running(vsi->netdev)) {
+if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
+vsi->netdev) {
if (netif_running(vsi->netdev)) {
rtnl_lock();
err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
rtnl_unlock();
} else {
err = ice_vsi_open(vsi);
}
}
return err;
}
@@ -3174,6 +3291,10 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
/* VF VSI rebuild isn't supported yet */
if (pf->vsi[i]->type == ICE_VSI_VF)
continue;
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3310,6 +3431,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
ice_reset_all_vfs(pf, true);
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
@@ -3818,6 +3940,12 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
.ndo_set_vf_trust = ice_set_vf_trust,
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
#include "ice_adminq_cmd.h"
#include "ice_sriov.h"
/**
* ice_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF ID to send msg
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cd: pointer to command details
*
* Send message to VF driver (0x0802) using mailbox
* queue and asynchronously sending message via
* ice_sq_send_cmd() function
*/
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
struct ice_aqc_pf_vf_msg *cmd;
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
cmd = &desc.params.virt;
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
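
An illustrative call, assuming the virtchnl event types from
linux/avf/virtchnl.h (this exact call site is not part of the hunk
above; it lives in the PF virtchnl code):

	struct virtchnl_pf_event pfe = { 0 };

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	/* v_retval of 0 reports success back to the VF */
	ice_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_EVENT, 0,
			      (u8 *)&pfe, sizeof(pfe), NULL);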
/**
* ice_conv_link_speed_to_virtchnl
* @adv_link_support: determines the format of the returned link speed
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
* If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
* expect the speed in Mbps.
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
if (adv_link_support)
switch (link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
speed = ICE_LINK_SPEED_10MBPS;
break;
case ICE_AQ_LINK_SPEED_100MB:
speed = ICE_LINK_SPEED_100MBPS;
break;
case ICE_AQ_LINK_SPEED_1000MB:
speed = ICE_LINK_SPEED_1000MBPS;
break;
case ICE_AQ_LINK_SPEED_2500MB:
speed = ICE_LINK_SPEED_2500MBPS;
break;
case ICE_AQ_LINK_SPEED_5GB:
speed = ICE_LINK_SPEED_5000MBPS;
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = ICE_LINK_SPEED_10000MBPS;
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = ICE_LINK_SPEED_20000MBPS;
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = ICE_LINK_SPEED_25000MBPS;
break;
case ICE_AQ_LINK_SPEED_40GB:
speed = ICE_LINK_SPEED_40000MBPS;
break;
default:
speed = ICE_LINK_SPEED_UNKNOWN;
break;
}
else
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
switch (link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
case ICE_AQ_LINK_SPEED_100MB:
speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
break;
case ICE_AQ_LINK_SPEED_1000MB:
case ICE_AQ_LINK_SPEED_2500MB:
case ICE_AQ_LINK_SPEED_5GB:
speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
/* fall through */
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
default:
speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
break;
}
return speed;
}
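
Per the kernel-doc above, a caller that passes adv_link_support = false
must cast the result back to the virtchnl enum; for example (pfe and pi
are illustrative names here):

	u16 ls = pi->phy.link_info.link_speed;

	/* legacy AVF driver: resolve to the closest virtchnl speed */
	pfe.event_data.link_event.link_speed = (enum virtchnl_link_speed)
		ice_conv_link_speed_to_virtchnl(false, ls);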
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
#include "ice_common.h"
#ifdef CONFIG_PCI_IOV
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
#else /* CONFIG_PCI_IOV */
static inline enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
u16 __always_unused vfid, u32 __always_unused v_opcode,
u32 __always_unused v_retval, u8 __always_unused *msg,
u16 __always_unused msglen,
struct ice_sq_cd __always_unused *cd)
{
return 0;
}
static inline u32
ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
u16 __always_unused link_speed)
{
return 0;
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
@@ -6,6 +6,9 @@
/* Error Codes */
enum ice_status {
ICE_SUCCESS = 0,
/* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
......
@@ -187,6 +187,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
if (!vsi_ctx->alloc_from_pool)
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
ICE_AQ_VSI_IS_VALID);
cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
@@ -655,6 +656,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 *eth_hdr;
u32 act = 0;
__be16 *off;
u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
s_rule->pdata.lkup_tx_rx.act = 0;
@@ -693,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
ICE_SINGLE_ACT_Q_INDEX_M;
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_QGRP:
q_rgn = f_info->qgrp_size > 0 ?
(u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
-act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
ICE_SINGLE_ACT_Q_INDEX_M;
act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
ICE_SINGLE_ACT_Q_REGION_M;
break;
-case ICE_DROP_PACKET:
-act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
-break;
default:
return;
}
@@ -1415,8 +1422,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
fm_list->vsi_count--;
clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
-if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
-(fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
struct ice_vsi_list_map_info *vsi_list_info =
fm_list->vsi_list_info;
u16 rem_vsi_handle;
@@ -1425,6 +1432,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
return ICE_ERR_OUT_OF_RANGE;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
vsi_list_id, true,
ice_aqc_opc_update_sw_rules,
@@ -1432,16 +1441,34 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
if (status)
return status;
tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
tmp_fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, rem_vsi_handle);
tmp_fltr_info.vsi_handle = rem_vsi_handle;
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
if (status) {
ice_debug(hw, ICE_DBG_SW,
"Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
tmp_fltr_info.fwd_id.hw_vsi_id, status);
return status;
}
fm_list->fltr_info = tmp_fltr_info;
}
if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
(fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
struct ice_vsi_list_map_info *vsi_list_info =
fm_list->vsi_list_info;
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
-if (status)
+if (status) {
ice_debug(hw, ICE_DBG_SW,
"Failed to remove VSI list %d, error %d\n",
vsi_list_id, status);
return status;
}
-/* Change the list entry action from VSI_LIST to VSI */
-fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
-fm_list->fltr_info.fwd_id.hw_vsi_id =
-ice_get_hw_vsi_num(hw, rem_vsi_handle);
-fm_list->fltr_info.vsi_handle = rem_vsi_handle;
list_del(&vsi_list_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
@@ -1983,12 +2010,12 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
-struct ice_fltr_list_entry *list_itr;
+struct ice_fltr_list_entry *list_itr, *tmp;
if (!m_list)
return ICE_ERR_PARAM;
-list_for_each_entry(list_itr, m_list, list_entry) {
+list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_MAC)
@@ -2010,12 +2037,12 @@ ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
-struct ice_fltr_list_entry *v_list_itr;
+struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
return ICE_ERR_PARAM;
-list_for_each_entry(v_list_itr, v_list, list_entry) {
+list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
@@ -2115,7 +2142,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
-if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
+if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
@@ -2232,7 +2259,8 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
goto end;
continue;
}
-if (!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+if (!itr->vsi_list_info ||
+!test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
continue;
/* Clearing it so that the logic can add it back */
clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
......
@@ -19,6 +19,7 @@ struct ice_vsi_ctx {
struct ice_aqc_vsi_props info;
struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
u8 vf_num;
};
enum ice_sw_fwd_act_type {
......
@@ -105,8 +105,9 @@ enum ice_rx_dtype {
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define ICE_ITR_8K 125
-#define ICE_DFLT_TX_ITR ICE_ITR_8K
-#define ICE_DFLT_RX_ITR ICE_ITR_8K
+#define ICE_ITR_20K 50
+#define ICE_DFLT_TX_ITR ICE_ITR_20K
+#define ICE_DFLT_RX_ITR ICE_ITR_20K
/* apply ITR granularity translation to program the register. itr_gran is either /* apply ITR granularity translation to program the register. itr_gran is either
* 2 or 4 usecs so we need to divide by 2 first then shift by that value * 2 or 4 usecs so we need to divide by 2 first then shift by that value
*/ */
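The translation that comment describes can be made concrete. The real macro lives outside this hunk, so DEMO_ITR_TO_REG below is a hypothetical illustration of the stated rule only:

/* Hypothetical macro mirroring the comment above: divide itr_gran
 * (2 or 4 usecs) by 2, then shift the usec setting right by that.
 */
#define DEMO_ITR_TO_REG(usecs, itr_gran) ((usecs) >> ((itr_gran) / 2))

/* With the new 20K default of 50 usecs:
 * DEMO_ITR_TO_REG(50, 2) == 50 >> 1 == 25 register units of 2 usecs
 * DEMO_ITR_TO_REG(50, 4) == 50 >> 2 == 12 register units of 4 usecs
 */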
@@ -135,13 +136,6 @@ struct ice_ring {
 	u16 q_index; /* Queue number of ring */
 	u32 txq_teid; /* Added Tx queue TEID */
-	/* high bit set means dynamic, use accessor routines to read/write.
-	 * hardware supports 4us/2us resolution for the ITR registers.
-	 * these values always store the USER setting, and must be converted
-	 * before programming to a register.
-	 */
-	u16 itr_setting;
 	u16 count; /* Number of descriptors */
 	u16 reg_idx; /* HW register index of the ring */
@@ -178,6 +172,7 @@ struct ice_ring_container {
 	unsigned int total_bytes; /* total bytes processed this int */
 	unsigned int total_pkts; /* total packets processed this int */
 	enum ice_latency_range latency_range;
+	int itr_idx; /* index in the interrupt vector */
 	u16 itr;
 };
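The new itr_idx field lets each ring container name which of its vector's ITR indices it programs; per the commit message, the PF and VF previously assumed a fixed index. A hedged sketch of the assignment, assuming the driver's q_vector carries one rx and one tx container and reusing the ICE_RX_ITR/ICE_TX_ITR indices defined earlier in this file:

/* Sketch: point Rx and Tx containers at distinct ITR indices so their
 * moderation settings can be programmed independently per vector.
 */
static void demo_set_itr_indices(struct ice_q_vector *q_vector)
{
	q_vector->rx.itr_idx = ICE_RX_ITR;	/* ITR0 */
	q_vector->tx.itr_idx = ICE_TX_ITR;	/* ITR1 */
}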
...
@@ -84,6 +84,7 @@ enum ice_media_type {
 enum ice_vsi_type {
 	ICE_VSI_PF = 0,
+	ICE_VSI_VF,
 };
 struct ice_link_status {
@@ -103,6 +104,15 @@ struct ice_link_status {
 	u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
 };
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the TX scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+	ICE_NO_RESET = 0,
+	ICE_VM_RESET,
+	ICE_VF_RESET,
+};
 /* PHY info such as phy_type, etc... */
 struct ice_phy_info {
 	struct ice_link_status link_info;
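The reset source travels down to the disable-queues AQ call so firmware knows why the queues are being torn down. A hedged sketch of how a caller might tag the request; demo_dis_vsi_txq is a hypothetical stand-in, since the real helper and its signature are outside this hunk:

/* Hypothetical stand-in for the driver's disable-queues helper. */
int demo_dis_vsi_txq(struct ice_hw *hw, enum ice_disq_rst_src rst_src,
		     u16 vf_id);

/* A VF reset tags the AQ call with ICE_VF_RESET and the VF id so the
 * TX scheduler can be cleaned for that VF; an ordinary teardown would
 * pass ICE_NO_RESET.
 */
static int demo_stop_queues_for_vf_reset(struct ice_hw *hw, u16 vf_id)
{
	return demo_dis_vsi_txq(hw, ICE_VF_RESET, vf_id);
}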
@@ -127,6 +137,9 @@ struct ice_hw_common_caps {
 	/* Max MTU for function or device */
 	u16 max_mtu;
+	/* Virtualization support */
+	u8 sr_iov_1_1; /* SR-IOV enabled */
 	/* RSS related capabilities */
 	u16 rss_table_size; /* 512 for PFs and 64 for VFs */
 	u8 rss_table_entry_width; /* RSS Entry width in bits */
@@ -135,12 +148,15 @@ struct ice_hw_common_caps {
 /* Function specific capabilities */
 struct ice_hw_func_caps {
 	struct ice_hw_common_caps common_cap;
+	u32 num_allocd_vfs; /* Number of allocated VFs */
+	u32 vf_base_id; /* Logical ID of the first VF */
 	u32 guaranteed_num_vsi;
 };
 /* Device wide capabilities */
 struct ice_hw_dev_caps {
 	struct ice_hw_common_caps common_cap;
+	u32 num_vfs_exposed; /* Total number of VFs exposed */
 	u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
 };
@@ -321,6 +337,7 @@ struct ice_hw {
 	/* Control Queue info */
 	struct ice_ctl_q_info adminq;
+	struct ice_ctl_q_info mailboxq;
 	u8 api_branch; /* API branch version */
 	u8 api_maj_ver; /* API major version */
@@ -426,4 +443,7 @@ struct ice_hw_port_stats {
 #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
 #define ICE_SR_WORDS_IN_1KB 512
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
 #endif /* _ICE_TYPE_H_ */
...
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_VIRTCHNL_PF_H_
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
#define ICE_MAX_VLANID 4095
#define ICE_VLAN_PRIORITY_S 12
#define ICE_VLAN_M 0xFFF
#define ICE_PRIORITY_M 0x7000
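A worked example of the masks above: the 16-bit tag carries the priority in bits 14:12 (ICE_PRIORITY_M, shifted by ICE_VLAN_PRIORITY_S) and the VLAN ID in bits 11:0 (ICE_VLAN_M). The helper below is illustrative only, not driver code:

/* Illustrative only: compose a port VLAN tag from a VLAN ID and a
 * 3-bit QoS/priority value.
 */
static u16 demo_make_vlantag(u16 vid, u8 qos)
{
	return ((u16)qos << ICE_VLAN_PRIORITY_S) | (vid & ICE_VLAN_M);
}

/* demo_make_vlantag(100, 3) == (3 << 12) | 0x064 == 0x3064 */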
/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
#define ICE_MAX_MACADDR_PER_VF 12
/* Malicious Driver Detection */
#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_M 0x20
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0,
ICE_VF_STATE_ACTIVE,
ICE_VF_STATE_ENA,
ICE_VF_STATE_DIS,
ICE_VF_STATE_MC_PROMISC,
ICE_VF_STATE_UC_PROMISC,
/* state to indicate that the PF needs to do vector assignment for the
 * VF. This is set during first-time VF initialization, or later when
 * the VF asks for more vectors through a virtchnl op.
 */
ICE_VF_STATE_CFG_INTR,
ICE_VF_STATES_NBITS
};
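Because vf_states is declared as a bitmap in struct ice_vf below, state transitions are individual atomic bit operations rather than a single enum assignment. A minimal sketch, assuming a valid struct ice_vf pointer:

#include <linux/bitops.h>

/* Sketch: once the PF has finished assigning vectors it clears the
 * CFG_INTR request bit and marks the VF active.
 */
static void demo_vf_intr_configured(struct ice_vf *vf)
{
	clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
}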
/* VF capabilities */
enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_L2 = 0,
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
s16 vf_id; /* VF id in the PF space */
u32 driver_caps; /* reported by VF driver */
int first_vector_idx; /* first vector index of this VF */
struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
u8 pf_set_mac; /* VF MAC address set by VMM admin */
u8 trusted;
u16 lan_vsi_idx; /* index into PF struct */
u16 lan_vsi_num; /* ID as used by firmware */
u64 num_mdd_events; /* number of mdd events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
u8 link_forced;
u8 link_up; /* only valid if VF link is forced */
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
u8 num_req_qs; /* num of queue pairs requested by VF */
};
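The num_mdd_events and num_inval_msgs counters pair with the ICE_DFLT_NUM_*_ALLOWED budgets defined above. A hedged sketch of the kind of check the malicious driver detection logic implies; the helper name is hypothetical:

/* Hypothetical check: a VF exceeding either default budget is a
 * candidate for being disabled by the PF.
 */
static bool demo_vf_over_budget(struct ice_vf *vf)
{
	return vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED ||
	       vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED;
}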
#ifdef CONFIG_PCI_IOV
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
u16 vlan_id, u8 qos, __be16 vlan_proto);
int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
#define ice_vc_process_vf_msg(pf, event) do {} while (0)
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
bool __always_unused is_vflr)
{
return true;
}
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs)
{
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_mac(struct net_device __always_unused *netdev,
int __always_unused vf_id, u8 __always_unused *mac)
{
return -EOPNOTSUPP;
}
static inline int
ice_get_vf_cfg(struct net_device __always_unused *netdev,
int __always_unused vf_id,
struct ifla_vf_info __always_unused *ivi)
{
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_trust(struct net_device __always_unused *netdev,
int __always_unused vf_id, bool __always_unused trusted)
{
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
int __always_unused vf_id, u16 __always_unused vid,
u8 __always_unused qos, __be16 __always_unused v_proto)
{
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
int __always_unused vf_id, bool __always_unused ena)
{
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_link_state(struct net_device __always_unused *netdev,
int __always_unused vf_id, int __always_unused link_state)
{
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_bw(struct net_device __always_unused *netdev,
int __always_unused vf_id, int __always_unused min_tx_rate,
int __always_unused max_tx_rate)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
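The stubs above follow the usual compile-out idiom: with CONFIG_PCI_IOV unset they return -EOPNOTSUPP, so callers need no #ifdefs of their own. A hedged sketch of how handlers like these are typically exposed through net_device_ops; the actual wiring lives elsewhere in the driver and is not part of this header:

#include <linux/netdevice.h>

/* Sketch: mapping the declarations above onto the standard SR-IOV ndo
 * hooks; demo_netdev_ops is illustrative, not the driver's real table.
 */
static const struct net_device_ops demo_netdev_ops = {
	.ndo_set_vf_mac		= ice_set_vf_mac,
	.ndo_get_vf_config	= ice_get_vf_cfg,
	.ndo_set_vf_trust	= ice_set_vf_trust,
	.ndo_set_vf_vlan	= ice_set_vf_port_vlan,
	.ndo_set_vf_rate	= ice_set_vf_bw,
	.ndo_set_vf_link_state	= ice_set_vf_link_state,
	.ndo_set_vf_spoofchk	= ice_set_vf_spoofchk,
};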
@@ -252,6 +252,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
 #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
 			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -596,10 +598,23 @@ enum virtchnl_event_codes {
 struct virtchnl_pf_event {
 	enum virtchnl_event_codes event;
 	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities then use link_event, else use link_event_adv
+		 * to get the speed and link information. The ability to
+		 * understand new speeds is indicated by setting the
+		 * capability flag VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the
+		 * vf_cap_flags field of struct virtchnl_vf_resource, which
+		 * can be used to determine which link event struct to use
+		 * below.
+		 */
 		struct {
 			enum virtchnl_link_speed link_speed;
 			bool link_status;
 		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+		} link_event_adv;
 	} event_data;
 	int severity;
...
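Per the comment in the union above, a VF driver should check VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags before reading link_event_adv. A minimal sketch of that selection; the helper is hypothetical and the legacy enum-to-Mbps mapping is omitted:

#include <linux/avf/virtchnl.h>

static u32 demo_link_speed_mbps(struct virtchnl_vf_resource *res,
				struct virtchnl_pf_event *ev)
{
	/* New-style event: speed is already reported in Mbps */
	if (res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return ev->event_data.link_event_adv.link_speed;

	/* Legacy event: enum virtchnl_link_speed; translating it to
	 * Mbps is driver-specific and omitted here.
	 */
	return (u32)ev->event_data.link_event.link_speed;
}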