Commit 632d1a48 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: some code optimizations & cleanups & bugfixes

This patch-set includes code optimizations, cleanups and bugfixes for
the HNS3 ethernet controller driver.

[patch 1/12] logs more detailed error info for ROCE RAS errors.

[patch 2/12] fixes a wrong size used for mailbox response data.

[patch 3/12] makes HW GRO handling consistent with the SW one.

[patch 4/12] refactors hns3_get_new_int_gl.

[patch 5/12] adds handling for VF's over_8bd_nfe_err.

[patch 6/12 - 12/12] add some code optimizations and cleanups to make
the code more readable and compliant with some static code analysis
tools; these modifications do not change the logic of the code.

Change log:
V1->V2: addresses review comments from David Miller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 900d96e4 9b2f3477
...@@ -69,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode { ...@@ -69,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
}; };
#define HCLGE_MBX_MAX_MSG_SIZE 16 #define HCLGE_MBX_MAX_MSG_SIZE 16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
......
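For the patch 2/12 size fix above, here is a minimal hedged sketch (every name except the two macros is invented for illustration) of the constraint the macro change indicates: the response payload that fits in a mailbox message is smaller than the full HCLGE_MBX_MAX_MSG_SIZE message, so copies of response data back to the VF have to be bounded by HCLGE_MBX_MAX_RESP_DATA_SIZE rather than by the message size.

#include <linux/kernel.h>
#include <linux/string.h>

/* Hypothetical helper, not the driver's actual code: clamp the copy to
 * the response-data region so a caller-supplied length can never read
 * past the HCLGE_MBX_MAX_RESP_DATA_SIZE bytes reserved for response
 * payload in a mailbox message.
 */
static void sketch_copy_mbx_resp(u8 *resp_data, u16 resp_len,
				 const u8 *payload, u16 payload_len)
{
	u16 n = min3(resp_len, payload_len,
		     (u16)HCLGE_MBX_MAX_RESP_DATA_SIZE);

	memcpy(resp_data, payload, n);
}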
...@@ -16,14 +16,10 @@ static LIST_HEAD(hnae3_ae_dev_list); ...@@ -16,14 +16,10 @@ static LIST_HEAD(hnae3_ae_dev_list);
*/ */
static DEFINE_MUTEX(hnae3_common_lock); static DEFINE_MUTEX(hnae3_common_lock);
static bool hnae3_client_match(enum hnae3_client_type client_type, static bool hnae3_client_match(enum hnae3_client_type client_type)
enum hnae3_dev_type dev_type)
{ {
if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC || if (client_type == HNAE3_CLIENT_KNIC ||
client_type == HNAE3_CLIENT_ROCE)) client_type == HNAE3_CLIENT_ROCE)
return true;
if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC)
return true; return true;
return false; return false;
...@@ -39,9 +35,6 @@ void hnae3_set_client_init_flag(struct hnae3_client *client, ...@@ -39,9 +35,6 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
case HNAE3_CLIENT_KNIC: case HNAE3_CLIENT_KNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break; break;
case HNAE3_CLIENT_UNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE: case HNAE3_CLIENT_ROCE:
hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break; break;
...@@ -61,10 +54,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, ...@@ -61,10 +54,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
inited = hnae3_get_bit(ae_dev->flag, inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B); HNAE3_KNIC_CLIENT_INITED_B);
break; break;
case HNAE3_CLIENT_UNIC:
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE: case HNAE3_CLIENT_ROCE:
inited = hnae3_get_bit(ae_dev->flag, inited = hnae3_get_bit(ae_dev->flag,
HNAE3_ROCE_CLIENT_INITED_B); HNAE3_ROCE_CLIENT_INITED_B);
...@@ -82,7 +71,7 @@ static int hnae3_init_client_instance(struct hnae3_client *client, ...@@ -82,7 +71,7 @@ static int hnae3_init_client_instance(struct hnae3_client *client,
int ret; int ret;
/* check if this client matches the type of ae_dev */ /* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) && if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0; return 0;
} }
...@@ -99,7 +88,7 @@ static void hnae3_uninit_client_instance(struct hnae3_client *client, ...@@ -99,7 +88,7 @@ static void hnae3_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev) struct hnae3_ae_dev *ae_dev)
{ {
/* check if this client matches the type of ae_dev */ /* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) && if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
return; return;
......
...@@ -102,15 +102,9 @@ enum hnae3_loop { ...@@ -102,15 +102,9 @@ enum hnae3_loop {
enum hnae3_client_type { enum hnae3_client_type {
HNAE3_CLIENT_KNIC, HNAE3_CLIENT_KNIC,
HNAE3_CLIENT_UNIC,
HNAE3_CLIENT_ROCE, HNAE3_CLIENT_ROCE,
}; };
enum hnae3_dev_type {
HNAE3_DEV_KNIC,
HNAE3_DEV_UNIC,
};
/* mac media type */ /* mac media type */
enum hnae3_media_type { enum hnae3_media_type {
HNAE3_MEDIA_TYPE_UNKNOWN, HNAE3_MEDIA_TYPE_UNKNOWN,
...@@ -220,7 +214,6 @@ struct hnae3_ae_dev { ...@@ -220,7 +214,6 @@ struct hnae3_ae_dev {
struct list_head node; struct list_head node;
u32 flag; u32 flag;
u8 override_pci_need_reset; /* fix to stop multiple reset happening */ u8 override_pci_need_reset; /* fix to stop multiple reset happening */
enum hnae3_dev_type dev_type;
enum hnae3_reset_type reset_type; enum hnae3_reset_type reset_type;
void *priv; void *priv;
}; };
......
...@@ -4,8 +4,7 @@ ...@@ -4,8 +4,7 @@
#include "hnae3.h" #include "hnae3.h"
#include "hns3_enet.h" #include "hns3_enet.h"
static static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) ...@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) ...@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) ...@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
......
...@@ -60,6 +60,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { ...@@ -60,6 +60,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128
#define HNS3_NIC_LB_SETUP_USEC 10000
/* Nic loopback test err */ /* Nic loopback test err */
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
...@@ -117,7 +118,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) ...@@ -117,7 +118,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret; return ret;
ret = hns3_lp_setup(ndev, loop_mode, true); ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000); usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return ret; return ret;
} }
...@@ -132,7 +133,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) ...@@ -132,7 +133,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret; return ret;
} }
usleep_range(10000, 20000); usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return 0; return 0;
} }
...@@ -149,6 +150,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) ...@@ -149,6 +150,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
/* The dst mac addr of the loopback packet is the same as the host's
 * mac addr; the SSU component may loop the packet back to the host
 * before it reaches the mac or serdes, which would defeat the
 * purpose of the mac or serdes selftest.
 */
ethh->h_dest[5] += 0x1f; ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source); eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP); ethh->h_proto = htons(ETH_P_ARP);
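A short hedged illustration of the trick the new comment explains (the helper name is invented and the driver headers are assumed to be included): the loopback frame's destination MAC is deliberately made different from the port's own MAC so the SSU cannot short-circuit the frame back to the host before it exercises the MAC or serdes path under test.

/* Illustration only: derive a dest MAC that differs from the host MAC
 * in its last byte, exactly as the selftest skb setup above does.
 */
static void sketch_lb_dest_mac(u8 *dst, const u8 *host_mac)
{
	memcpy(dst, host_mac, ETH_ALEN);
	dst[ETH_ALEN - 1] += 0x1f;	/* any non-zero delta works */
}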
...@@ -435,7 +442,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ...@@ -435,7 +442,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) { switch (stringset) {
case ETH_SS_STATS: case ETH_SS_STATS:
buff = hns3_get_strings_tqps(h, buff); buff = hns3_get_strings_tqps(h, buff);
h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); ops->get_strings(h, stringset, (u8 *)buff);
break; break;
case ETH_SS_TEST: case ETH_SS_TEST:
ops->get_strings(h, stringset, data); ops->get_strings(h, stringset, data);
...@@ -510,6 +517,11 @@ static void hns3_get_drvinfo(struct net_device *netdev, ...@@ -510,6 +517,11 @@ static void hns3_get_drvinfo(struct net_device *netdev,
struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle; struct hnae3_handle *h = priv->ae_handle;
if (!h->ae_algo->ops->get_fw_version) {
netdev_err(netdev, "could not get fw version!\n");
return;
}
strncpy(drvinfo->version, hns3_driver_version, strncpy(drvinfo->version, hns3_driver_version,
sizeof(drvinfo->version)); sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
...@@ -530,7 +542,7 @@ static u32 hns3_get_link(struct net_device *netdev) ...@@ -530,7 +542,7 @@ static u32 hns3_get_link(struct net_device *netdev)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) if (h->ae_algo->ops->get_status)
return h->ae_algo->ops->get_status(h); return h->ae_algo->ops->get_status(h);
else else
return 0; return 0;
...@@ -560,7 +572,7 @@ static void hns3_get_pauseparam(struct net_device *netdev, ...@@ -560,7 +572,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) if (h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg, h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
&param->rx_pause, &param->tx_pause); &param->rx_pause, &param->tx_pause);
} }
...@@ -610,9 +622,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev, ...@@ -610,9 +622,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
u8 media_type; u8 media_type;
u8 link_stat; u8 link_stat;
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
ops = h->ae_algo->ops; ops = h->ae_algo->ops;
if (ops->get_media_type) if (ops->get_media_type)
ops->get_media_type(h, &media_type, &module_type); ops->get_media_type(h, &media_type, &module_type);
...@@ -740,8 +749,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev) ...@@ -740,8 +749,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || if (!h->ae_algo->ops->get_rss_key_size)
!h->ae_algo->ops->get_rss_key_size)
return 0; return 0;
return h->ae_algo->ops->get_rss_key_size(h); return h->ae_algo->ops->get_rss_key_size(h);
...@@ -751,8 +759,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev) ...@@ -751,8 +759,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || if (!h->ae_algo->ops->get_rss_indir_size)
!h->ae_algo->ops->get_rss_indir_size)
return 0; return 0;
return h->ae_algo->ops->get_rss_indir_size(h); return h->ae_algo->ops->get_rss_indir_size(h);
...@@ -763,7 +770,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, ...@@ -763,7 +770,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return h->ae_algo->ops->get_rss(h, indir, key, hfunc); return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
...@@ -774,7 +781,7 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, ...@@ -774,7 +781,7 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if ((h->pdev->revision == 0x20 && if ((h->pdev->revision == 0x20 &&
...@@ -799,9 +806,6 @@ static int hns3_get_rxnfc(struct net_device *netdev, ...@@ -799,9 +806,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) { switch (cmd->cmd) {
case ETHTOOL_GRXRINGS: case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps; cmd->data = h->kinfo.num_tqps;
...@@ -915,9 +919,6 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) ...@@ -915,9 +919,6 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) { switch (cmd->cmd) {
case ETHTOOL_SRXFH: case ETHTOOL_SRXFH:
if (h->ae_algo->ops->set_rss_tuple) if (h->ae_algo->ops->set_rss_tuple)
...@@ -1193,7 +1194,7 @@ static int hns3_set_phys_id(struct net_device *netdev, ...@@ -1193,7 +1194,7 @@ static int hns3_set_phys_id(struct net_device *netdev,
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id) if (!h->ae_algo->ops->set_led_id)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return h->ae_algo->ops->set_led_id(h, state); return h->ae_algo->ops->set_led_id(h, state);
......
...@@ -180,6 +180,9 @@ enum hclge_opcode_type { ...@@ -180,6 +180,9 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
/* PPU commands */
HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
/* TSO command */ /* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10, HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
...@@ -268,6 +271,8 @@ enum hclge_opcode_type { ...@@ -268,6 +271,8 @@ enum hclge_opcode_type {
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806, HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
...@@ -644,6 +649,11 @@ enum hclge_mac_vlan_tbl_opcode { ...@@ -644,6 +649,11 @@ enum hclge_mac_vlan_tbl_opcode {
HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */
}; };
enum hclge_mac_vlan_add_resp_code {
HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
};
#define HCLGE_MAC_VLAN_BIT0_EN_B 0 #define HCLGE_MAC_VLAN_BIT0_EN_B 0
#define HCLGE_MAC_VLAN_BIT1_EN_B 1 #define HCLGE_MAC_VLAN_BIT1_EN_B 1
#define HCLGE_MAC_EPORT_SW_EN_B 12 #define HCLGE_MAC_EPORT_SW_EN_B 12
...@@ -977,6 +987,20 @@ struct hclge_get_m7_bd_cmd { ...@@ -977,6 +987,20 @@ struct hclge_get_m7_bd_cmd {
u8 rsv[20]; u8 rsv[20];
}; };
struct hclge_query_ppu_pf_other_int_dfx_cmd {
__le16 over_8bd_no_fe_qid;
__le16 over_8bd_no_fe_vf_id;
__le16 tso_mss_cmp_min_err_qid;
__le16 tso_mss_cmp_min_err_vf_id;
__le16 tso_mss_cmp_max_err_qid;
__le16 tso_mss_cmp_max_err_vf_id;
__le16 tx_rd_fbd_poison_qid;
__le16 tx_rd_fbd_poison_vf_id;
__le16 rx_rd_fbd_poison_qid;
__le16 rx_rd_fbd_poison_vf_id;
u8 rsv[4];
};
int hclge_cmd_init(struct hclge_dev *hdev); int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{ {
......
...@@ -64,6 +64,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, ...@@ -64,6 +64,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
char *cmd_buf, int msg_num, int offset, char *cmd_buf, int msg_num, int offset,
enum hclge_opcode_type cmd) enum hclge_opcode_type cmd)
{ {
#define BD_DATA_NUM 6
struct hclge_desc *desc_src; struct hclge_desc *desc_src;
struct hclge_desc *desc; struct hclge_desc *desc;
int bd_num, buf_len; int bd_num, buf_len;
...@@ -92,14 +94,16 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, ...@@ -92,14 +94,16 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return; return;
} }
max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num; max = (bd_num * BD_DATA_NUM) <= msg_num ?
(bd_num * BD_DATA_NUM) : msg_num;
desc = desc_src; desc = desc_src;
for (i = 0; i < max; i++) { for (i = 0; i < max; i++) {
(((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc; ((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc;
if (dfx_message->flag) if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n", dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message, desc->data[i % 6]); dfx_message->message,
desc->data[i % BD_DATA_NUM]);
dfx_message++; dfx_message++;
} }
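As a reading aid for the loop above, a hedged sketch of the same indexing (it assumes BD_DATA_NUM and struct hclge_desc, with six 32-bit data words per descriptor, are in scope): each buffer descriptor carries BD_DATA_NUM data words, so register word i lives in descriptor i / BD_DATA_NUM at word i % BD_DATA_NUM, which is exactly the mapping the corrected pointer-advance expression walks.

/* Illustrative only: index a flat array of descriptors instead of
 * advancing a cursor, to make the i / BD_DATA_NUM, i % BD_DATA_NUM
 * mapping explicit.
 */
static u32 sketch_dfx_reg_word(const struct hclge_desc *desc_src, int i)
{
	return le32_to_cpu(desc_src[i / BD_DATA_NUM].data[i % BD_DATA_NUM]);
}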
...@@ -395,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -395,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -403,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -403,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -412,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -412,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id); bp_to_qs_map_cmd->tc_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id); bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map); bp_to_qs_map_cmd->qs_bit_map);
...@@ -473,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -473,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n", dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id); nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT; cmd = HCLGE_OPC_TM_PG_WEIGHT;
......
...@@ -673,19 +673,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, ...@@ -673,19 +673,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
enum hclge_err_int_type int_type) enum hclge_err_int_type int_type)
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
int num = 1; int desc_num = 1;
int ret; int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true); hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) { if (flag) {
desc[0].flag |= cpu_to_le16(flag); desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true); hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
num = 2; desc_num = 2;
} }
if (w_num) if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type); desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret) if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret); dev_err(dev, "query error cmd failed (%d)\n", ret);
...@@ -941,7 +941,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -941,7 +941,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2]; struct hclge_desc desc[2];
int num = 1; int desc_num = 1;
int ret; int ret;
/* configure PPU error interrupts */ /* configure PPU error interrupts */
...@@ -960,7 +960,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -960,7 +960,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
num = 2; desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false); hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en) if (en)
...@@ -978,7 +978,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -978,7 +978,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
return -EINVAL; return -EINVAL;
} }
ret = hclge_cmd_send(&hdev->hw, &desc[0], num); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret; return ret;
} }
...@@ -1388,6 +1388,66 @@ static int hclge_handle_all_ras_errors(struct hclge_dev *hdev) ...@@ -1388,6 +1388,66 @@ static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
return ret; return ret;
} }
static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[3];
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret);
return ret;
}
dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0;
}
static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int ret;
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
HCLGE_CMD_FLAG_NEXT, 0, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
}
dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
}
static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
...@@ -1395,8 +1455,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) ...@@ -1395,8 +1455,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
int ret; int ret;
/* read overflow error status */ /* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0); 0, 0, 0);
if (ret) { if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
...@@ -1456,19 +1515,27 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) ...@@ -1456,19 +1515,27 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
status = le32_to_cpu(desc[0].data[0]); status = le32_to_cpu(desc[0].data[0]);
if (status & HCLGE_ROCEE_RERR_INT_MASK) { if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
dev_warn(dev, "ROCEE RAS AXI rresp error\n"); dev_warn(dev, "ROCEE RAS AXI rresp error\n");
reset_type = HNAE3_FUNC_RESET;
}
if (status & HCLGE_ROCEE_BERR_INT_MASK) { if (status & HCLGE_ROCEE_BERR_INT_MASK)
dev_warn(dev, "ROCEE RAS AXI bresp error\n"); dev_warn(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET; reset_type = HNAE3_FUNC_RESET;
ret = hclge_log_rocee_axi_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
} }
if (status & HCLGE_ROCEE_ECC_INT_MASK) { if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET; reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
} }
if (status & HCLGE_ROCEE_OVF_INT_MASK) { if (status & HCLGE_ROCEE_OVF_INT_MASK) {
...@@ -1478,7 +1545,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) ...@@ -1478,7 +1545,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* reset everything for now */ /* reset everything for now */
return HNAE3_GLOBAL_RESET; return HNAE3_GLOBAL_RESET;
} }
reset_type = HNAE3_FUNC_RESET;
} }
/* clear error status */ /* clear error status */
...@@ -1620,6 +1686,81 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) ...@@ -1620,6 +1686,81 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
return PCI_ERS_RESULT_RECOVERED; return PCI_ERS_RESULT_RECOVERED;
} }
/* hclge_query_over_8bd_err_info: query information about over_8bd_nfe_err
 * @hdev: pointer to struct hclge_dev
 * @vf_id: Index of the virtual function with error
 * @q_id: Physical index of the queue with error
 *
 * This function gets the index of the queue and function which caused
 * over_8bd_nfe_err by sending a command. If vf_id is 0, it means the
 * error was caused by the PF instead of a VF.
 */
static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id,
u16 *q_id)
{
struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
return ret;
req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
*vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
*q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
return 0;
}
/* hclge_handle_over_8bd_err: handle the MSI-X error named over_8bd_nfe_err
 * @hdev: pointer to struct hclge_dev
 * @reset_requests: reset level that we need to trigger later
 *
 * over_8bd_nfe_err is a special MSI-X error because it may be caused by a
 * VF; in that case, we need to trigger a VF reset. Otherwise, a PF reset
 * is needed.
 */
static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
struct device *dev = &hdev->pdev->dev;
u16 vf_id;
u16 q_id;
int ret;
ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id);
if (ret) {
dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n",
ret);
return;
}
dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
dev_err(dev, "invalid vf id(%d)\n", vf_id);
return;
}
/* If we need to trigger another reset whose level is higher
 * than HNAE3_VF_FUNC_RESET, there is no need to trigger a VF
 * reset here.
 */
if (*reset_requests != 0)
return;
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
}
int hclge_handle_hw_msix_error(struct hclge_dev *hdev, int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests) unsigned long *reset_requests)
{ {
...@@ -1732,6 +1873,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, ...@@ -1732,6 +1873,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
set_bit(reset_level, reset_requests); set_bit(reset_level, reset_requests);
} }
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
if (status)
hclge_handle_over_8bd_err(hdev, reset_requests);
/* clear all PF MSIx errors */ /* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false); hclge_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
......
...@@ -83,7 +83,8 @@ ...@@ -83,7 +83,8 @@
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) #define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) #define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 #define HCLGE_PPU_PF_INT_RAS_MASK 0x18
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 #define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) #define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) #define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0) #define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
...@@ -94,6 +95,7 @@ ...@@ -94,6 +95,7 @@
#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1 #define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
#define HCLGE_ROCEE_RERR_INT_MASK BIT(0) #define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
#define HCLGE_ROCEE_BERR_INT_MASK BIT(1) #define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0)
#define HCLGE_ROCEE_ECC_INT_MASK BIT(2) #define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
#define HCLGE_ROCEE_OVF_INT_MASK BIT(3) #define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000 #define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
......
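A hedged illustration of the mask split above (the helper name is invented; the real handler does this inline on *desc_data): the old PF MSI-X mask 0x27 covered bits {0, 1, 2, 5}; the patch moves bit 0 (over_8bd_nfe_err) into its own HCLGE_PPU_PF_OVER_8BD_ERR_MASK (0x01) and keeps bits {1, 2, 5} in HCLGE_PPU_PF_INT_MSIX_MASK (0x26), so the MSI-X handler shown earlier can treat the two groups separately.

/* Sketch of how the two masks partition the PPU PF abnormal-interrupt
 * status word.
 */
static void sketch_split_ppu_pf_msix_status(__le32 status_word,
					    u32 *generic, u32 *over_8bd)
{
	u32 sts = le32_to_cpu(status_word);

	/* 0x27 == 0x26 | 0x01: the old combined mask splits cleanly */
	*generic = sts & HCLGE_PPU_PF_INT_MSIX_MASK;		/* bits 1, 2, 5 */
	*over_8bd = sts & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;	/* bit 0 */
}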
...@@ -474,6 +474,7 @@ enum HCLGE_FD_KEY_TYPE { ...@@ -474,6 +474,7 @@ enum HCLGE_FD_KEY_TYPE {
enum HCLGE_FD_STAGE { enum HCLGE_FD_STAGE {
HCLGE_FD_STAGE_1, HCLGE_FD_STAGE_1,
HCLGE_FD_STAGE_2, HCLGE_FD_STAGE_2,
MAX_STAGE_NUM,
}; };
/* OUTER_XXX indicates tuples in tunnel header of tunnel packet /* OUTER_XXX indicates tuples in tunnel header of tunnel packet
...@@ -528,7 +529,7 @@ enum HCLGE_FD_META_DATA { ...@@ -528,7 +529,7 @@ enum HCLGE_FD_META_DATA {
struct key_info { struct key_info {
u8 key_type; u8 key_type;
u8 key_length; u8 key_length; /* use bit as unit */
}; };
static const struct key_info meta_data_key_info[] = { static const struct key_info meta_data_key_info[] = {
...@@ -612,18 +613,23 @@ struct hclge_fd_key_cfg { ...@@ -612,18 +613,23 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg { struct hclge_fd_cfg {
u8 fd_mode; u8 fd_mode;
u16 max_key_length; u16 max_key_length; /* use bit as unit */
u32 proto_support; u32 proto_support;
u32 rule_num[2]; /* rule entry number */ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[2]; /* rule hit counter number */ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[2]; struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
}; };
#define IPV4_INDEX 3
#define IPV6_SIZE 4
struct hclge_fd_rule_tuples { struct hclge_fd_rule_tuples {
u8 src_mac[6]; u8 src_mac[ETH_ALEN];
u8 dst_mac[6]; u8 dst_mac[ETH_ALEN];
u32 src_ip[4]; /* Be compatible for ip address of both ipv4 and ipv6.
u32 dst_ip[4]; * For ipv4 address, we store it in src/dst_ip[3].
*/
u32 src_ip[IPV6_SIZE];
u32 dst_ip[IPV6_SIZE];
u16 src_port; u16 src_port;
u16 dst_port; u16 dst_port;
u16 vlan_tag1; u16 vlan_tag1;
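A small hedged sketch of the layout the new comment describes (the helper name is invented; the real flow-director code fills these fields inline): an IPv4 address uses only word IPV4_INDEX of the IPV6_SIZE-word array, with the remaining words zeroed.

/* Hypothetical helper for illustration only. */
static void sketch_fd_set_v4_src(struct hclge_fd_rule_tuples *tuples,
				 __be32 saddr)
{
	memset(tuples->src_ip, 0, sizeof(tuples->src_ip));
	tuples->src_ip[IPV4_INDEX] = be32_to_cpu(saddr);	/* word 3 of 4 */
}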
...@@ -693,6 +699,8 @@ struct hclge_mac_tnl_stats { ...@@ -693,6 +699,8 @@ struct hclge_mac_tnl_stats {
u32 status; u32 status;
}; };
#define HCLGE_RESET_INTERVAL (10 * HZ)
/* For each bit of TCAM entry, it uses a pair of 'x' and /* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below: * 'y' to indicate which value to match, like below:
* ---------------------------------- * ----------------------------------
......
...@@ -93,7 +93,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) ...@@ -93,7 +93,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else if (hdev->reset_type == HNAE3_FLR_RESET) else if (hdev->reset_type == HNAE3_FLR_RESET)
reset_type = HNAE3_VF_FULL_RESET; reset_type = HNAE3_VF_FULL_RESET;
else else
return -EINVAL; reset_type = HNAE3_VF_FUNC_RESET;
memcpy(&msg_data[0], &reset_type, sizeof(u16)); memcpy(&msg_data[0], &reset_type, sizeof(u16));
...@@ -369,7 +369,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport, ...@@ -369,7 +369,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
vf_tc_map |= BIT(i); vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map, ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
sizeof(u8)); sizeof(vf_tc_map));
return ret; return ret;
} }
......
...@@ -83,8 +83,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, ...@@ -83,8 +83,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG, HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG}; HCLGEVF_TQP_INTR_RL_REG};
static inline struct hclgevf_dev *hclgevf_ae_get_hdev( static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
struct hnae3_handle *handle)
{ {
if (!handle->client) if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic); return container_of(handle, struct hclgevf_dev, nic);
...@@ -232,7 +231,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) ...@@ -232,7 +231,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
int status; int status;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
true, &resp_msg, sizeof(u8)); true, &resp_msg, sizeof(resp_msg));
if (status) { if (status) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d", "VF request to get TC info from PF failed %d",
...@@ -321,7 +320,8 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) ...@@ -321,7 +320,8 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
2, true, resp_data, 2); sizeof(msg_data), true, resp_data,
sizeof(resp_data));
if (!ret) if (!ret)
qid_in_pf = *(u16 *)resp_data; qid_in_pf = *(u16 *)resp_data;
...@@ -418,7 +418,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev) ...@@ -418,7 +418,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
u8 resp_msg; u8 resp_msg;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
0, false, &resp_msg, sizeof(u8)); 0, false, &resp_msg, sizeof(resp_msg));
if (status) if (status)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status); "VF failed to fetch link status(%d) from PF", status);
...@@ -453,11 +453,13 @@ static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) ...@@ -453,11 +453,13 @@ static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
u8 resp_msg; u8 resp_msg;
send_msg = HCLGEVF_ADVERTISING; send_msg = HCLGEVF_ADVERTISING;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
sizeof(u8), false, &resp_msg, sizeof(u8)); &send_msg, sizeof(send_msg), false,
&resp_msg, sizeof(resp_msg));
send_msg = HCLGEVF_SUPPORTED; send_msg = HCLGEVF_SUPPORTED;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
sizeof(u8), false, &resp_msg, sizeof(u8)); &send_msg, sizeof(send_msg), false,
&resp_msg, sizeof(resp_msg));
} }
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
...@@ -470,12 +472,6 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) ...@@ -470,12 +472,6 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->numa_node_mask = hdev->numa_node_mask; nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF; nic->flags |= HNAE3_SUPPORT_VF;
if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
hdev->ae_dev->dev_type);
return -EINVAL;
}
ret = hclgevf_knic_setup(hdev); ret = hclgevf_knic_setup(hdev);
if (ret) if (ret)
dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
...@@ -545,13 +541,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, ...@@ -545,13 +541,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
{ {
struct hclgevf_rss_config_cmd *req; struct hclgevf_rss_config_cmd *req;
struct hclgevf_desc desc; struct hclgevf_desc desc;
int key_offset; int key_offset = 0;
int key_counts;
int key_size; int key_size;
int ret; int ret;
key_counts = HCLGEVF_RSS_KEY_SIZE;
req = (struct hclgevf_rss_config_cmd *)desc.data; req = (struct hclgevf_rss_config_cmd *)desc.data;
for (key_offset = 0; key_offset < 3; key_offset++) { while (key_counts) {
hclgevf_cmd_setup_basic_desc(&desc, hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_RSS_GENERIC_CONFIG, HCLGEVF_OPC_RSS_GENERIC_CONFIG,
false); false);
...@@ -560,15 +558,12 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, ...@@ -560,15 +558,12 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
req->hash_config |= req->hash_config |=
(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
if (key_offset == 2) key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
key_size =
HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
else
key_size = HCLGEVF_RSS_HASH_KEY_NUM;
memcpy(req->hash_key, memcpy(req->hash_key,
key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
key_counts -= key_size;
key_offset++;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
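A hedged arithmetic check of the rewritten key loop above, assuming the 40-byte VF RSS key and 16-byte per-descriptor capacity implied by the removed key_offset == 2 special case (the function below is illustrative only and sends no commands): the while loop issues chunks of 16, 16 and 8 bytes, the same bytes the old explicit branch produced.

/* Stand-alone sketch of the chunking arithmetic only. */
static int sketch_rss_key_chunks(void)
{
	int remaining = HCLGEVF_RSS_KEY_SIZE;		/* 40 in this driver */
	int msgs = 0;

	while (remaining) {
		int chunk = min(HCLGEVF_RSS_HASH_KEY_NUM, remaining);

		remaining -= chunk;			/* 16, 16, 8 */
		msgs++;
	}

	return msgs;					/* 3 commands */
}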
...@@ -1193,7 +1188,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, ...@@ -1193,7 +1188,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
HCLGE_MBX_MAC_VLAN_UC_MODIFY; HCLGE_MBX_MAC_VLAN_UC_MODIFY;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
subcode, msg_data, ETH_ALEN * 2, subcode, msg_data, sizeof(msg_data),
true, NULL, 0); true, NULL, 0);
if (!status) if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
...@@ -1249,7 +1244,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, ...@@ -1249,7 +1244,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
if (vlan_id > 4095) if (vlan_id > HCLGEVF_MAX_VLAN_ID)
return -EINVAL; return -EINVAL;
if (proto != htons(ETH_P_8021Q)) if (proto != htons(ETH_P_8021Q))
...@@ -1280,7 +1275,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -1280,7 +1275,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
u8 msg_data[2]; u8 msg_data[2];
int ret; int ret;
memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); memcpy(msg_data, &queue_id, sizeof(queue_id));
/* disable vf queue before send queue reset msg to PF */ /* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
...@@ -1288,7 +1283,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -1288,7 +1283,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret; return ret;
return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
2, true, NULL, 0); sizeof(msg_data), true, NULL, 0);
} }
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
...@@ -1658,7 +1653,8 @@ static void hclgevf_service_timer(struct timer_list *t) ...@@ -1658,7 +1653,8 @@ static void hclgevf_service_timer(struct timer_list *t)
{ {
struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + 5 * HZ); mod_timer(&hdev->service_timer, jiffies +
HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
hdev->stats_timer++; hdev->stats_timer++;
hclgevf_task_schedule(hdev); hclgevf_task_schedule(hdev);
...@@ -1678,9 +1674,9 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1678,9 +1674,9 @@ static void hclgevf_reset_service_task(struct work_struct *work)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING, if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) { &hdev->reset_state)) {
/* PF has initmated that it is about to reset the hardware. /* PF has initmated that it is about to reset the hardware.
* We now have to poll & check if harware has actually completed * We now have to poll & check if hardware has actually
* the reset sequence. On hardware reset completion, VF needs to * completed the reset sequence. On hardware reset completion,
* reset the client and ae device. * VF needs to reset the client and ae device.
*/ */
hdev->reset_attempts = 0; hdev->reset_attempts = 0;
...@@ -1696,7 +1692,7 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1696,7 +1692,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) { &hdev->reset_state)) {
/* we could be here when either of below happens: /* we could be here when either of below happens:
* 1. reset was initiated due to watchdog timeout due to * 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down and * a. IMP was earlier reset and our TX got choked down and
* which resulted in watchdog reacting and inducing VF * which resulted in watchdog reacting and inducing VF
* reset. This also means our cmdq would be unreliable. * reset. This also means our cmdq would be unreliable.
...@@ -1758,7 +1754,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t) ...@@ -1758,7 +1754,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
schedule_work(&hdev->keep_alive_task); schedule_work(&hdev->keep_alive_task);
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); mod_timer(&hdev->keep_alive_timer, jiffies +
HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
} }
static void hclgevf_keep_alive_task(struct work_struct *work) static void hclgevf_keep_alive_task(struct work_struct *work)
...@@ -1773,7 +1770,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work) ...@@ -1773,7 +1770,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
return; return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
0, false, &respmsg, sizeof(u8)); 0, false, &respmsg, sizeof(respmsg));
if (ret) if (ret)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret); "VF sends keep alive cmd failed(=%d)\n", ret);
...@@ -2005,7 +2002,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -2005,7 +2002,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
} }
/* Initialize RSS indirect table for each vport */ /* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
...@@ -2018,9 +2015,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -2018,9 +2015,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{ {
/* other vlan config(like, VLAN TX/RX offload) would also be added
* here later
*/
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false); false);
} }
...@@ -2042,7 +2036,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) ...@@ -2042,7 +2036,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle); hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev); hclgevf_request_link_info(hdev);
...@@ -2066,7 +2059,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) ...@@ -2066,7 +2059,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hclgevf_reset_tqp(handle, i)) if (hclgevf_reset_tqp(handle, i))
break; break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle); hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0); hclgevf_update_link_status(hdev, 0);
} }
...@@ -2090,7 +2082,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle) ...@@ -2090,7 +2082,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
if (ret) if (ret)
return ret; return ret;
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); mod_timer(&hdev->keep_alive_timer, jiffies +
HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
return 0; return 0;
} }
...@@ -2322,16 +2315,6 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, ...@@ -2322,16 +2315,6 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
if (ret) if (ret)
goto clear_roce; goto clear_roce;
break;
case HNAE3_CLIENT_UNIC:
hdev->nic_client = client;
hdev->nic.client = client;
ret = client->ops->init_instance(&hdev->nic);
if (ret)
goto clear_nic;
hnae3_set_client_init_flag(client, ae_dev, 1);
break; break;
case HNAE3_CLIENT_ROCE: case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) { if (hnae3_dev_roce_supported(hdev)) {
......
...@@ -12,9 +12,12 @@ ...@@ -12,9 +12,12 @@
#define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf" #define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
#define HCLGEVF_MISC_VECTOR_NUM 0 #define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff #define HCLGEVF_INVALID_VPORT 0xffff
#define HCLGEVF_GENERAL_TASK_INTERVAL 5
#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
/* This number in actual depends upon the total number of VFs /* This number in actual depends upon the total number of VFs
* created by physical function. But the maximum number of * created by physical function. But the maximum number of
......