Commit c1957126 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patch-set includes code optimizations and bugfixes for the HNS3
ethernet controller driver.

[patch 1/12 - 4/12] optimizes the VLAN feature, adds support for port
based VLAN, and fixes some bugs in the current implementation.

[patch 5/12 - 12/12] includes some other code optimizations for the HNS3
ethernet controller driver.

Change log:
V1->V2: modifies some patches' commit logs and code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 81f2eeb3 6814b590
@@ -43,6 +43,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port based vlan info */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
@@ -63,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
......
@@ -147,6 +147,13 @@ enum hnae3_flr_state {
HNAE3_FLR_DONE,
};
enum hnae3_port_base_vlan_state {
HNAE3_PORT_BASE_VLAN_DISABLE,
HNAE3_PORT_BASE_VLAN_ENABLE,
HNAE3_PORT_BASE_VLAN_MODIFY,
HNAE3_PORT_BASE_VLAN_NOCHANGE,
};
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -578,6 +585,8 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
enum hnae3_port_base_vlan_state port_base_vlan_state;
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
};
......
@@ -963,6 +963,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
struct hnae3_handle *handle = tx_ring->tqp->handle;
/* Due to a HW limitation, only one VLAN header is allowed in an skb
 * when port based VLAN insertion is enabled; more than one causes a
 * RAS error.
 */
if (unlikely(skb_vlan_tagged_multi(skb) &&
handle->port_base_vlan_state ==
HNAE3_PORT_BASE_VLAN_ENABLE))
return -EINVAL;
if (skb->protocol == htons(ETH_P_8021Q) &&
!(tx_ring->tqp->handle->kinfo.netdev->features &
NETIF_F_HW_VLAN_CTAG_TX)) {
@@ -984,8 +994,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
if (handle->port_base_vlan_state ==
HNAE3_PORT_BASE_VLAN_DISABLE) {
hns3_set_field(*out_vlan_flag,
HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
hns3_set_field(*inner_vlan_flag,
HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else {
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
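/* Illustrative, self-contained sketch (not driver API) of the TX rule
 * the hunk above implements: with port based VLAN insertion active the
 * HW owns the outer tag, so the skb's 802.1Q tag goes into the inner
 * descriptor field; otherwise it goes into the outer field as before.
 * All names below are hypothetical stand-ins. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct tx_desc_vlan { uint16_t out_vtag; uint16_t inner_vtag; };

static void place_tx_vlan(struct tx_desc_vlan *d, uint16_t tag,
			  bool port_vlan_enabled)
{
	if (port_vlan_enabled)
		d->inner_vtag = tag;	/* outer field reserved for port tag */
	else
		d->out_vtag = tag;
}

int main(void)
{
	struct tx_desc_vlan d = { 0, 0 };

	place_tx_vlan(&d, 100, true);
	assert(d.inner_vtag == 100 && d.out_vtag == 0);
	place_tx_vlan(&d, 200, false);
	assert(d.out_vtag == 200);
	return 0;
}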
@@ -2313,17 +2331,50 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
}
}
static int hns3_gro_complete(struct sk_buff *skb)
{
__be16 type = skb->protocol;
struct tcphdr *th;
int depth = 0;
while (type == htons(ETH_P_8021Q)) {
struct vlan_hdr *vh;
if ((depth + VLAN_HLEN) > skb_headlen(skb))
return -EFAULT;
vh = (struct vlan_hdr *)(skb->data + depth);
type = vh->h_vlan_encapsulated_proto;
depth += VLAN_HLEN;
}
if (type == htons(ETH_P_IP)) {
depth += sizeof(struct iphdr);
} else if (type == htons(ETH_P_IPV6)) {
depth += sizeof(struct ipv6hdr);
} else {
netdev_err(skb->dev,
"Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
be16_to_cpu(type), depth);
return -EFAULT;
}
th = (struct tcphdr *)(skb->data + depth);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
}
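/* Self-contained sketch of the header walk in hns3_gro_complete() above:
 * step over stacked 802.1Q headers in a raw buffer to locate the network
 * header.  Plain byte buffers stand in for sk_buffs; names are
 * illustrative, not driver API. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define SKETCH_ETH_P_8021Q 0x8100
#define SKETCH_VLAN_HLEN   4

/* returns the network-header offset, or -1 if the buffer is too short */
static int vlan_depth(const uint8_t *data, size_t len, uint16_t *proto)
{
	int depth = 0;

	while (*proto == htons(SKETCH_ETH_P_8021Q)) {
		if ((size_t)depth + SKETCH_VLAN_HLEN > len)
			return -1;
		/* encapsulated protocol: last two bytes of the VLAN header */
		memcpy(proto, data + depth + 2, sizeof(*proto));
		depth += SKETCH_VLAN_HLEN;
	}
	return depth;
}

int main(void)
{
	/* one 802.1Q tag (TCI 0x0064) wrapping IPv4 (0x0800) */
	uint8_t buf[] = { 0x00, 0x64, 0x08, 0x00 };
	uint16_t proto = htons(SKETCH_ETH_P_8021Q);
	int depth = vlan_depth(buf, sizeof(buf), &proto);

	printf("depth=%d proto=0x%04x\n", depth, ntohs(proto));
	return 0;
}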
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
struct hns3_desc *desc)
u32 l234info, u32 bd_base_info)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int l3_type, l4_type;
u32 bd_base_info;
int ol4_type;
u32 l234info;
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
l234info = le32_to_cpu(desc->rx.l234_info);
skb->ip_summed = CHECKSUM_NONE;
@@ -2332,12 +2383,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
/* We MUST enable hardware checksum before enabling hardware GRO */
if (skb_shinfo(skb)->gso_size) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
}
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
@@ -2390,6 +2435,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
struct hns3_desc *desc, u32 l234info,
u16 *vlan_tag)
{
struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
if (pdev->revision == 0x20) {
@@ -2402,14 +2448,35 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
#define HNS3_STRP_BOTH 0x3
/* When stripping a VLAN tag from a packet, the hardware always puts
 * the tag into the RX descriptor, so the driver needs to determine
 * which tag to report to the stack.
 */
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
if (handle->port_base_vlan_state !=
HNAE3_PORT_BASE_VLAN_DISABLE)
return false;
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
return true;
case HNS3_STRP_INNER_VLAN:
if (handle->port_base_vlan_state !=
HNAE3_PORT_BASE_VLAN_DISABLE)
return false;
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return true;
case HNS3_STRP_BOTH:
if (handle->port_base_vlan_state ==
HNAE3_PORT_BASE_VLAN_DISABLE)
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
else
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return true;
default:
return false;
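/* Companion sketch (standalone, illustrative only) of the RX rule in the
 * switch above: given the strip-tag bits, decide which stripped tag to
 * report.  With a port based VLAN active, a lone stripped tag is assumed
 * to be the port tag and is not reported; if both tags were stripped,
 * the packet's own (inner) tag is reported.  Names are hypothetical. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_STRP_OUTER 0x1
#define SKETCH_STRP_INNER 0x2
#define SKETCH_STRP_BOTH  0x3

static bool pick_rx_tag(unsigned int strip_bits, bool port_vlan_enabled,
			uint16_t ot_tag, uint16_t in_tag, uint16_t *out)
{
	switch (strip_bits) {
	case SKETCH_STRP_OUTER:
		if (port_vlan_enabled)
			return false;
		*out = ot_tag;
		return true;
	case SKETCH_STRP_INNER:
		if (port_vlan_enabled)
			return false;
		*out = in_tag;
		return true;
	case SKETCH_STRP_BOTH:
		*out = port_vlan_enabled ? in_tag : ot_tag;
		return true;
	default:
		return false;
	}
}

int main(void)
{
	uint16_t tag = 0;

	/* both tags stripped while port VLAN active: report the inner tag */
	assert(pick_rx_tag(SKETCH_STRP_BOTH, true, 10, 20, &tag) && tag == 20);
	/* a lone outer tag under port VLAN is the port tag: not reported */
	assert(!pick_rx_tag(SKETCH_STRP_OUTER, true, 10, 20, &tag));
	return 0;
}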
@@ -2532,8 +2599,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return 0;
}
static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
u32 bd_base_info)
static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
u32 bd_base_info)
{
u16 gro_count;
u32 l3_type;
@@ -2541,12 +2609,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
if (!gro_count)
return;
if (!gro_count) {
hns3_rx_checksum(ring, skb, l234info, bd_base_info);
return 0;
}
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
NAPI_GRO_CB(skb)->count = gro_count;
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2556,13 +2623,13 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
else if (l3_type == HNS3_L3_TYPE_IPV6)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
else
return;
return -EFAULT;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
if (skb_shinfo(skb)->gso_size)
tcp_gro_complete(skb);
return hns3_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2587,16 +2654,85 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
struct hns3_desc *desc)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
u32 l234info = le32_to_cpu(desc->rx.l234_info);
enum hns3_pkt_l2t_type l2_frame_type;
unsigned int len;
int ret;
/* Based on the hw strategy, the offloaded tag is stored in
 * ot_vlan_tag in the two-layer tag case, and in vlan_tag in the
 * one-layer tag case.
 */
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tag);
}
if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.non_vld_descs++;
u64_stats_update_end(&ring->syncp);
return -EINVAL;
}
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
u64_stats_update_begin(&ring->syncp);
if (l234info & BIT(HNS3_RXD_L2E_B))
ring->stats.l2_err++;
else
ring->stats.err_pkt_len++;
u64_stats_update_end(&ring->syncp);
return -EFAULT;
}
len = skb->len;
/* set the protocol before handing the skb to the IP stack */
skb->protocol = eth_type_trans(skb, netdev);
/* This is needed in order to enable forwarding support */
ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
if (unlikely(ret)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.rx_err_cnt++;
u64_stats_update_end(&ring->syncp);
return ret;
}
l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
HNS3_RXD_DMAC_S);
u64_stats_update_begin(&ring->syncp);
ring->stats.rx_pkts++;
ring->stats.rx_bytes += len;
if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
ring->stats.rx_multicast++;
u64_stats_update_end(&ring->syncp);
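/* note (added for clarity): the u64_stats_update_begin()/end() pairs use
 * a seqcount from u64_stats_sync so 64-bit counters can be read
 * consistently on 32-bit machines; on 64-bit builds they compile to
 * no-ops. */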
ring->tqp_vector->rx_group.total_bytes += len;
return 0;
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
u32 bd_base_info;
u32 l234info;
int length;
int ret;
@@ -2656,62 +2792,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
l234info = le32_to_cpu(desc->rx.l234_info);
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
}
if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.non_vld_descs++;
u64_stats_update_end(&ring->syncp);
dev_kfree_skb_any(skb);
return -EINVAL;
}
if (unlikely((!desc->rx.pkt_len) ||
(l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
u64_stats_update_begin(&ring->syncp);
if (l234info & BIT(HNS3_RXD_L2E_B))
ring->stats.l2_err++;
else
ring->stats.err_pkt_len++;
u64_stats_update_end(&ring->syncp);
ret = hns3_handle_bdinfo(ring, skb, desc);
if (unlikely(ret)) {
dev_kfree_skb_any(skb);
return -EFAULT;
return ret;
}
l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
HNS3_RXD_DMAC_S);
u64_stats_update_begin(&ring->syncp);
if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
ring->stats.rx_multicast++;
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
u64_stats_update_end(&ring->syncp);
ring->tqp_vector->rx_group.total_bytes += skb->len;
/* This is needed in order to enable forwarding support */
hns3_set_gro_param(skb, l234info, bd_base_info);
hns3_rx_checksum(ring, skb, desc);
*out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
@@ -2723,7 +2809,6 @@ int hns3_clean_rx_ring(
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
struct sk_buff *skb = ring->skb;
@@ -2760,8 +2845,6 @@ int hns3_clean_rx_ring(
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
@@ -3877,6 +3960,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ring_ptr_move_fw(ring, next_to_use);
}
/* Free the pending skb in rx ring */
if (ring->skb) {
dev_kfree_skb_any(ring->skb);
ring->skb = NULL;
ring->pending_buf = 0;
}
return 0;
}
......
@@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock_bh(&hdev->hw.cmq.crq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
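/* note (added for clarity): the csq lock above is taken with
 * spin_lock_bh(), so bottom halves are already disabled here; a plain
 * spin_lock() suffices for the nested crq lock, and the matching plain
 * spin_unlock() below avoids re-enabling BH while the csq lock is
 * still held. */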
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
@@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hclge_cmd_init_regs(&hdev->hw);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
......
@@ -4,287 +4,468 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
{ .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
{ .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
{ .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
{ .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
{ .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
{ .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
{ .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
{ .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
.reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow" },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow" },
{ .int_msk = BIT(4), .msg = "tx_buf_underrun" },
{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
{ .int_msk = BIT(0), .msg = "rx_buf_overflow",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(4), .msg = "tx_buf_underrun",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
.reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
{ .int_msk = BIT(27),
.msg = "flow_director_ad_mem0_ecc_mbit_err" },
{ .int_msk = BIT(28),
.msg = "flow_director_ad_mem1_ecc_mbit_err" },
{ .int_msk = BIT(29),
.msg = "rx_vlan_tag_memory_ecc_mbit_err" },
{ .int_msk = BIT(30),
.msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
{ .int_msk = BIT(12),
.msg = "tm_sch_port_shap_offset_fifo_wr_err" },
{ .int_msk = BIT(13),
.msg = "tm_sch_port_shap_offset_fifo_rd_err" },
{ .int_msk = BIT(14),
.msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
{ .int_msk = BIT(15),
.msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
{ .int_msk = BIT(16),
.msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
{ .int_msk = BIT(17),
.msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
{ .int_msk = BIT(18),
.msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
{ .int_msk = BIT(19),
.msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
{ .int_msk = BIT(20),
.msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
{ .int_msk = BIT(21),
.msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
{ .int_msk = BIT(26), .msg = "rd_bus_err" },
{ .int_msk = BIT(27), .msg = "wr_bus_err" },
{ .int_msk = BIT(28), .msg = "reg_search_miss" },
{ .int_msk = BIT(29), .msg = "rx_q_search_miss" },
{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(26), .msg = "rd_bus_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(27), .msg = "wr_bus_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(28), .msg = "reg_search_miss",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(29), .msg = "rx_q_search_miss",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
.reset_level = HNAE3_CORE_RESET },
{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
.reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
{ .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
{ .int_msk = BIT(5), .msg = "buf_wait_timeout" },
{ .int_msk = BIT(0), .msg = "over_8bd_no_fe",
.reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
.reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
.reset_level = HNAE3_FUNC_RESET },
{ .int_msk = BIT(5), .msg = "buf_wait_timeout",
.reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
{ .int_msk = BIT(0), .msg = "buf_sum_err" },
{ .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
{ .int_msk = BIT(2), .msg = "ppp_mbid_err" },
{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
{ .int_msk = BIT(5), .msg = "cks_edit_position_err" },
{ .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
{ .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
{ .int_msk = BIT(9), .msg = "vlan_num_in_err" },
{ .int_msk = BIT(0), .msg = "buf_sum_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(1), .msg = "ppp_mb_num_err",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(2), .msg = "ppp_mbid_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "cks_edit_position_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "cks_edit_condition_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "vlan_num_ot_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "vlan_num_in_err",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
#define HCLGE_SSU_MEM_ECC_ERR(x) \
{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
.reset_level = HNAE3_GLOBAL_RESET }
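/* Illustrative expansion: HCLGE_SSU_MEM_ECC_ERR(0) becomes
 *   { .int_msk = BIT(0), .msg = "ssu_mem0_ecc_mbit_err",
 *     .reset_level = HNAE3_GLOBAL_RESET }
 * since #x stringizes the macro argument and adjacent string literals
 * are concatenated at compile time. */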
static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
HCLGE_SSU_MEM_ECC_ERR(0),
@@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
};
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
{ .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
{ .int_msk = BIT(1), .msg = "ig_host_inf_int" },
{ .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
{ .int_msk = BIT(0), .msg = "ig_mac_inf_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "ig_host_inf_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "ig_roc_buf_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
{ .int_msk = BIT(9), .msg = "low_water_line_err_port" },
{ .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
.reset_level = HNAE3_NONE_RESET },
{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
.reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
static void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts)
static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts)
{
enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
bool need_reset = false;
while (err->msg) {
if (err->int_msk & err_sts)
if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
if (err->reset_level != HNAE3_NONE_RESET &&
err->reset_level >= reset_level) {
reset_level = err->reset_level;
need_reset = true;
}
}
err++;
}
if (need_reset)
return reset_level;
else
return HNAE3_NONE_RESET;
}
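/* Simplified, standalone sketch of the table-scan pattern that
 * hclge_log_error() implements above: each entry pairs an interrupt mask
 * bit with a message and the reset level needed to recover, and scanning
 * a status word returns the deepest reset required.  Names and the plain
 * ">" comparison are illustrative; the driver's ordering handling is
 * slightly more involved. */
#include <stdio.h>
#include <stdint.h>

enum reset_type { NONE_RESET, FUNC_RESET, CORE_RESET, GLOBAL_RESET };

struct hw_error {
	uint32_t int_msk;
	const char *msg;
	enum reset_type reset_level;
};

static const struct hw_error example_tbl[] = {
	{ 1u << 0, "rx_buf_overflow", CORE_RESET },
	{ 1u << 1, "tx_buf_underrun", GLOBAL_RESET },
	{ 0, NULL, NONE_RESET } /* sentinel */
};

static enum reset_type log_errors(const struct hw_error *err, uint32_t sts)
{
	enum reset_type level = NONE_RESET;

	for (; err->msg; err++) {
		if (!(err->int_msk & sts))
			continue;
		printf("%s found [error status=0x%x]\n", err->msg, sts);
		if (err->reset_level > level)
			level = err->reset_level;
	}
	return level;
}

int main(void)
{
	/* both bits set: GLOBAL_RESET (the deeper level) wins */
	printf("reset level = %d\n", log_errors(example_tbl, 0x3));
	return 0;
}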
/* hclge_cmd_query_error: read the error information
@@ -826,6 +1064,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int num)
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
enum hnae3_reset_type reset_level;
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
@@ -845,78 +1084,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* log HNS common errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
&hclge_imp_tcm_ecc_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
&hclge_imp_tcm_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[1]);
if (status) {
hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
&hclge_cmdq_nic_mem_ecc_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
&hclge_cmdq_nic_mem_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
dev_warn(dev, "imp_rd_data_poison_err found\n");
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
}
status = le32_to_cpu(desc[0].data[3]);
if (status) {
hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
&hclge_tqp_int_ecc_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
&hclge_tqp_int_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[4]);
if (status) {
hclge_log_error(dev, "MSIX_ECC_INT_STS",
&hclge_msix_sram_ecc_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
&hclge_msix_sram_ecc_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log SSU(Storage Switch Unit) errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
if (status) {
hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
&hclge_ssu_mem_ecc_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
&hclge_ssu_mem_ecc_err_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
if (status) {
hclge_log_error(dev, "SSU_COMMON_ERR_INT",
&hclge_ssu_com_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
&hclge_ssu_com_err_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log IGU(Ingress Unit) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
if (status)
hclge_log_error(dev, "IGU_INT_STS",
&hclge_igu_int[0], status);
if (status) {
reset_level = hclge_log_error(dev, "IGU_INT_STS",
&hclge_igu_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4];
status = le32_to_cpu(*(desc_data + 1));
if (status)
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
&hclge_ppp_mpf_abnormal_int_st1[0], status);
if (status) {
reset_level =
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
&hclge_ppp_mpf_abnormal_int_st1[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
if (status)
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
&hclge_ppp_mpf_abnormal_int_st3[0], status);
if (status) {
reset_level =
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
&hclge_ppp_mpf_abnormal_int_st3[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[5];
@@ -924,55 +1179,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
status = le32_to_cpu(*(desc_data + 2));
if (status) {
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level =
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
if (status) {
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
&hclge_ppu_mpf_abnormal_int_st3[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level =
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
&hclge_ppu_mpf_abnormal_int_st3[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log TM(Traffic Manager) errors */
desc_data = (__le32 *)&desc[6];
status = le32_to_cpu(*desc_data);
if (status) {
hclge_log_error(dev, "TM_SCH_RINT",
&hclge_tm_sch_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "TM_SCH_RINT",
&hclge_tm_sch_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log QCN(Quantized Congestion Control) errors */
desc_data = (__le32 *)&desc[7];
status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
if (status) {
hclge_log_error(dev, "QCN_FIFO_RINT",
&hclge_qcn_fifo_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
&hclge_qcn_fifo_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
if (status) {
hclge_log_error(dev, "QCN_ECC_RINT",
&hclge_qcn_ecc_rint[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
&hclge_qcn_ecc_rint[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log NCSI errors */
desc_data = (__le32 *)&desc[9];
status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
if (status) {
hclge_log_error(dev, "NCSI_ECC_INT_RPT",
&hclge_ncsi_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
&hclge_ncsi_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* clear all main PF RAS errors */
......@@ -1000,6 +1260,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
enum hnae3_reset_type reset_level;
__le32 *desc_data;
u32 status;
int ret;
......@@ -1018,38 +1279,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log SSU(Storage Switch Unit) errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_err_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[1]);
if (status) {
hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
&hclge_ssu_fifo_overflow_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
&hclge_ssu_fifo_overflow_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[2]);
if (status) {
hclge_log_error(dev, "SSU_ETS_TCG_INT",
&hclge_ssu_ets_tcg_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
&hclge_ssu_ets_tcg_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
if (status)
hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
&hclge_igu_egu_tnl_int[0], status);
if (status) {
reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
&hclge_igu_egu_tnl_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
if (status)
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0], status);
if (status) {
reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0],
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
......@@ -1343,14 +1613,12 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
enum hnae3_reset_type reset_level;
struct hclge_desc desc_bd;
struct hclge_desc *desc;
__le32 *desc_data;
int ret = 0;
u32 status;
/* set default handling */
set_bit(HNAE3_FUNC_RESET, reset_requests);
int ret;
/* query the number of bds for the MSIx int status */
hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
......@@ -1390,9 +1658,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
if (status) {
hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
&hclge_mac_afifo_tnl_int[0], status);
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
&hclge_mac_afifo_tnl_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* log PPU(RCB) MPF errors */
......@@ -1400,9 +1669,11 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status) {
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0], status);
set_bit(HNAE3_CORE_RESET, reset_requests);
reset_level =
hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
&hclge_ppu_mpf_abnormal_int_st2[0],
status);
set_bit(reset_level, reset_requests);
}
/* clear all main PF MSIx errors */
......@@ -1436,24 +1707,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
if (status) {
hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_pf_int[0], status);
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
&hclge_ssu_port_based_pf_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
if (status)
hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
&hclge_ppp_pf_abnormal_int[0], status);
if (status) {
reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
&hclge_ppp_pf_abnormal_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
if (status)
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
&hclge_ppu_pf_abnormal_int[0], status);
if (status) {
reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
&hclge_ppu_pf_abnormal_int[0],
status);
set_bit(reset_level, reset_requests);
}
/* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
......
......@@ -112,6 +112,7 @@ struct hclge_hw_blk {
struct hclge_hw_error {
u32 int_msk;
const char *msg;
enum hnae3_reset_type reset_level;
};
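/* With the new reset_level member, each entry in the error tables can
 * carry the recovery action its error requires. A hypothetical entry
 * for illustration only (mask and message are not taken from the
 * driver's real tables):
 */
static const struct hclge_hw_error example_err_int[] = {
	{ .int_msk = BIT(0),
	  .msg = "example_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET },
	{ /* sentinel */ }
};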
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
......
......@@ -1358,6 +1358,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->back = hdev;
vport->vport_id = i;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
......@@ -1420,7 +1422,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
return ret;
}
static int hclge_get_tc_num(struct hclge_dev *hdev)
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
int i, cnt = 0;
......@@ -1430,17 +1432,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev)
return cnt;
}
static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
int i, cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i) &&
hdev->tm_info.hw_pfc_map & BIT(i))
cnt++;
return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
......@@ -1504,14 +1495,12 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc,
u32 rx_all)
{
u32 shared_buf_min, shared_buf_tc, shared_std;
int tc_num, pfc_enable_num;
u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
u32 tc_num = hclge_get_tc_num(hdev);
u32 shared_buf, aligned_mps;
u32 rx_priv;
int i;
tc_num = hclge_get_tc_num(hdev);
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
......@@ -1520,9 +1509,7 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
shared_buf_tc = pfc_enable_num * aligned_mps +
(tc_num - pfc_enable_num) * aligned_mps / 2 +
aligned_mps;
shared_buf_tc = tc_num * aligned_mps + aligned_mps;
shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
HCLGE_BUF_SIZE_UNIT);
......@@ -1539,19 +1526,26 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
buf_alloc->s_buf.self.low =
roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
buf_alloc->s_buf.self.low = aligned_mps;
}
if (hnae3_dev_dcb_supported(hdev)) {
if (tc_num)
hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
else
hi_thrd = shared_buf - hdev->dv_buf_size;
hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
lo_thrd = hi_thrd - aligned_mps / 2;
} else {
hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
lo_thrd = aligned_mps;
}
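/* Worked example with assumed values: aligned_mps = 1536,
 * dv_buf_size = 4096, shared_buf = 16384, tc_num = 4, DCB supported,
 * HCLGE_BUF_SIZE_UNIT taken as 256:
 *	hi_thrd = (16384 - 4096) / 4 = 3072
 *	hi_thrd = max(3072, 2 * 1536) = 3072 (already a 256 multiple)
 *	lo_thrd = 3072 - 1536 / 2 = 2304
 * Every TC now gets the same pair of thresholds, replacing the old
 * per-PFC-TC split computed from pfc_enable_num.
 */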
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
if ((hdev->hw_tc_map & BIT(i)) &&
(hdev->tm_info.hw_pfc_map & BIT(i))) {
buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
} else {
buf_alloc->s_buf.tc_thrd[i].low = 0;
buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
}
buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
}
return true;
......@@ -6583,30 +6577,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
0, is_kill);
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
......@@ -6680,6 +6650,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
u16 port_base_vlan_state,
u16 vlan_tag)
{
int ret;
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->txvlan_cfg.accept_tag1 = true;
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.default_tag1 = 0;
} else {
vport->txvlan_cfg.accept_tag1 = false;
vport->txvlan_cfg.insert_tag1_en = true;
vport->txvlan_cfg.default_tag1 = vlan_tag;
}
vport->txvlan_cfg.accept_untag1 = true;
/* accept_tag2 and accept_untag2 are not supported on
 * pdev revision(0x20); newer revisions support them.
 * These two fields cannot be configured by the user.
 */
vport->txvlan_cfg.accept_tag2 = true;
vport->txvlan_cfg.accept_untag2 = true;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag2 = 0;
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en =
vport->rxvlan_cfg.rx_vlan_offload_en;
} else {
vport->rxvlan_cfg.strip_tag1_en =
vport->rxvlan_cfg.rx_vlan_offload_en;
vport->rxvlan_cfg.strip_tag2_en = true;
}
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
ret = hclge_set_vlan_tx_offload_cfg(vport);
if (ret)
return ret;
return hclge_set_vlan_rx_offload_cfg(vport);
}
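/* Net effect of the helper above: with port based VLAN disabled, TX
 * accepts the user's tag1 and inserts nothing, and RX strips tag2
 * according to rx_vlan_offload_en; with it enabled, TX inserts the
 * port VLAN as tag1 (default_tag1) and RX always strips tag2, while
 * tag1 stripping follows rx_vlan_offload_en instead.
 */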
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
struct hclge_rx_vlan_type_cfg_cmd *rx_req;
......@@ -6770,34 +6786,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vport->txvlan_cfg.accept_tag1 = true;
vport->txvlan_cfg.accept_untag1 = true;
/* accept_tag2 and accept_untag2 are not supported on
 * pdev revision(0x20); newer revisions support them. The
 * values of these two fields will not return an error when the
 * driver sends commands to the firmware in revision(0x20).
 * These two fields cannot be configured by the user.
 */
vport->txvlan_cfg.accept_tag2 = true;
vport->txvlan_cfg.accept_untag2 = true;
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag1 = 0;
vport->txvlan_cfg.default_tag2 = 0;
u16 vlan_tag;
ret = hclge_set_vlan_tx_offload_cfg(vport);
if (ret)
return ret;
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en = true;
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
vport = &hdev->vport[i];
vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
ret = hclge_set_vlan_rx_offload_cfg(vport);
ret = hclge_vlan_offload_cfg(vport,
vport->port_base_vlan_cfg.state,
vlan_tag);
if (ret)
return ret;
}
......@@ -6805,7 +6801,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan;
......@@ -6817,14 +6814,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
if (!vlan)
return;
vlan->hd_tbl_status = true;
vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
}
void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool is_write_tbl)
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
int ret;
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, 0, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
ret);
return ret;
}
}
vlan->hd_tbl_status = true;
}
return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool is_write_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
......@@ -6887,14 +6908,201 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en = enable;
if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
vport->rxvlan_cfg.strip_tag1_en = false;
vport->rxvlan_cfg.strip_tag2_en = enable;
} else {
vport->rxvlan_cfg.strip_tag1_en = enable;
vport->rxvlan_cfg.strip_tag2_en = true;
}
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
vport->rxvlan_cfg.rx_vlan_offload_en = enable;
return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
u16 port_base_vlan_state,
struct hclge_vlan_info *new_info,
struct hclge_vlan_info *old_info)
{
struct hclge_dev *hdev = vport->back;
int ret;
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
hclge_rm_vport_all_vlan_table(vport, false);
return hclge_set_vlan_filter_hw(hdev,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
new_info->qos, false);
}
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
old_info->qos, true);
if (ret)
return ret;
return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
if (ret)
return ret;
if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
/* add new VLAN tag */
ret = hclge_set_vlan_filter_hw(hdev, vlan_info->vlan_proto,
vport->vport_id,
vlan_info->vlan_tag,
vlan_info->qos, false);
if (ret)
return ret;
/* remove old VLAN tag */
ret = hclge_set_vlan_filter_hw(hdev, old_vlan_info->vlan_proto,
vport->vport_id,
old_vlan_info->vlan_tag,
old_vlan_info->qos, true);
if (ret)
return ret;
goto update;
}
ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
old_vlan_info);
if (ret)
return ret;
/* update state only when disabling/enabling port based VLAN */
vport->port_base_vlan_cfg.state = state;
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
update:
vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
enum hnae3_port_base_vlan_state state,
u16 vlan)
{
if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
if (!vlan)
return HNAE3_PORT_BASE_VLAN_NOCHANGE;
else
return HNAE3_PORT_BASE_VLAN_ENABLE;
} else {
if (!vlan)
return HNAE3_PORT_BASE_VLAN_DISABLE;
else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
return HNAE3_PORT_BASE_VLAN_NOCHANGE;
else
return HNAE3_PORT_BASE_VLAN_MODIFY;
}
}
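/* Transition table implemented above (the stored state is only ever
 * DISABLE or ENABLE, see hclge_update_port_base_vlan_cfg()):
 *
 *	current state	requested vlan		result
 *	DISABLE		0			NOCHANGE
 *	DISABLE		non-zero		ENABLE
 *	ENABLE		0			DISABLE
 *	ENABLE		same as current tag	NOCHANGE
 *	ENABLE		different tag		MODIFY
 */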
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_vlan_info vlan_info;
u16 state;
int ret;
if (hdev->pdev->revision == 0x20)
return -EOPNOTSUPP;
/* qos is a 3-bit value, so it cannot be bigger than 7 */
if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
vport = &hdev->vport[vfid];
state = hclge_get_port_base_vlan_state(vport,
vport->port_base_vlan_cfg.state,
vlan);
if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
return 0;
vlan_info.vlan_tag = vlan;
vlan_info.qos = qos;
vlan_info.vlan_proto = ntohs(proto);
/* update port based VLAN for PF */
if (!vfid) {
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
return hclge_update_port_base_vlan_cfg(vport, state,
&vlan_info);
} else {
ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
(u8)vfid, state,
vlan, qos,
ntohs(proto));
return ret;
}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
bool writen_to_tbl = false;
int ret = 0;
/* When port based VLAN is enabled, we use the port based VLAN as the
 * VLAN filter entry. In this case, we don't update the VLAN filter
 * table when the user adds a new VLAN or removes an existing one;
 * we just update the vport VLAN list. The VLAN ids in the VLAN list
 * won't be written to the VLAN filter table until port based VLAN is
 * disabled.
 */
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
vlan_id, 0, is_kill);
writen_to_tbl = true;
}
if (ret)
return ret;
if (is_kill)
hclge_rm_vport_vlan_table(vport, vlan_id, false);
else
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
return 0;
}
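/* The writen_to_tbl flag recorded above is what makes the deferred
 * write work: entries added while port based VLAN is enabled keep
 * hd_tbl_status == false, and hclge_add_vport_all_vlan_table() flushes
 * them to the hardware filter once port based VLAN is disabled again.
 */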
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
......
......@@ -807,10 +807,11 @@ struct hclge_tx_vtag_cfg {
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
bool strip_tag1_en; /* Whether strip inner vlan tag */
bool strip_tag2_en; /* Whether strip outer vlan tag */
bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */
u8 strip_tag1_en; /* Whether strip inner vlan tag */
u8 strip_tag2_en; /* Whether strip outer vlan tag */
u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
};
struct hclge_rss_tuple_cfg {
......@@ -829,6 +830,17 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAX
};
struct hclge_vlan_info {
u16 vlan_proto; /* so far supports 802.1Q only */
u16 qos;
u16 vlan_tag;
};
struct hclge_port_base_vlan_config {
u16 state;
struct hclge_vlan_info vlan_info;
};
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
......@@ -845,6 +857,7 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
......@@ -924,9 +937,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool is_write_tbl);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto);
#endif
......@@ -289,9 +289,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return 0;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto)
{
#define MSG_DATA_SIZE 8
u8 msg_data[MSG_DATA_SIZE];
memcpy(&msg_data[0], &state, sizeof(u16));
memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
memcpy(&msg_data[4], &qos, sizeof(u16));
memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HLCGE_MBX_PUSH_VLAN_INFO, vfid);
}
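/* Layout of the 8-byte payload built above, one u16 per field,
 * matching what the VF unpacks in its async mailbox handler:
 *	bytes 0-1: port based VLAN state
 *	bytes 2-3: vlan_proto
 *	bytes 4-5: qos
 *	bytes 6-7: vlan_tag
 */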
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
int status = 0;
......@@ -305,19 +321,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
if (!status)
is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
: hclge_add_vport_vlan_table(vport, vlan);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
struct hclge_vlan_info *vlan_info;
u16 *state;
state = (u16 *)&mbx_req->msg[2];
vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
status = hclge_update_port_base_vlan_cfg(vport, *state,
vlan_info);
} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
u8 state;
state = vport->port_base_vlan_cfg.state;
status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
sizeof(u8));
}
if (gen_resp)
status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
return status;
}
......@@ -587,7 +611,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_VLAN:
ret = hclge_set_vf_vlan_cfg(vport, req, false);
ret = hclge_set_vf_vlan_cfg(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to config VF's VLAN\n",
......
......@@ -121,12 +121,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
int hclge_mac_mdio_config(struct hclge_dev *hdev)
{
#define PHY_INEXISTENT 255
struct hclge_mac *mac = &hdev->hw.mac;
struct phy_device *phydev;
struct mii_bus *mdio_bus;
int ret;
if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
dev_info(&hdev->pdev->dev,
"no phy device is connected to mdio bus\n");
return 0;
} else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
hdev->hw.mac.phy_addr);
return -EINVAL;
......
......@@ -334,7 +334,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock_bh(&hdev->hw.cmq.crq.lock);
spin_lock(&hdev->hw.cmq.crq.lock);
/* initialize the pointers of the mailbox's async rx queue */
hdev->arq.hdev = hdev;
......@@ -348,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
hclgevf_cmd_init_regs(&hdev->hw);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
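/* Presumably the point of the lock change above: csq.lock is taken
 * with spin_lock_bh(), so bottom halves are already disabled and the
 * nested crq.lock can use plain spin_lock(); the old inner
 * spin_unlock_bh() re-enabled bottom halves too early, while csq.lock
 * was still held.
 */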
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
......
......@@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
return 0;
}
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
u8 resp_msg;
int ret;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
NULL, 0, true, &resp_msg, sizeof(u8));
if (ret) {
dev_err(&hdev->pdev->dev,
"VF request to get port based vlan state failed %d",
ret);
return ret;
}
nic->port_base_vlan_state = resp_msg;
return 0;
}
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
......@@ -1834,6 +1855,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
/* get current port based vlan state from PF */
ret = hclgevf_get_port_base_vlan_filter_state(hdev);
if (ret)
return ret;
/* get queue configuration from PF */
ret = hclgevf_get_queue_info(hdev);
if (ret)
......@@ -2790,6 +2816,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
}
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
u8 *port_base_vlan_info, u8 data_size)
{
struct hnae3_handle *nic = &hdev->nic;
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
rtnl_unlock();
/* send msg to the PF so it updates the port based vlan info */
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG,
port_base_vlan_info, data_size,
false, NULL, 0);
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
......
......@@ -290,4 +290,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
u8 *port_base_vlan_info, u8 data_size);
#endif
......@@ -198,6 +198,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HLCGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
 * might lose an interrupt event when the mbx task is busy
 * handling it. This shall be cleared when the mbx task just
......@@ -243,8 +244,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u16 link_status, state;
u16 *msg_q, *vlan_info;
u8 duplex;
u32 speed;
u32 tail;
......@@ -299,6 +300,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
case HLCGE_MBX_PUSH_VLAN_INFO:
state = le16_to_cpu(msg_q[1]);
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
break;
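/* msg_q[0] carries the mailbox opcode, so the state/proto/qos/tag
 * quad pushed by the PF starts at msg_q[1]; the same 8 bytes are then
 * handed to hclgevf_update_port_base_vlan_info(), which sends them
 * back to the PF in its HCLGE_MBX_PORT_BASE_VLAN_CFG request.
 */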
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%d) message from arq\n",
......