Commit 632d1a48 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: some code optimizations & cleanups & bugfixes

This patch-set includes code optimizations, cleanups and bugfixes for
the HNS3 ethernet controller driver.

[patch 1/12] logs more detailed error info for ROCE RAS errors.

[patch 2/12] fixes a wrong size issue in the mailbox response data.

[patch 3/12] makes HW GRO handling compliant with the SW one.

[patch 4/12] refactors hns3_get_new_int_gl.

[patch 5/12] adds handling for VF's over_8bd_nfe_err.

[patch 6/12 - 12/12] adds some code optimizations and cleanups to
make the code more readable and compliant with some static code
analysis tools; these modifications do not change the logic of
the code.

Change log:
V1->V2: addresses comments from David Miller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 900d96e4 9b2f3477
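
One of the cleanups in this series rewrites hclge_set_rss_algo_key (see the hunk near the end of the diff) so the RSS hash key is streamed to firmware in descriptor-sized chunks instead of three hard-coded iterations. A minimal standalone sketch of that chunking pattern follows; the 40-byte key and 16-byte-per-descriptor sizes are assumed from the driver's HCLGE_RSS_KEY_SIZE and HCLGE_RSS_HASH_KEY_NUM, and the function and variable names below are illustrative, not the driver's.

#include <stdio.h>
#include <string.h>

#define RSS_KEY_SIZE            40      /* assumed HCLGE_RSS_KEY_SIZE */
#define KEY_BYTES_PER_DESC      16      /* assumed HCLGE_RSS_HASH_KEY_NUM */

/* Take min(remaining, chunk) bytes per pass instead of special-casing
 * the last of three fixed iterations, mirroring the reworked loop.
 */
static void send_key_in_chunks(const unsigned char *key, int key_len)
{
        unsigned char desc_data[KEY_BYTES_PER_DESC];
        int key_offset = 0;

        while (key_len) {
                int key_size = key_len < KEY_BYTES_PER_DESC ?
                               key_len : KEY_BYTES_PER_DESC;

                /* a real driver would fill and send a command descriptor here */
                memcpy(desc_data, key + key_offset * KEY_BYTES_PER_DESC, key_size);
                printf("desc %d: %d bytes from offset %d\n",
                       key_offset, key_size, key_offset * KEY_BYTES_PER_DESC);

                key_len -= key_size;
                key_offset++;
        }
}

int main(void)
{
        unsigned char key[RSS_KEY_SIZE] = { 0 };

        send_key_in_chunks(key, sizeof(key));   /* prints 16, 16, 8 byte chunks */
        return 0;
}
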
......@@ -69,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
......
......@@ -16,14 +16,10 @@ static LIST_HEAD(hnae3_ae_dev_list);
*/
static DEFINE_MUTEX(hnae3_common_lock);
static bool hnae3_client_match(enum hnae3_client_type client_type,
enum hnae3_dev_type dev_type)
static bool hnae3_client_match(enum hnae3_client_type client_type)
{
if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC ||
client_type == HNAE3_CLIENT_ROCE))
return true;
if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC)
if (client_type == HNAE3_CLIENT_KNIC ||
client_type == HNAE3_CLIENT_ROCE)
return true;
return false;
......@@ -39,9 +35,6 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
case HNAE3_CLIENT_KNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
......@@ -61,10 +54,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_ROCE_CLIENT_INITED_B);
......@@ -82,7 +71,7 @@ static int hnae3_init_client_instance(struct hnae3_client *client,
int ret;
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
......@@ -99,7 +88,7 @@ static void hnae3_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
return;
......
......@@ -102,15 +102,9 @@ enum hnae3_loop {
enum hnae3_client_type {
HNAE3_CLIENT_KNIC,
HNAE3_CLIENT_UNIC,
HNAE3_CLIENT_ROCE,
};
enum hnae3_dev_type {
HNAE3_DEV_KNIC,
HNAE3_DEV_UNIC,
};
/* mac media type */
enum hnae3_media_type {
HNAE3_MEDIA_TYPE_UNKNOWN,
......@@ -220,7 +214,6 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
u8 override_pci_need_reset; /* fix to stop multiple reset happening */
enum hnae3_dev_type dev_type;
enum hnae3_reset_type reset_type;
void *priv;
};
......
......@@ -4,8 +4,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
static
int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
......@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP;
}
static
int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
......@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP;
}
static
int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
......@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
static
int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
......
......@@ -17,6 +17,7 @@
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
......@@ -138,8 +139,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
tqp_vectors->name,
tqp_vectors);
tqp_vectors->name, tqp_vectors);
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
tqp_vectors->vector_irq);
......@@ -276,8 +276,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
netdev_err(netdev,
"netif_set_real_num_tx_queues fail, ret=%d!\n",
ret);
"netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
return ret;
}
......@@ -372,7 +371,7 @@ static int hns3_nic_net_up(struct net_device *netdev)
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
netdev_err(netdev, "init irq failed! ret=%d\n", ret);
goto free_rmap;
}
......@@ -448,16 +447,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
ret = hns3_nic_net_up(netdev);
if (ret) {
netdev_err(netdev,
"hns net up fail, ret=%d!\n", ret);
netdev_err(netdev, "net up fail, ret=%d!\n", ret);
return ret;
}
kinfo = &h->kinfo;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
netdev_set_prio_tc_map(netdev, i,
kinfo->prio_tc[i]);
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
......@@ -662,7 +658,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
if (l3.v4->version == 4)
l3.v4->check = 0;
/* tunnel packet.*/
/* tunnel packet */
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL |
......@@ -687,11 +683,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
l3.v4->check = 0;
}
/* normal or tunnel packet*/
/* normal or tunnel packet */
l4_offset = l4.hdr - skb->data;
hdr_len = (l4.tcp->doff << 2) + l4_offset;
/* remove payload length from inner pseudo checksum when tso*/
/* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(l4_paylen));
......@@ -799,7 +795,7 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
il2_hdr = skb_inner_mac_header(skb);
/* compute OL4 header size, defined in 4 Bytes. */
/* compute OL4 header size, defined in 4 Bytes */
l4_len = il2_hdr - l4.hdr;
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
......@@ -1059,8 +1055,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len =
cpu_to_le32(type_cs_vlan_tso);
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
......@@ -1119,7 +1114,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
/* move ring pointer to next.*/
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
desc_cb = &ring->desc_cb[ring->next_to_use];
......@@ -1827,8 +1822,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hnae3_ae_dev *ae_dev;
int ret;
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
GFP_KERNEL);
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
if (!ae_dev) {
ret = -ENOMEM;
return ret;
......@@ -1836,7 +1830,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
ae_dev->dev_type = HNAE3_DEV_KNIC;
ae_dev->reset_type = HNAE3_NONE_RESET;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
......@@ -2209,8 +2202,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ ring->desc_cb[i].page_offset);
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
......@@ -2312,8 +2305,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
int cleand_count)
{
struct hns3_desc_cb *desc_cb;
struct hns3_desc_cb res_cbs;
......@@ -2366,7 +2359,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Avoid re-using remote pages, or the stack is still using the page
* when page_offset rollback to zero, flag default unreuse
*/
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
......@@ -2375,7 +2368,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given*/
/* Bump ref count on page before it is given */
get_page(desc_cb->priv);
} else if (page_count(desc_cb->priv) == 1) {
desc_cb->reuse_flag = 1;
......@@ -2384,13 +2377,13 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
}
}
static int hns3_gro_complete(struct sk_buff *skb)
static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
int depth = 0;
while (type == htons(ETH_P_8021Q)) {
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
if ((depth + VLAN_HLEN) > skb_headlen(skb))
......@@ -2401,10 +2394,24 @@ static int hns3_gro_complete(struct sk_buff *skb)
depth += VLAN_HLEN;
}
skb_set_network_header(skb, depth);
if (type == htons(ETH_P_IP)) {
const struct iphdr *iph = ip_hdr(skb);
depth += sizeof(struct iphdr);
skb_set_transport_header(skb, depth);
th = tcp_hdr(skb);
th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
iph->daddr, 0);
} else if (type == htons(ETH_P_IPV6)) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
depth += sizeof(struct ipv6hdr);
skb_set_transport_header(skb, depth);
th = tcp_hdr(skb);
th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
&iph->daddr, 0);
} else {
netdev_err(skb->dev,
"Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
......@@ -2412,13 +2419,16 @@ static int hns3_gro_complete(struct sk_buff *skb)
return -EFAULT;
}
th = (struct tcphdr *)(skb->data + depth);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
}
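
The hunk above appears to be the core of patch 3: a HW-GRO-merged skb is now completed the same way the SW GRO path completes one, i.e. th->check is seeded with the non-inverted pseudo-header checksum and the skb is marked CHECKSUM_PARTIAL instead of CHECKSUM_UNNECESSARY. Below is a minimal userspace model of that IPv4 pseudo-header sum, assuming host-order inputs purely for illustration; the helper names and example addresses are not from the driver.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
static uint16_t csum_fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Ones'-complement sum of the TCP/IPv4 pseudo header: saddr, daddr,
 * protocol (6) and TCP length. This non-inverted value is what
 * ~tcp_v4_check(len, saddr, daddr, 0) yields, and it is the seed the
 * driver stores in th->check so only the TCP bytes remain to be summed
 * by the stack or hardware later.
 */
static uint16_t tcp_v4_pseudo_sum(uint32_t saddr, uint32_t daddr, uint16_t tcp_len)
{
        uint32_t sum = 0;

        sum += saddr >> 16;
        sum += saddr & 0xffff;
        sum += daddr >> 16;
        sum += daddr & 0xffff;
        sum += 6;               /* IPPROTO_TCP */
        sum += tcp_len;

        return csum_fold16(sum);
}

int main(void)
{
        /* arbitrary example: 192.0.2.1 -> 192.0.2.2, 1400 TCP bytes */
        printf("seed = 0x%04x\n", tcp_v4_pseudo_sum(0xc0000201, 0xc0000202, 1400));
        return 0;
}
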
......@@ -2565,7 +2575,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
......@@ -2656,21 +2666,22 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
u32 bd_base_info, u32 ol_info)
{
u16 gro_count;
u32 l3_type;
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
/* if there is no HW GRO, do not set gro params */
if (!gro_count) {
if (!skb_shinfo(skb)->gso_size) {
hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
return 0;
}
NAPI_GRO_CB(skb)->count = gro_count;
NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
HNS3_RXD_L3ID_S);
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
......@@ -2678,11 +2689,7 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
else
return -EFAULT;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
return hns3_gro_complete(skb);
return hns3_gro_complete(skb, l234info);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
......@@ -2862,8 +2869,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return 0;
}
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
......@@ -2916,42 +2922,25 @@ int hns3_clean_rx_ring(
out:
/* Make all data has been write before submit */
if (clean_count + unused_count > 0)
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
return recv_pkts;
}
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
{
struct hns3_enet_tqp_vector *tqp_vector =
ring_group->ring->tqp_vector;
#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000
#define HNS3_RX_ULTRA_PACKET_RATE 40
enum hns3_flow_level_range new_flow_level;
int packets_per_msecs;
int bytes_per_msecs;
struct hns3_enet_tqp_vector *tqp_vector;
int packets_per_msecs, bytes_per_msecs;
u32 time_passed_ms;
u16 new_int_gl;
if (!tqp_vector->last_jiffies)
return false;
if (ring_group->total_packets == 0) {
ring_group->coal.int_gl = HNS3_INT_GL_50K;
ring_group->coal.flow_level = HNS3_FLOW_LOW;
return true;
}
/* Simple throttlerate management
* 0-10MB/s lower (50000 ints/s)
* 10-20MB/s middle (20000 ints/s)
* 20-1249MB/s high (18000 ints/s)
* > 40000pps ultra (8000 ints/s)
*/
new_flow_level = ring_group->coal.flow_level;
new_int_gl = ring_group->coal.int_gl;
tqp_vector = ring_group->ring->tqp_vector;
time_passed_ms =
jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
if (!time_passed_ms)
return false;
......@@ -2961,9 +2950,14 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
do_div(ring_group->total_bytes, time_passed_ms);
bytes_per_msecs = ring_group->total_bytes;
#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000
new_flow_level = ring_group->coal.flow_level;
/* Simple throttlerate management
* 0-10MB/s lower (50000 ints/s)
* 10-20MB/s middle (20000 ints/s)
* 20-1249MB/s high (18000 ints/s)
* > 40000pps ultra (8000 ints/s)
*/
switch (new_flow_level) {
case HNS3_FLOW_LOW:
if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
......@@ -2983,13 +2977,40 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
break;
}
#define HNS3_RX_ULTRA_PACKET_RATE 40
if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
&tqp_vector->rx_group == ring_group)
new_flow_level = HNS3_FLOW_ULTRA;
switch (new_flow_level) {
ring_group->total_bytes = 0;
ring_group->total_packets = 0;
ring_group->coal.flow_level = new_flow_level;
return true;
}
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
struct hns3_enet_tqp_vector *tqp_vector;
u16 new_int_gl;
if (!ring_group->ring)
return false;
tqp_vector = ring_group->ring->tqp_vector;
if (!tqp_vector->last_jiffies)
return false;
if (ring_group->total_packets == 0) {
ring_group->coal.int_gl = HNS3_INT_GL_50K;
ring_group->coal.flow_level = HNS3_FLOW_LOW;
return true;
}
if (!hns3_get_new_flow_lvl(ring_group))
return false;
new_int_gl = ring_group->coal.int_gl;
switch (ring_group->coal.flow_level) {
case HNS3_FLOW_LOW:
new_int_gl = HNS3_INT_GL_50K;
break;
......@@ -3006,9 +3027,6 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
break;
}
ring_group->total_bytes = 0;
ring_group->total_packets = 0;
ring_group->coal.flow_level = new_flow_level;
if (new_int_gl != ring_group->coal.int_gl) {
ring_group->coal.int_gl = new_int_gl;
return true;
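
Patch 4's refactor above splits the rate-to-flow-level policy (hns3_get_new_flow_lvl) out of hns3_get_new_int_gl. As a rough standalone model of that policy for orientation: this is an editor's sketch, the byte-rate thresholds come from the hunk, the ints/s mapping follows the in-code comment, the per-level state machine of the real code is deliberately ignored, and the names below are illustrative.

#include <stdio.h>

enum flow_level { FLOW_LOW, FLOW_MID, FLOW_HIGH, FLOW_ULTRA };

#define RX_LOW_BYTE_RATE        10000   /* bytes per ms, i.e. 10 MB/s */
#define RX_MID_BYTE_RATE        20000   /* 20 MB/s */
#define RX_ULTRA_PACKET_RATE    40      /* packets per ms, i.e. 40000 pps */

/* Stateless simplification: pick a level from the current byte rate,
 * then promote an RX ring to ULTRA when the packet rate is very high.
 */
static enum flow_level pick_level(int bytes_per_ms, int packets_per_ms, int is_rx)
{
        enum flow_level lvl;

        if (bytes_per_ms <= RX_LOW_BYTE_RATE)
                lvl = FLOW_LOW;
        else if (bytes_per_ms <= RX_MID_BYTE_RATE)
                lvl = FLOW_MID;
        else
                lvl = FLOW_HIGH;

        if (is_rx && packets_per_ms > RX_ULTRA_PACKET_RATE)
                lvl = FLOW_ULTRA;
        return lvl;
}

/* Interrupt rate per level, per the comment in the hunk above:
 * LOW -> 50000 ints/s, MID -> 20000, HIGH -> 18000, ULTRA -> 8000.
 */
static int level_to_ints_per_sec(enum flow_level lvl)
{
        static const int tbl[] = { 50000, 20000, 18000, 8000 };

        return tbl[lvl];
}

int main(void)
{
        enum flow_level lvl = pick_level(15000, 30, 1); /* example rates */

        printf("level %d -> %d ints/s\n", lvl, level_to_ints_per_sec(lvl));
        return 0;
}
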
......@@ -3309,6 +3327,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
if (!vector)
return -ENOMEM;
/* save the actual available vector number */
vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
priv->vector_num = vector_num;
......@@ -3577,8 +3596,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
struct hnae3_queue *q = ring->tqp;
if (!HNAE3_IS_TX_RING(ring)) {
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
(u32)dma);
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
......@@ -4024,8 +4042,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ret);
return ret;
}
hns3_replace_buffer(ring, ring->next_to_use,
&res_cbs);
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
}
ring_ptr_move_fw(ring, next_to_use);
}
......@@ -4200,7 +4217,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
if (ret) {
set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
netdev_err(kinfo->netdev,
"hns net up fail, ret=%d!\n", ret);
"net up fail, ret=%d!\n", ret);
return ret;
}
}
......
......@@ -60,6 +60,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
#define HNS3_NIC_LB_SETUP_USEC 10000
/* Nic loopback test err */
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
......@@ -117,7 +118,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return ret;
}
......@@ -132,7 +133,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
}
usleep_range(10000, 20000);
usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return 0;
}
......@@ -149,6 +150,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
/* The dst mac addr of the loopback packet is the same as the host's
* mac addr, so the SSU component may loop the packet back to the host
* before it reaches the mac or serdes, which would defeat the purpose
* of the mac or serdes selftest.
*/
ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
......@@ -435,7 +442,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
buff = hns3_get_strings_tqps(h, buff);
h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
ops->get_strings(h, stringset, (u8 *)buff);
break;
case ETH_SS_TEST:
ops->get_strings(h, stringset, data);
......@@ -510,6 +517,11 @@ static void hns3_get_drvinfo(struct net_device *netdev,
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
if (!h->ae_algo->ops->get_fw_version) {
netdev_err(netdev, "could not get fw version!\n");
return;
}
strncpy(drvinfo->version, hns3_driver_version,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
......@@ -530,7 +542,7 @@ static u32 hns3_get_link(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
if (h->ae_algo->ops->get_status)
return h->ae_algo->ops->get_status(h);
else
return 0;
......@@ -560,7 +572,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
if (h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
&param->rx_pause, &param->tx_pause);
}
......@@ -610,9 +622,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
u8 media_type;
u8 link_stat;
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
ops = h->ae_algo->ops;
if (ops->get_media_type)
ops->get_media_type(h, &media_type, &module_type);
......@@ -740,8 +749,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_key_size)
if (!h->ae_algo->ops->get_rss_key_size)
return 0;
return h->ae_algo->ops->get_rss_key_size(h);
......@@ -751,8 +759,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_indir_size)
if (!h->ae_algo->ops->get_rss_indir_size)
return 0;
return h->ae_algo->ops->get_rss_indir_size(h);
......@@ -763,7 +770,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
......@@ -774,7 +781,7 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
if ((h->pdev->revision == 0x20 &&
......@@ -799,9 +806,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
......@@ -915,9 +919,6 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
if (h->ae_algo->ops->set_rss_tuple)
......@@ -1193,7 +1194,7 @@ static int hns3_set_phys_id(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
if (!h->ae_algo->ops->set_led_id)
return -EOPNOTSUPP;
return h->ae_algo->ops->set_led_id(h, state);
......
......@@ -180,6 +180,9 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
/* PPU commands */
HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
......@@ -268,6 +271,8 @@ enum hclge_opcode_type {
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
......@@ -644,6 +649,11 @@ enum hclge_mac_vlan_tbl_opcode {
HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */
};
enum hclge_mac_vlan_add_resp_code {
HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
};
#define HCLGE_MAC_VLAN_BIT0_EN_B 0
#define HCLGE_MAC_VLAN_BIT1_EN_B 1
#define HCLGE_MAC_EPORT_SW_EN_B 12
......@@ -977,6 +987,20 @@ struct hclge_get_m7_bd_cmd {
u8 rsv[20];
};
struct hclge_query_ppu_pf_other_int_dfx_cmd {
__le16 over_8bd_no_fe_qid;
__le16 over_8bd_no_fe_vf_id;
__le16 tso_mss_cmp_min_err_qid;
__le16 tso_mss_cmp_min_err_vf_id;
__le16 tso_mss_cmp_max_err_qid;
__le16 tso_mss_cmp_max_err_vf_id;
__le16 tx_rd_fbd_poison_qid;
__le16 tx_rd_fbd_poison_vf_id;
__le16 rx_rd_fbd_poison_qid;
__le16 rx_rd_fbd_poison_vf_id;
u8 rsv[4];
};
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
......
......@@ -64,6 +64,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
char *cmd_buf, int msg_num, int offset,
enum hclge_opcode_type cmd)
{
#define BD_DATA_NUM 6
struct hclge_desc *desc_src;
struct hclge_desc *desc;
int bd_num, buf_len;
......@@ -92,14 +94,16 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
max = (bd_num * BD_DATA_NUM) <= msg_num ?
(bd_num * BD_DATA_NUM) : msg_num;
desc = desc_src;
for (i = 0; i < max; i++) {
(((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc;
((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message, desc->data[i % 6]);
dfx_message->message,
desc->data[i % BD_DATA_NUM]);
dfx_message++;
}
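
A quick worked example of the loop above (numbers assumed for illustration): with bd_num = 3 and msg_num = 20, max = min(3 * BD_DATA_NUM, 20) = 18, the descriptor pointer advances at i = 6 and i = 12, and each message is printed from desc->data[i % BD_DATA_NUM].
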
......@@ -395,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
......@@ -403,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
......@@ -412,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map);
......@@ -473,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT;
......
......@@ -673,19 +673,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
enum hclge_err_int_type int_type)
{
struct device *dev = &hdev->pdev->dev;
int num = 1;
int desc_num = 1;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) {
desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
num = 2;
desc_num = 2;
}
if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret);
......@@ -941,7 +941,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int num = 1;
int desc_num = 1;
int ret;
/* configure PPU error interrupts */
......@@ -960,7 +960,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
num = 2;
desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
......@@ -978,7 +978,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
return -EINVAL;
}
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret;
}
......@@ -1388,6 +1388,66 @@ static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
return ret;
}
static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[3];
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret);
return ret;
}
dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0;
}
static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int ret;
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
HCLGE_CMD_FLAG_NEXT, 0, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
}
dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
}
static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
......@@ -1395,8 +1455,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
int ret;
/* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_ROCEE_PF_RAS_INT_CMD,
ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
......@@ -1456,19 +1515,27 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
status = le32_to_cpu(desc[0].data[0]);
if (status & HCLGE_ROCEE_RERR_INT_MASK) {
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
dev_warn(dev, "ROCEE RAS AXI rresp error\n");
reset_type = HNAE3_FUNC_RESET;
}
if (status & HCLGE_ROCEE_BERR_INT_MASK) {
if (status & HCLGE_ROCEE_BERR_INT_MASK)
dev_warn(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
ret = hclge_log_rocee_axi_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
}
if (status & HCLGE_ROCEE_OVF_INT_MASK) {
......@@ -1478,7 +1545,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* reset everything for now */
return HNAE3_GLOBAL_RESET;
}
reset_type = HNAE3_FUNC_RESET;
}
/* clear error status */
......@@ -1620,6 +1686,81 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
return PCI_ERS_RESULT_RECOVERED;
}
/* hclge_query_over_8bd_err_info: query information about over_8bd_nfe_err
* @hdev: pointer to struct hclge_dev
* @vf_id: Index of the virtual function with error
* @q_id: Physical index of the queue with error
*
* This function gets the specific index of the queue and function which
* caused over_8bd_nfe_err by using a command. If vf_id is 0, it means the
* error is caused by the PF instead of a VF.
*/
static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id,
u16 *q_id)
{
struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
return ret;
req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
*vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
*q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
return 0;
}
/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err
* @hdev: pointer to struct hclge_dev
* @reset_requests: reset level that we need to trigger later
*
* over_8bd_nfe_err is a special MSI-X error because it may be caused by a
* VF; in that case, we need to trigger a VF reset. Otherwise, a PF reset is
* needed.
*/
static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
struct device *dev = &hdev->pdev->dev;
u16 vf_id;
u16 q_id;
int ret;
ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id);
if (ret) {
dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n",
ret);
return;
}
dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
dev_err(dev, "invalid vf id(%d)\n", vf_id);
return;
}
/* If we need to trigger other reset whose level is higher
* than HNAE3_VF_FUNC_RESET, no need to trigger a VF reset
* here.
*/
if (*reset_requests != 0)
return;
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
}
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
......@@ -1732,6 +1873,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
set_bit(reset_level, reset_requests);
}
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
if (status)
hclge_handle_over_8bd_err(hdev, reset_requests);
/* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
......
......@@ -83,7 +83,8 @@
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
......@@ -94,6 +95,7 @@
#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0)
#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
......
......@@ -28,6 +28,10 @@
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define HCLGE_RESET_MAX_FAIL_CNT 5
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
......@@ -439,8 +443,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_QUERY_RX_STATUS,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
......@@ -502,6 +505,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
/* each tqp has TX & RX two queues */
return kinfo->num_tqps * (2);
}
......@@ -646,8 +650,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
u32 stringset,
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
u8 *p = (char *)data;
......@@ -655,21 +658,17 @@ static void hclge_get_strings(struct hnae3_handle *handle,
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(stringset,
g_mac_stats_string,
size,
p);
p = hclge_comm_get_strings(stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p,
hns3_nic_test_strs[HNAE3_LOOP_APP],
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
memcpy(p,
hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
......@@ -680,8 +679,7 @@ static void hclge_get_strings(struct hnae3_handle *handle,
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
memcpy(p,
hns3_nic_test_strs[HNAE3_LOOP_PHY],
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
......@@ -694,10 +692,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string),
data);
p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
......@@ -728,6 +724,8 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5
struct hclge_func_status_cmd *req;
struct hclge_desc desc;
int timeout = 0;
......@@ -740,9 +738,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"query function status failed %d.\n",
ret);
"query function status failed %d.\n", ret);
return ret;
}
......@@ -750,7 +746,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
if (req->pf_state)
break;
usleep_range(1000, 2000);
} while (timeout++ < 5);
} while (timeout++ < HCLGE_QUERY_MAX_CNT);
ret = hclge_parse_func_status(hdev, req);
......@@ -1461,11 +1457,6 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_handle *nic = &vport->nic;
......@@ -1476,20 +1467,12 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
if (ret)
dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
if (ret) {
dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
ret);
return ret;
}
} else {
hclge_unic_setup(vport, num_tqps);
}
return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
......@@ -1675,7 +1658,8 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
hdev->dv_buf_size;
else
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
......@@ -1693,7 +1677,8 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
if (hnae3_dev_dcb_supported(hdev)) {
buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
HCLGE_BUF_SIZE_UNIT);
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
......@@ -1706,9 +1691,9 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
else
hi_thrd = shared_buf - hdev->dv_buf_size;
hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
lo_thrd = hi_thrd - aligned_mps / 2;
lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
} else {
hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
lo_thrd = aligned_mps;
......@@ -1769,12 +1754,13 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = max ? aligned_mps : 256;
priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
priv->wl.high = roundup(priv->wl.low + aligned_mps,
HCLGE_BUF_SIZE_UNIT);
} else {
priv->wl.low = 0;
priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
aligned_mps;
}
priv->buf_size = priv->wl.high + hdev->dv_buf_size;
......@@ -2157,7 +2143,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
duplex = HCLGE_MAC_FULL;
......@@ -2175,7 +2160,8 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
......@@ -2537,7 +2523,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
struct hclge_sfp_info_cmd *resp = NULL;
struct hclge_sfp_info_cmd *resp;
struct hclge_desc desc;
int ret;
......@@ -2865,8 +2851,7 @@ int hclge_notify_client(struct hclge_dev *hdev,
struct hnae3_client *client = hdev->nic_client;
u16 i;
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
!client)
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
return 0;
if (!client->ops->reset_notify)
......@@ -2894,8 +2879,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
int ret = 0;
u16 i;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
!client)
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
return 0;
if (!client->ops->reset_notify)
......@@ -3226,7 +3210,6 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10
if (hdev->reset_pending) {
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
......@@ -3251,7 +3234,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
hclge_clear_reset_cause(hdev);
mod_timer(&hdev->reset_timer,
jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
jiffies + HCLGE_RESET_INTERVAL);
return false;
}
......@@ -3278,6 +3261,25 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
return ret;
}
static int hclge_reset_stack(struct hclge_dev *hdev)
{
int ret;
ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
return ret;
ret = hclge_reset_ae_dev(hdev->ae_dev);
if (ret)
return ret;
ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
if (ret)
return ret;
return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
......@@ -3321,19 +3323,8 @@ static void hclge_reset(struct hclge_dev *hdev)
goto err_reset;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
goto err_reset_lock;
ret = hclge_reset_ae_dev(hdev->ae_dev);
if (ret)
goto err_reset_lock;
ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
if (ret)
goto err_reset_lock;
ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
ret = hclge_reset_stack(hdev);
if (ret)
goto err_reset_lock;
......@@ -3343,16 +3334,23 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset_lock;
rtnl_unlock();
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
*/
if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
goto err_reset;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset_lock;
rtnl_unlock();
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
if (ret)
goto err_reset;
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset;
......@@ -3395,7 +3393,8 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
if (time_before(jiffies, (hdev->last_reset_time +
HCLGE_RESET_INTERVAL)))
return;
else if (hdev->default_reset_request)
hdev->reset_level =
......@@ -3615,28 +3614,27 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
{
struct hclge_rss_config_cmd *req;
struct hclge_desc desc;
int key_offset;
int key_offset = 0;
int key_counts;
int key_size;
int ret;
key_counts = HCLGE_RSS_KEY_SIZE;
req = (struct hclge_rss_config_cmd *)desc.data;
for (key_offset = 0; key_offset < 3; key_offset++) {
while (key_counts) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
false);
req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
if (key_offset == 2)
key_size =
HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
else
key_size = HCLGE_RSS_HASH_KEY_NUM;
key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
key_counts -= key_size;
key_offset++;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
......@@ -4156,8 +4154,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
int vector,
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
......@@ -4174,8 +4171,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
int vector,
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
......@@ -4196,8 +4192,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
vector_id,
ret);
vector_id, ret);
return ret;
}
......@@ -4503,19 +4498,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
case 0:
return false;
case BIT(INNER_DST_MAC):
for (i = 0; i < 6; i++) {
calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
for (i = 0; i < ETH_ALEN; i++) {
calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
rule->tuples_mask.dst_mac[i]);
calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
rule->tuples_mask.dst_mac[i]);
}
return true;
case BIT(INNER_SRC_MAC):
for (i = 0; i < 6; i++) {
calc_x(key_x[5 - i], rule->tuples.src_mac[i],
for (i = 0; i < ETH_ALEN; i++) {
calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
rule->tuples.src_mac[i]);
calc_y(key_y[5 - i], rule->tuples.src_mac[i],
calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
rule->tuples.src_mac[i]);
}
......@@ -4551,19 +4546,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
return true;
case BIT(INNER_SRC_IP):
calc_x(tmp_x_l, rule->tuples.src_ip[3],
rule->tuples_mask.src_ip[3]);
calc_y(tmp_y_l, rule->tuples.src_ip[3],
rule->tuples_mask.src_ip[3]);
calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
rule->tuples_mask.src_ip[IPV4_INDEX]);
calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
rule->tuples_mask.src_ip[IPV4_INDEX]);
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
case BIT(INNER_DST_IP):
calc_x(tmp_x_l, rule->tuples.dst_ip[3],
rule->tuples_mask.dst_ip[3]);
calc_y(tmp_y_l, rule->tuples.dst_ip[3],
rule->tuples_mask.dst_ip[3]);
calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
rule->tuples_mask.dst_ip[IPV4_INDEX]);
calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
rule->tuples_mask.dst_ip[IPV4_INDEX]);
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
......@@ -4812,6 +4807,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */
if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
!tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
......@@ -4836,6 +4832,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
!usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
......@@ -4979,14 +4976,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
rule->tuples.src_ip[3] =
rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
rule->tuples_mask.src_ip[3] =
rule->tuples_mask.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
rule->tuples.dst_ip[3] =
rule->tuples.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
rule->tuples_mask.dst_ip[3] =
rule->tuples_mask.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
......@@ -5005,14 +5002,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
case IP_USER_FLOW:
rule->tuples.src_ip[3] =
rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
rule->tuples_mask.src_ip[3] =
rule->tuples_mask.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
rule->tuples.dst_ip[3] =
rule->tuples.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
rule->tuples_mask.dst_ip[3] =
rule->tuples_mask.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
......@@ -5029,14 +5026,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
be32_to_cpu_array(rule->tuples.src_ip,
fs->h_u.tcp_ip6_spec.ip6src, 4);
fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
fs->m_u.tcp_ip6_spec.ip6src, 4);
fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip,
fs->h_u.tcp_ip6_spec.ip6dst, 4);
fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip,
fs->m_u.tcp_ip6_spec.ip6dst, 4);
fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port =
......@@ -5052,14 +5049,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
case IPV6_USER_FLOW:
be32_to_cpu_array(rule->tuples.src_ip,
fs->h_u.usr_ip6_spec.ip6src, 4);
fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
fs->m_u.usr_ip6_spec.ip6src, 4);
fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip,
fs->h_u.usr_ip6_spec.ip6dst, 4);
fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip,
fs->m_u.usr_ip6_spec.ip6dst, 4);
fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
......@@ -5259,13 +5256,12 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
"Delete fail, rule %d is inexistent\n",
fs->location);
"Delete fail, rule %d is inexistent\n", fs->location);
return -ENOENT;
}
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
fs->location, NULL, false);
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
NULL, false);
if (ret)
return ret;
......@@ -5402,16 +5398,16 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
fs->h_u.tcp_ip4_spec.ip4src =
cpu_to_be32(rule->tuples.src_ip[3]);
cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4src =
rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.ip4dst =
cpu_to_be32(rule->tuples.dst_ip[3]);
cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4dst =
rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip4_spec.psrc =
......@@ -5431,16 +5427,16 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
case IP_USER_FLOW:
fs->h_u.usr_ip4_spec.ip4src =
cpu_to_be32(rule->tuples.src_ip[3]);
cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4src =
rule->unused_tuple & BIT(INNER_SRC_IP) ?
0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.ip4dst =
cpu_to_be32(rule->tuples.dst_ip[3]);
cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.usr_ip4_spec.ip4dst =
rule->unused_tuple & BIT(INNER_DST_IP) ?
0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
fs->m_u.usr_ip4_spec.tos =
......@@ -5459,20 +5455,22 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
rule->tuples.src_ip, 4);
rule->tuples.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
rule->tuples_mask.src_ip, 4);
rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
rule->tuples.dst_ip, 4);
rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
rule->tuples_mask.dst_ip, 4);
rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip6_spec.psrc =
......@@ -5487,20 +5485,22 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
case IPV6_USER_FLOW:
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
rule->tuples.src_ip, 4);
rule->tuples.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
memset(fs->m_u.usr_ip6_spec.ip6src, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
rule->tuples_mask.src_ip, 4);
rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
rule->tuples.dst_ip, 4);
rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
rule->tuples_mask.dst_ip, 4);
rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
fs->m_u.usr_ip6_spec.l4_proto =
......@@ -6157,11 +6157,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
return_status = 0;
} else if (resp_code == 2) {
} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
} else if (resp_code == 3) {
} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
......@@ -6206,13 +6206,15 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192
int word_num;
int bit_num;
if (vfid > 255 || vfid < 0)
return -EIO;
if (vfid >= 0 && vfid <= 191) {
if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
......@@ -6220,7 +6222,7 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - 192) / 32;
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
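For clarity, a self-contained sketch of the vfid-to-bit mapping that hclge_update_desc_vfid() performs; the helper name locate_vfid() and the printed example are illustrative, only the 192-vfid split mirrors the patch:

#include <stdio.h>

#define VF_NUM_IN_FIRST_DESC 192	/* mirrors HCLGE_VF_NUM_IN_FIRST_DESC */

/* Map a vfid to (descriptor index, 32-bit word, bit) under the assumed
 * layout: vfids 0..191 live in desc[1], the remainder in desc[2].
 */
static void locate_vfid(int vfid, int *desc_idx, int *word_num, int *bit_num)
{
	if (vfid < VF_NUM_IN_FIRST_DESC) {
		*desc_idx = 1;
		*word_num = vfid / 32;
	} else {
		*desc_idx = 2;
		*word_num = (vfid - VF_NUM_IN_FIRST_DESC) / 32;
	}
	*bit_num = vfid % 32;
}

int main(void)
{
	int d, w, b;

	locate_vfid(200, &d, &w, &b);
	printf("vfid 200 -> desc[%d].data[%d] bit %d\n", d, w, b);
	return 0;
}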
......@@ -6403,6 +6405,10 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
mutex_init(&hdev->umv_mutex);
hdev->max_umv_size = allocated_size;
/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
* preserve some unicast mac vlan table entries shared by pf
* and its vfs.
*/
hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_req_vfs + 2);
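A minimal sketch of the split described in the new comment, using made-up values for max_umv_size and num_req_vfs:

#include <stdio.h>

int main(void)
{
	unsigned int max_umv_size = 256;	/* assumed total UMV entries */
	unsigned int num_req_vfs = 6;		/* assumed VF count */
	unsigned int slices = num_req_vfs + 2;	/* VFs, the PF, and one shared slice */

	unsigned int priv_umv_size = max_umv_size / slices;
	unsigned int share_umv_size = priv_umv_size + max_umv_size % slices;

	printf("private per function: %u, shared pool: %u\n",
	       priv_umv_size, share_umv_size);
	return 0;
}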
......@@ -6435,7 +6441,9 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
if (!is_alloc)
hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
......@@ -6524,8 +6532,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
is_multicast_ether_addr(addr)) {
dev_err(&hdev->pdev->dev,
"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
addr,
is_zero_ether_addr(addr),
addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
......@@ -6592,8 +6599,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
dev_dbg(&hdev->pdev->dev,
"Remove mac err! invalid mac:%pM.\n",
dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
addr);
return -EINVAL;
}
......@@ -6635,18 +6641,16 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
/* This mac addr exist, update VFID for it */
hclge_update_desc_vfid(desc, vport->vport_id, false);
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
} else {
if (status) {
/* This mac addr do not exist, add new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
memset(desc[2].data, 0, sizeof(desc[0].data));
hclge_update_desc_vfid(desc, vport->vport_id, false);
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
status = hclge_update_desc_vfid(desc, vport->vport_id, false);
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
if (status == -ENOSPC)
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
......@@ -6684,7 +6688,9 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
/* This mac addr exist, remove this handle's VFID for it */
hclge_update_desc_vfid(desc, vport->vport_id, true);
status = hclge_update_desc_vfid(desc, vport->vport_id, true);
if (status)
return status;
if (hclge_is_all_function_id_zero(desc))
/* All the vfid is zero, so need to delete this entry */
......@@ -7757,7 +7763,7 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret = 0;
int i, max_frm_size, ret;
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
......@@ -7868,7 +7874,7 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret = 0;
int ret;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
......@@ -7885,7 +7891,6 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret;
}
reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
......@@ -7924,7 +7929,6 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
return;
}
reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
......@@ -7994,7 +7998,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
u16 remote_advertising = 0;
u16 local_advertising = 0;
u16 local_advertising;
u32 rx_pause, tx_pause;
u8 flowctl;
......@@ -8263,17 +8267,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (ret)
goto clear_roce;
break;
case HNAE3_CLIENT_UNIC:
hdev->nic_client = client;
vport->nic.client = client;
ret = client->ops->init_instance(&vport->nic);
if (ret)
goto clear_nic;
hnae3_set_client_init_flag(client, ae_dev, 1);
break;
case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) {
......@@ -8718,8 +8711,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_fd_config(hdev);
if (ret) {
dev_err(&pdev->dev,
"fd table init fail, ret=%d\n", ret);
dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
return ret;
}
......@@ -8910,10 +8902,12 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2
struct hclge_desc *desc;
u32 *reg_val = data;
__le32 *desc_data;
int nodata_num;
int cmd_num;
int i, k, n;
int ret;
......@@ -8921,7 +8915,9 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0)
return 0;
cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
HCLGE_32_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
......@@ -8938,7 +8934,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) {
if (i == 0) {
desc_data = (__le32 *)(&desc[i].data[0]);
n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
} else {
desc_data = (__le32 *)(&desc[i]);
n = HCLGE_32_BIT_REG_RTN_DATANUM;
......@@ -8960,10 +8956,12 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1
struct hclge_desc *desc;
u64 *reg_val = data;
__le64 *desc_data;
int nodata_len;
int cmd_num;
int i, k, n;
int ret;
......@@ -8971,7 +8969,9 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0)
return 0;
cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
HCLGE_64_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
......@@ -8988,7 +8988,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) {
if (i == 0) {
desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
} else {
desc_data = (__le64 *)(&desc[i]);
n = HCLGE_64_BIT_REG_RTN_DATANUM;
......
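Both register-dump helpers above size their descriptor array the same way; a standalone sketch of that calculation, with hypothetical register and no-data counts:

#include <stdio.h>

#define REG_RTN_DATANUM	8	/* data words per descriptor (assumed) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int regs_num = 30;	/* hypothetical register count */
	unsigned int nodata_num = 2;	/* words of desc[0] taken by the header */
	unsigned int cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
					    REG_RTN_DATANUM);

	printf("regs_num=%u nodata=%u -> cmd_num=%u\n",
	       regs_num, nodata_num, cmd_num);
	return 0;
}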
......@@ -474,6 +474,7 @@ enum HCLGE_FD_KEY_TYPE {
enum HCLGE_FD_STAGE {
HCLGE_FD_STAGE_1,
HCLGE_FD_STAGE_2,
MAX_STAGE_NUM,
};
/* OUTER_XXX indicates tuples in tunnel header of tunnel packet
......@@ -528,7 +529,7 @@ enum HCLGE_FD_META_DATA {
struct key_info {
u8 key_type;
u8 key_length;
u8 key_length; /* in bits */
};
static const struct key_info meta_data_key_info[] = {
......@@ -612,18 +613,23 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
u16 max_key_length;
u16 max_key_length; /* in bits */
u32 proto_support;
u32 rule_num[2]; /* rule entry number */
u16 cnt_num[2]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[2];
u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
};
#define IPV4_INDEX 3
#define IPV6_SIZE 4
struct hclge_fd_rule_tuples {
u8 src_mac[6];
u8 dst_mac[6];
u32 src_ip[4];
u32 dst_ip[4];
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
/* Be compatible for ip address of both ipv4 and ipv6.
* For ipv4 address, we store it in src/dst_ip[3].
*/
u32 src_ip[IPV6_SIZE];
u32 dst_ip[IPV6_SIZE];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
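A small sketch of the convention stated in the new comment: an IPv4 address occupies only index IPV4_INDEX of the IPV6_SIZE-word array (the address value is arbitrary):

#include <stdio.h>

#define IPV4_INDEX	3
#define IPV6_SIZE	4

int main(void)
{
	unsigned int dst_ip[IPV6_SIZE] = { 0 };	/* holds either IPv4 or IPv6 */
	unsigned int v4_addr = 0xc0a80101;	/* 192.168.1.1 in host order */

	/* an IPv4 address uses only the last u32 of the array */
	dst_ip[IPV4_INDEX] = v4_addr;

	printf("dst_ip[%d] = 0x%08x\n", IPV4_INDEX, dst_ip[IPV4_INDEX]);
	return 0;
}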
......@@ -693,6 +699,8 @@ struct hclge_mac_tnl_stats {
u32 status;
};
#define HCLGE_RESET_INTERVAL (10 * HZ)
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
......
......@@ -93,7 +93,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else if (hdev->reset_type == HNAE3_FLR_RESET)
reset_type = HNAE3_VF_FULL_RESET;
else
return -EINVAL;
reset_type = HNAE3_VF_FUNC_RESET;
memcpy(&msg_data[0], &reset_type, sizeof(u16));
......@@ -369,7 +369,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
sizeof(u8));
sizeof(vf_tc_map));
return ret;
}
......
......@@ -43,13 +43,17 @@ enum hclge_shaper_level {
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
6 * 32, /* Priority group level */
6 * 8, /* Port level */
6 * 256 /* Qset level */
};
u8 ir_u_calc = 0, ir_s_calc = 0;
u8 ir_u_calc = 0;
u8 ir_s_calc = 0;
u32 ir_calc;
u32 tick;
......@@ -66,7 +70,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
ir_calc = (1008000 + (tick >> 1) - 1) / tick;
ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
*ir_b = 126;
......@@ -78,27 +82,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */
while (ir_calc > ir) {
ir_s_calc++;
ir_calc = 1008000 / (tick * (1 << ir_s_calc));
ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
}
if (ir_calc == ir)
*ir_b = 126;
else
*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
*ir_b = (ir * tick * (1 << ir_s_calc) +
(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else {
/* Increasing the numerator to select ir_u value */
u32 numerator;
while (ir_calc < ir) {
ir_u_calc++;
numerator = 1008000 * (1 << ir_u_calc);
numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
*ir_b = 126;
} else {
u32 denominator = (8000 * (1 << --ir_u_calc));
u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
*ir_b = (ir * tick + (denominator >> 1)) / denominator;
}
}
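For reference, a hedged sketch of the starting ir_calc computation above, using the priority-level tick from tick_array; only the rounded division is shown, not the full ir_b/ir_u/ir_s search:

#include <stdio.h>

#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)	/* 1008000, as in the patch */

int main(void)
{
	unsigned int tick = 6 * 256;	/* priority-level tick */
	/* rounded division used as the starting point of the search */
	unsigned int ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	printf("tick=%u -> ir_calc=%u\n", tick, ir_calc);
	return 0;
}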
......@@ -119,14 +124,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
return -EINVAL;
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
}
hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
if (ret)
return ret;
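A generic sketch of the chaining pattern the reworked loop relies on, where every descriptor except the last carries a NEXT flag; the flag value and struct layout are invented for illustration:

#include <stdio.h>

#define CMD_NUM		3
#define CMD_FLAG_NEXT	0x0004	/* assumed flag bit, illustration only */

struct demo_desc {
	unsigned int flag;
};

int main(void)
{
	struct demo_desc desc[CMD_NUM] = { { 0 } };
	int i;

	/* chain every descriptor to the next one except the final descriptor */
	for (i = 0; i < CMD_NUM - 1; i++)
		desc[i].flag |= CMD_FLAG_NEXT;

	for (i = 0; i < CMD_NUM; i++)
		printf("desc[%d].flag = 0x%04x\n", i, desc[i].flag);
	return 0;
}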
......@@ -219,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time);
return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
trans_time);
return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
......@@ -361,14 +364,27 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
u8 bs_b, u8 bs_s)
{
u32 shapping_para = 0;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
return shapping_para;
}
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id,
u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
u32 shapping_para)
{
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
HCLGE_OPC_TM_PG_C_SHAPPING;
......@@ -378,12 +394,6 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
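The new hclge_tm_get_shapping_para() helper packs five shaper fields into one 32-bit word; a self-contained equivalent follows, with field offsets invented for the sketch rather than taken from the hardware layout:

#include <stdio.h>

/* assumed bit offsets, not the real register layout */
#define IR_B_SHIFT	0
#define IR_U_SHIFT	8
#define IR_S_SHIFT	12
#define BS_B_SHIFT	16
#define BS_S_SHIFT	21

static unsigned int pack_shaper(unsigned int ir_b, unsigned int ir_u,
				unsigned int ir_s, unsigned int bs_b,
				unsigned int bs_s)
{
	return (ir_b << IR_B_SHIFT) | (ir_u << IR_U_SHIFT) |
	       (ir_s << IR_S_SHIFT) | (bs_b << BS_B_SHIFT) |
	       (bs_s << BS_S_SHIFT);
}

int main(void)
{
	printf("0x%08x\n", pack_shaper(126, 0, 0, 5, 20));
	return 0;
}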
......@@ -406,11 +416,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
......@@ -419,13 +427,11 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id,
u8 ir_b, u8 ir_u, u8 ir_s,
u8 bs_b, u8 bs_s)
u32 shapping_para)
{
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
HCLGE_OPC_TM_PRI_C_SHAPPING;
......@@ -436,12 +442,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
......@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
......@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
/* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
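The two comments added above describe how rss_size is chosen; a compact sketch of that decision, with hypothetical sizes:

#include <stdio.h>

static unsigned int pick_rss_size(unsigned int cur, unsigned int req,
				  unsigned int max)
{
	/* honour an explicit, in-range user request first */
	if (req && req != cur && req <= max)
		return req;
	/* otherwise clamp or grow to the maximum specification value */
	if (cur > max || (!req && cur < max))
		return max;
	return cur;
}

int main(void)
{
	printf("rss_size -> %u\n", pick_rss_size(8, 4, 16));	/* user request: 4 */
	printf("rss_size -> %u\n", pick_rss_size(8, 0, 16));	/* no request: 16 */
	return 0;
}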
......@@ -604,12 +606,14 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT 100
u8 i;
for (i = 0; i < hdev->tm_info.num_pg; i++) {
int k;
hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
......@@ -621,7 +625,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
}
}
......@@ -682,6 +686,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret;
u32 i;
......@@ -699,18 +704,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
shaper_para);
if (ret)
return ret;
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
ir_b, ir_u, ir_s,
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
shaper_para);
if (ret)
return ret;
}
......@@ -730,8 +738,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
/* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */
ret = hclge_tm_pg_weight_cfg(hdev, i,
hdev->tm_info.pg_dwrr[i]);
ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
if (ret)
return ret;
}
......@@ -811,6 +818,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret;
u32 i;
......@@ -822,17 +830,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
ret = hclge_tm_pri_shapping_cfg(
hdev, HCLGE_TM_SHAP_C_BUCKET, i,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
shaper_para);
if (ret)
return ret;
ret = hclge_tm_pri_shapping_cfg(
hdev, HCLGE_TM_SHAP_P_BUCKET, i,
ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
shaper_para);
if (ret)
return ret;
}
......@@ -844,6 +854,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
......@@ -851,18 +862,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret)
return ret;
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
vport->vport_id,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
vport->vport_id, shaper_para);
if (ret)
return ret;
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
vport->vport_id,
ir_b, ir_u, ir_s,
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
vport->vport_id, shaper_para);
if (ret)
return ret;
......@@ -1358,7 +1370,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
u8 i, bit_map = 0;
u8 bit_map = 0;
u8 i;
hdev->tm_info.num_tc = num_tc;
......
......@@ -83,8 +83,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
struct hnae3_handle *handle)
static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
......@@ -232,7 +231,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
int status;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
true, &resp_msg, sizeof(u8));
true, &resp_msg, sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d",
......@@ -321,7 +320,8 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
2, true, resp_data, 2);
sizeof(msg_data), true, resp_data,
sizeof(resp_data));
if (!ret)
qid_in_pf = *(u16 *)resp_data;
......@@ -418,7 +418,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
u8 resp_msg;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
0, false, &resp_msg, sizeof(u8));
0, false, &resp_msg, sizeof(resp_msg));
if (status)
dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status);
......@@ -453,11 +453,13 @@ static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
u8 resp_msg;
send_msg = HCLGEVF_ADVERTISING;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
sizeof(u8), false, &resp_msg, sizeof(u8));
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
&send_msg, sizeof(send_msg), false,
&resp_msg, sizeof(resp_msg));
send_msg = HCLGEVF_SUPPORTED;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
sizeof(u8), false, &resp_msg, sizeof(u8));
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
&send_msg, sizeof(send_msg), false,
&resp_msg, sizeof(resp_msg));
}
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
......@@ -470,12 +472,6 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
hdev->ae_dev->dev_type);
return -EINVAL;
}
ret = hclgevf_knic_setup(hdev);
if (ret)
dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
......@@ -545,13 +541,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
{
struct hclgevf_rss_config_cmd *req;
struct hclgevf_desc desc;
int key_offset;
int key_offset = 0;
int key_counts;
int key_size;
int ret;
key_counts = HCLGEVF_RSS_KEY_SIZE;
req = (struct hclgevf_rss_config_cmd *)desc.data;
for (key_offset = 0; key_offset < 3; key_offset++) {
while (key_counts) {
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_RSS_GENERIC_CONFIG,
false);
......@@ -560,15 +558,12 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
req->hash_config |=
(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
if (key_offset == 2)
key_size =
HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
else
key_size = HCLGEVF_RSS_HASH_KEY_NUM;
key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
key_counts -= key_size;
key_offset++;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
......@@ -1193,7 +1188,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
HCLGE_MBX_MAC_VLAN_UC_MODIFY;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
subcode, msg_data, ETH_ALEN * 2,
subcode, msg_data, sizeof(msg_data),
true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
......@@ -1249,7 +1244,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
if (vlan_id > 4095)
if (vlan_id > HCLGEVF_MAX_VLAN_ID)
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
......@@ -1280,7 +1275,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
u8 msg_data[2];
int ret;
memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
memcpy(msg_data, &queue_id, sizeof(queue_id));
/* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
......@@ -1288,7 +1283,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret;
return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
2, true, NULL, 0);
sizeof(msg_data), true, NULL, 0);
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
......@@ -1658,7 +1653,8 @@ static void hclgevf_service_timer(struct timer_list *t)
{
struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
mod_timer(&hdev->service_timer, jiffies +
HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
hdev->stats_timer++;
hclgevf_task_schedule(hdev);
......@@ -1678,9 +1674,9 @@ static void hclgevf_reset_service_task(struct work_struct *work)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
/* PF has intimated that it is about to reset the hardware.
* We now have to poll & check if harware has actually completed
* the reset sequence. On hardware reset completion, VF needs to
* reset the client and ae device.
* We now have to poll & check if hardware has actually
* completed the reset sequence. On hardware reset completion,
* VF needs to reset the client and ae device.
*/
hdev->reset_attempts = 0;
......@@ -1696,7 +1692,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
* 1. reset was initiated due to watchdog timeout due to
* 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down and
* which resulted in watchdog reacting and inducing VF
* reset. This also means our cmdq would be unreliable.
......@@ -1758,7 +1754,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
schedule_work(&hdev->keep_alive_task);
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
mod_timer(&hdev->keep_alive_timer, jiffies +
HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
}
static void hclgevf_keep_alive_task(struct work_struct *work)
......@@ -1773,7 +1770,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
0, false, &respmsg, sizeof(u8));
0, false, &respmsg, sizeof(respmsg));
if (ret)
dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret);
......@@ -2005,7 +2002,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
}
/* Initialize RSS indirect table for each vport */
/* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
......@@ -2018,9 +2015,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
/* other vlan config(like, VLAN TX/RX offload) would also be added
* here later
*/
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}
......@@ -2042,7 +2036,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
......@@ -2066,7 +2059,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hclgevf_reset_tqp(handle, i))
break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
......@@ -2090,7 +2082,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
if (ret)
return ret;
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
mod_timer(&hdev->keep_alive_timer, jiffies +
HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
return 0;
}
......@@ -2322,16 +2315,6 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
if (ret)
goto clear_roce;
break;
case HNAE3_CLIENT_UNIC:
hdev->nic_client = client;
hdev->nic.client = client;
ret = client->ops->init_instance(&hdev->nic);
if (ret)
goto clear_nic;
hnae3_set_client_init_flag(client, ae_dev, 1);
break;
case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) {
......
......@@ -12,9 +12,12 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
#define HCLGEVF_GENERAL_TASK_INTERVAL 5
#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
/* This number in actual depends upon the total number of VFs
* created by physical function. But the maximum number of
......