Commit 9b2f3477 authored by Weihang Li, committed by David S. Miller

net: hns3: fix some coding style issues

This patch fixes some coding style issues reported by static code
analysis tools and code review, such as modifying some comments,
renaming some variables, logging some errors in detail, and fixing
some alignment errors.

BTW, these cleanups do not change the logic of code.
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
Signed-off-by: HuiSong Li <lihuisong@huawei.com>
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 63cbf7a9
...@@ -4,8 +4,7 @@ ...@@ -4,8 +4,7 @@
#include "hnae3.h" #include "hnae3.h"
#include "hns3_enet.h" #include "hns3_enet.h"
static static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) ...@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) ...@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
...@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) ...@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
......
...@@ -139,8 +139,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) ...@@ -139,8 +139,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
tqp_vectors->name, tqp_vectors->name, tqp_vectors);
tqp_vectors);
if (ret) { if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n", netdev_err(priv->netdev, "request irq(%d) fail\n",
tqp_vectors->vector_irq); tqp_vectors->vector_irq);
...@@ -277,8 +276,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) ...@@ -277,8 +276,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
ret = netif_set_real_num_tx_queues(netdev, queue_size); ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) { if (ret) {
netdev_err(netdev, netdev_err(netdev,
"netif_set_real_num_tx_queues fail, ret=%d!\n", "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
ret);
return ret; return ret;
} }
...@@ -373,7 +371,7 @@ static int hns3_nic_net_up(struct net_device *netdev) ...@@ -373,7 +371,7 @@ static int hns3_nic_net_up(struct net_device *netdev)
/* get irq resource for all vectors */ /* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv); ret = hns3_nic_init_irq(priv);
if (ret) { if (ret) {
netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); netdev_err(netdev, "init irq failed! ret=%d\n", ret);
goto free_rmap; goto free_rmap;
} }
...@@ -449,16 +447,13 @@ static int hns3_nic_net_open(struct net_device *netdev) ...@@ -449,16 +447,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
ret = hns3_nic_net_up(netdev); ret = hns3_nic_net_up(netdev);
if (ret) { if (ret) {
netdev_err(netdev, netdev_err(netdev, "net up fail, ret=%d!\n", ret);
"hns net up fail, ret=%d!\n", ret);
return ret; return ret;
} }
kinfo = &h->kinfo; kinfo = &h->kinfo;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
netdev_set_prio_tc_map(netdev, i, netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
kinfo->prio_tc[i]);
}
if (h->ae_algo->ops->set_timer_task) if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true); h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
...@@ -663,7 +658,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, ...@@ -663,7 +658,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
if (l3.v4->version == 4) if (l3.v4->version == 4)
l3.v4->check = 0; l3.v4->check = 0;
/* tunnel packet.*/ /* tunnel packet */
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM | SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL |
...@@ -688,11 +683,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, ...@@ -688,11 +683,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
l3.v4->check = 0; l3.v4->check = 0;
} }
/* normal or tunnel packet*/ /* normal or tunnel packet */
l4_offset = l4.hdr - skb->data; l4_offset = l4.hdr - skb->data;
hdr_len = (l4.tcp->doff << 2) + l4_offset; hdr_len = (l4.tcp->doff << 2) + l4_offset;
/* remove payload length from inner pseudo checksum when tso*/ /* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset; l4_paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check, csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(l4_paylen)); (__force __wsum)htonl(l4_paylen));
...@@ -800,7 +795,7 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, ...@@ -800,7 +795,7 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
il2_hdr = skb_inner_mac_header(skb); il2_hdr = skb_inner_mac_header(skb);
/* compute OL4 header size, defined in 4 Bytes. */ /* compute OL4 header size, defined in 4 Bytes */
l4_len = il2_hdr - l4.hdr; l4_len = il2_hdr - l4.hdr;
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
...@@ -1060,8 +1055,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ...@@ -1060,8 +1055,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* Set txbd */ /* Set txbd */
desc->tx.ol_type_vlan_len_msec = desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec); cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len = desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen); desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss); desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag); desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
...@@ -1108,19 +1102,19 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ...@@ -1108,19 +1102,19 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->priv = priv; desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
desc_cb->type = (type == DESC_TYPE_SKB && !k) ? desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE; DESC_TYPE_SKB : DESC_TYPE_PAGE;
/* now, fill the descriptor */ /* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
frag_end && (k == frag_buf_num - 1) ? frag_end && (k == frag_buf_num - 1) ?
1 : 0); 1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri = desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(bdtp_fe_sc_vld_ra_ri); cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
/* move ring pointer to next.*/ /* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use); ring_ptr_move_fw(ring, next_to_use);
desc_cb = &ring->desc_cb[ring->next_to_use]; desc_cb = &ring->desc_cb[ring->next_to_use];
...@@ -1577,7 +1571,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, ...@@ -1577,7 +1571,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (h->ae_algo->ops->set_vf_vlan_filter) if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
qos, vlan_proto); qos, vlan_proto);
return ret; return ret;
} }
...@@ -1828,8 +1822,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1828,8 +1822,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hnae3_ae_dev *ae_dev; struct hnae3_ae_dev *ae_dev;
int ret; int ret;
ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
GFP_KERNEL);
if (!ae_dev) { if (!ae_dev) {
ret = -ENOMEM; ret = -ENOMEM;
return ret; return ret;
...@@ -2196,7 +2189,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) ...@@ -2196,7 +2189,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
return ret; return ret;
} }
/* detach a in-used buffer and replace with a reserved one */ /* detach a in-used buffer and replace with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
struct hns3_desc_cb *res_cb) struct hns3_desc_cb *res_cb)
{ {
...@@ -2209,8 +2202,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, ...@@ -2209,8 +2202,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{ {
ring->desc_cb[i].reuse_flag = 0; ring->desc_cb[i].reuse_flag = 0;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset); ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0; ring->desc[i].rx.bd_base_info = 0;
} }
...@@ -2312,8 +2305,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring) ...@@ -2312,8 +2305,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
} }
static void static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) int cleand_count)
{ {
struct hns3_desc_cb *desc_cb; struct hns3_desc_cb *desc_cb;
struct hns3_desc_cb res_cbs; struct hns3_desc_cb res_cbs;
...@@ -2375,7 +2368,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, ...@@ -2375,7 +2368,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) { if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1; desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given*/ /* Bump ref count on page before it is given */
get_page(desc_cb->priv); get_page(desc_cb->priv);
} else if (page_count(desc_cb->priv) == 1) { } else if (page_count(desc_cb->priv) == 1) {
desc_cb->reuse_flag = 1; desc_cb->reuse_flag = 1;
...@@ -2619,7 +2612,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, ...@@ -2619,7 +2612,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
*/ */
if (pending) { if (pending) {
pre_bd = (ring->next_to_clean - 1 + ring->desc_num) % pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
ring->desc_num; ring->desc_num;
pre_desc = &ring->desc[pre_bd]; pre_desc = &ring->desc[pre_bd];
bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info); bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
} else { } else {
...@@ -2688,8 +2681,7 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, ...@@ -2688,8 +2681,7 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
HNS3_RXD_GRO_COUNT_M, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S); HNS3_RXD_GRO_COUNT_S);
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
HNS3_RXD_L3ID_S);
if (l3_type == HNS3_L3_TYPE_IPV4) if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6) else if (l3_type == HNS3_L3_TYPE_IPV6)
...@@ -2877,9 +2869,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ...@@ -2877,9 +2869,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return 0; return 0;
} }
int hns3_clean_rx_ring( int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{ {
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err; int recv_pkts, recv_bds, clean_count, err;
...@@ -2931,8 +2922,7 @@ int hns3_clean_rx_ring( ...@@ -2931,8 +2922,7 @@ int hns3_clean_rx_ring(
out: out:
/* Make all data has been write before submit */ /* Make all data has been write before submit */
if (clean_count + unused_count > 0) if (clean_count + unused_count > 0)
hns3_nic_alloc_rx_buffers(ring, hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
clean_count + unused_count);
return recv_pkts; return recv_pkts;
} }
...@@ -3337,6 +3327,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) ...@@ -3337,6 +3327,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
if (!vector) if (!vector)
return -ENOMEM; return -ENOMEM;
/* save the actual available vector number */
vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
priv->vector_num = vector_num; priv->vector_num = vector_num;
...@@ -3605,8 +3596,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) ...@@ -3605,8 +3596,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
struct hnae3_queue *q = ring->tqp; struct hnae3_queue *q = ring->tqp;
if (!HNAE3_IS_TX_RING(ring)) { if (!HNAE3_IS_TX_RING(ring)) {
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
(u32)dma);
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1)); (u32)((dma >> 31) >> 1));
...@@ -4052,8 +4042,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) ...@@ -4052,8 +4042,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ret); ret);
return ret; return ret;
} }
hns3_replace_buffer(ring, ring->next_to_use, hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
&res_cbs);
} }
ring_ptr_move_fw(ring, next_to_use); ring_ptr_move_fw(ring, next_to_use);
} }
...@@ -4228,7 +4217,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) ...@@ -4228,7 +4217,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
if (ret) { if (ret) {
set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
netdev_err(kinfo->netdev, netdev_err(kinfo->netdev,
"hns net up fail, ret=%d!\n", ret); "net up fail, ret=%d!\n", ret);
return ret; return ret;
} }
} }
......
...@@ -150,6 +150,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) ...@@ -150,6 +150,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
/* The dst mac addr of loopback packet is the same as the host'
* mac addr, the SSU component may loop back the packet to host
* before the packet reaches mac or serdes, which will defect
* the purpose of mac or serdes selftest.
*/
ethh->h_dest[5] += 0x1f; ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source); eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP); ethh->h_proto = htons(ETH_P_ARP);
......
...@@ -399,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -399,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -407,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -407,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -416,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -416,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id); bp_to_qs_map_cmd->tc_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id); bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map); bp_to_qs_map_cmd->qs_bit_map);
...@@ -477,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -477,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n", dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id); nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT; cmd = HCLGE_OPC_TM_PG_WEIGHT;
......
...@@ -673,19 +673,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, ...@@ -673,19 +673,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
enum hclge_err_int_type int_type) enum hclge_err_int_type int_type)
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
int num = 1; int desc_num = 1;
int ret; int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true); hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) { if (flag) {
desc[0].flag |= cpu_to_le16(flag); desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true); hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
num = 2; desc_num = 2;
} }
if (w_num) if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type); desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret) if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret); dev_err(dev, "query error cmd failed (%d)\n", ret);
...@@ -941,7 +941,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -941,7 +941,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2]; struct hclge_desc desc[2];
int num = 1; int desc_num = 1;
int ret; int ret;
/* configure PPU error interrupts */ /* configure PPU error interrupts */
...@@ -960,7 +960,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -960,7 +960,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
num = 2; desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false); hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en) if (en)
...@@ -978,7 +978,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -978,7 +978,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
return -EINVAL; return -EINVAL;
} }
ret = hclge_cmd_send(&hdev->hw, &desc[0], num); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret; return ret;
} }
...@@ -1455,8 +1455,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) ...@@ -1455,8 +1455,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
int ret; int ret;
/* read overflow error status */ /* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0); 0, 0, 0);
if (ret) { if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
......
...@@ -443,8 +443,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) ...@@ -443,8 +443,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i]; queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q); tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */ /* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0], hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
HCLGE_OPC_QUERY_RX_STATUS,
true); true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
...@@ -452,7 +451,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) ...@@ -452,7 +451,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n", "Query tqp stat fail, status = %d,queue = %d\n",
ret, i); ret, i);
return ret; return ret;
} }
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
...@@ -506,6 +505,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) ...@@ -506,6 +505,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{ {
struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hnae3_knic_private_info *kinfo = &handle->kinfo;
/* each tqp has TX & RX two queues */
return kinfo->num_tqps * (2); return kinfo->num_tqps * (2);
} }
...@@ -650,8 +650,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) ...@@ -650,8 +650,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
return count; return count;
} }
static void hclge_get_strings(struct hnae3_handle *handle, static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u32 stringset,
u8 *data) u8 *data)
{ {
u8 *p = (char *)data; u8 *p = (char *)data;
...@@ -659,21 +658,17 @@ static void hclge_get_strings(struct hnae3_handle *handle, ...@@ -659,21 +658,17 @@ static void hclge_get_strings(struct hnae3_handle *handle,
if (stringset == ETH_SS_STATS) { if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string); size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(stringset, p = hclge_comm_get_strings(stringset, g_mac_stats_string,
g_mac_stats_string, size, p);
size,
p);
p = hclge_tqps_get_strings(handle, p); p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) { } else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN); ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
memcpy(p, memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN); ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
...@@ -684,8 +679,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, ...@@ -684,8 +679,7 @@ static void hclge_get_strings(struct hnae3_handle *handle,
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
memcpy(p, memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN); ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
...@@ -698,10 +692,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) ...@@ -698,10 +692,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u64 *p; u64 *p;
p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data);
ARRAY_SIZE(g_mac_stats_string),
data);
p = hclge_tqps_get_stats(handle, p); p = hclge_tqps_get_stats(handle, p);
} }
...@@ -746,9 +738,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev) ...@@ -746,9 +738,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"query function status failed %d.\n", "query function status failed %d.\n", ret);
ret);
return ret; return ret;
} }
...@@ -808,7 +798,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) ...@@ -808,7 +798,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
/* PF should have NIC vectors and Roce vectors, /* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors. * NIC vectors are queued before Roce vectors.
*/ */
hdev->num_msi = hdev->num_roce_msi + hdev->num_msi = hdev->num_roce_msi +
hdev->roce_base_msix_offset; hdev->roce_base_msix_offset;
} else { } else {
hdev->num_msi = hdev->num_msi =
...@@ -2153,7 +2143,6 @@ static int hclge_init_msi(struct hclge_dev *hdev) ...@@ -2153,7 +2143,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
static u8 hclge_check_speed_dup(u8 duplex, int speed) static u8 hclge_check_speed_dup(u8 duplex, int speed)
{ {
if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
duplex = HCLGE_MAC_FULL; duplex = HCLGE_MAC_FULL;
...@@ -2862,8 +2851,7 @@ int hclge_notify_client(struct hclge_dev *hdev, ...@@ -2862,8 +2851,7 @@ int hclge_notify_client(struct hclge_dev *hdev,
struct hnae3_client *client = hdev->nic_client; struct hnae3_client *client = hdev->nic_client;
u16 i; u16 i;
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
!client)
return 0; return 0;
if (!client->ops->reset_notify) if (!client->ops->reset_notify)
...@@ -2891,8 +2879,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev, ...@@ -2891,8 +2879,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
int ret = 0; int ret = 0;
u16 i; u16 i;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
!client)
return 0; return 0;
if (!client->ops->reset_notify) if (!client->ops->reset_notify)
...@@ -4167,8 +4154,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, ...@@ -4167,8 +4154,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
return 0; return 0;
} }
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
int vector,
struct hnae3_ring_chain_node *ring_chain) struct hnae3_ring_chain_node *ring_chain)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
...@@ -4185,8 +4171,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, ...@@ -4185,8 +4171,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
} }
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
int vector,
struct hnae3_ring_chain_node *ring_chain) struct hnae3_ring_chain_node *ring_chain)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
...@@ -4207,8 +4192,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, ...@@ -4207,8 +4192,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
if (ret) if (ret)
dev_err(&handle->pdev->dev, dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vectorid=%d, ret =%d\n", "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
vector_id, vector_id, ret);
ret);
return ret; return ret;
} }
...@@ -5272,13 +5256,12 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, ...@@ -5272,13 +5256,12 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) { if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Delete fail, rule %d is inexistent\n", "Delete fail, rule %d is inexistent\n", fs->location);
fs->location);
return -ENOENT; return -ENOENT;
} }
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
fs->location, NULL, false); NULL, false);
if (ret) if (ret)
return ret; return ret;
...@@ -6549,8 +6532,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, ...@@ -6549,8 +6532,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
is_multicast_ether_addr(addr)) { is_multicast_ether_addr(addr)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
addr, addr, is_zero_ether_addr(addr),
is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr), is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr)); is_multicast_ether_addr(addr));
return -EINVAL; return -EINVAL;
...@@ -6617,9 +6599,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, ...@@ -6617,9 +6599,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) || if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) || is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) { is_multicast_ether_addr(addr)) {
dev_dbg(&hdev->pdev->dev, dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
"Remove mac err! invalid mac:%pM.\n", addr);
addr);
return -EINVAL; return -EINVAL;
} }
...@@ -8730,8 +8711,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -8730,8 +8711,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_fd_config(hdev); ret = hclge_init_fd_config(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
"fd table init fail, ret=%d\n", ret);
return ret; return ret;
} }
......
...@@ -52,7 +52,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -52,7 +52,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
6 * 8, /* Port level */ 6 * 8, /* Port level */
6 * 256 /* Qset level */ 6 * 256 /* Qset level */
}; };
u8 ir_u_calc = 0, ir_s_calc = 0; u8 ir_u_calc = 0;
u8 ir_s_calc = 0;
u32 ir_calc; u32 ir_calc;
u32 tick; u32 tick;
...@@ -222,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) ...@@ -222,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
trans_gap = pause_param->pause_trans_gap; trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time); trans_time = le16_to_cpu(pause_param->pause_trans_time);
return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
trans_time);
} }
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
...@@ -387,7 +387,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, ...@@ -387,7 +387,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
struct hclge_desc desc; struct hclge_desc desc;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
HCLGE_OPC_TM_PG_C_SHAPPING; HCLGE_OPC_TM_PG_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false); hclge_cmd_setup_basic_desc(&desc, opcode, false);
shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
...@@ -434,7 +434,7 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, ...@@ -434,7 +434,7 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
struct hclge_desc desc; struct hclge_desc desc;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
HCLGE_OPC_TM_PRI_C_SHAPPING; HCLGE_OPC_TM_PRI_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false); hclge_cmd_setup_basic_desc(&desc, opcode, false);
...@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
max_rss_size = min_t(u16, hdev->rss_size_max, max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc); vport->alloc_tqps / kinfo->num_tc);
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) { kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
...@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size; kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size || } else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
/* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size); kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size; kinfo->rss_size = max_rss_size;
...@@ -736,8 +738,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) ...@@ -736,8 +738,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
/* pg to prio */ /* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) { for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */ /* Cfg dwrr */
ret = hclge_tm_pg_weight_cfg(hdev, i, ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
hdev->tm_info.pg_dwrr[i]);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -1223,8 +1224,8 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) ...@@ -1223,8 +1224,8 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
struct hclge_mac *mac = &hdev->hw.mac; struct hclge_mac *mac = &hdev->hw.mac;
return hclge_pause_param_cfg(hdev, mac->mac_addr, return hclge_pause_param_cfg(hdev, mac->mac_addr,
HCLGE_DEFAULT_PAUSE_TRANS_GAP, HCLGE_DEFAULT_PAUSE_TRANS_GAP,
HCLGE_DEFAULT_PAUSE_TRANS_TIME); HCLGE_DEFAULT_PAUSE_TRANS_TIME);
} }
static int hclge_pfc_setup_hw(struct hclge_dev *hdev) static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
...@@ -1369,7 +1370,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) ...@@ -1369,7 +1370,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{ {
u8 i, bit_map = 0; u8 bit_map = 0;
u8 i;
hdev->tm_info.num_tc = num_tc; hdev->tm_info.num_tc = num_tc;
......
...@@ -83,8 +83,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, ...@@ -83,8 +83,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG, HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG}; HCLGEVF_TQP_INTR_RL_REG};
static inline struct hclgevf_dev *hclgevf_ae_get_hdev( static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
struct hnae3_handle *handle)
{ {
if (!handle->client) if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic); return container_of(handle, struct hclgevf_dev, nic);
...@@ -1675,9 +1674,9 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1675,9 +1674,9 @@ static void hclgevf_reset_service_task(struct work_struct *work)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING, if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) { &hdev->reset_state)) {
/* PF has initmated that it is about to reset the hardware. /* PF has initmated that it is about to reset the hardware.
* We now have to poll & check if harware has actually completed * We now have to poll & check if hardware has actually
* the reset sequence. On hardware reset completion, VF needs to * completed the reset sequence. On hardware reset completion,
* reset the client and ae device. * VF needs to reset the client and ae device.
*/ */
hdev->reset_attempts = 0; hdev->reset_attempts = 0;
...@@ -1693,7 +1692,7 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1693,7 +1692,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) { &hdev->reset_state)) {
/* we could be here when either of below happens: /* we could be here when either of below happens:
* 1. reset was initiated due to watchdog timeout due to * 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down and * a. IMP was earlier reset and our TX got choked down and
* which resulted in watchdog reacting and inducing VF * which resulted in watchdog reacting and inducing VF
* reset. This also means our cmdq would be unreliable. * reset. This also means our cmdq would be unreliable.
...@@ -2003,7 +2002,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -2003,7 +2002,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
} }
/* Initialize RSS indirect table for each vport */ /* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
...@@ -2016,9 +2015,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -2016,9 +2015,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{ {
/* other vlan config(like, VLAN TX/RX offload) would also be added
* here later
*/
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false); false);
} }
...@@ -2040,7 +2036,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) ...@@ -2040,7 +2036,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle); hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev); hclgevf_request_link_info(hdev);
...@@ -2064,7 +2059,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) ...@@ -2064,7 +2059,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hclgevf_reset_tqp(handle, i)) if (hclgevf_reset_tqp(handle, i))
break; break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle); hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0); hclgevf_update_link_status(hdev, 0);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment