Commit 148f025d authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patch-set includes code optimizations and bugfixes for the HNS3
ethernet controller driver.

[patch 1/11 - 3/11] fixes some bugs in the IO path

[patch 4/11 - 6/11] includes some optimizations and bugfixes
for mailbox handling

[patch 7/11 - 11/11] adds misc code optimizations and bugfixes.

Change log:
V2->V3: addresses comments from Neil Horman, removes [patch 8/12]
V1->V2: adds modifications to [patch 8/12]
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 790d23e7 96490a1c
@@ -84,12 +84,15 @@ struct hclgevf_mbx_resp_status {
struct hclge_mbx_vf_to_pf_cmd {
u8 rsv;
u8 mbx_src_vfid; /* Auto filled by IMP */
u8 rsv1[2];
u8 mbx_need_resp;
u8 rsv1[1];
u8 msg_len;
u8 rsv2[3];
u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
};
#define HCLGE_MBX_NEED_RESP_BIT BIT(0)
struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
@@ -111,7 +114,7 @@ struct hclgevf_mbx_arq_ring {
struct hclgevf_dev *hdev;
u32 head;
u32 tail;
u32 count;
atomic_t count;
u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
......
@@ -2214,14 +2214,22 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
int *pkts)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
int ntc = ring->next_to_clean;
struct hns3_desc_cb *desc_cb;
desc_cb = &ring->desc_cb[ntc];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
/* desc_cb will be cleaned after hnae3_free_buffer_detach */
hns3_free_buffer_detach(ring, ring->next_to_clean);
hns3_free_buffer_detach(ring, ntc);
ring_ptr_move_fw(ring, next_to_clean);
if (++ntc == ring->desc_num)
ntc = 0;
/* This smp_store_release() pairs with smp_load_acquire() in
* ring_space called by hns3_nic_net_xmit.
*/
smp_store_release(&ring->next_to_clean, ntc);
}
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
@@ -2689,36 +2697,37 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
struct sk_buff *skb, u32 rss_hash)
{
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
struct hns3_desc *desc;
int last_bd;
/* When driver handle the rss type, ring->next_to_clean indicates the
* first descriptor of next packet, need -1 here.
*/
last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
desc = &ring->desc[last_bd];
if (le32_to_cpu(desc->rx.rss_hash))
if (rss_hash)
rss_type = handle->kinfo.rss_type;
else
rss_type = PKT_HASH_TYPE_NONE;
skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
skb_set_hash(skb, rss_hash, rss_type);
}
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
struct hns3_desc *desc)
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
u32 l234info = le32_to_cpu(desc->rx.l234_info);
enum hns3_pkt_l2t_type l2_frame_type;
u32 bd_base_info, l234info;
struct hns3_desc *desc;
unsigned int len;
int ret;
int pre_ntc, ret;
/* bdinfo handled below is only valid on the last BD of the
* current packet, and ring->next_to_clean indicates the first
* descriptor of next packet, so need - 1 below.
*/
pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
(ring->desc_num - 1);
desc = &ring->desc[pre_ntc];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
l234info = le32_to_cpu(desc->rx.l234_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2779,6 +2788,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
u64_stats_update_end(&ring->syncp);
ring->tqp_vector->rx_group.total_bytes += len;
hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
return 0;
}
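
A note on the index arithmetic above: by the time hns3_handle_bdinfo runs, ring->next_to_clean already points at the first descriptor of the next packet, so the last BD of the current packet sits one slot back, wrapping at the ring edge. The new pre_ntc form avoids the modulo the old last_bd computation used. A minimal standalone sketch of that calculation (DESC_NUM and the helper name are illustrative, not driver code):

#include <assert.h>

#define DESC_NUM 8	/* hypothetical ring size for the example */

static int prev_ring_idx(int next_to_clean, int desc_num)
{
	/* same result as (next_to_clean - 1 + desc_num) % desc_num,
	 * the form the old last_bd computation used, but without a division
	 */
	return next_to_clean ? next_to_clean - 1 : desc_num - 1;
}

int main(void)
{
	assert(prev_ring_idx(0, DESC_NUM) == DESC_NUM - 1);	/* wrap around */
	assert(prev_ring_idx(5, DESC_NUM) == 4);		/* normal case */
	return 0;
}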
@@ -2848,14 +2859,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
ret = hns3_handle_bdinfo(ring, skb, desc);
ret = hns3_handle_bdinfo(ring, skb);
if (unlikely(ret)) {
dev_kfree_skb_any(skb);
return ret;
}
*out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
}
@@ -2866,7 +2876,7 @@ int hns3_clean_rx_ring(
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
int num;
@@ -2875,6 +2885,7 @@ int hns3_clean_rx_ring(
recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
@@ -3476,6 +3487,7 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
}
devm_kfree(&pdev->dev, priv->ring_data);
priv->ring_data = NULL;
return ret;
}
@@ -3484,12 +3496,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
struct hnae3_handle *h = priv->ae_handle;
int i;
if (!priv->ring_data)
return;
for (i = 0; i < h->kinfo.num_tqps; i++) {
devm_kfree(priv->dev, priv->ring_data[i].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
priv->ring_data = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
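
The hns3_put_ring_config change above makes the teardown idempotent: bail out early when ring_data is already gone, and NULL the pointer after freeing. The error and uninit paths in the following hunks can then drop their own priv->ring_data = NULL lines and simply rely on this guard, removing the double-free risk. A minimal userspace sketch of the pattern, with malloc/free standing in for the devm allocations:

#include <stdlib.h>

struct nic_priv {
	void *ring_data;	/* stands in for priv->ring_data */
};

void put_ring_config(struct nic_priv *priv)
{
	if (!priv->ring_data)
		return;			/* already released: nothing to do */

	/* ... free the per-queue rings hanging off ring_data here ... */
	free(priv->ring_data);
	priv->ring_data = NULL;		/* makes a repeated call a no-op */
}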
@@ -3909,8 +3925,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_dbg_uninit(handle);
priv->ring_data = NULL;
out_netdev_free:
free_netdev(netdev);
}
@@ -4257,12 +4271,10 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
err_put_ring:
hns3_put_ring_config(priv);
priv->ring_data = NULL;
return ret;
}
@@ -4324,7 +4336,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
netdev_err(netdev, "uninit ring error\n");
hns3_put_ring_config(priv);
priv->ring_data = NULL;
return ret;
}
......
@@ -581,8 +581,11 @@ union l4_hdr_info {
static inline int ring_space(struct hns3_enet_ring *ring)
{
int begin = ring->next_to_clean;
int end = ring->next_to_use;
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
*/
int begin = smp_load_acquire(&ring->next_to_clean);
int end = READ_ONCE(ring->next_to_use);
return ((end >= begin) ? (ring->desc_num - end + begin) :
(begin - end)) - 1;
......
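
The ring_space hunk above is the other half of the barrier added in the hns3_nic_reclaim_one_desc hunk earlier: the TX cleanup path publishes next_to_clean with release semantics only after the buffer is detached, and the xmit path loads it with acquire semantics, so it can never observe free space whose buffer has not actually been released yet. A compilable sketch of the same pairing, using C11 atomics in place of the kernel's smp_store_release()/smp_load_acquire() (the struct and function names are illustrative):

#include <stdatomic.h>

struct ring {
	_Atomic int next_to_clean;	/* consumer index, published with release */
	_Atomic int next_to_use;	/* producer index */
	int desc_num;
};

/* TX cleanup side: detach the buffer first, then publish the new index */
void reclaim_one_desc(struct ring *ring, int ntc)
{
	/* ... freeing/detaching the buffer at ntc happens here ... */
	if (++ntc == ring->desc_num)
		ntc = 0;
	atomic_store_explicit(&ring->next_to_clean, ntc, memory_order_release);
}

/* xmit side: the acquire load guarantees the detach above is visible */
int ring_space(struct ring *ring)
{
	int begin = atomic_load_explicit(&ring->next_to_clean, memory_order_acquire);
	int end = atomic_load_explicit(&ring->next_to_use, memory_order_relaxed);

	return ((end >= begin) ? (ring->desc_num - end + begin)
			       : (begin - end)) - 1;
}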
@@ -1653,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "fail(%d) to query msix int status bd num\n",
ret);
/* reset everything for now */
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
return ret;
}
@@ -1675,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
ret);
/* reset everything for now */
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1710,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
ret);
/* reset everything for now */
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1725,8 +1719,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n",
ret);
/* reset everything for now */
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1767,8 +1759,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
ret);
/* reset everything for now */
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
}
/* query and clear mac tnl interruptions */
......
@@ -5337,8 +5337,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
#define HCLGE_MAC_LINK_STATUS_MS 20
#define HCLGE_MAC_LINK_STATUS_NUM 10
#define HCLGE_MAC_LINK_STATUS_MS 10
#define HCLGE_MAC_LINK_STATUS_NUM 100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP 1
......
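
The constant swap above changes the serdes loopback link wait from 20 ms x 10 attempts (200 ms total) to 10 ms x 100 attempts (1 s total): a finer poll interval and a five-fold longer overall timeout. A self-contained sketch of the poll shape these constants feed (get_link_status() is a hypothetical stand-in; the real loop sits outside this hunk):

#include <stdbool.h>
#include <unistd.h>

#define MAC_LINK_STATUS_MS	10	/* poll interval, as in the new constants */
#define MAC_LINK_STATUS_NUM	100	/* poll attempts */

/* hypothetical stand-in for the real MAC link status query */
bool get_link_status(void)
{
	return true;
}

int wait_for_link(bool expect_up)
{
	int i;

	for (i = 0; i < MAC_LINK_STATUS_NUM; i++) {
		if (get_link_status() == expect_up)
			return 0;
		usleep(MAC_LINK_STATUS_MS * 1000);
	}
	return -1;	/* link never settled within NUM * MS milliseconds */
}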
@@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
@@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return -EIO;
}
if (gen_resp)
if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
return 0;
@@ -597,7 +596,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_UNICAST:
ret = hclge_set_vf_uc_mac_addr(vport, req, true);
ret = hclge_set_vf_uc_mac_addr(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to set VF UC MAC Addr\n",
......
@@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
ret = hclge_pfc_setup_hw(hdev);
if (init && ret == -EOPNOTSUPP)
dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
else
else if (ret) {
dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
ret);
return ret;
}
return hclge_tm_bp_setup(hdev);
}
......
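
The hclge_pause_setup_hw hunk above closes a silent-failure hole: previously the bare else branch did nothing, and the function went on to hclge_tm_bp_setup even when PFC configuration had genuinely failed. Only -EOPNOTSUPP during init is treated as benign (the GE MAC simply has no PFC support); any other non-zero ret is now logged and propagated. A standalone sketch of that decision shape, with stubs in place of the hardware calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the hardware configuration calls */
static int pfc_setup_hw(void) { return -EOPNOTSUPP; }
static int tm_bp_setup(void)  { return 0; }

int pause_setup_hw(bool init)
{
	int ret = pfc_setup_hw();

	if (init && ret == -EOPNOTSUPP)
		fprintf(stderr, "warn: GE MAC does not support pfc\n");
	else if (ret) {
		fprintf(stderr, "config pfc failed! ret = %d\n", ret);
		return ret;	/* hard failure: stop before back-pressure setup */
	}

	return tm_bp_setup();
}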
@@ -340,7 +340,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
hdev->arq.count = 0;
atomic_set(&hdev->arq.count, 0);
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
hdev->hw.cmq.crq.next_to_clean = 0;
......
@@ -1758,7 +1758,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
@@ -2050,8 +2050,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclgevf_reset_tqp(handle, i);
if (hdev->reset_type != HNAE3_VF_RESET)
for (i = 0; i < handle->kinfo.num_tqps; i++)
if (hclgevf_reset_tqp(handle, i))
break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
......
@@ -98,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
~HCLGE_MBX_NEED_RESP_BIT;
req->msg[0] = code;
req->msg[1] = subcode;
memcpy(&req->msg[2], msg_data, msg_len);
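
The VF-side hunk above tags outgoing mailbox messages with the need-resp flag that the PF-side hunks earlier test via HCLGE_MBX_NEED_RESP_BIT. One caveat worth flagging: in the "no response" case, OR-ing with ~HCLGE_MBX_NEED_RESP_BIT sets every bit of mbx_need_resp except bit 0 instead of clearing it. Since the PF only ever tests bit 0, the stray bits are harmless today, but the conventional set/clear shape is easier to audit; a sketch of it below (the helper is illustrative, not driver code):

#include <stdint.h>

#define NEED_RESP_BIT	(1u << 0)	/* mirrors HCLGE_MBX_NEED_RESP_BIT */

/* illustrative helper: set or clear a single flag bit explicitly */
void set_need_resp(uint8_t *flags, int need_resp)
{
	if (need_resp)
		*flags |= NEED_RESP_BIT;	/* ask the PF to respond */
	else
		*flags &= ~NEED_RESP_BIT;	/* fire-and-forget message */
}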
@@ -212,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* we will drop the async msg if we find ARQ as full
* and continue with next message
*/
if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
if (atomic_read(&hdev->arq.count) >=
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%d)\n",
req->msg[1]);
@@ -224,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
memcpy(&msg_q[0], req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
hdev->arq.count++;
atomic_inc(&hdev->arq.count);
hclgevf_mbx_task_schedule(hdev);
@@ -317,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
}
hclge_mbx_head_ptr_move_arq(hdev->arq);
hdev->arq.count--;
atomic_dec(&hdev->arq.count);
msg_q = hdev->arq.msg_q[hdev->arq.head];
}
}
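
Finally, the arq.count hunks through this file convert the ARQ occupancy counter to an atomic_t: it is incremented in the mailbox handler, decremented in the deferred async task, and compared against HCLGE_MBX_MAX_ARQ_MSG_NUM when deciding whether to drop an incoming message, so plain u32 arithmetic could race. A compilable sketch of the same producer/consumer counting, with C11 atomics standing in for the kernel atomic_t API (ARQ_MAX is an illustrative bound):

#include <stdatomic.h>
#include <stdbool.h>

#define ARQ_MAX	16	/* illustrative; the driver uses HCLGE_MBX_MAX_ARQ_MSG_NUM */

struct arq {
	atomic_int count;	/* occupancy, touched from both contexts */
};

/* mailbox handler side: enqueue, dropping the message when full */
bool arq_enqueue(struct arq *q)
{
	if (atomic_load(&q->count) >= ARQ_MAX)
		return false;	/* ARQ full: caller drops the message */
	/* ... copy the message and move the tail pointer here ... */
	atomic_fetch_add(&q->count, 1);
	return true;
}

/* async task side: dequeue one message */
void arq_dequeue(struct arq *q)
{
	/* ... consume the message and move the head pointer here ... */
	atomic_fetch_sub(&q->count, 1);
}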