Commit 7a9212d1 authored by David S. Miller

Merge branch 'hns3-fixes'

Huazhong Tan says:

====================
net: hns3: fixes for -net

There are some bugfixes for the HNS3 ethernet driver: patch #1 fixes
a desc filling bug, patch #2 fixes a false TX timeout issue, and
patches #3~#5 fix several bugs related to VLAN and flow director (FD) handling.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 181964e6 b7b5d25b
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	int k, sizeoflast;
 	dma_addr_t dma;
 
-	if (type == DESC_TYPE_SKB) {
-		struct sk_buff *skb = (struct sk_buff *)priv;
-		int ret;
-
-		ret = hns3_fill_skb_desc(ring, skb, desc);
-		if (unlikely(ret < 0))
-			return ret;
-
-		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	} else if (type == DESC_TYPE_FRAGLIST_SKB) {
+	if (type == DESC_TYPE_FRAGLIST_SKB ||
+	    type == DESC_TYPE_SKB) {
 		struct sk_buff *skb = (struct sk_buff *)priv;
 
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	next_to_use_head = ring->next_to_use;
 
+	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+	if (unlikely(ret < 0))
+		goto fill_err;
+
 	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
 	if (unlikely(ret < 0))
 		goto fill_err;
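The two hunks above are the desc filling fix: hns3_fill_desc() runs once per fragment, but it also carried the per-packet hns3_fill_skb_desc() step, which can land on the wrong descriptor once the skb has been expanded or linearized. The fix hoists the per-packet step into hns3_nic_net_xmit(), where it targets the head descriptor exactly once, and lets DESC_TYPE_SKB share the plain mapping path with DESC_TYPE_FRAGLIST_SKB. A minimal userspace sketch of the same refactor (illustrative names only, not driver code):

#include <stdio.h>

struct desc {
	int per_packet_done;	/* per-packet metadata (e.g. csum info) */
	int mapped;		/* per-fragment mapping done */
};

/* Per-fragment helper: after the refactor it only maps memory and no
 * longer touches per-packet state, so calling it once per fragment is
 * safe even when the buffer layout has changed. */
static void fill_desc(struct desc *d)
{
	d->mapped = 1;
}

/* Caller: the per-packet step runs exactly once, against the head
 * descriptor, before any per-fragment work. */
static void xmit(struct desc *ring, int nfrags)
{
	ring[0].per_packet_done = 1;	/* was hidden inside fill_desc() */
	for (int i = 0; i < nfrags; i++)
		fill_desc(&ring[i]);
}

int main(void)
{
	struct desc ring[3] = { 0 };

	xmit(ring, 3);
	printf("head: per_packet=%d mapped=%d\n",
	       ring[0].per_packet_done, ring[0].mapped);
	return 0;
}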
@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
 		return;
 
 	if (linkup) {
-		netif_carrier_on(netdev);
 		netif_tx_wake_all_queues(netdev);
+		netif_carrier_on(netdev);
 		if (netif_msg_link(handle))
 			netdev_info(netdev, "link up\n");
 	} else {
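This hunk is the false TX timeout fix. In the old order, netif_carrier_on() ran while the queues could still be stopped from the link-down path, and the TX watchdog reads "carrier up + queue stopped + no recent transmission" as a hang. Waking the queues before reporting carrier closes that window. A toy userspace model of the window (the watchdog condition here is a simplification, not the kernel's exact check):

#include <stdio.h>
#include <stdbool.h>

static bool carrier_on, queues_awake;

/* Simplified watchdog rule: complain if carrier is up while the
 * queues are still stopped. */
static void watchdog(const char *when)
{
	printf("%s: %s\n", when,
	       carrier_on && !queues_awake ? "false TX timeout" : "ok");
}

int main(void)
{
	/* old order: carrier first, the watchdog can fire in the gap */
	carrier_on = true;
	watchdog("old order, before queues wake");
	queues_awake = true;

	/* fixed order: queues first, no state where the rule matches */
	carrier_on = queues_awake = false;
	queues_awake = true;
	watchdog("fixed order, before carrier on");
	carrier_on = true;
	watchdog("fixed order, after carrier on");
	return 0;
}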
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 	/* to avoid rule conflict, when user configure rule by ethtool,
 	 * we need to clear all arfs rules
 	 */
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hclge_clear_arfs_rules(handle);
 
-	spin_lock_bh(&hdev->fd_rule_lock);
 	ret = hclge_fd_config_rule(hdev, rule);
 
 	spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
 	return ret;
 }
 
+/* make sure being called after lock up with fd_rule_lock */
 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 				     bool clear_list)
 {
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 	if (!hnae3_dev_fd_supported(hdev))
 		return;
 
-	spin_lock_bh(&hdev->fd_rule_lock);
 	for_each_set_bit(location, hdev->fd_bmap,
 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 		bitmap_zero(hdev->fd_bmap,
 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
 	}
-
-	spin_unlock_bh(&hdev->fd_rule_lock);
 }
 
 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
 				      u16 flow_id, struct flow_keys *fkeys)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_fd_rule_tuples new_tuples;
+	struct hclge_fd_rule_tuples new_tuples = {};
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_fd_rule *rule;
 	u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
 	if (!hnae3_dev_fd_supported(hdev))
 		return -EOPNOTSUPP;
 
-	memset(&new_tuples, 0, sizeof(new_tuples));
-	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
-	spin_lock_bh(&hdev->fd_rule_lock);
 	/* when there is already fd rule existed add by user,
 	 * arfs should not work
 	 */
+	spin_lock_bh(&hdev->fd_rule_lock);
 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
 		spin_unlock_bh(&hdev->fd_rule_lock);
-
 		return -EOPNOTSUPP;
 	}
 
+	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
 	/* check is there flow director filter existed for this flow,
 	 * if not, create a new filter for it;
 	 * if filter exist with different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
 #endif
 }
 
+/* make sure being called after lock up with fd_rule_lock */
 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
 {
 #ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 	hdev->fd_en = enable;
 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
 
-	if (!enable)
+	if (!enable) {
+		spin_lock_bh(&hdev->fd_rule_lock);
 		hclge_del_all_fd_entries(handle, clear);
-	else
+		spin_unlock_bh(&hdev->fd_rule_lock);
+	} else {
 		hclge_restore_fd_entries(handle);
+	}
 }
 
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
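The fd_rule_lock hunks above all apply one convention: hclge_del_all_fd_entries() and hclge_clear_arfs_rules() stop taking the lock themselves (hence the new "make sure being called after lock up" comments), and each caller wraps the whole clear-then-configure sequence in a single critical section, so an aRFS rule cannot be re-inserted between clearing the old rules and installing the new one. A runnable pthread sketch of the caller-holds-the-lock convention (illustrative names only, not driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rule_lock = PTHREAD_MUTEX_INITIALIZER;
static int nrules;

/* Lockless helper: by convention the caller already holds rule_lock,
 * mirroring the "make sure being called after lock up with
 * fd_rule_lock" comments added in the patch. */
static void clear_rules_locked(void)
{
	nrules = 0;
}

/* The caller owns one critical section covering both the clear and
 * the new configuration, so no other path can slip a rule in between
 * the two steps. */
static void add_user_rule(void)
{
	pthread_mutex_lock(&rule_lock);
	clear_rules_locked();	/* e.g. drop leftover aRFS rules */
	nrules++;		/* then install the user rule */
	pthread_mutex_unlock(&rule_lock);
}

int main(void)
{
	add_user_rule();
	printf("rules installed: %d\n", nrules);
	return 0;
}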
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	int i;
 
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hclge_clear_arfs_rules(handle);
+	spin_unlock_bh(&hdev->fd_rule_lock);
 
 	/* If it is not PF reset, the firmware will disable the MAC,
 	 * so it only need to stop phy here.
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
 	bool writen_to_tbl = false;
 	int ret = 0;
 
-	/* When device is resetting, firmware is unable to handle
-	 * mailbox. Just record the vlan id, and remove it after
-	 * reset finished.
+	/* When device is resetting or reset failed, firmware is unable to
+	 * handle mailbox. Just record the vlan id, and remove it after
+	 * reset finished.
 	 */
-	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
 		return -EBUSY;
 	}
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	if (proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;
 
-	/* When device is resetting, firmware is unable to handle
-	 * mailbox. Just record the vlan id, and remove it after
-	 * reset finished.
+	/* When device is resetting or reset failed, firmware is unable to
+	 * handle mailbox. Just record the vlan id, and remove it after
+	 * reset finished.
 	 */
-	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
 		return -EBUSY;
 	}
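Both VLAN hunks (PF above in hclge_set_vlan_filter(), VF here) extend the same escape hatch to the reset-failed state: while the firmware cannot service the mailbox, a VLAN delete is recorded in vlan_del_fail_bmap and -EBUSY is returned, and the recorded IDs are replayed once the reset completes. A runnable userspace sketch of that record-and-replay idea (the replay hook is assumed; this diff only shows the recording side):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define VLAN_N 4096

static unsigned char del_fail_bmap[VLAN_N / 8];
static bool resetting = true;

static void bmap_set(int id)
{
	del_fail_bmap[id / 8] |= 1u << (id % 8);
}

static bool bmap_test(int id)
{
	return del_fail_bmap[id / 8] & (1u << (id % 8));
}

/* While a reset is running (or has failed), the mailbox is dead:
 * record the vlan id and report -EBUSY instead of losing the delete. */
static int del_vlan(int id)
{
	if (resetting) {
		bmap_set(id);
		return -EBUSY;
	}
	printf("deleted vlan %d via mailbox\n", id);
	return 0;
}

/* Once the reset completes, replay every recorded deletion. */
static void replay_after_reset(void)
{
	resetting = false;
	for (int id = 0; id < VLAN_N; id++)
		if (bmap_test(id))
			del_vlan(id);
	memset(del_fail_bmap, 0, sizeof(del_fail_bmap));
}

int main(void)
{
	del_vlan(10);		/* recorded, returns -EBUSY */
	del_vlan(42);
	replay_after_reset();	/* prints the two deletions */
	return 0;
}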
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
 {
 	struct hnae3_handle *nic = &hdev->nic;
 	struct hclge_vf_to_pf_msg send_msg;
+	int ret;
 
 	rtnl_lock();
-	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
-	rtnl_unlock();
+
+	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+		dev_warn(&hdev->pdev->dev,
+			 "is resetting when updating port based vlan info\n");
+		rtnl_unlock();
+		return;
+	}
+
+	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret) {
+		rtnl_unlock();
+		return;
+	}
 
 	/* send msg to PF and wait update port based vlan info */
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
 	memcpy(send_msg.data, port_base_vlan_info, data_size);
-	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
-	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
-		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
-	else
-		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+	if (!ret) {
+		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+			nic->port_base_vlan_state = state;
+		else
+			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+	}
 
-	rtnl_lock();
 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
 	rtnl_unlock();
 }
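The last hunk adds two guards to hclgevf_update_port_base_vlan_info(): bail out under rtnl_lock() when a reset is running or has failed, and stop ignoring the return codes of hclgevf_notify_client() and hclgevf_send_mbx_msg(), so port_base_vlan_state is only committed when the PF actually took the message. A small sketch of that commit-on-success shape (toy names, not the driver's API):

#include <stdio.h>

/* Toy stand-in for the mailbox send; returns 0 on success. */
static int send_msg(int fail)
{
	return fail ? -1 : 0;
}

/* Mirrors the shape of the fix: commit the new VLAN state only when
 * the PF actually acknowledged the message, instead of ignoring the
 * return code as the old code did. */
static int update_state(int *state, int new_state, int fail)
{
	int ret = send_msg(fail);

	if (!ret)
		*state = new_state;
	return ret;
}

int main(void)
{
	int state = 0;

	update_state(&state, 1, 1);
	printf("after failed send: state=%d\n", state);	/* unchanged */
	update_state(&state, 1, 0);
	printf("after good send:   state=%d\n", state);	/* updated */
	return 0;
}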