Commit d24b6c62 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: some code optimizations & bugfixes & features

This patch-set includes code optimizations, bugfixes and features for
the HNS3 ethernet controller driver.

[patch 1/10] checks the reset status before setting channels.

[patch 2/10] adds a NULL pointer check.

[patch 3/10] removes reset level upgrading when the current reset fails.

[patch 4/10] fixes a GFP flags error when holding a spin_lock.

[patch 5/10] modifies the firmware version display format.

[patch 6/10] adds some debug print information which is off by default.

[patch 7/10 - 8/10] add two code optimizations for the interrupt handler
and work task.

[patch 9/10] adds support for using order-1 pages with a 4K buffer.

[patch 10/10] modifies message printing to use dev_info() instead of
pr_info().

Change log:
V3->V4: replaces netif_info with netif_dbg in [patch 6/10]
V2->V3: addresses comments from Saeed Mahameed and Joe Perches.
V1->V2: addresses comments from Saeed Mahameed,
	removes previous [patch 4/11] and [patch 11/11],
	which need further discussion, and adds a new
	patch [10/10] suggested by Saeed Mahameed.
====================
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents dda91bd7 08d80a4c
@@ -179,6 +179,15 @@ struct hnae3_vector_info {
 #define HNAE3_RING_GL_RX 0
 #define HNAE3_RING_GL_TX 1
 
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+
 struct hnae3_ring_chain_node {
         struct hnae3_ring_chain_node *next;
         u32 tqp_index;
...
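Note: the firmware version is kept as a packed 32-bit word, and the new masks/shifts above split it into four dotted components for display. A rough, self-contained sketch of that decoding (plain userspace C, not part of the patch; GET_FIELD stands in for the driver's hnae3_get_field() helper, which is assumed to be the usual "(value & mask) >> shift" accessor):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define GET_FIELD(origin, mask, shift)  (((origin) & (mask)) >> (shift))

int main(void)
{
        uint32_t version = 0x01020304;  /* hypothetical packed value */

        /* prints "1.2.3.4", one byte per dotted component */
        printf("%u.%u.%u.%u\n",
               GET_FIELD(version, GENMASK(31, 24), 24),
               GET_FIELD(version, GENMASK(23, 16), 16),
               GET_FIELD(version, GENMASK(15, 8), 8),
               GET_FIELD(version, GENMASK(7, 0), 0));
        return 0;
}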
@@ -459,6 +459,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
         h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
 
         hns3_config_xps(priv);
+
+        netif_dbg(h, drv, netdev, "net open\n");
+
         return 0;
 }
 
@@ -519,6 +522,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
                 return 0;
 
+        netif_dbg(h, drv, netdev, "net stop\n");
+
         if (h->ae_algo->ops->set_timer_task)
                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
 
@@ -1550,6 +1555,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
         h = hns3_get_handle(netdev);
         kinfo = &h->kinfo;
 
+        netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
         return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
 }
 
@@ -1593,6 +1600,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
         struct hnae3_handle *h = hns3_get_handle(netdev);
         int ret = -EIO;
 
+        netif_dbg(h, drv, netdev,
+                  "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
+                  vf, vlan, qos, vlan_proto);
+
         if (h->ae_algo->ops->set_vf_vlan_filter)
                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
                                                           qos, vlan_proto);
 
@@ -1611,6 +1622,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
         if (!h->ae_algo->ops->set_mtu)
                 return -EOPNOTSUPP;
 
+        netif_dbg(h, drv, netdev,
+                  "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
         if (ret)
                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
 
@@ -1963,7 +1977,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
         ops = ae_dev->ops;
         /* request the reset */
-        if (ops->reset_event) {
+        if (ops->reset_event && ops->get_reset_level) {
                 if (ae_dev->hw_err_reset_req) {
                         reset_type = ops->get_reset_level(ae_dev,
                                                           &ae_dev->hw_err_reset_req);
 
@@ -2067,7 +2081,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
                              struct hns3_desc_cb *cb)
 {
-        unsigned int order = hnae3_page_order(ring);
+        unsigned int order = hns3_page_order(ring);
         struct page *p;
 
         p = dev_alloc_pages(order);
 
@@ -2078,7 +2092,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
         cb->page_offset = 0;
         cb->reuse_flag = 0;
         cb->buf = page_address(p);
-        cb->length = hnae3_page_size(ring);
+        cb->length = hns3_page_size(ring);
         cb->type = DESC_TYPE_PAGE;
 
         return 0;
 
@@ -2381,7 +2395,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 {
         struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
         int size = le16_to_cpu(desc->rx.size);
-        u32 truesize = hnae3_buf_size(ring);
+        u32 truesize = hns3_buf_size(ring);
 
         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
                         size - pull_len, truesize);
 
@@ -2396,7 +2410,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
         /* Move offset up to the next cache line */
         desc_cb->page_offset += truesize;
 
-        if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+        if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
                 desc_cb->reuse_flag = 1;
                 /* Bump ref count on page before it is given */
                 get_page(desc_cb->priv);
 
@@ -2678,7 +2692,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
         }
 
         if (ring->tail_skb) {
-                head_skb->truesize += hnae3_buf_size(ring);
+                head_skb->truesize += hns3_buf_size(ring);
                 head_skb->data_len += le16_to_cpu(desc->rx.size);
                 head_skb->len += le16_to_cpu(desc->rx.size);
                 skb = ring->tail_skb;
 
@@ -4378,6 +4392,9 @@ int hns3_set_channels(struct net_device *netdev,
         u16 org_tqp_num;
         int ret;
 
+        if (hns3_nic_resetting(netdev))
+                return -EBUSY;
+
         if (ch->rx_count || ch->tx_count)
                 return -EINVAL;
 
@@ -4392,6 +4409,10 @@ int hns3_set_channels(struct net_device *netdev,
         if (kinfo->rss_size == new_tqp_num)
                 return 0;
 
+        netif_dbg(h, drv, netdev,
+                  "set channels: tqp_num=%u, rxfh=%d\n",
+                  new_tqp_num, rxfh_configured);
+
         ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
         if (ret)
                 return ret;
...
@@ -608,9 +608,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
 
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+        if (ring->buf_size > (PAGE_SIZE / 2))
+                return 1;
+#endif
+        return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
 
 /* iterator for handling rings in ring group */
 #define hns3_for_each_ring(pos, head) \
...
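Note: with the old get_order() based macro, a 4K RX buffer on a 4K-page system got exactly one page per buffer, so the reuse condition in hns3_nic_reuse_page() (offset + truesize <= page size) could never hold after the first buffer was consumed. The new hns3_page_order() switches to an order-1 (two-page) allocation whenever the buffer needs more than half a page, so two 4K buffers share one 8K allocation and the second half can be handed out while the first is still in flight. A small stand-alone sketch of that decision (illustrative example_* names and values, not driver code):

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u         /* assume a 4K-page system */

static unsigned int example_page_order(unsigned int buf_size)
{
        /* mirrors hns3_page_order(): order 1 when the buffer needs
         * more than half a page, order 0 otherwise
         */
        if (EXAMPLE_PAGE_SIZE < 8192 && buf_size > EXAMPLE_PAGE_SIZE / 2)
                return 1;
        return 0;
}

int main(void)
{
        unsigned int buf_size = 4096;   /* 4K RX buffer */
        unsigned int order = example_page_order(buf_size);
        unsigned long alloc = (unsigned long)EXAMPLE_PAGE_SIZE << order;

        /* prints: order=1, allocation=8192 bytes, buffers per allocation=2 */
        printf("order=%u, allocation=%lu bytes, buffers per allocation=%lu\n",
               order, alloc, alloc / buf_size);
        return 0;
}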
@@ -311,6 +311,8 @@ static void hns3_self_test(struct net_device *ndev,
         if (eth_test->flags != ETH_TEST_FL_OFFLINE)
                 return;
 
+        netif_dbg(h, drv, ndev, "self test start");
+
         st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
         st_param[HNAE3_LOOP_APP][1] =
                         h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
 
@@ -374,6 +376,8 @@ static void hns3_self_test(struct net_device *ndev,
         if (if_running)
                 ndev->netdev_ops->ndo_open(ndev);
+
+        netif_dbg(h, drv, ndev, "self test end\n");
 }
 
 static int hns3_get_sset_count(struct net_device *netdev, int stringset)
 
@@ -527,6 +531,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
 {
         struct hns3_nic_priv *priv = netdev_priv(netdev);
         struct hnae3_handle *h = priv->ae_handle;
+        u32 fw_version;
 
         if (!h->ae_algo->ops->get_fw_version) {
                 netdev_err(netdev, "could not get fw version!\n");
 
@@ -545,8 +550,18 @@ static void hns3_get_drvinfo(struct net_device *netdev,
                 sizeof(drvinfo->bus_info));
         drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
 
-        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
-                 priv->ae_handle->ae_algo->ops->get_fw_version(h));
+        fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                 "%lu.%lu.%lu.%lu",
+                 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+                                 HNAE3_FW_VERSION_BYTE3_SHIFT),
+                 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+                                 HNAE3_FW_VERSION_BYTE2_SHIFT),
+                 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+                                 HNAE3_FW_VERSION_BYTE1_SHIFT),
+                 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+                                 HNAE3_FW_VERSION_BYTE0_SHIFT));
 }
 
 static u32 hns3_get_link(struct net_device *netdev)
 
@@ -593,6 +608,10 @@ static int hns3_set_pauseparam(struct net_device *netdev,
 {
         struct hnae3_handle *h = hns3_get_handle(netdev);
 
+        netif_dbg(h, drv, netdev,
+                  "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+                  param->autoneg, param->rx_pause, param->tx_pause);
+
         if (h->ae_algo->ops->set_pauseparam)
                 return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
                                                        param->rx_pause,
 
@@ -732,6 +751,11 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
         if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
                 return -EINVAL;
 
+        netif_dbg(handle, drv, netdev,
+                  "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+                  netdev->phydev ? "phy" : "mac",
+                  cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
         /* Only support ksettings_set for netdev with phy attached for now */
         if (netdev->phydev)
                 return phy_ethtool_ksettings_set(netdev->phydev, cmd);
 
@@ -973,6 +997,9 @@ static int hns3_nway_reset(struct net_device *netdev)
                 return -EINVAL;
         }
 
+        netif_dbg(handle, drv, netdev,
+                  "nway reset (using %s)\n", phy ? "phy" : "mac");
+
         if (phy)
                 return genphy_restart_aneg(phy);
 
@@ -1297,6 +1324,9 @@ static int hns3_set_fecparam(struct net_device *netdev,
         if (!ops->set_fec)
                 return -EOPNOTSUPP;
         fec_mode = eth_to_loc_fec(fec->fec);
+
+        netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
         return ops->set_fec(handle, fec_mode);
 }
...
@@ -419,7 +419,15 @@ int hclge_cmd_init(struct hclge_dev *hdev)
         }
         hdev->fw_version = version;
 
-        dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+        dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+                                 HNAE3_FW_VERSION_BYTE3_SHIFT),
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+                                 HNAE3_FW_VERSION_BYTE2_SHIFT),
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+                                 HNAE3_FW_VERSION_BYTE1_SHIFT),
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+                                 HNAE3_FW_VERSION_BYTE0_SHIFT));
 
         return 0;
...
@@ -201,6 +201,7 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev)
 static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
 {
         struct hclge_vport *vport = hclge_get_vport(h);
+        struct net_device *netdev = h->kinfo.netdev;
         struct hclge_dev *hdev = vport->back;
         bool map_changed = false;
         u8 num_tc = 0;
 
@@ -215,6 +216,8 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
                 return ret;
 
         if (map_changed) {
+                netif_dbg(h, drv, netdev, "set ets\n");
+
                 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
                 if (ret)
                         return ret;
 
@@ -300,6 +303,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 {
         struct hclge_vport *vport = hclge_get_vport(h);
+        struct net_device *netdev = h->kinfo.netdev;
         struct hclge_dev *hdev = vport->back;
         u8 i, j, pfc_map, *prio_tc;
 
@@ -325,6 +329,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
         hdev->tm_info.hw_pfc_map = pfc_map;
         hdev->tm_info.pfc_en = pfc->pfc_en;
 
+        netif_dbg(h, drv, netdev,
+                  "set pfc: pfc_en=%u, pfc_map=%u, num_tc=%u\n",
+                  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
         hclge_tm_pfc_info_update(hdev);
 
         return hclge_pause_setup_hw(hdev, false);
 
@@ -345,8 +353,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
 static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
 {
         struct hclge_vport *vport = hclge_get_vport(h);
+        struct net_device *netdev = h->kinfo.netdev;
         struct hclge_dev *hdev = vport->back;
 
+        netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
         /* No support for LLD_MANAGED modes or CEE */
         if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
             (mode & DCB_CAP_DCBX_VER_CEE) ||
...
@@ -1270,6 +1270,12 @@ static int hclge_configure(struct hclge_dev *hdev)
 
         hclge_init_kdump_kernel_config(hdev);
 
+        /* Set the init affinity based on pci func number */
+        i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+        i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+        cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+                        &hdev->affinity_mask);
+
         return ret;
 }
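As an illustrative example (the numbers are hypothetical, not from the patch): if the device's NUMA node has 24 online CPUs and the PCI function number is 5, then i = 5 % 24 = 5 and cpumask_local_spread(5, node) picks the sixth CPU local to that node as the initial affinity, so multiple functions of the same device start out spread across different local CPUs.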
@@ -2499,22 +2505,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
-                schedule_work(&hdev->mbx_service_task);
+                queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+                              &hdev->mbx_service_task);
 }
 
 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 {
         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
-                schedule_work(&hdev->rst_service_task);
+                queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+                              &hdev->rst_service_task);
 }
 
 static void hclge_task_schedule(struct hclge_dev *hdev)
 {
         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
-            !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
-                (void)schedule_work(&hdev->service_task);
+            !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
+                hdev->hw_stats.stats_timer++;
+                hdev->fd_arfs_expire_timer++;
+                mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+                                    system_wq, &hdev->service_task,
+                                    round_jiffies_relative(HZ));
+        }
 }
 
 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
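Note: the three schedule helpers above stop using plain schedule_work()/timer arming and instead queue the work on the first CPU of hdev->affinity_mask, with the service task becoming a delayed_work that re-arms itself (see hclge_service_task further down). A minimal, generic sketch of that self-rescheduling pattern, using made-up example_* names rather than the hclge functions:

#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct delayed_work example_task;
static cpumask_t example_mask;

static void example_task_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... periodic housekeeping goes here ... */

        /* re-arm roughly once per second on a CPU from the affinity mask */
        mod_delayed_work_on(cpumask_first(&example_mask), system_wq,
                            dwork, round_jiffies_relative(HZ));
}

static void example_start(void)
{
        cpumask_copy(&example_mask, cpu_online_mask);
        INIT_DELAYED_WORK(&example_task, example_task_fn);
        mod_delayed_work_on(cpumask_first(&example_mask), system_wq,
                            &example_task, round_jiffies_relative(HZ));
}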
@@ -2729,25 +2742,6 @@ static int hclge_get_status(struct hnae3_handle *handle)
         return hdev->hw.mac.link;
 }
 
-static void hclge_service_timer(struct timer_list *t)
-{
-        struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
-
-        mod_timer(&hdev->service_timer, jiffies + HZ);
-        hdev->hw_stats.stats_timer++;
-        hdev->fd_arfs_expire_timer++;
-        hclge_task_schedule(hdev);
-}
-
-static void hclge_service_complete(struct hclge_dev *hdev)
-{
-        WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
-
-        /* Flush memory before next watchdog */
-        smp_mb__before_atomic();
-
-        clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-}
-
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
 
@@ -2918,6 +2912,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
         hdev->num_msi_used += 1;
 }
 
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+                                      const cpumask_t *mask)
+{
+        struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+                                              affinity_notify);
+
+        cpumask_copy(&hdev->affinity_mask, mask);
+}
+
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+        irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+                              &hdev->affinity_mask);
+
+        hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+        hdev->affinity_notify.release = hclge_irq_affinity_release;
+        irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+                                  &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+        irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+        irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
+
 static int hclge_misc_irq_init(struct hclge_dev *hdev)
 {
         int ret;
@@ -3305,7 +3329,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
         return ret;
 }
 
-static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
 {
 #define MAX_RESET_FAIL_CNT 5
 
@@ -3322,20 +3346,11 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
                 return false;
         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
                 hdev->reset_fail_cnt++;
-                if (is_timeout) {
-                        set_bit(hdev->reset_type, &hdev->reset_pending);
-                        dev_info(&hdev->pdev->dev,
-                                 "re-schedule to wait for hw reset done\n");
-                        return true;
-                }
-
-                dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
-                hclge_clear_reset_cause(hdev);
-                set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
-                mod_timer(&hdev->reset_timer,
-                          jiffies + HCLGE_RESET_INTERVAL);
-
-                return false;
+                set_bit(hdev->reset_type, &hdev->reset_pending);
+                dev_info(&hdev->pdev->dev,
+                         "re-schedule reset task(%d)\n",
+                         hdev->reset_fail_cnt);
+                return true;
         }
 
         hclge_clear_reset_cause(hdev);
 
@@ -3382,7 +3397,6 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
 static void hclge_reset(struct hclge_dev *hdev)
 {
         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-        bool is_timeout = false;
         int ret;
 
         /* Initialize ae_dev reset status as well, in case enet layer wants to
 
@@ -3410,10 +3424,8 @@ static void hclge_reset(struct hclge_dev *hdev)
         if (ret)
                 goto err_reset;
 
-        if (hclge_reset_wait(hdev)) {
-                is_timeout = true;
+        if (hclge_reset_wait(hdev))
                 goto err_reset;
-        }
 
         hdev->rst_stats.hw_reset_done_cnt++;
 
@@ -3465,7 +3477,7 @@ static void hclge_reset(struct hclge_dev *hdev)
 err_reset_lock:
         rtnl_unlock();
 err_reset:
-        if (hclge_reset_err_handle(hdev, is_timeout))
+        if (hclge_reset_err_handle(hdev))
                 hclge_reset_task_schedule(hdev);
 }
 
@@ -3606,7 +3618,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
 static void hclge_service_task(struct work_struct *work)
 {
         struct hclge_dev *hdev =
-                container_of(work, struct hclge_dev, service_task);
+                container_of(work, struct hclge_dev, service_task.work);
+
+        clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 
         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
                 hclge_update_stats_for_all(hdev);
 
@@ -3621,7 +3635,8 @@ static void hclge_service_task(struct work_struct *work)
                 hclge_rfs_filter_expire(hdev);
                 hdev->fd_arfs_expire_timer = 0;
         }
-        hclge_service_complete(hdev);
+
+        hclge_task_schedule(hdev);
 }
 
 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -5808,7 +5823,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
                         return -ENOSPC;
                 }
 
-                rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+                rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
                 if (!rule) {
                         spin_unlock_bh(&hdev->fd_rule_lock);
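Note: the aRFS rule above is allocated while fd_rule_lock is held via spin_lock_bh(), so the allocation must not sleep; GFP_KERNEL may sleep to reclaim memory, while GFP_ATOMIC never does (it can fail instead and must be checked). A generic stand-alone illustration of the rule, using made-up example_* names rather than the driver's structures:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_rule {
        int id;
};

static DEFINE_SPINLOCK(example_lock);

static struct example_rule *example_alloc_locked(void)
{
        struct example_rule *rule;

        spin_lock_bh(&example_lock);
        /* GFP_ATOMIC: sleeping here (as GFP_KERNEL may do) is not
         * allowed while the spinlock is held
         */
        rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
        spin_unlock_bh(&example_lock);

        return rule;    /* may be NULL; caller must check */
}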
@@ -6160,10 +6175,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
         struct hclge_dev *hdev = vport->back;
 
         if (enable) {
-                mod_timer(&hdev->service_timer, jiffies + HZ);
+                hclge_task_schedule(hdev);
         } else {
-                del_timer_sync(&hdev->service_timer);
-                cancel_work_sync(&hdev->service_task);
+                /* Set the DOWN flag here to disable the service to be
+                 * scheduled again
+                 */
+                set_bit(HCLGE_STATE_DOWN, &hdev->state);
+                cancel_delayed_work_sync(&hdev->service_task);
                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
         }
 }
 
@@ -8602,12 +8620,10 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
         set_bit(HCLGE_STATE_DOWN, &hdev->state);
         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
 
-        if (hdev->service_timer.function)
-                del_timer_sync(&hdev->service_timer);
         if (hdev->reset_timer.function)
                 del_timer_sync(&hdev->reset_timer);
-        if (hdev->service_task.func)
-                cancel_work_sync(&hdev->service_task);
+        if (hdev->service_task.work.func)
+                cancel_delayed_work_sync(&hdev->service_task);
         if (hdev->rst_service_task.func)
                 cancel_work_sync(&hdev->rst_service_task);
         if (hdev->mbx_service_task.func)
 
@@ -8812,12 +8828,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 
         hclge_dcb_ops_set(hdev);
 
-        timer_setup(&hdev->service_timer, hclge_service_timer, 0);
         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
-        INIT_WORK(&hdev->service_task, hclge_service_task);
+        INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
+        /* Setup affinity after service timer setup because add_timer_on
+         * is called in affinity notify.
+         */
+        hclge_misc_affinity_setup(hdev);
+
         hclge_clear_all_event_cause(hdev);
         hclge_clear_resetting_state(hdev);
 
@@ -8842,7 +8862,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
         hclge_state_init(hdev);
         hdev->last_reset_time = jiffies;
 
-        pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
+        dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+                 HCLGE_DRIVER_NAME);
+
         return 0;
 
 err_mdiobus_unreg:
 
@@ -8979,6 +9001,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
         struct hclge_dev *hdev = ae_dev->priv;
         struct hclge_mac *mac = &hdev->hw.mac;
 
+        hclge_misc_affinity_teardown(hdev);
         hclge_state_uninit(hdev);
 
         if (mac->phydev)
...
@@ -806,9 +806,8 @@ struct hclge_dev {
         u16 adminq_work_limit; /* Num of admin receive queue desc to process */
         unsigned long service_timer_period;
         unsigned long service_timer_previous;
-        struct timer_list service_timer;
         struct timer_list reset_timer;
-        struct work_struct service_task;
+        struct delayed_work service_task;
         struct work_struct rst_service_task;
         struct work_struct mbx_service_task;
 
@@ -864,6 +863,10 @@ struct hclge_dev {
         DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
                       HCLGE_MAC_TNL_LOG_SIZE);
+
+        /* affinity mask and notify for misc interrupt */
+        cpumask_t affinity_mask;
+        struct irq_affinity_notify affinity_notify;
 };
 
 /* VPort level vlan tag configuration for TX direction */
...
@@ -405,7 +405,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
         }
         hdev->fw_version = version;
 
-        dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+        dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+                                 HNAE3_FW_VERSION_BYTE3_SHIFT),
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+                                 HNAE3_FW_VERSION_BYTE2_SHIFT),
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+                                 HNAE3_FW_VERSION_BYTE1_SHIFT),
+                 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+                                 HNAE3_FW_VERSION_BYTE0_SHIFT));
 
         return 0;
...
@@ -2695,7 +2695,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
         }
 
         hdev->last_reset_time = jiffies;
-        pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+        dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+                 HCLGEVF_DRIVER_NAME);
 
         return 0;
...