Commit 13faf771 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: add some cleanups & optimizations

This patchset includes some cleanups and optimizations for the HNS3
ethernet driver.

[patch 1/8] removes unused and unnecessary structures.

[patch 2/8] uses an ETH_ALEN-sized u8 array to replace the two
mac_addr_* fields in struct hclge_mac_mgr_tbl_entry_cmd.

[patch 3/8] optimizes the barrier used in the IO path.

[patch 4/8] introduces the macro ring_to_netdev() to get the netdevice
from a struct hns3_enet_ring variable (a standalone sketch of the
flattened ring layout follows the commit metadata below).

[patch 5/8] makes struct hns3_enet_ring cacheline aligned.

[patch 6/8] adds a minor cleanup for hns3_handle_rx_bd().

[patch 7/8] removes linear data allocation for fraglist SKBs.

[patch 8/8] clears hardware errors when resetting.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2f184393 4fdd0bca
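
The core of this series (patches 1, 4 and 5) flattens the per-queue
bookkeeping: instead of an array of struct hns3_nic_ring_data wrappers,
each holding a pointer to its ring, priv->ring becomes one contiguous
array of struct hns3_enet_ring, with the TX rings in the first half and
the RX rings in the second. A minimal, self-contained sketch of the
layout change (plain C with trimmed-down structs for illustration, not
the driver code itself):

#include <stdio.h>
#include <stdlib.h>

struct hns3_enet_ring {
	int queue_index;		/* moved into the ring itself by this series */
};

struct hns3_nic_priv {
	struct hns3_enet_ring *ring;	/* was: struct hns3_nic_ring_data *ring_data */
	int num_tqps;
};

/* was: priv->ring_data[i].ring -- one extra pointer chase per access */
static struct hns3_enet_ring *tx_ring(struct hns3_nic_priv *p, int i)
{
	return &p->ring[i];
}

/* RX rings live in the second half of the same array */
static struct hns3_enet_ring *rx_ring(struct hns3_nic_priv *p, int i)
{
	return &p->ring[i + p->num_tqps];
}

int main(void)
{
	struct hns3_nic_priv priv = { .num_tqps = 4 };
	int i;

	priv.ring = calloc(2 * priv.num_tqps, sizeof(*priv.ring));
	for (i = 0; i < 2 * priv.num_tqps; i++)
		priv.ring[i].queue_index = i % priv.num_tqps;

	printf("tx0 queue_index=%d, rx0 queue_index=%d\n",
	       tx_ring(&priv, 0)->queue_index, rx_ring(&priv, 0)->queue_index);
	free(priv.ring);
	return 0;
}

Dropping the wrapper removes a pointer dereference on every ring access
in the IO path and places the rings back-to-back in memory, which is
what makes the cacheline alignment added in patch 5 effective.
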
@@ -16,15 +16,14 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 			       const char *cmd_buf)
 {
 	struct hns3_nic_priv *priv = h->priv;
-	struct hns3_nic_ring_data *ring_data;
 	struct hns3_enet_ring *ring;
 	u32 base_add_l, base_add_h;
 	u32 queue_num, queue_max;
 	u32 value, i = 0;
 	int cnt;
 
-	if (!priv->ring_data) {
-		dev_err(&h->pdev->dev, "ring_data is NULL\n");
+	if (!priv->ring) {
+		dev_err(&h->pdev->dev, "priv->ring is NULL\n");
 		return -EFAULT;
 	}
 
@@ -44,7 +43,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 		return -EINVAL;
 	}
 
-	ring_data = priv->ring_data;
 	for (i = queue_num; i < queue_max; i++) {
 		/* Each cycle needs to determine whether the instance is reset,
 		 * to prevent reference to invalid memory. And need to ensure
@@ -54,7 +52,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 		    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
 			return -EPERM;
 
-		ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+		ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
 		base_add_h = readl_relaxed(ring->tqp->io_base +
 					   HNS3_RING_RX_RING_BASEADDR_H_REG);
 		base_add_l = readl_relaxed(ring->tqp->io_base +
@@ -86,7 +84,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 				       HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
 		dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
 
-		ring = ring_data[i].ring;
+		ring = &priv->ring[i];
 		base_add_h = readl_relaxed(ring->tqp->io_base +
 					   HNS3_RING_TX_RING_BASEADDR_H_REG);
 		base_add_l = readl_relaxed(ring->tqp->io_base +
@@ -130,7 +128,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 static int hns3_dbg_queue_map(struct hnae3_handle *h)
 {
 	struct hns3_nic_priv *priv = h->priv;
-	struct hns3_nic_ring_data *ring_data;
 	int i;
 
 	if (!h->ae_algo->ops->get_global_queue_id)
@@ -143,15 +140,12 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
 		u16 global_qid;
 
 		global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
-		ring_data = &priv->ring_data[i];
-		if (!ring_data || !ring_data->ring ||
-		    !ring_data->ring->tqp_vector)
+		if (!priv->ring || !priv->ring[i].tqp_vector)
 			continue;
 
 		dev_info(&h->pdev->dev,
 			 " %4d %4d %4d\n",
-			 i, global_qid,
-			 ring_data->ring->tqp_vector->vector_irq);
+			 i, global_qid, priv->ring[i].tqp_vector->vector_irq);
 	}
 
 	return 0;
@@ -160,7 +154,6 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
 static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 {
 	struct hns3_nic_priv *priv = h->priv;
-	struct hns3_nic_ring_data *ring_data;
 	struct hns3_desc *rx_desc, *tx_desc;
 	struct device *dev = &h->pdev->dev;
 	struct hns3_enet_ring *ring;
@@ -183,8 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 		return -EINVAL;
 	}
 
-	ring_data = priv->ring_data;
-	ring = ring_data[q_num].ring;
+	ring = &priv->ring[q_num];
 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
 	tx_index = (cnt == 1) ? value : tx_index;
 
@@ -214,7 +206,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 	dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
 	dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
 
-	ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+	ring = &priv->ring[q_num + h->kinfo.num_tqps];
 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
 	rx_index = (cnt == 1) ? value : tx_index;
 	rx_desc = &ring->desc[rx_index];
......
@@ -409,6 +409,7 @@ struct hns3_enet_ring {
 	struct hns3_enet_ring *next;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_queue *tqp;
+	int queue_index;
 	struct device *dev; /* will be used for DMA mapping of descriptors */
 
 	/* statistic */
@@ -434,18 +435,7 @@ struct hns3_enet_ring {
 	int pending_buf;
 	struct sk_buff *skb;
 	struct sk_buff *tail_skb;
-};
-
-struct hns_queue;
-
-struct hns3_nic_ring_data {
-	struct hns3_enet_ring *ring;
-	struct napi_struct napi;
-	int queue_index;
-	int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
-	void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
-	void (*fini_process)(struct hns3_nic_ring_data *);
-};
+} ____cacheline_internodealigned_in_smp;
 
 enum hns3_flow_level_range {
 	HNS3_FLOW_LOW = 0,
@@ -522,7 +512,7 @@ struct hns3_nic_priv {
 	 * the cb for nic to manage the ring buffer, the first half of the
 	 * array is for tx_ring and vice versa for the second half
 	 */
-	struct hns3_nic_ring_data *ring_data;
+	struct hns3_enet_ring *ring;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	u16 vector_num;
 
@@ -617,11 +607,11 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 
 #define ring_to_dev(ring) ((ring)->dev)
 
+#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
+
 #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
 	DMA_TO_DEVICE : DMA_FROM_DEVICE)
 
-#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-
 #define hns3_buf_size(_ring) ((_ring)->buf_size)
 
 static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
......
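
____cacheline_internodealigned_in_smp above aligns (and pads) each
struct hns3_enet_ring to the internode cacheline size on SMP kernels,
so two adjacent rings in the new flat array never share a cache line
and the queues' hot state cannot false-share. A standalone analogue
using C11 alignas, with 64 bytes as an assumed line size (the kernel
macro resolves the real per-arch value):

#include <stdalign.h>
#include <stdio.h>

/* stand-in for ____cacheline_internodealigned_in_smp; 64 is an assumption */
struct ring_hot_state {
	alignas(64) unsigned long next_to_use;
	unsigned long next_to_clean;
};

int main(void)
{
	struct ring_hot_state rings[2];

	/* sizeof is padded up to the alignment, so neighbouring array
	 * elements can never straddle the same 64-byte cache line
	 */
	printf("sizeof=%zu delta=%td\n", sizeof(struct ring_hot_state),
	       (char *)&rings[1] - (char *)&rings[0]);
	return 0;
}

Because sizeof is rounded up to the alignment, plain array indexing
preserves the guarantee for every element, not just the first.
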
@@ -203,7 +203,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
 	kinfo = &h->kinfo;
 
 	for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
-		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+		struct hns3_enet_ring *ring = &priv->ring[i];
 		struct hns3_enet_ring_group *rx_group;
 		u64 pre_rx_pkt;
 
@@ -226,7 +226,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
 	u32 i;
 
 	for (i = start_ringid; i <= end_ringid; i++) {
-		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+		struct hns3_enet_ring *ring = &priv->ring[i];
 
 		hns3_clean_tx_ring(ring);
 	}
@@ -491,7 +491,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 
 	/* get stats for Tx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
-		ring = nic_priv->ring_data[i].ring;
+		ring = &nic_priv->ring[i];
 		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
 			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
@@ -500,7 +500,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 
 	/* get stats for Rx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
-		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+		ring = &nic_priv->ring[i + kinfo->num_tqps];
 		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
 			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
@@ -603,8 +603,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	param->tx_max_pending = HNS3_RING_MAX_PENDING;
 	param->rx_max_pending = HNS3_RING_MAX_PENDING;
 
-	param->tx_pending = priv->ring_data[0].ring->desc_num;
-	param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+	param->tx_pending = priv->ring[0].desc_num;
+	param->rx_pending = priv->ring[queue_num].desc_num;
 }
 
 static void hns3_get_pauseparam(struct net_device *netdev,
@@ -906,9 +906,8 @@ static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
 	h->kinfo.num_rx_desc = rx_desc_num;
 
 	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		priv->ring_data[i].ring->desc_num = tx_desc_num;
-		priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
-			rx_desc_num;
+		priv->ring[i].desc_num = tx_desc_num;
+		priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
 	}
 }
 
@@ -924,7 +923,7 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
 		return NULL;
 
 	for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
-		memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+		memcpy(&tmp_rings[i], &priv->ring[i],
 		       sizeof(struct hns3_enet_ring));
 		tmp_rings[i].skb = NULL;
 	}
@@ -972,8 +971,8 @@ static int hns3_set_ringparam(struct net_device *ndev,
 	/* Hardware requires that its descriptors must be multiple of eight */
 	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
 	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
-	old_tx_desc_num = priv->ring_data[0].ring->desc_num;
-	old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+	old_tx_desc_num = priv->ring[0].desc_num;
+	old_rx_desc_num = priv->ring[queue_num].desc_num;
 	if (old_tx_desc_num == new_tx_desc_num &&
 	    old_rx_desc_num == new_rx_desc_num)
 		return 0;
@@ -1002,7 +1001,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
 		hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
 					    old_rx_desc_num);
 		for (i = 0; i < h->kinfo.num_tqps * 2; i++)
-			memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+			memcpy(&priv->ring[i], &tmp_rings[i],
 			       sizeof(struct hns3_enet_ring));
 	} else {
 		for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1103,8 +1102,8 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
 		return -EINVAL;
 	}
 
-	tx_vector = priv->ring_data[queue].ring->tqp_vector;
-	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+	tx_vector = priv->ring[queue].tqp_vector;
+	rx_vector = priv->ring[queue_num + queue].tqp_vector;
 
 	cmd->use_adaptive_tx_coalesce =
 		tx_vector->tx_group.coal.gl_adapt_enable;
@@ -1229,8 +1228,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int queue_num = h->kinfo.num_tqps;
 
-	tx_vector = priv->ring_data[queue].ring->tqp_vector;
-	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+	tx_vector = priv->ring[queue].tqp_vector;
+	rx_vector = priv->ring[queue_num + queue].tqp_vector;
 
 	tx_vector->tx_group.coal.gl_adapt_enable =
 		cmd->use_adaptive_tx_coalesce;
......
@@ -5,6 +5,7 @@
 #define __HCLGE_CMD_H
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/etherdevice.h>
 
 #define HCLGE_CMDQ_TX_TIMEOUT	30000
 
@@ -712,8 +713,7 @@ struct hclge_mac_mgr_tbl_entry_cmd {
 	u8 flags;
 	u8 resp_code;
 	__le16 vlan_tag;
-	__le32 mac_addr_hi32;
-	__le16 mac_addr_lo16;
+	u8 mac_addr[ETH_ALEN];
 	__le16 rsv1;
 	__le16 ethter_type;
 	__le16 egress_port;
......
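
Storing the filtered MAC as a plain ETH_ALEN byte array (the reason
linux/etherdevice.h is now included) lets the table entry be written
and compared byte-for-byte instead of hand-packing hi32/lo16 words via
a cpu_to_le32(htonl(...)) double conversion. A small standalone sketch
of the simplification, where memcmp stands in for the kernel's
ether_addr_equal():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* same value the kernel header provides */

/* trimmed version of hclge_mac_mgr_tbl_entry_cmd after this patch */
struct mac_mgr_entry {
	uint8_t mac_addr[ETH_ALEN];
};

int main(void)
{
	/* LLDP multicast MAC 01:80:c2:00:00:0e written directly as bytes */
	struct mac_mgr_entry e = {
		.mac_addr = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
	};
	const uint8_t lldp[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };

	/* in-kernel code would use ether_addr_equal()/ether_addr_copy() */
	printf("match=%d\n", memcmp(e.mac_addr, lldp, ETH_ALEN) == 0);
	return 0;
}

The byte-array form also sidesteps any endianness questions, since a
MAC address is a byte sequence rather than an integer.
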
@@ -325,8 +325,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
 	{
 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
-		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
-		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
 		.i_port_bitmap = 0x1,
 	},
 };
@@ -9801,6 +9800,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	/* Log and clear the hw errors those already occurred */
+	hclge_handle_all_hns_hw_errors(ae_dev);
+
 	/* Re-enable the hw error interrupts because
 	 * the interrupts get disabled on global reset.
 	 */
......