Commit 6cc40834 authored by David S. Miller

Merge branch 'hns3-tm-fixes'

Yunsheng Lin says:

====================
TM-related bugfixes for the HNS3 Ethernet Driver

This patch set contains a few bugfixes for the hclge_tm module.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4c4d11b9 c5795c53
@@ -49,7 +49,17 @@
 #define HNAE3_CLASS_NAME_SIZE 16
 #define HNAE3_DEV_INITED_B		0x0
-#define HNAE_DEV_SUPPORT_ROCE_B		0x1
+#define HNAE3_DEV_SUPPORT_ROCE_B	0x1
+#define HNAE3_DEV_SUPPORT_DCB_B		0x2
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
+		BIT(HNAE3_DEV_SUPPORT_ROCE_B))
+#define hnae3_dev_roce_supported(hdev) \
+	hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+#define hnae3_dev_dcb_supported(hdev) \
+	hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
 #define ring_ptr_move_fw(ring, p) \
 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -366,12 +376,12 @@ struct hnae3_ae_algo {
 struct hnae3_tc_info {
 	u16	tqp_offset;	/* TQP offset from base TQP */
 	u16	tqp_count;	/* Total TQPs */
-	u8	up;		/* user priority */
 	u8	tc;		/* TC index */
 	bool	enable;		/* If this TC is enable or not */
 };
 #define HNAE3_MAX_TC		8
+#define HNAE3_MAX_USER_PRIO	8
 struct hnae3_knic_private_info {
 	struct net_device *netdev; /* Set by KNIC client when init instance */
 	u16 rss_size;		   /* Allocated RSS queues */
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
 	u16 num_desc;
 	u8 num_tc;		   /* Total number of enabled TCs */
+	u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
 	struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
 	u16 num_tqps;		   /* total number of TQPs in this handle */
......
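For readers skimming the diff: the new hnae3_dev_roce_supported()/hnae3_dev_dcb_supported() helpers are plain bit tests against a per-device flag word. A minimal standalone sketch of the pattern, with get_bit() standing in for the kernel's hnae_get_bit() and all other names simplified:

	#include <stdio.h>

	#define BIT(n)				(1UL << (n))
	#define DEV_SUPPORT_ROCE_B		0x1	/* bit position, not a mask */
	#define DEV_SUPPORT_DCB_B		0x2
	#define DEV_SUPPORT_ROCE_DCB_BITS	(BIT(DEV_SUPPORT_DCB_B) | \
						 BIT(DEV_SUPPORT_ROCE_B))

	/* stand-in for hnae_get_bit(): test one bit position in the flag word */
	static int get_bit(unsigned long flag, int pos)
	{
		return (flag >> pos) & 0x1;
	}

	int main(void)
	{
		/* RDMA-capable devices get both bits via PCI driver_data */
		unsigned long flag = DEV_SUPPORT_ROCE_DCB_BITS;

		printf("roce supported: %d\n", get_bit(flag, DEV_SUPPORT_ROCE_B));
		printf("dcb supported:  %d\n", get_bit(flag, DEV_SUPPORT_DCB_B));
		return 0;
	}

Note that the _B suffix denotes a bit position, which is why the combined mask composes the positions with BIT() rather than OR-ing the values directly.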
@@ -270,7 +270,8 @@ struct hclge_tx_buff_alloc {
 struct hclge_rx_priv_buff {
 	__le16 buf_num[HCLGE_TC_NUM];
-	u8 rsv[8];
+	__le16 shared_buf;
+	u8 rsv[6];
 };
 struct hclge_query_version {
@@ -688,6 +689,7 @@ struct hclge_reset_tqp_queue {
 #define HCLGE_DEFAULT_TX_BUF		0x4000	 /* 16k bytes */
 #define HCLGE_TOTAL_PKT_BUF		0x108000 /* 1.03125M bytes */
 #define HCLGE_DEFAULT_DV		0xA000	 /* 40k byte */
+#define HCLGE_DEFAULT_NON_DCB_DV	0x7800	 /* 30K byte */
 #define HCLGE_TYPE_CRQ			0
 #define HCLGE_TYPE_CSQ			1
......
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-	/* Required last entry */
-	{0, }
-};
-
-static const struct pci_device_id roce_pci_tbl[] = {
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
-	/* Required last entry */
+	/* required last entry */
 	{0, }
 };
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
-	if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
+	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->num_roce_msix =
 		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
 	tc_num = hclge_get_tc_num(hdev);
 	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
-	shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+	if (hnae3_dev_dcb_supported(hdev))
+		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+	else
+		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+
 	shared_buf_tc = pfc_enable_num * hdev->mps +
 			(tc_num - pfc_enable_num) * hdev->mps / 2 +
 			hdev->mps;
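The DCB/non-DCB split above only changes the delay-value headroom added on top of two maximum-size packets. A minimal sketch of the arithmetic; the mps value here is illustrative, not taken from the patch:

	#include <stdio.h>

	#define DEFAULT_DV		0xA000	/* 40k bytes, DCB-capable device */
	#define DEFAULT_NON_DCB_DV	0x7800	/* 30k bytes, non-DCB device */

	static unsigned int shared_buf_min(unsigned int mps, int dcb_supported)
	{
		/* room for two maximum-size packets plus the delay value */
		return 2 * mps + (dcb_supported ? DEFAULT_DV : DEFAULT_NON_DCB_DV);
	}

	int main(void)
	{
		unsigned int mps = 0x600;	/* hypothetical max packet size */

		printf("DCB min shared buf:     0x%x bytes\n", shared_buf_min(mps, 1));
		printf("non-DCB min shared buf: 0x%x bytes\n", shared_buf_min(mps, 0));
		return 0;
	}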
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
 	struct hclge_priv_buf *priv;
 	int i;
+	/* When DCB is not supported, rx private
+	 * buffer is not allocated.
+	 */
+	if (!hnae3_dev_dcb_supported(hdev)) {
+		if (!hclge_is_rx_buf_ok(hdev, rx_all))
+			return -ENOMEM;
+
+		return 0;
+	}
+
 	/* step 1, try to alloc private buffer for all enabled tc */
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		priv = &hdev->priv_buf[i];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
 				priv->wl.high = 2 * hdev->mps;
 				priv->buf_size = priv->wl.high;
 			}
+		} else {
+			priv->enable = 0;
+			priv->wl.low = 0;
+			priv->wl.high = 0;
+			priv->buf_size = 0;
 		}
 	}
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		priv = &hdev->priv_buf[i];
-		if (hdev->hw_tc_map & BIT(i))
-			priv->enable = 1;
+		priv->enable = 0;
+		priv->wl.low = 0;
+		priv->wl.high = 0;
+		priv->buf_size = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		priv->enable = 1;
 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
 			priv->wl.low = 128;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
 			cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
 	}
+	req->shared_buf =
+		cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
 		return ret;
 	}
-	ret = hclge_rx_priv_wl_config(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"could not configure rx private waterline %d\n", ret);
-		return ret;
-	}
-
-	ret = hclge_common_thrd_config(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"could not configure common threshold %d\n", ret);
-		return ret;
+	if (hnae3_dev_dcb_supported(hdev)) {
+		ret = hclge_rx_priv_wl_config(hdev);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"could not configure rx private waterline %d\n",
+				ret);
+			return ret;
+		}
+
+		ret = hclge_common_thrd_config(hdev);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"could not configure common threshold %d\n",
+				ret);
+			return ret;
+		}
 	}
 	ret = hclge_common_wl_config(hdev);
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	u16 tc_valid[HCLGE_MAX_TC_NUM];
 	u16 tc_size[HCLGE_MAX_TC_NUM];
 	u32 *rss_indir = NULL;
+	u16 rss_size = 0, roundup_size;
 	const u8 *key;
 	int i, ret, j;
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
 			vport[j].rss_indirection_tbl[i] =
-				i % hdev->rss_size_max;
+				i % vport[j].alloc_rss_size;
+
+			/* vport 0 is for PF */
+			if (j != 0)
+				continue;
+
+			rss_size = vport[j].alloc_rss_size;
 			rss_indir[i] = vport[j].rss_indirection_tbl[i];
 		}
 	}
@@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	if (ret)
 		goto err;
+	/* Each TC has the same queue size, and the tc_size set to hardware is
+	 * the log2 of the roundup power of two of rss_size; the actual queue
+	 * size is limited by the indirection table.
+	 */
+	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+		dev_err(&hdev->pdev->dev,
+			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
+			rss_size);
+		return -EINVAL;
+	}
+
+	roundup_size = roundup_pow_of_two(rss_size);
+	roundup_size = ilog2(roundup_size);
+
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-		if (hdev->hw_tc_map & BIT(i))
-			tc_valid[i] = 1;
-		else
-			tc_valid[i] = 0;
-
-		switch (hdev->rss_size_max) {
-		case HCLGE_RSS_TC_SIZE_0:
-			tc_size[i] = 0;
-			break;
-		case HCLGE_RSS_TC_SIZE_1:
-			tc_size[i] = 1;
-			break;
-		case HCLGE_RSS_TC_SIZE_2:
-			tc_size[i] = 2;
-			break;
-		case HCLGE_RSS_TC_SIZE_3:
-			tc_size[i] = 3;
-			break;
-		case HCLGE_RSS_TC_SIZE_4:
-			tc_size[i] = 4;
-			break;
-		case HCLGE_RSS_TC_SIZE_5:
-			tc_size[i] = 5;
-			break;
-		case HCLGE_RSS_TC_SIZE_6:
-			tc_size[i] = 6;
-			break;
-		case HCLGE_RSS_TC_SIZE_7:
-			tc_size[i] = 7;
-			break;
-		default:
-			break;
-		}
-		tc_offset[i] = hdev->rss_size_max * i;
+		tc_valid[i] = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		tc_valid[i] = 1;
+		tc_size[i] = roundup_size;
+		tc_offset[i] = rss_size * i;
 	}
 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
 err:
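The comment added above captures why the old switch over HCLGE_RSS_TC_SIZE_* values can collapse to two calls: the hardware wants tc_size expressed as log2(roundup_pow_of_two(rss_size)). A standalone sketch, with simplified stand-ins for the kernel's roundup_pow_of_two() and ilog2():

	#include <stdio.h>

	/* simplified stand-in for the kernel's roundup_pow_of_two() */
	static unsigned int roundup_pow_of_two(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	/* simplified stand-in for the kernel's ilog2(); n must be a power of two */
	static unsigned int ilog2(unsigned int n)
	{
		unsigned int log = 0;

		while (n >>= 1)
			log++;
		return log;
	}

	int main(void)
	{
		unsigned int rss_size[] = { 1, 16, 24, 100 };
		unsigned int i;

		for (i = 0; i < sizeof(rss_size) / sizeof(rss_size[0]); i++)
			printf("rss_size %3u -> tc_size %u\n", rss_size[i],
			       ilog2(roundup_pow_of_two(rss_size[i])));
		return 0;
	}

So rss_size = 24 rounds up to 32 and yields tc_size = 5; sizes that are not exact powers of two fall out naturally, whereas the removed switch only matched exact HCLGE_RSS_TC_SIZE_* values and left tc_size untouched in its default case.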
@@ -3932,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 				goto err;
 			if (hdev->roce_client &&
-			    hnae_get_bit(hdev->ae_dev->flag,
-					 HNAE_DEV_SUPPORT_ROCE_B)) {
+			    hnae3_dev_roce_supported(hdev)) {
 				struct hnae3_client *rc = hdev->roce_client;
 				ret = hclge_init_roce_base_info(vport);
@@ -3956,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 			break;
 		case HNAE3_CLIENT_ROCE:
-			if (hnae_get_bit(hdev->ae_dev->flag,
-					 HNAE_DEV_SUPPORT_ROCE_B)) {
+			if (hnae3_dev_roce_supported(hdev)) {
 				hdev->roce_client = client;
 				vport->roce.client = client;
 			}
@@ -4069,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
-	const struct pci_device_id *id;
 	struct hclge_dev *hdev;
 	int ret;
@@ -4084,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->ae_dev = ae_dev;
 	ae_dev->priv = hdev;
-	id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
-	if (id)
-		hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
-
 	ret = hclge_pci_init(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "PCI init failed\n");
@@ -4150,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
-	ret = hclge_rss_init_hw(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
-		return ret;
-	}
-
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -4168,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
+	ret = hclge_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+		return ret;
+	}
+
 	setup_timer(&hdev->service_timer, hclge_service_timer,
 		    (unsigned long)hdev);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
......
@@ -176,7 +176,6 @@ struct hclge_pg_info {
 struct hclge_tc_info {
 	u8 tc_id;
 	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
-	u8 up;
 	u8 pgid;
 	u32 bw_limit;
 };
@@ -197,6 +196,7 @@ struct hclge_tm_info {
 	u8 num_tc;
 	u8 num_pg;	/* It must be 1 if vNET-Base schd */
 	u8 pg_dwrr[HCLGE_PG_NUM];
+	u8 prio_tc[HNAE3_MAX_USER_PRIO];
 	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
 	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
 	enum hclge_fc_mode fc_mode;
@@ -477,6 +477,7 @@ struct hclge_vport {
 	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
 	/* User configured lookup table entries */
 	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+	u16 alloc_rss_size;
 	u16 qs_offset;
 	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
......
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
 {
 	u8 tc;
-	for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
-		if (hdev->tm_info.tc_info[tc].up == pri_id)
-			break;
+	tc = hdev->tm_info.prio_tc[pri_id];
 	if (tc >= hdev->tm_info.num_tc)
 		return -EINVAL;
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
-	for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
+	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
 		if (ret)
 			return ret;
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
 	shap_cfg_cmd->pg_id = pg_id;
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
 	shap_cfg_cmd->pri_id = pri_id;
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 				kinfo->num_tqps / kinfo->num_tc);
 	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
 	vport->dwrr = 100;  /* 100 percent as init */
+	vport->alloc_rss_size = kinfo->rss_size;
 	for (i = 0; i < kinfo->num_tc; i++) {
 		if (hdev->hw_tc_map & BIT(i)) {
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
 			kinfo->tc_info[i].tc = i;
-			kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
 		} else {
 			/* Set to default queue if TC is disable */
 			kinfo->tc_info[i].enable = false;
 			kinfo->tc_info[i].tqp_offset = 0;
 			kinfo->tc_info[i].tqp_count = 1;
 			kinfo->tc_info[i].tc = 0;
-			kinfo->tc_info[i].up = 0;
 		}
 	}
+
+	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
+	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
 }
 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
 		hdev->tm_info.tc_info[i].tc_id = i;
 		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
-		hdev->tm_info.tc_info[i].up = i;
 		hdev->tm_info.tc_info[i].pgid = 0;
 		hdev->tm_info.tc_info[i].bw_limit =
 			hdev->tm_info.pg_info[0].bw_limit;
 	}
 
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		hdev->tm_info.prio_tc[i] =
+			(i >= hdev->tm_info.num_tc) ? 0 : i;
+
 	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
 	if (ret)
 		return ret;
+	/* Only DCB-supported devices support the qset back pressure setting */
+	if (!hnae3_dev_dcb_supported(hdev))
+		return 0;
+
 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
 		ret = hclge_tm_qs_bp_cfg(hdev, i);
 		if (ret)
......
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
 	u32 rsvd1;
 };
-#define hclge_tm_set_feild(dest, string, val) \
+#define hclge_tm_set_field(dest, string, val) \
 	hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
 		       (HCLGE_TM_SHAP_##string##_LSH), val)
-#define hclge_tm_get_feild(src, string) \
+#define hclge_tm_get_field(src, string) \
 	hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
 		       (HCLGE_TM_SHAP_##string##_LSH))
......
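Beyond the feild -> field spelling fix, these macros rely on token pasting to turn a field name into its mask/shift pair. A standalone sketch of the pattern; the field layout and the set_field()/get_field() helpers below are illustrative stand-ins for the kernel's hnae_set_field()/hnae_get_field(), not the driver's actual shaper encoding:

	#include <stdio.h>

	/* hypothetical shaper sub-fields: a mask and a left-shift per field */
	#define SHAP_IR_B_MSK	0x000000ff
	#define SHAP_IR_B_LSH	0
	#define SHAP_IR_U_MSK	0x00000f00
	#define SHAP_IR_U_LSH	8

	/* stand-ins for hnae_set_field()/hnae_get_field() */
	#define set_field(dest, mask, shift, val) \
		((dest) = ((dest) & ~(mask)) | (((unsigned int)(val) << (shift)) & (mask)))
	#define get_field(src, mask, shift)	(((src) & (mask)) >> (shift))

	/* the ##string## paste turns a field name into its _MSK/_LSH pair */
	#define tm_set_field(dest, string, val) \
		set_field(dest, SHAP_##string##_MSK, SHAP_##string##_LSH, val)
	#define tm_get_field(src, string) \
		get_field(src, SHAP_##string##_MSK, SHAP_##string##_LSH)

	int main(void)
	{
		unsigned int shapping_para = 0;

		tm_set_field(shapping_para, IR_B, 0x7f);
		tm_set_field(shapping_para, IR_U, 0x3);

		printf("para = 0x%08x, IR_B = 0x%x, IR_U = 0x%x\n", shapping_para,
		       tm_get_field(shapping_para, IR_B),
		       tm_get_field(shapping_para, IR_U));
		return 0;
	}

This is why the call sites in hclge_tm.c can name fields symbolically (IR_B, IR_U, BS_S, and so on) while the masks and shifts stay in one header.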
@@ -41,11 +41,16 @@ static struct hnae3_client client;
 static const struct pci_device_id hns3_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 	/* required last entry */
 	{0, }
 };
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	ae_dev->pdev = pdev;
+	ae_dev->flag = ent->driver_data;
 	ae_dev->dev_type = HNAE3_DEV_KNIC;
 	pci_set_drvdata(pdev, ae_dev);
......
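The hns3_pci_tbl change above is what feeds the new flag word: each PCI ID entry's driver_data now carries the capability bits, and hns3_probe() copies the matched entry's driver_data into ae_dev->flag, replacing the roce_pci_tbl lookup removed from hclge_main.c. A minimal standalone sketch of that flow; the structures and device IDs here are simplified stand-ins for struct pci_device_id and struct hnae3_ae_dev:

	#include <stdio.h>

	#define BIT(n)			(1UL << (n))
	#define SUPPORT_ROCE_B		0x1
	#define SUPPORT_DCB_B		0x2
	#define SUPPORT_ROCE_DCB_BITS	(BIT(SUPPORT_DCB_B) | BIT(SUPPORT_ROCE_B))

	struct id_entry {			/* stand-in for pci_device_id */
		unsigned int device;
		unsigned long driver_data;
	};

	struct ae_dev {				/* stand-in for hnae3_ae_dev */
		unsigned long flag;
	};

	static const struct id_entry tbl[] = {
		{ 0x1000, 0 },				/* plain GE: no extra caps */
		{ 0x1001, SUPPORT_ROCE_DCB_BITS },	/* RDMA: RoCE + DCB capable */
	};

	/* stand-in for the probe path: copy the matched entry's capability bits */
	static void probe(struct ae_dev *dev, const struct id_entry *ent)
	{
		dev->flag = ent->driver_data;
	}

	int main(void)
	{
		struct ae_dev dev;
		unsigned int i;

		for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
			probe(&dev, &tbl[i]);
			printf("device 0x%x: roce=%lu dcb=%lu\n", tbl[i].device,
			       (dev.flag >> SUPPORT_ROCE_B) & 1,
			       (dev.flag >> SUPPORT_DCB_B) & 1);
		}
		return 0;
	}

Keeping the capability bits in the ID table means a single probe path serves both plain and RDMA-capable parts, which is why the separate roce_pci_tbl and its pci_match_id() call could be deleted.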