Commit 3a678b58 authored by Xi Wang, committed by David S. Miller

net: hns3: Optimize the VF's process of updating multicast MAC

In the update flow of the new PF driver, if a multicast address is already in
the MTA table, a deletion issued by the VF will not take effect.

This patch adapts the VF to the new flow of the PF driver.
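With this change the VF first asks the PF for its MTA hash-selection type
(HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) and then pushes the status of its whole
4096-entry MTA table as a bitmap (HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE),
instead of adding and removing single entries. As an illustration, below is a
minimal standalone sketch of the per-address index calculation, mirroring
hclgevf_get_mac_addr_to_mta_index() in the diff; the sample MAC address and
the user-space wrapper are illustrative only, not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define MTA_TYPE_SEL_MAX 4	/* same value as HCLGEVF_MTA_TYPE_SEL_MAX */

/* Map a multicast MAC address to a 12-bit index into the 4096-entry MTA
 * table: take the top 16 bits of the address and right-shift them by
 * (MTA_TYPE_SEL_MAX - sel_type), where sel_type (0..4) is the hash type
 * the PF reports in response to HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ.
 */
static uint16_t mac_to_mta_index(const uint8_t *addr, uint8_t sel_type)
{
	uint32_t rsh = MTA_TYPE_SEL_MAX - sel_type;
	uint16_t high_val = addr[1] | (addr[0] << 8);

	return (high_val >> rsh) & 0xfff;
}

int main(void)
{
	/* illustrative multicast MAC: IPv4 all-hosts group 224.0.0.1 */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* with sel_type 0 the shift is 4, so 0x0100 >> 4 = 0x010 = 16 */
	printf("MTA index: %u\n", (unsigned int)mac_to_mta_index(mac, 0));
	return 0;
}

With sel_type 4 the shift is zero and the low 12 bits of the top half of the
address are used directly; smaller types discard low-order bits first, which
is why the VF must learn the type from the PF before computing indices.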
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Reviewed-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 40cca1c5
@@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
...
@@ -231,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
u8 *msg, u8 idx, bool is_end)
{
#define HCLGE_MTA_STATUS_MSG_SIZE 13
#define HCLGE_MTA_STATUS_MSG_BITS \
(HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGE_MTA_STATUS_MSG_END_BITS \
(HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
u16 tbl_cnt;
u16 tbl_idx;
u8 msg_ofs;
u8 msg_bit;
tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
HCLGE_MTA_STATUS_MSG_BITS;
/* set msg field */
msg_ofs = 0;
msg_bit = 0;
memset(status, 0, sizeof(status));
for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
if (msg[msg_ofs] & BIT(msg_bit))
set_bit(tbl_idx, status);
msg_bit++;
if (msg_bit == BITS_PER_BYTE) {
msg_bit = 0;
msg_ofs++;
}
}
return hclge_update_mta_status_common(vport,
status, idx * HCLGE_MTA_STATUS_MSG_BITS,
tbl_cnt, is_end);
}
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
u8 resp_len = 0;
u8 resp_data;
int status;
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
@@ -248,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
bool enable = mbx_req->msg[2];
status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
resp_data = hdev->mta_mac_sel_type;
resp_len = sizeof(u8);
gen_resp = true;
status = 0;
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
/* mta status update msg format
* msg[2.6 : 2.0] msg index
* msg[2.7] msg is end
* msg[15 : 3] mta status bits[103 : 0]
*/
bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
mbx_req->msg[2] & 0x7F,
is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
@@ -256,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
}
if (gen_resp)
-		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+		hclge_gen_resp_to_vf(vport, mbx_req, status,
+				     &resp_data, resp_len);
return 0;
}
...
@@ -739,6 +739,126 @@ static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
msg, 1, false, NULL, 0);
}
static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
int ret;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
NULL, 0, true, &resp_msg, sizeof(u8));
if (ret) {
dev_err(&hdev->pdev->dev,
"Read mta type fail, ret=%d.\n", ret);
return ret;
}
if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
dev_err(&hdev->pdev->dev,
"Read mta type invalid, resp=%d.\n", resp_msg);
return -EINVAL;
}
hdev->mta_mac_sel_type = resp_msg;
return 0;
}
static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
const u8 *addr)
{
u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
u16 high_val = addr[1] | (addr[0] << 8);
return (high_val >> rsh) & 0xfff;
}
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
u16 tbl_cnt;
u16 tbl_idx;
u8 msg_cnt;
u8 msg_idx;
int ret;
msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
HCLGEVF_MTA_STATUS_MSG_BITS);
tbl_idx = 0;
msg_idx = 0;
while (msg_cnt--) {
u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
u8 *p = &msg[1];
u8 msg_ofs;
u8 msg_bit;
memset(msg, 0, sizeof(msg));
/* set index field */
msg[0] = 0x7F & msg_idx;
/* set end flag field */
if (msg_cnt == 0) {
msg[0] |= 0x80;
tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
} else {
tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
}
/* set status field */
msg_ofs = 0;
msg_bit = 0;
while (tbl_cnt--) {
if (test_bit(tbl_idx, status))
p[msg_ofs] |= BIT(msg_bit);
tbl_idx++;
msg_bit++;
if (msg_bit == BITS_PER_BYTE) {
msg_bit = 0;
msg_ofs++;
}
}
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
msg, sizeof(msg), false, NULL, 0);
if (ret)
break;
msg_idx++;
}
return ret;
}
static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct net_device *netdev = hdev->nic.kinfo.netdev;
struct netdev_hw_addr *ha;
u16 tbl_idx;
/* clear status */
memset(mta_status, 0, sizeof(mta_status));
/* update status from mc addr list */
netdev_for_each_mc_addr(ha, netdev) {
tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
set_bit(tbl_idx, mta_status);
}
return hclgevf_do_update_mta_status(hdev, mta_status);
}
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1669,12 +1789,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
-	/* Initialize VF's MTA */
-	hdev->accept_mta_mc = true;
-	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+	/* Initialize mta type for this VF */
+	ret = hclgevf_cfg_func_mta_type(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
-		"failed(%d) to set mta filter mode\n", ret);
+		"failed(%d) to initialize MTA type\n", ret);
goto err_config;
}
@@ -1829,6 +1948,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
.update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
...
@@ -48,6 +48,9 @@
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
#define HCLGEVF_MTA_TBL_SIZE 4096
#define HCLGEVF_MTA_TYPE_SEL_MAX 4
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
@@ -152,6 +155,7 @@ struct hclgevf_dev {
int *vector_irq;
bool accept_mta_mc; /* whether to accept mta filter multicast */
u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
...
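Note on the new status-update message: each HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE
message carries HCLGEVF_MTA_STATUS_MSG_SIZE = 13 bytes of status, i.e.
13 * 8 = 104 table bits, so the 4096-entry MTA table is pushed in
DIV_ROUND_UP(4096, 104) = 40 mailbox messages. On the PF side, bits 6:0 of
msg[2] hold the message index, bit 7 of msg[2] flags the final message, and
that final message carries only the remaining 4096 % 104 = 40 bits.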