Commit 818f1675 authored by Yunsheng Lin's avatar Yunsheng Lin Committed by David S. Miller

net: hns3: Add mtu setting support for vf

The patch adds MTU setting support for the VF; currently
the VF and PF share the same hardware MTU setting. The MTU set
by a VF must be less than or equal to the PF's MTU, and the MTU
set by the PF must be greater than or equal to every VF's MTU.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a6d818e3
...@@ -38,6 +38,7 @@ enum HCLGE_MBX_OPCODE { ...@@ -38,6 +38,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
}; };
/* below are per-VF mac-vlan subcodes */ /* below are per-VF mac-vlan subcodes */
......
...@@ -1166,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) ...@@ -1166,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) { for (i = 0; i < num_vport; i++) {
vport->back = hdev; vport->back = hdev;
vport->vport_id = i; vport->vport_id = i;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
if (i == 0) if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport); ret = hclge_vport_setup(vport, tqp_main_vport);
...@@ -2921,6 +2922,10 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev) ...@@ -2921,6 +2922,10 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* If vf is not alive, set to default value */
if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
} }
} }
...@@ -6400,8 +6405,6 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) ...@@ -6400,8 +6405,6 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
struct hclge_config_max_frm_size_cmd *req; struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc; struct hclge_desc desc;
new_mps = max(new_mps, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data; req = (struct hclge_config_max_frm_size_cmd *)desc.data;
...@@ -6414,28 +6417,56 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) ...@@ -6414,28 +6417,56 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
return hclge_set_vport_mtu(vport, new_mtu);
}
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
int max_frm_size, ret; int i, max_frm_size, ret = 0;
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME || if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME) max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL; return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
if (vport->vport_id && max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
vport->mps = max_frm_size;
mutex_unlock(&hdev->vport_lock);
return 0;
}
/* PF's mps must be greater than or equal to every VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
ret = hclge_set_mac_mtu(hdev, max_frm_size); ret = hclge_set_mac_mtu(hdev, max_frm_size);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret); "Change mtu fail, ret =%d\n", ret);
return ret; goto out;
} }
hdev->mps = max_frm_size; hdev->mps = max_frm_size;
vport->mps = max_frm_size;
ret = hclge_buffer_alloc(hdev); ret = hclge_buffer_alloc(hdev);
if (ret) if (ret)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret); "Allocate buffer fail, ret =%d\n", ret);
out:
mutex_unlock(&hdev->vport_lock);
return ret; return ret;
} }
...@@ -7054,6 +7085,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -7054,6 +7085,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ae_dev->priv = hdev; ae_dev->priv = hdev;
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
ret = hclge_pci_init(hdev); ret = hclge_pci_init(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "PCI init failed\n"); dev_err(&pdev->dev, "PCI init failed\n");
...@@ -7353,6 +7386,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -7353,6 +7386,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_destroy_cmd_queue(&hdev->hw); hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev); hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev); hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL; ae_dev->priv = NULL;
} }
......
...@@ -678,6 +678,8 @@ struct hclge_dev { ...@@ -678,6 +678,8 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */ u32 mps; /* Max packet size */
/* vport_lock protect resource shared by vports */
struct mutex vport_lock;
struct hclge_vlan_type_cfg vlan_type_cfg; struct hclge_vlan_type_cfg vlan_type_cfg;
...@@ -761,6 +763,7 @@ struct hclge_vport { ...@@ -761,6 +763,7 @@ struct hclge_vport {
unsigned long state; unsigned long state;
unsigned long last_active_jiffies; unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
}; };
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
...@@ -810,4 +813,5 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev); ...@@ -810,4 +813,5 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport); int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport); void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
#endif #endif
...@@ -401,6 +401,18 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport, ...@@ -401,6 +401,18 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport,
vport->last_active_jiffies = jiffies; vport->last_active_jiffies = jiffies;
} }
/* Handle the HCLGE_MBX_SET_MTU mailbox message sent by a VF.
 *
 * Extracts the requested MTU from the message payload, applies it via
 * hclge_set_vport_mtu() (which enforces that a VF's MTU stays within
 * the PF's MTU), and sends the resulting status code back to the VF.
 *
 * Returns the status of sending the mailbox response, not of the MTU
 * change itself; the MTU result is carried inside the response.
 */
static int hclge_set_vf_mtu(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
int ret;
u32 mtu;

/* The 32-bit MTU value starts at msg[2]; copied with memcpy since the
 * payload byte offset is presumably not guaranteed to be 4-byte
 * aligned — NOTE(review): confirm against the mailbox message layout.
 */
memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
ret = hclge_set_vport_mtu(vport, mtu);

/* Forward the MTU-change status (0 or a negative errno) to the VF */
return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw) static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{ {
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
...@@ -515,6 +527,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -515,6 +527,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_KEEP_ALIVE: case HCLGE_MBX_KEEP_ALIVE:
hclge_vf_keep_alive(vport, req); hclge_vf_keep_alive(vport, req);
break; break;
case HCLGE_MBX_SET_MTU:
ret = hclge_set_vf_mtu(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"VF fail(%d) to set mtu\n", ret);
break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n", "un-supported mailbox message, code = %d\n",
......
...@@ -1081,6 +1081,14 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -1081,6 +1081,14 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
2, true, NULL, 0); 2, true, NULL, 0);
} }
/* VF implementation of the .set_mtu ae_ops hook.
 *
 * The VF cannot program the MAC maximum frame size itself, so the
 * request is forwarded to the PF as an HCLGE_MBX_SET_MTU mailbox
 * message carrying the raw new_mtu value; the call waits (need_resp =
 * true) for the PF's accept/reject status and returns it to the caller.
 */
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
sizeof(new_mtu), true, NULL, 0);
}
static int hclgevf_notify_client(struct hclgevf_dev *hdev, static int hclgevf_notify_client(struct hclgevf_dev *hdev,
enum hnae3_reset_notify_type type) enum hnae3_reset_notify_type type)
{ {
...@@ -2513,6 +2521,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { ...@@ -2513,6 +2521,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.ae_dev_resetting = hclgevf_ae_dev_resetting, .ae_dev_resetting = hclgevf_ae_dev_resetting,
.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
.set_gro_en = hclgevf_gro_en, .set_gro_en = hclgevf_gro_en,
.set_mtu = hclgevf_set_mtu,
}; };
static struct hnae3_ae_algo ae_algovf = { static struct hnae3_ae_algo ae_algovf = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment