Commit 9c095bd0 authored by David S. Miller

Merge branch 'hns3-next'

Guangbin Huang says:

====================
net: hns3: updates for -next

This series includes some updates for the HNS3 ethernet driver.

Change logs:
V1 -> V2:
 - Fix some sparse warnings in patches #3 and #4.
 - Add patch #6 to fix sparse warnings about an incorrect argument type.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1728c056 443edfd6
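The recurring pattern across the series is declaring mailbox wire-format fields as __le16/__le32 and converting with le16_to_cpu()/cpu_to_le16() at the point of use, which is what silences the sparse endianness warnings mentioned above. A minimal sketch of the pattern follows; the struct and helper names are illustrative, not from the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative wire-format struct: the fields are little-endian on the
 * wire, so they are declared __le16/__le32 rather than u16/u32. Running
 * sparse ("make C=1") then flags any access that skips the helpers.
 */
struct demo_mbx_msg {
	__le16 code;
	__le32 payload;
};

static u16 demo_get_code(const struct demo_mbx_msg *msg)
{
	/* no-op on little-endian hosts, a byte swap on big-endian ones */
	return le16_to_cpu(msg->code);
}

static void demo_set_code(struct demo_mbx_msg *msg, u16 code)
{
	msg->code = cpu_to_le16(code);
}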
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_PUSH_PROMISC_INFO,	/* (PF -> VF) push vf promisc info */
 	HCLGE_MBX_VF_UNINIT,		/* (VF -> PF) vf is unintializing */
 	HCLGE_MBX_HANDLE_VF_TBL,	/* (VF -> PF) store/clear hw table */
+	HCLGE_MBX_GET_RING_VECTOR_MAP,	/* (VF -> PF) get ring-to-vector map */
 
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
 	HCLGE_MBX_PUSH_LINK_STATUS,	/* (M7 -> PF) get port link status */
@@ -92,8 +93,8 @@ struct hclge_ring_chain_param {
 struct hclge_basic_info {
 	u8 hw_tc_map;
 	u8 rsv;
-	u16 mbx_api_version;
-	u32 pf_caps;
+	__le16 mbx_api_version;
+	__le32 pf_caps;
 };
 
 struct hclgevf_mbx_resp_status {
@@ -134,13 +135,13 @@ struct hclge_vf_to_pf_msg {
 };
 
 struct hclge_pf_to_vf_msg {
-	u16 code;
+	__le16 code;
 	union {
 		/* used for mbx response */
 		struct {
-			u16 vf_mbx_msg_code;
-			u16 vf_mbx_msg_subcode;
-			u16 resp_status;
+			__le16 vf_mbx_msg_code;
+			__le16 vf_mbx_msg_subcode;
+			__le16 resp_status;
 			u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
 		};
 		/* used for general mbx */
@@ -157,7 +158,7 @@ struct hclge_mbx_vf_to_pf_cmd {
 	u8 rsv1[1];
 	u8 msg_len;
 	u8 rsv2;
-	u16 match_id;
+	__le16 match_id;
 	struct hclge_vf_to_pf_msg msg;
 };
@@ -168,7 +169,7 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u8 rsv[3];
 	u8 msg_len;
 	u8 rsv1;
-	u16 match_id;
+	__le16 match_id;
 	struct hclge_pf_to_vf_msg msg;
 };
@@ -178,6 +179,49 @@ struct hclge_vf_rst_cmd {
 	u8 rsv[22];
 };
 
+#pragma pack(1)
+struct hclge_mbx_link_status {
+	__le16 link_status;
+	__le32 speed;
+	__le16 duplex;
+	u8 flag;
+};
+
+struct hclge_mbx_link_mode {
+	__le16 idx;
+	__le64 link_mode;
+};
+
+struct hclge_mbx_port_base_vlan {
+	__le16 state;
+	__le16 vlan_proto;
+	__le16 qos;
+	__le16 vlan_tag;
+};
+
+struct hclge_mbx_vf_queue_info {
+	__le16 num_tqps;
+	__le16 rss_size;
+	__le16 rx_buf_len;
+};
+
+struct hclge_mbx_vf_queue_depth {
+	__le16 num_tx_desc;
+	__le16 num_rx_desc;
+};
+
+struct hclge_mbx_vlan_filter {
+	u8 is_kill;
+	__le16 vlan_id;
+	__le16 proto;
+};
+
+struct hclge_mbx_mtu_info {
+	__le32 mtu;
+};
+
+#pragma pack()
+
 /* used by VF to store the received Async responses from PF */
 struct hclgevf_mbx_arq_ring {
 #define HCLGE_MBX_MAX_ARQ_MSG_SIZE	8
@@ -186,7 +230,7 @@ struct hclgevf_mbx_arq_ring {
 	u32 head;
 	u32 tail;
 	atomic_t count;
-	u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+	__le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
 };
 
 #define hclge_mbx_ring_ptr_move_crq(crq) \
...
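The new message structs above sit between #pragma pack(1) and #pragma pack() so their layout matches the byte-packed mailbox payload exactly. An illustration of why that matters, using a demo struct rather than one from the driver:

#include <linux/types.h>

#pragma pack(1)
struct demo_packed {
	u8 flag;	/* offset 0 */
	__le32 speed;	/* offset 1: no 3-byte alignment padding */
};
#pragma pack()

/* sizeof(struct demo_packed) == 5 here; without the pragma the compiler
 * would align speed to 4 bytes and the struct would grow to 8, shifting
 * every field relative to the on-wire message.
 */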
@@ -106,7 +106,7 @@ int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
 void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
 				  u8 *hfunc);
 void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
-				  u32 *indir, __le16 rss_ind_tbl_size);
+				  u32 *indir, u16 rss_ind_tbl_size);
 int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
 				const u8 *key);
 int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
...
@@ -1915,8 +1915,11 @@ static int hns3_set_tunable(struct net_device *netdev,
 			return ret;
 		}
 
-		netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
-			    priv->ring->tx_spare->len);
+		if (!priv->ring->tx_spare)
+			netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n");
+		else
+			netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+				    priv->ring->tx_spare->len);
 		break;
 	default:
...
@@ -1546,9 +1546,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev)
 static int hclge_configure(struct hclge_dev *hdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-	const struct cpumask *cpumask = cpu_online_mask;
 	struct hclge_cfg cfg;
-	int node, ret;
+	int ret;
 
 	ret = hclge_get_cfg(hdev, &cfg);
 	if (ret)
@@ -1594,13 +1593,6 @@ static int hclge_configure(struct hclge_dev *hdev)
 	hclge_init_tc_config(hdev);
 	hclge_init_kdump_kernel_config(hdev);
 
-	/* Set the affinity based on numa node */
-	node = dev_to_node(&hdev->pdev->dev);
-	if (node != NUMA_NO_NODE)
-		cpumask = cpumask_of_node(node);
-	cpumask_copy(&hdev->affinity_mask, cpumask);
-
 	return ret;
 }
@@ -3564,17 +3556,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
 	hdev->num_msi_used += 1;
 }
 
-static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
-{
-	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
-			      &hdev->affinity_mask);
-}
-
-static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
-{
-	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
-}
-
 static int hclge_misc_irq_init(struct hclge_dev *hdev)
 {
 	int ret;
@@ -11457,11 +11438,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
 
-	/* Setup affinity after service timer setup because add_timer_on
-	 * is called in affinity notify.
-	 */
-	hclge_misc_affinity_setup(hdev);
-
 	hclge_clear_all_event_cause(hdev);
 	hclge_clear_resetting_state(hdev);
@@ -11879,7 +11855,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_reset_vf_rate(hdev);
 	hclge_clear_vf_vlan(hdev);
-	hclge_misc_affinity_teardown(hdev);
 	hclge_state_uninit(hdev);
 	hclge_ptp_uninit(hdev);
 	hclge_uninit_rxd_adv_layout(hdev);
...
@@ -780,8 +780,8 @@ struct hclge_vf_vlan_cfg {
 	union {
 		struct {
 			u8 is_kill;
-			u16 vlan;
-			u16 proto;
+			__le16 vlan;
+			__le16 proto;
 		};
 		u8 enable;
 	};
@@ -938,8 +938,6 @@ struct hclge_dev {
 	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
 		      HCLGE_MAC_TNL_LOG_SIZE);
 
-	/* affinity mask and notify for misc interrupt */
-	cpumask_t affinity_mask;
-
 	struct hclge_ptp *ptp;
 	struct devlink *devlink;
 	struct hclge_comm_rss_cfg rss_cfg;
...
@@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
 	TP_fast_assign(
 		__entry->vfid = req->dest_vfid;
-		__entry->code = req->msg.code;
+		__entry->code = le16_to_cpu(req->msg.code);
 		__assign_str(pciname, pci_name(hdev->pdev));
 		__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
 		memcpy(__entry->mbx_data, req,
...
@@ -189,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
 	basic_info = (struct hclge_basic_info *)resp_msg;
 
 	hdev->hw_tc_map = basic_info->hw_tc_map;
-	hdev->mbx_api_version = basic_info->mbx_api_version;
-	caps = basic_info->pf_caps;
+	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
+	caps = le32_to_cpu(basic_info->pf_caps);
 	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
@@ -223,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_RSS_INFO_LEN	6
-#define HCLGEVF_TQPS_ALLOC_OFFSET	0
-#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
-#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4
 
+	struct hclge_mbx_vf_queue_info *queue_info;
 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	int status;
@@ -241,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 		return status;
 	}
 
-	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
-	       sizeof(u16));
+	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
+	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
+	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
+	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
 
 	return 0;
 }
@@ -254,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
-#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
-#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2
 
+	struct hclge_mbx_vf_queue_depth *queue_depth;
 	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;
@@ -271,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
-	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
-	       sizeof(u16));
-	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
-	       sizeof(u16));
+	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
+	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
+	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
 
 	return 0;
 }
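The two hunks above drop the per-field memcpy()s from magic offsets in favour of overlaying a packed struct on the response buffer. Besides being shorter, the struct form is endian-correct: copying raw little-endian bytes straight into a u16 gives the wrong value on big-endian hosts. A sketch of the pattern with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

#pragma pack(1)
struct demo_queue_info {
	__le16 num_tqps;
	__le16 rss_size;
	__le16 rx_buf_len;
};
#pragma pack()

static void demo_parse_queue_info(const u8 *resp_msg,
				  u16 *num_tqps, u16 *rss_size)
{
	/* resp_msg holds the 6 payload bytes exactly as the PF sent them */
	const struct demo_queue_info *info =
		(const struct demo_queue_info *)resp_msg;

	*num_tqps = le16_to_cpu(info->num_tqps);
	*rss_size = le16_to_cpu(info->rss_size);
}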
@@ -288,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
 	int ret;
 
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
-	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
 				   sizeof(resp_data));
 	if (!ret)
-		qid_in_pf = *(u16 *)resp_data;
+		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
 
 	return qid_in_pf;
 }
@@ -1245,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 				   __be16 proto, u16 vlan_id,
 				   bool is_kill)
 {
-#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
-#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
-#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3
-
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_mbx_vlan_filter *vlan_filter;
 	struct hclge_vf_to_pf_msg send_msg;
 	int ret;
@@ -1271,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_VLAN_FILTER);
-	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
-	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
-	       sizeof(vlan_id));
-	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
-	       sizeof(proto));
+	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
+	vlan_filter->is_kill = is_kill;
+	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
+	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
 
 	/* when remove hw vlan filter failed, record the vlan id,
 	 * and try to remove it from hw later, to be consistence
 	 * with stack.
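The proto conversion above is the one subtle line in this hunk: the VLAN protocol arrives from the networking stack in big-endian (__be16), while the mailbox payload is little-endian, hence the back-to-back conversions. A small illustration (the helper is assumed, not driver code):

#include <linux/types.h>
#include <asm/byteorder.h>

static __le16 demo_proto_to_wire(__be16 proto)
{
	/* e.g. proto = htons(ETH_P_8021Q) is stored as bytes 0x81 0x00;
	 * be16_to_cpu() yields 0x8100 in CPU order, and cpu_to_le16()
	 * re-encodes it as bytes 0x00 0x81 for the mailbox.
	 */
	return cpu_to_le16(be16_to_cpu(proto));
}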
@@ -1347,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
 	for (i = 1; i < handle->kinfo.num_tqps; i++) {
 		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
-		memcpy(send_msg.data, &i, sizeof(i));
+		*(__le16 *)send_msg.data = cpu_to_le16(i);
 		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 		if (ret)
 			return ret;
@@ -1359,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclge_mbx_mtu_info *mtu_info;
 	struct hclge_vf_to_pf_msg send_msg;
 
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
-	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
+	mtu_info->mtu = cpu_to_le32(new_mtu);
+
 	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
 }
@@ -3333,7 +3327,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 }
 
 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
-					u8 *port_base_vlan_info, u8 data_size)
+					struct hclge_mbx_port_base_vlan *port_base_vlan)
 {
 	struct hnae3_handle *nic = &hdev->nic;
 	struct hclge_vf_to_pf_msg send_msg;
@@ -3358,7 +3352,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
 	/* send msg to PF and wait update port based vlan info */
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
-	memcpy(send_msg.data, port_base_vlan_info, data_size);
+	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 	if (!ret) {
 		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
...
@@ -293,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
-					u8 *port_base_vlan_info, u8 data_size);
+					struct hclge_mbx_port_base_vlan *port_base_vlan);
 #endif
...
@@ -124,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
 	if (need_resp) {
 		mutex_lock(&hdev->mbx_resp.mbx_mutex);
 		hclgevf_reset_mbx_resp_status(hdev);
-		req->match_id = hdev->mbx_resp.match_id;
+		req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 		if (status) {
 			dev_err(&hdev->pdev->dev,
@@ -162,27 +162,29 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
 static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
 					struct hclge_mbx_pf_to_vf_cmd *req)
 {
+	u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
+	u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
 	struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+	u16 resp_status = le16_to_cpu(req->msg.resp_status);
+	u16 match_id = le16_to_cpu(req->match_id);
 
 	if (resp->received_resp)
 		dev_warn(&hdev->pdev->dev,
 			 "VF mbx resp flag not clear(%u)\n",
-			 req->msg.vf_mbx_msg_code);
+			 vf_mbx_msg_code);
 
-	resp->origin_mbx_msg =
-		(req->msg.vf_mbx_msg_code << 16);
-	resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
-	resp->resp_status =
-		hclgevf_resp_to_errno(req->msg.resp_status);
+	resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
+	resp->origin_mbx_msg |= vf_mbx_msg_subcode;
+	resp->resp_status = hclgevf_resp_to_errno(resp_status);
 	memcpy(resp->additional_info, req->msg.resp_data,
 	       HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
 
-	if (req->match_id) {
+	if (match_id) {
 		/* If match_id is not zero, it means PF support match_id.
 		 * if the match_id is right, VF get the right response, or
 		 * ignore the response. and driver will clear hdev->mbx_resp
 		 * when send next message which need response.
 		 */
-		if (req->match_id == resp->match_id)
+		if (match_id == resp->match_id)
 			resp->received_resp = true;
 	} else {
 		resp->received_resp = true;
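For context, the match_id handshake that gains the le16_to_cpu() conversions above pairs each response with the request it answers. A condensed, hypothetical sketch of that logic:

#include <linux/types.h>

struct demo_resp_state {
	u16 match_id;		/* id stamped on the pending request */
	bool received_resp;
};

static void demo_accept_response(struct demo_resp_state *resp, u16 match_id)
{
	if (match_id) {
		/* PF supports matching: accept only the response whose id
		 * pairs with our outstanding request; stale responses for
		 * timed-out requests are ignored.
		 */
		if (match_id == resp->match_id)
			resp->received_resp = true;
	} else {
		/* a legacy PF sends match_id 0: accept unconditionally */
		resp->received_resp = true;
	}
}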
@@ -199,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
 	    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
 		dev_warn(&hdev->pdev->dev,
 			 "Async Q full, dropping msg(%u)\n",
-			 req->msg.code);
+			 le16_to_cpu(req->msg.code));
 		return;
 	}
@@ -218,6 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 	struct hclge_comm_cmq_ring *crq;
 	struct hclge_desc *desc;
 	u16 flag;
+	u16 code;
 
 	crq = &hdev->hw.hw.cmq.crq;
@@ -232,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+		code = le16_to_cpu(req->msg.code);
 		if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
 			dev_warn(&hdev->pdev->dev,
 				 "dropped invalid mailbox message, code = %u\n",
-				 req->msg.code);
+				 code);
 
 			/* dropping/not processing this invalid message */
 			crq->desc[crq->next_to_use].flag = 0;
@@ -251,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		 * timeout and simultaneously queue the async messages for later
 		 * prcessing in context of mailbox task i.e. the slow path.
 		 */
-		switch (req->msg.code) {
+		switch (code) {
 		case HCLGE_MBX_PF_VF_RESP:
 			hclgevf_handle_mbx_response(hdev, req);
 			break;
@@ -265,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		default:
 			dev_err(&hdev->pdev->dev,
 				"VF received unsupported(%u) mbx msg from PF\n",
-				req->msg.code);
 			break;
 		}
 		crq->desc[crq->next_to_use].flag = 0;
@@ -287,14 +291,18 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
+	struct hclge_mbx_port_base_vlan *vlan_info;
+	struct hclge_mbx_link_status *link_info;
+	struct hclge_mbx_link_mode *link_mode;
 	enum hnae3_reset_type reset_type;
 	u16 link_status, state;
-	u16 *msg_q, *vlan_info;
+	__le16 *msg_q;
+	u16 opcode;
 	u8 duplex;
 	u32 speed;
 	u32 tail;
 	u8 flag;
-	u8 idx;
+	u16 idx;
 
 	tail = hdev->arq.tail;
@@ -308,13 +316,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 		}
 
 		msg_q = hdev->arq.msg_q[hdev->arq.head];
+		opcode = le16_to_cpu(msg_q[0]);
 
-		switch (msg_q[0]) {
+		switch (opcode) {
 		case HCLGE_MBX_LINK_STAT_CHANGE:
-			link_status = msg_q[1];
-			memcpy(&speed, &msg_q[2], sizeof(speed));
-			duplex = (u8)msg_q[4];
-			flag = (u8)msg_q[5];
+			link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
+			link_status = le16_to_cpu(link_info->link_status);
+			speed = le32_to_cpu(link_info->speed);
+			duplex = (u8)le16_to_cpu(link_info->duplex);
+			flag = link_info->flag;
 
 			/* update upper layer with new link link status */
 			hclgevf_update_speed_duplex(hdev, speed, duplex);
@@ -326,13 +335,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			break;
 		case HCLGE_MBX_LINK_STAT_MODE:
-			idx = (u8)msg_q[1];
+			link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
+			idx = le16_to_cpu(link_mode->idx);
 			if (idx)
-				memcpy(&hdev->hw.mac.supported, &msg_q[2],
-				       sizeof(unsigned long));
+				hdev->hw.mac.supported =
+					le64_to_cpu(link_mode->link_mode);
 			else
-				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
-				       sizeof(unsigned long));
+				hdev->hw.mac.advertising =
+					le64_to_cpu(link_mode->link_mode);
 			break;
 		case HCLGE_MBX_ASSERTING_RESET:
 			/* PF has asserted reset hence VF should go in pending
@@ -340,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			 * has been completely reset. After this stack should
 			 * eventually be re-initialized.
 			 */
-			reset_type = (enum hnae3_reset_type)msg_q[1];
+			reset_type =
+				(enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
 			set_bit(reset_type, &hdev->reset_pending);
 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
 			hclgevf_reset_task_schedule(hdev);
 
 			break;
 		case HCLGE_MBX_PUSH_VLAN_INFO:
-			state = msg_q[1];
-			vlan_info = &msg_q[1];
+			vlan_info =
+				(struct hclge_mbx_port_base_vlan *)(msg_q + 1);
+			state = le16_to_cpu(vlan_info->state);
 			hclgevf_update_port_base_vlan_info(hdev, state,
-							   (u8 *)vlan_info, 8);
+							   vlan_info);
 			break;
 		case HCLGE_MBX_PUSH_PROMISC_INFO:
-			hclgevf_parse_promisc_info(hdev, msg_q[1]);
+			hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
 			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"fetched unsupported(%u) message from arq\n",
-				msg_q[0]);
+				opcode);
 			break;
 		}
...
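The async handler above shows the end-to-end effect of the series: the ARQ now stores raw __le16 words, word 0 carries the opcode, and each case overlays the matching packed struct on the payload starting at word 1. A condensed sketch for the link-status case, with assumed names:

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/byteorder.h>

#pragma pack(1)
struct demo_link_status {
	__le16 link_status;
	__le32 speed;
	__le16 duplex;
	u8 flag;
};
#pragma pack()

static void demo_handle_link_change(const __le16 *msg_q)
{
	/* the payload begins one 16-bit word past the opcode */
	const struct demo_link_status *info =
		(const struct demo_link_status *)(msg_q + 1);
	u16 link_up = le16_to_cpu(info->link_status);
	u32 speed = le32_to_cpu(info->speed);
	u8 duplex = (u8)le16_to_cpu(info->duplex);

	pr_info("link %u, speed %u, duplex %u, flag %u\n",
		link_up, speed, duplex, info->flag);
}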
@@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
 	TP_fast_assign(
 		__entry->vfid = req->dest_vfid;
-		__entry->code = req->msg.code;
+		__entry->code = le16_to_cpu(req->msg.code);
 		__assign_str(pciname, pci_name(hdev->pdev));
 		__assign_str(devname, &hdev->nic.kinfo.netdev->name);
 		memcpy(__entry->mbx_data, req,
...