Commit 84e095d6 authored by Salil Mehta, committed by David S. Miller

net: hns3: Change PF to add ring-vect binding & resetQ to mailbox

This patch adds PF-side support for the ring-vector binding and TQP
reset requests that the VF driver sends to the PF driver. The mailbox
handler gains the corresponding VF commands/messages to service these
requests.
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dde1a86e
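
Before the diff: the VF-to-PF mailbox message for ring-vector (un)mapping has a fixed layout that the new PF-side parser, hclge_get_ring_chain_from_mbx() (see the hclge_mbx.c hunk below), walks with a stride of 3 bytes per ring node. The following is an illustrative sketch of how a sender could pack such a message to match that parser; it is not code from the patch, and the example_* names and the helper itself are hypothetical.

#include <linux/types.h>

#define EXAMPLE_NODE_STRIDE	3	/* mirrors HCLGE_RING_NODE_VARIABLE_NUM */
#define EXAMPLE_BASE_MSG_NUM	3	/* mirrors HCLGE_RING_MAP_MBX_BASIC_MSG_NUM */

struct example_ring_node {
	u8 ring_type;	/* TX or RX, consumed via HNAE3_RING_TYPE_B */
	u8 tqp_index;	/* queue index within the VF */
};

/* Pack a map/unmap request; the caller must ensure msg[] is large
 * enough for 3 header bytes plus 3 bytes per ring node.
 */
static void example_pack_map_request(u8 *msg, u8 opcode, u8 vector_id,
				     const struct example_ring_node *nodes,
				     u8 ring_num)
{
	int i;

	msg[0] = opcode;	/* e.g. HCLGE_MBX_MAP_RING_TO_VECTOR */
	msg[1] = vector_id;	/* read by hclge_map_unmap_ring_to_vf_vector() */
	msg[2] = ring_num;	/* number of ring nodes in the chain */

	/* First node sits at msg[3]/msg[4]; later nodes follow at a
	 * stride of 3, mirroring the index arithmetic in
	 * hclge_get_ring_chain_from_mbx().
	 */
	msg[3] = nodes[0].ring_type;
	msg[4] = nodes[0].tqp_index;
	for (i = 1; i < ring_num; i++) {
		msg[EXAMPLE_NODE_STRIDE * i + EXAMPLE_BASE_MSG_NUM] =
			nodes[i].ring_type;
		msg[EXAMPLE_NODE_STRIDE * i + EXAMPLE_BASE_MSG_NUM + 1] =
			nodes[i].tqp_index;
	}
}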
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3256,49 +3256,48 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
 	return ret;
 }
 
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
-				   struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+				int vector_id, bool en,
+				struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_ctrl_vector_chain_cmd *req;
 	struct hnae3_ring_chain_node *node;
 	struct hclge_desc desc;
-	int ret;
+	struct hclge_ctrl_vector_chain_cmd *req
+		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	enum hclge_cmd_status status;
+	enum hclge_opcode_type op;
+	u16 tqp_type_and_id;
 	int i;
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
-	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+	hclge_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
 	i = 0;
 	for (node = ring_chain; node; node = node->next) {
-		u16 type_and_id = 0;
-
-		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+		hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+			       HCLGE_INT_TYPE_S,
 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
-			       node->tqp_index);
-		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
-			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
-		req->vfid = vport->vport_id;
+		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+			       HCLGE_TQP_ID_S, node->tqp_index);
+		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
 
 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+			req->vfid = vport->vport_id;
 
-			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-			if (ret) {
+			status = hclge_cmd_send(&hdev->hw, &desc, 1);
+			if (status) {
 				dev_err(&hdev->pdev->dev,
 					"Map TQP fail, status is %d.\n",
-					ret);
-				return ret;
+					status);
+				return -EIO;
 			}
 			i = 0;
 			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_ADD_RING_TO_VECTOR,
+						   op,
 						   false);
 			req->int_vector_id = vector_id;
 		}
 	}
 
@@ -3306,21 +3305,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
 	if (i > 0) {
 		req->int_cause_num = i;
+		req->vfid = vport->vport_id;
 
-		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
+		status = hclge_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
 			dev_err(&hdev->pdev->dev,
-				"Map TQP fail, status is %d.\n", ret);
-			return ret;
+				"Map TQP fail, status is %d.\n", status);
+			return -EIO;
 		}
 	}
 
 	return 0;
 }
 
-static int hclge_map_handle_ring_to_vector(
-		struct hnae3_handle *handle, int vector,
-		struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+				    int vector,
+				    struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -3329,24 +3328,20 @@ static int hclge_map_handle_ring_to_vector(
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
 		dev_err(&hdev->pdev->dev,
-			"Get vector index fail. ret =%d\n", vector_id);
+			"Get vector index fail. vector_id =%d\n", vector_id);
 		return vector_id;
 	}
 
-	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
 }
 
-static int hclge_unmap_ring_from_vector(
-		struct hnae3_handle *handle, int vector,
-		struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+				       int vector,
+				       struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_ctrl_vector_chain_cmd *req;
-	struct hnae3_ring_chain_node *node;
-	struct hclge_desc desc;
-	int i, vector_id;
-	int ret;
+	int vector_id, ret;
 
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
@@ -3355,54 +3350,17 @@ static int hclge_unmap_ring_from_vector(
 		return vector_id;
 	}
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
-	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
-	req->int_vector_id = vector_id;
-
-	i = 0;
-	for (node = ring_chain; node; node = node->next) {
-		u16 type_and_id = 0;
-
-		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
-			       node->tqp_index);
-		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
-			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
-		req->vfid = vport->vport_id;
-
-		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
-			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
-			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-			if (ret) {
-				dev_err(&hdev->pdev->dev,
-					"Unmap TQP fail, status is %d.\n",
-					ret);
-				return ret;
-			}
-			i = 0;
-			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_DEL_RING_TO_VECTOR,
-						   false);
-			req->int_vector_id = vector_id;
-		}
-	}
-
-	if (i > 0) {
-		req->int_cause_num = i;
-
-		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"Unmap TQP fail, status is %d.\n", ret);
-			return ret;
-		}
-	}
+	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+	if (ret) {
+		dev_err(&handle->pdev->dev,
+			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+			vector_id,
+			ret);
+		return ret;
+	}
 
+	/* Free this MSIX or MSI vector */
+	hclge_free_vector(hdev, vector_id);
 
 	return 0;
 }
@@ -4423,7 +4381,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
 	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -4995,8 +4953,8 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.uninit_ae_dev = hclge_uninit_ae_dev,
 	.init_client_instance = hclge_init_client_instance,
 	.uninit_client_instance = hclge_uninit_client_instance,
-	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
-	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+	.map_ring_to_vector = hclge_map_ring_to_vector,
+	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
 	.get_vector = hclge_get_vector,
 	.set_promisc_mode = hclge_set_promisc_mode,
 	.set_loopback = hclge_set_loopback,
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -539,8 +539,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
 			      u8 func_id,
 			      bool enable);
 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
-				   struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+				int vector_id, bool en,
+				struct hnae3_ring_chain_node *ring_chain);
 
 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
 {
 	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -556,4 +558,5 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
 void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
 #endif
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,6 +79,91 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
 	return status;
 }
 
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+	struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+	chain = head->next;
+
+	while (chain) {
+		chain_tmp = chain->next;
+		kzfree(chain);
+		chain = chain_tmp;
+	}
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5] ~ msg[14]: other ring type and tqp id
+ */
+static int hclge_get_ring_chain_from_mbx(
+			struct hclge_mbx_vf_to_pf_cmd *req,
+			struct hnae3_ring_chain_node *ring_chain,
+			struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM		3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM	3
+	struct hnae3_ring_chain_node *cur_chain, *new_chain;
+	int ring_num;
+	int i;
+
+	ring_num = req->msg[2];
+
+	hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+	ring_chain->tqp_index =
+			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+
+	cur_chain = ring_chain;
+
+	for (i = 1; i < ring_num; i++) {
+		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+		if (!new_chain)
+			goto err;
+
+		hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+			     req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+				      HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+		new_chain->tqp_index =
+		hclge_get_queue_id(vport->nic.kinfo.tqp
+			[req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			 HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+		cur_chain->next = new_chain;
+		cur_chain = new_chain;
+	}
+
+	return 0;
+err:
+	hclge_free_vector_ring_chain(ring_chain);
+	return -ENOMEM;
+}
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+					     struct hclge_mbx_vf_to_pf_cmd *req)
+{
+	struct hnae3_ring_chain_node ring_chain;
+	int vector_id = req->msg[1];
+	int ret;
+
+	memset(&ring_chain, 0, sizeof(ring_chain));
+	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+	if (ret)
+		return ret;
+
+	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+	if (ret)
+		return ret;
+
+	hclge_free_vector_ring_chain(&ring_chain);
+
+	return 0;
+}
+
 static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
 				     struct hclge_mbx_vf_to_pf_cmd *req)
 {
@@ -224,6 +309,16 @@ static int hclge_get_link_info(struct hclge_vport *vport,
 				    HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
 }
 
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	u16 queue_id;
+
+	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+	hclge_reset_tqp(&vport->nic, queue_id);
+}
+
 void hclge_mbx_handler(struct hclge_dev *hdev)
 {
 	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -241,6 +336,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		vport = &hdev->vport[req->mbx_src_vfid];
 
 		switch (req->msg[0]) {
+		case HCLGE_MBX_MAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+								req);
+			break;
+		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+								req);
+			break;
 		case HCLGE_MBX_SET_PROMISC_MODE:
 			ret = hclge_set_vf_promisc_mode(vport, req);
 			if (ret)
@@ -290,6 +393,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 				"PF fail(%d) to get link stat for VF\n",
 				ret);
 			break;
+		case HCLGE_MBX_QUEUE_RESET:
+			hclge_reset_vf_queue(vport, req);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
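
A note on the queue-reset path: hclge_reset_vf_queue() above recovers the queue id by memcpy()ing a u16 straight out of msg[2], i.e. the id travels in native byte order starting at the third message byte. A minimal, hypothetical sketch of the matching sender-side packing (example_* names are not from the patch):

#include <linux/string.h>
#include <linux/types.h>

/* Pack a VF queue-reset request so the PF's hclge_reset_vf_queue()
 * can memcpy() the u16 queue id back out of msg[2..3]. Native byte
 * order on both sides, matching the memcpy() in the patch.
 */
static void example_pack_queue_reset(u8 *msg, u16 queue_id)
{
	msg[0] = HCLGE_MBX_QUEUE_RESET;			/* mailbox opcode */
	memcpy(&msg[2], &queue_id, sizeof(queue_id));	/* queue id at msg[2..3] */
}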