Commit 028ba8aa authored by David S. Miller

Merge branch 'qed-management-interaction-and-feature-changes'

Yuval Mintz says:

====================
qed: Management interaction & feature changes

All patches in this series either affect direct interaction with the
management firmware, or change logic relating to some values retrieved
from it.

Patch #1 revises the basic logic for sending messages to the management
firmware and handling their completion, and is the most significant [at
least code-wise] of the bunch.

Patch #2 changes infrastructure in a way that should better protect us
from mistakes leading to stack corruption such as the one fixed in
bb480242 ("qed: Prevent stack corruption on MFW interaction").

Patch #3 corrects an endian issue in the update API [sent here as it
would create conflicts with #2, and because its absence would cause a
rather insignificant problem].

Patch #4 removes some unnecessary logging, allowing cleaner forward
compatibility with future management firmware versions.

Patches #5, #6 slightly change the number of possible L2 queues in some
scenarios, leading to the possibility of having more queues / VFs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4deece6c dec26533
@@ -1550,7 +1550,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 {
 	u32 *feat_num = p_hwfn->hw_info.feat_num;
 	struct qed_sb_cnt_info sb_cnt_info;
-	int num_features = 1;
+	u32 non_l2_sbs = 0;
 
 	if (IS_ENABLED(CONFIG_QED_RDMA) &&
 	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
@@ -1558,31 +1558,37 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 		 * the status blocks equally between L2 / RoCE but with
 		 * consideration as to how many l2 queues / cnqs we have.
 		 */
-		num_features++;
-
 		feat_num[QED_RDMA_CNQ] =
-			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
 			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
-	}
 
-	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
-					num_features,
-					RESC_NUM(p_hwfn, QED_L2_QUEUE));
-
-	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
-	feat_num[QED_VF_L2_QUE] =
-		min_t(u32,
-		      RESC_NUM(p_hwfn, QED_L2_QUEUE) -
-		      FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
+		non_l2_sbs = feat_num[QED_RDMA_CNQ];
+	}
+
+	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+	    p_hwfn->hw_info.personality == QED_PCI_ETH) {
+		/* Start by allocating VF queues, then PF's */
+		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+		feat_num[QED_VF_L2_QUE] = min_t(u32,
+						RESC_NUM(p_hwfn, QED_L2_QUEUE),
+						sb_cnt_info.sb_iov_cnt);
+		feat_num[QED_PF_L2_QUE] = min_t(u32,
+						RESC_NUM(p_hwfn, QED_SB) -
+						non_l2_sbs,
+						RESC_NUM(p_hwfn,
+							 QED_L2_QUEUE) -
+						FEAT_NUM(p_hwfn,
+							 QED_VF_L2_QUE));
+	}
 
 	DP_VERBOSE(p_hwfn,
 		   NETIF_MSG_PROBE,
-		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d\n",
 		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
 		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
 		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
-		   RESC_NUM(p_hwfn, QED_SB), num_features);
+		   RESC_NUM(p_hwfn, QED_SB));
 }
 
 static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
...
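To see why patches #5/#6 can yield more queues, it helps to run the new split above on some assumed resource counts. The numbers in the following standalone sketch are purely illustrative (64 status blocks, 16 CNQ RAM entries, 32 VF SBs, not taken from any real adapter); with them, the old num_features-based division would cap the PF at 64 / 2 = 32 L2 queues, while the new code leaves it 48:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int num_sbs = 64;		/* RESC_NUM(QED_SB), assumed */
	unsigned int num_l2_queues = 128;	/* RESC_NUM(QED_L2_QUEUE), assumed */
	unsigned int cnq_ram = 16;		/* RESC_NUM(QED_RDMA_CNQ_RAM), assumed */
	unsigned int sb_iov_cnt = 32;		/* SBs reserved for VFs, assumed */

	/* New scheme: RoCE takes at most half the SBs, but no more than the
	 * CNQ RAM allows; L2 then gets every SB the CNQs did not consume.
	 */
	unsigned int cnqs = MIN(num_sbs / 2, cnq_ram);		/* 16 */
	unsigned int non_l2_sbs = cnqs;
	unsigned int vf_l2 = MIN(num_l2_queues, sb_iov_cnt);	/* 32 */
	unsigned int pf_l2 = MIN(num_sbs - non_l2_sbs,
				 num_l2_queues - vf_l2);	/* min(48, 96) = 48 */

	/* Old scheme: PF L2 was capped at num_sbs / num_features = 64 / 2 = 32 */
	printf("CNQ=%u VF_L2=%u PF_L2=%u\n", cnqs, vf_l2, pf_l2);
	return 0;
}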
@@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	}
 }
 
+struct qed_mcp_cmd_elem {
+	struct list_head list;
+	struct qed_mcp_mb_params *p_mb_params;
+	u16 expected_seq_num;
+	bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+		     struct qed_mcp_mb_params *p_mb_params,
+		     u16 expected_seq_num)
+{
+	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+	if (!p_cmd_elem)
+		goto out;
+
+	p_cmd_elem->p_mb_params = p_mb_params;
+	p_cmd_elem->expected_seq_num = expected_seq_num;
+	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+	return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+				 struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+	list_del(&p_cmd_elem->list);
+	kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+						     u16 seq_num)
+{
+	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+		if (p_cmd_elem->expected_seq_num == seq_num)
+			return p_cmd_elem;
+	}
+
+	return NULL;
+}
+
 int qed_mcp_free(struct qed_hwfn *p_hwfn)
 {
 	if (p_hwfn->mcp_info) {
+		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
 		kfree(p_hwfn->mcp_info->mfw_mb_cur);
 		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+		list_for_each_entry_safe(p_cmd_elem,
+					 p_tmp,
+					 &p_hwfn->mcp_info->cmd_list, list) {
+			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+		}
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 	}
+
 	kfree(p_hwfn->mcp_info);
 
 	return 0;
@@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
 				DRV_PULSE_SEQ_MASK;
 
-	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
 	return 0;
 }
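The list bookkeeping added above is the core of patch #1: every mailbox request is recorded together with the sequence number it expects, and a response is matched back to its request by that number. What follows is a minimal user-space sketch of just that bookkeeping, not the driver code itself; the singly-linked list and all names are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_elem {
	struct cmd_elem *next;
	unsigned short expected_seq_num;
	bool is_completed;
};

static struct cmd_elem *cmd_list;	/* head is the one pending command */
static unsigned short drv_mb_seq;	/* driver-side sequence counter */

/* Record a new request; in the driver this runs under cmd_lock */
static struct cmd_elem *cmd_add_elem(unsigned short seq)
{
	struct cmd_elem *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	e->expected_seq_num = seq;
	e->next = cmd_list;
	cmd_list = e;
	return e;
}

/* Match a response carrying 'seq' back to its pending request */
static struct cmd_elem *cmd_get_elem(unsigned short seq)
{
	struct cmd_elem *e;

	for (e = cmd_list; e; e = e->next)
		if (e->expected_seq_num == seq)
			return e;
	return NULL;
}

int main(void)
{
	unsigned short seq = ++drv_mb_seq;
	struct cmd_elem *sent = cmd_add_elem(seq);

	/* ...later, a response arrives carrying the same sequence number */
	struct cmd_elem *done = cmd_get_elem(seq);

	if (done) {
		done->is_completed = true;
		printf("command with seq %u completed\n", done->expected_seq_num);
	}
	free(sent);
	return 0;
}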
@@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		goto err;
 	p_info = p_hwfn->mcp_info;
 
+	/* Initialize the MFW spinlock */
+	spin_lock_init(&p_info->cmd_lock);
+	spin_lock_init(&p_info->link_lock);
+	INIT_LIST_HEAD(&p_info->cmd_list);
+
 	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
 		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
 		/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
 		goto err;
 
-	/* Initialize the MFW spinlock */
-	spin_lock_init(&p_info->lock);
-	spin_lock_init(&p_info->link_lock);
-
 	return 0;
 
 err:
@@ -201,68 +262,39 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	return -ENOMEM;
 }
 
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
 {
-	spin_lock_bh(&p_hwfn->mcp_info->lock);
+	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
-	/* The spinlock shouldn't be acquired when the mailbox command is
-	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
-	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
-	 * (i.e. interrupts are disabled) - can lead to a deadlock.
-	 * It is assumed that for a single PF, no other mailbox commands can be
-	 * sent from another context while sending LOAD_REQ, and that any
-	 * parallel commands to UNLOAD_REQ can be cancelled.
-	 */
-	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
-		p_hwfn->mcp_info->block_mb_sending = false;
-
-	if (p_hwfn->mcp_info->block_mb_sending) {
-		DP_NOTICE(p_hwfn,
-			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
-			  cmd);
-		spin_unlock_bh(&p_hwfn->mcp_info->lock);
-		return -EBUSY;
-	}
-
-	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
-		p_hwfn->mcp_info->block_mb_sending = true;
-		spin_unlock_bh(&p_hwfn->mcp_info->lock);
+	/* Use MCP history register to check if MCP reset occurred between init
+	 * time and now.
+	 */
+	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SP,
+			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
+
+		qed_load_mcp_offsets(p_hwfn, p_ptt);
+		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
 	}
-
-	return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
-{
-	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
-		spin_unlock_bh(&p_hwfn->mcp_info->lock);
 }
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
-	u8 delay = CHIP_MCP_RESP_ITER_US;
-	u32 org_mcp_reset_seq, cnt = 0;
+	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;
 
-	/* Ensure that only a single thread is accessing the mailbox at a
-	 * certain time.
-	 */
-	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
-	if (rc != 0)
-		return rc;
+	/* Ensure that only a single thread is accessing the mailbox */
+	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-	/* Set drv command along with the updated sequence */
 	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
-	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
-		  (DRV_MSG_CODE_MCP_RESET | seq));
+
+	/* Set drv command along with the updated sequence */
+	qed_mcp_reread_offsets(p_hwfn, p_ptt);
+	seq = ++p_hwfn->mcp_info->drv_mb_seq;
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
 
 	do {
 		/* Wait for MFW response */
@@ -281,72 +313,205 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		rc = -EAGAIN;
 	}
 
-	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
 	return rc;
 }
 
-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
-			  struct qed_ptt *p_ptt,
-			  u32 cmd,
-			  u32 param,
-			  u32 *o_mcp_resp,
-			  u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
 {
-	u8 delay = CHIP_MCP_RESP_ITER_US;
-	u32 seq, cnt = 1, actual_mb_seq;
-	int rc = 0;
+	struct qed_mcp_cmd_elem *p_cmd_elem;
 
-	/* Get actual driver mailbox sequence */
-	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
-			DRV_MSG_SEQ_NUMBER_MASK;
-
-	/* Use MCP history register to check if MCP reset occurred between
-	 * init time and now.
-	 */
-	if (p_hwfn->mcp_info->mcp_hist !=
-	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
-		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
-		qed_load_mcp_offsets(p_hwfn, p_ptt);
-		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+	/* There is at most one pending command at a certain time, and if it
+	 * exists - it is placed at the HEAD of the list.
+	 */
+	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
+		return !p_cmd_elem->b_is_completed;
 	}
-	seq = ++p_hwfn->mcp_info->drv_mb_seq;
-
-	/* Set drv param */
-	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
-
-	/* Set drv command along with the updated sequence */
-	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+	return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_mb_params *p_mb_params;
+	struct qed_mcp_cmd_elem *p_cmd_elem;
+	u32 mcp_resp;
+	u16 seq_num;
+
+	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+	/* Return if no new non-handled response has been received */
+	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+		return -EAGAIN;
+
+	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+	if (!p_cmd_elem) {
+		DP_ERR(p_hwfn,
+		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+		       seq_num);
+		return -EINVAL;
+	}
+
+	p_mb_params = p_cmd_elem->p_mb_params;
+
+	/* Get the MFW response along with the sequence number */
+	p_mb_params->mcp_resp = mcp_resp;
+
+	/* Get the MFW param */
+	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+	/* Get the union data */
+	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+				      offsetof(struct public_drv_mb,
+					       union_data);
+		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+				union_data_addr, p_mb_params->data_dst_size);
+	}
+
+	p_cmd_elem->b_is_completed = true;
+
+	return 0;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+				    struct qed_ptt *p_ptt,
+				    struct qed_mcp_mb_params *p_mb_params,
+				    u16 seq_num)
+{
+	union drv_union_data union_data;
+	u32 union_data_addr;
+
+	/* Set the union data */
+	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+			  offsetof(struct public_drv_mb, union_data);
+	memset(&union_data, 0, sizeof(union_data));
+	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+		memcpy(&union_data, p_mb_params->p_data_src,
+		       p_mb_params->data_src_size);
+	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+		      sizeof(union_data));
+
+	/* Set the drv param */
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+	/* Set the drv command along with the sequence number */
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
-		   "wrote command (%x) to MFW MB param 0x%08x\n",
-		   (cmd | seq), param);
+		   "MFW mailbox: command 0x%08x param 0x%08x\n",
+		   (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       struct qed_mcp_mb_params *p_mb_params,
+		       u32 max_retries, u32 delay)
+{
+	struct qed_mcp_cmd_elem *p_cmd_elem;
+	u32 cnt = 0;
+	u16 seq_num;
+	int rc = 0;
 
+	/* Wait until the mailbox is non-occupied */
 	do {
-		/* Wait for MFW response */
+		/* Exit the loop if there is no pending command, or if the
+		 * pending command is completed during this iteration.
+		 * The spinlock stays locked until the command is sent.
+		 */
+
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+		if (!qed_mcp_has_pending_cmd(p_hwfn))
+			break;
+
+		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			goto err;
+
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 		udelay(delay);
-		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
-
-		/* Give the FW up to 5 second (500*10ms) */
-	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
-		 (cnt++ < QED_DRV_MB_MAX_RETRIES));
+	} while (++cnt < max_retries);
+
+	if (cnt >= max_retries) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+		return -EAGAIN;
+	}
 
-	DP_VERBOSE(p_hwfn, QED_MSG_SP,
-		   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
-		   cnt * delay, *o_mcp_resp, seq);
+	/* Send the mailbox command */
+	qed_mcp_reread_offsets(p_hwfn, p_ptt);
+	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+	if (!p_cmd_elem)
+		goto err;
 
-	/* Is this a reply to our command? */
-	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
-		*o_mcp_resp &= FW_MSG_CODE_MASK;
-		/* Get the MCP param */
-		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
-	} else {
-		/* FW BUG! */
-		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
-		       cmd, param);
-		*o_mcp_resp = 0;
-		rc = -EAGAIN;
+	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+	/* Wait for the MFW response */
+	do {
+		/* Exit the loop if the command is already completed, or if the
+		 * command is completed during this iteration.
+		 * The spinlock stays locked until the list element is removed.
+		 */
+
+		udelay(delay);
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+		if (p_cmd_elem->b_is_completed)
+			break;
+
+		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			goto err;
+
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+	} while (++cnt < max_retries);
+
+	if (cnt >= max_retries) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+
+		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+		return -EAGAIN;
 	}
+
+	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+		   p_mb_params->mcp_resp,
+		   p_mb_params->mcp_param,
+		   (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+	/* Clear the sequence number from the MFW response */
+	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+	return 0;
+
+err:
+	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 	return rc;
 }
@@ -354,9 +519,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 				 struct qed_ptt *p_ptt,
 				 struct qed_mcp_mb_params *p_mb_params)
 {
-	u32 union_data_addr;
-	int rc;
+	size_t union_data_size = sizeof(union drv_union_data);
+	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+	u32 delay = CHIP_MCP_RESP_ITER_US;
 
 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
@@ -364,33 +529,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EBUSY;
 	}
 
-	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
-			  offsetof(struct public_drv_mb, union_data);
-
-	/* Ensure that only a single thread is accessing the mailbox at a
-	 * certain time.
-	 */
-	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
-	if (rc)
-		return rc;
-
-	if (p_mb_params->p_data_src != NULL)
-		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
-			      p_mb_params->p_data_src,
-			      sizeof(*p_mb_params->p_data_src));
-
-	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
-			    p_mb_params->param, &p_mb_params->mcp_resp,
-			    &p_mb_params->mcp_param);
-
-	if (p_mb_params->p_data_dst != NULL)
-		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
-				union_data_addr,
-				sizeof(*p_mb_params->p_data_dst));
-
-	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+	if (p_mb_params->data_src_size > union_data_size ||
+	    p_mb_params->data_dst_size > union_data_size) {
+		DP_ERR(p_hwfn,
+		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+		       p_mb_params->data_src_size,
+		       p_mb_params->data_dst_size, union_data_size);
+		return -EINVAL;
+	}
 
-	return rc;
+	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+				      delay);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -401,11 +550,10 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
 	u32 *o_mcp_param)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data data_src;
+	struct mcp_mac wol_mac;
 	int rc;
 
 	memset(&mb_params, 0, sizeof(mb_params));
-	memset(&data_src, 0, sizeof(data_src));
 	mb_params.cmd = cmd;
 	mb_params.param = param;
@@ -414,17 +562,18 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
 	    (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
 		u8 *p_mac = p_hwfn->cdev->wol_mac;
 
-		data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
-		data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
-					     p_mac[4] << 8 | p_mac[5];
+		memset(&wol_mac, 0, sizeof(wol_mac));
+		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+				    p_mac[4] << 8 | p_mac[5];
 
 		DP_VERBOSE(p_hwfn,
 			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
 			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
-			   p_mac, data_src.wol_mac.mac_upper,
-			   data_src.wol_mac.mac_lower);
+			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
 
-		mb_params.p_data_src = &data_src;
+		mb_params.p_data_src = &wol_mac;
+		mb_params.data_src_size = sizeof(wol_mac);
 	}
 
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
@@ -445,13 +594,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
 		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
 	int rc;
 
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = cmd;
 	mb_params.param = param;
-	mb_params.p_data_dst = &union_data;
+	mb_params.p_data_dst = raw_data;
+
+	/* Use the maximal value since the actual one is part of the response */
+	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		return rc;
@@ -460,7 +613,7 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
 	*o_mcp_param = mb_params.mcp_param;
 
 	*o_txn_size = *o_mcp_param;
-	memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+	memcpy(o_buf, raw_data, *o_txn_size);
 
 	return 0;
 }
@@ -480,6 +633,7 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 			  cdev->drv_type;
 	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
 	mb_params.p_data_src = &union_data;
+	mb_params.data_src_size = sizeof(union_data.ver_str);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 
 	/* if mcp fails to respond we must abort */
@@ -549,7 +703,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
 				     MCP_PF_ID(p_hwfn));
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
 	int rc;
 	int i;
@@ -560,8 +713,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
-	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = vfs_to_ack;
+	mb_params.data_src_size = VF_MAX_STATIC / 8;
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
@@ -744,33 +897,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 {
 	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
-	struct eth_phy_cfg *phy_cfg;
+	struct eth_phy_cfg phy_cfg;
 	int rc = 0;
 	u32 cmd;
 
 	/* Set the shmem configuration according to params */
-	phy_cfg = &union_data.drv_phy_cfg;
-	memset(phy_cfg, 0, sizeof(*phy_cfg));
+	memset(&phy_cfg, 0, sizeof(phy_cfg));
 	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
 	if (!params->speed.autoneg)
-		phy_cfg->speed = params->speed.forced_speed;
-	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
-	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
-	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
-	phy_cfg->adv_speed = params->speed.advertised_speeds;
-	phy_cfg->loopback_mode = params->loopback_mode;
+		phy_cfg.speed = params->speed.forced_speed;
+	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+	phy_cfg.adv_speed = params->speed.advertised_speeds;
+	phy_cfg.loopback_mode = params->loopback_mode;
 
 	p_hwfn->b_drv_link_init = b_up;
 
 	if (b_up) {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
 			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
-			   phy_cfg->speed,
-			   phy_cfg->pause,
-			   phy_cfg->adv_speed,
-			   phy_cfg->loopback_mode,
-			   phy_cfg->feature_config_flags);
+			   phy_cfg.speed,
+			   phy_cfg.pause,
+			   phy_cfg.adv_speed,
+			   phy_cfg.loopback_mode,
+			   phy_cfg.feature_config_flags);
 	} else {
 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
 			   "Resetting link\n");
@@ -778,7 +929,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = cmd;
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = &phy_cfg;
+	mb_params.data_src_size = sizeof(phy_cfg);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 
 	/* if mcp fails to respond we must abort */
@@ -805,7 +957,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
 	enum qed_mcp_protocol_type stats_type;
 	union qed_mcp_protocol_stats stats;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
 	u32 hsi_param;
 
 	switch (type) {
@@ -835,8 +986,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
 	mb_params.param = hsi_param;
-	memcpy(&union_data, &stats, sizeof(stats));
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = &stats;
+	mb_params.data_src_size = sizeof(stats);
 	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
@@ -963,7 +1114,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 			qed_mcp_update_bw(p_hwfn, p_ptt);
 			break;
 		default:
-			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
 			rc = -EINVAL;
 		}
 	}
@@ -1316,24 +1467,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 			 struct qed_ptt *p_ptt,
 			 struct qed_mcp_drv_version *p_ver)
 {
-	struct drv_version_stc *p_drv_version;
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	struct drv_version_stc drv_version;
 	__be32 val;
 	u32 i;
 	int rc;
 
-	p_drv_version = &union_data.drv_version;
-	p_drv_version->version = p_ver->version;
+	memset(&drv_version, 0, sizeof(drv_version));
+	drv_version.version = p_ver->version;
 
 	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
 		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
-		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
 	}
 
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
-	mb_params.p_data_src = &union_data;
+	mb_params.p_data_src = &drv_version;
+	mb_params.data_src_size = sizeof(drv_version);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1450,7 +1600,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt, u8 *mac)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
+	u32 mfw_mac[2];
 	int rc;
 
 	memset(&mb_params, 0, sizeof(mb_params));
@@ -1458,8 +1608,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
 	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
 			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
 	mb_params.param |= MCP_PF_ID(p_hwfn);
-	ether_addr_copy(&union_data.raw_data[0], mac);
-	mb_params.p_data_src = &union_data;
+
+	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+	 * in 32-bit granularity.
+	 * So the MAC has to be set in native order [and not byte order],
+	 * otherwise it would be read incorrectly by MFW after swap.
+	 */
+	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+	mb_params.p_data_src = (u8 *)mfw_mac;
+	mb_params.data_src_size = 8;
+
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
@@ -1737,27 +1896,21 @@ int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
 			  u32 *p_mcp_resp, u32 *p_mcp_param)
 {
 	struct qed_mcp_mb_params mb_params;
-	union drv_union_data union_data;
 	int rc;
 
 	memset(&mb_params, 0, sizeof(mb_params));
-	memset(&union_data, 0, sizeof(union_data));
 	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
 	mb_params.param = QED_RESC_ALLOC_VERSION;
-
-	/* Need to have a sufficient large struct, as the cmd_and_union
-	 * is going to do memcpy from and to it.
-	 */
-	memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
-	mb_params.p_data_src = &union_data;
-	mb_params.p_data_dst = &union_data;
+	mb_params.p_data_src = p_resc_info;
+	mb_params.data_src_size = sizeof(*p_resc_info);
+	mb_params.p_data_dst = p_resc_info;
+	mb_params.data_dst_size = sizeof(*p_resc_info);
 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 	if (rc)
 		return rc;
 
 	/* Copy the data back */
-	memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
 	*p_mcp_resp = mb_params.mcp_resp;
 	*p_mcp_param = mb_params.mcp_param;
...
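The qed_mcp_ov_update_mac() change above is patch #3's endian fix, and it can be sanity-checked in isolation: packing the MAC most-significant-byte first into host-order words means the 32-bit swap applied on little-endian hosts presents the bytes to the big-endian MCP in the right order. A standalone sketch with an arbitrary example MAC (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t mfw_mac[2];

	/* Pack most-significant-byte first into host-order words; the
	 * 32-bit swap PCI performs on LE hosts then hands the bytes to
	 * the big-endian MCP in the intended order.
	 */
	mfw_mac[0] = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
		     (uint32_t)mac[2] << 8 | mac[3];
	mfw_mac[1] = (uint32_t)mac[4] << 24 | (uint32_t)mac[5] << 16;

	printf("mfw_mac = [%08x, %08x]\n", mfw_mac[0], mfw_mac[1]);
	return 0;
}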
@@ -484,8 +484,13 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
 			     qed_device_num_engines((_p_hwfn)->cdev)))
 
 struct qed_mcp_info {
-	/* Spinlock used for protecting the access to the MFW mailbox */
-	spinlock_t lock;
+	/* List for mailbox commands which were sent and wait for a response */
+	struct list_head cmd_list;
+
+	/* Spinlock used for protecting the access to the mailbox commands list
+	 * and the sending of the commands.
+	 */
+	spinlock_t cmd_lock;
 
 	/* Spinlock used for syncing SW link-changes and link-changes
 	 * originating from attention context.
@@ -505,14 +510,16 @@ struct qed_mcp_info {
 	u8 *mfw_mb_cur;
 	u8 *mfw_mb_shadow;
 	u16 mfw_mb_length;
-	u16 mcp_hist;
+	u32 mcp_hist;
 };
 
 struct qed_mcp_mb_params {
 	u32 cmd;
 	u32 param;
-	union drv_union_data *p_data_src;
-	union drv_union_data *p_data_dst;
+	void *p_data_src;
+	u8 data_src_size;
+	void *p_data_dst;
+	u8 data_dst_size;
 	u32 mcp_resp;
 	u32 mcp_param;
 };
...
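The qed_mcp_mb_params change above is what patch #2's protection amounts to: callers now pass a void pointer plus an explicit size, oversized requests are rejected against the union size, and reads copy only data_dst_size bytes into the caller's buffer. A contrived user-space sketch of the overrun this removes (all sizes and names here are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

union demo_union_data {		/* stand-in for the fixed-size shmem union */
	uint8_t raw_data[32];
	uint32_t words[8];
};

struct small_reply {		/* a caller's buffer, smaller than the union */
	uint32_t status;
};

int main(void)
{
	union demo_union_data shmem = { .raw_data = { 1, 2, 3, 4 } };
	struct small_reply reply;
	size_t dst_size = sizeof(reply);	/* plays the role of data_dst_size */

	/* The old API always copied sizeof(union) into the destination,
	 * which overran buffers like 'reply' on the stack; bounding the
	 * copy with the caller-supplied size removes that bug class.
	 */
	memcpy(&reply, &shmem,
	       dst_size < sizeof(shmem) ? dst_size : sizeof(shmem));

	printf("status = 0x%08x\n", reply.status);
	return 0;
}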