Commit b310974e authored by Tomer Tayar, committed by David S. Miller

qed: Avoid sending mailbox commands when MFW is not responsive

Continuing to send mailbox commands to the MFW while it is not responsive results
in a redundant series of timeout expiries.
This patch prints the MCP CPU status upon the first command that gets no response,
and blocks any subsequent commands.
Since the (un)load request commands might get no response because of other PFs,
the patch also adds the option to skip the blocking upon such a failure.
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent eaa50fc5
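
To summarize the mechanism before the diff: a command that times out marks the mailbox as blocked, later commands fail fast with -EBUSY instead of waiting through another timeout, and callers such as the (un)load requests can opt out so that a response delayed by other PFs does not block the mailbox. The following standalone C model is only an illustration of that flow under simplified assumptions; struct mcp_info, mb_send() and MB_FLAG_AVOID_BLOCK are stand-in names, and the fake transport never answers. The real implementation is in the hunks below.

/* Standalone model of the blocking logic introduced by this patch.
 * All names here are illustrative stand-ins for the driver code below.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MB_FLAG_AVOID_BLOCK	(0x1 << 1)	/* models QED_MB_FLAG_AVOID_BLOCK */

struct mcp_info {
	bool b_block_cmd;	/* set once the MFW stops responding */
};

/* Fake transport: pretend the MFW never answers, as in the failure case. */
static bool mfw_responded(unsigned int cmd)
{
	(void)cmd;
	return false;
}

static int mb_send(struct mcp_info *info, unsigned int cmd, unsigned int flags)
{
	if (info->b_block_cmd) {
		printf("MFW is not responsive, skipping command 0x%x\n", cmd);
		return -EBUSY;		/* fail fast, no extra timeout */
	}

	if (!mfw_responded(cmd)) {
		printf("MFW failed to respond to command 0x%x\n", cmd);
		/* (Un)load requests pass AVOID_BLOCK so a failure caused by
		 * other PFs does not block the mailbox for later commands.
		 */
		if (!(flags & MB_FLAG_AVOID_BLOCK))
			info->b_block_cmd = true;
		return -EAGAIN;
	}

	return 0;
}

int main(void)
{
	struct mcp_info info = { .b_block_cmd = false };

	mb_send(&info, 0x10, MB_FLAG_AVOID_BLOCK);	/* e.g. a load request */
	mb_send(&info, 0x20, 0);			/* times out and blocks */
	mb_send(&info, 0x30, 0);			/* rejected with -EBUSY */
	return 0;
}

The three calls print a failed-to-respond notice for the first two commands and a skip notice for the third, mirroring the -EAGAIN and -EBUSY paths in _qed_mcp_cmd_and_union() and qed_mcp_cmd_and_union() in the diff.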
@@ -320,6 +320,12 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+		return -EBUSY;
+	}
+
 	/* Ensure that only a single thread is accessing the mailbox */
 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -445,6 +451,33 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		    (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+	p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+		block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+	u32 delay = QED_MCP_RESP_ITER_US;
+
+	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+	udelay(delay);
+	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+	DP_NOTICE(p_hwfn,
+		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		       struct qed_ptt *p_ptt,
@@ -531,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
 			  p_mb_params->cmd, p_mb_params->param);
+		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
+		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+			qed_mcp_cmd_set_blocking(p_hwfn, true);
+
 		return -EAGAIN;
 	}
 
@@ -573,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EBUSY;
 	}
 
+	if (p_hwfn->mcp_info->b_block_cmd) {
+		DP_NOTICE(p_hwfn,
+			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+			  p_mb_params->cmd, p_mb_params->param);
+		return -EBUSY;
+	}
+
 	if (p_mb_params->data_src_size > union_data_size ||
 	    p_mb_params->data_dst_size > union_data_size) {
 		DP_ERR(p_hwfn,
@@ -806,7 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 	mb_params.data_src_size = sizeof(load_req);
 	mb_params.p_data_dst = &load_rsp;
 	mb_params.data_dst_size = sizeof(load_rsp);
-	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -1050,7 +1094,7 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	memset(&mb_params, 0, sizeof(mb_params));
 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
 	mb_params.param = wol_param;
-	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
 	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
@@ -2158,6 +2202,8 @@ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		return -EBUSY;
 	}
 
+	qed_mcp_cmd_set_blocking(p_hwfn, true);
+
 	return 0;
 }
@@ -2182,6 +2228,8 @@ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		return -EBUSY;
 	}
 
+	qed_mcp_cmd_set_blocking(p_hwfn, false);
+
 	return 0;
 }
...
@@ -635,11 +635,14 @@ struct qed_mcp_info {
 	 */
 	spinlock_t cmd_lock;
 
+	/* Flag to indicate whether sending a MFW mailbox command is blocked */
+	bool b_block_cmd;
+
 	/* Spinlock used for syncing SW link-changes and link-changes
 	 * originating from attention context.
 	 */
 	spinlock_t link_lock;
 
-	bool block_mb_sending;
 	u32 public_base;
 	u32 drv_mb_addr;
 	u32 mfw_mb_addr;
@@ -670,6 +673,7 @@ struct qed_mcp_mb_params {
 	u32 mcp_param;
 	u32 flags;
 #define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
 #define QED_MB_FLAGS_IS_SET(params, flag) \
 	({ typeof(params) __params = (params); \
 	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
...
@@ -565,6 +565,7 @@
 #define MCP_REG_CPU_STATE_SOFT_HALTED	(0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
 	0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER	0xe0501cUL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
 	0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
...
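
As a usage note, any new mailbox command whose timeout should not block the mailbox (because, like the (un)load requests, its response may simply be stalled behind other PFs) would set the new flag the same way. The sketch below assumes a caller that already holds p_hwfn and p_ptt, and DRV_MSG_CODE_EXAMPLE is a hypothetical placeholder, not a real command code:

	struct qed_mcp_mb_params mb_params;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_EXAMPLE;	/* hypothetical command code */
	mb_params.param = 0;
	/* A timeout on this command will not block later mailbox commands. */
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);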