Commit 5a4dcdca authored by David S. Miller

Merge branch 'qed-bug-fixes'

Yuval Mintz says:

====================
qed*: Bug fixes

This is a respin of the series of the same name for net-next
instead of net [only patch #10 is changed].

The first 3 patches here are a repost of
("qed: RoCE infrastructure fixes"). The fourth is an additional
RoCE-related infrastructure fix, and the rest contain various fixes
to qed/qede.

Please consider applying these to `net-next'.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8676ea8f 65ed2ffd
@@ -913,7 +913,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
n_cpus = num_active_cpus();
n_cpus = num_present_cpus();
rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
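As an aside, a minimal userspace sketch (with sysconf() standing in for the kernel's CPU-count helpers; not part of this patch) of why sizing per-CPU resources by the CPUs that are present, rather than only those currently online, gives the safer upper bound:

```c
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* _SC_NPROCESSORS_CONF roughly plays the role of num_present_cpus():
	 * every CPU the system knows about. _SC_NPROCESSORS_ONLN plays the
	 * role of num_active_cpus(): only the ones online right now. Sizing
	 * by the former leaves room for CPUs that come online later.
	 */
	long present = sysconf(_SC_NPROCESSORS_CONF);
	long online = sysconf(_SC_NPROCESSORS_ONLN);

	printf("present=%ld online=%ld -> size per-CPU resources for %ld CPUs\n",
	       present, online, present);
	return 0;
}
```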
......
@@ -1072,6 +1072,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
enum qed_sb_type type)
{
struct qed_hwfn *p_hwfn;
struct qed_ptt *p_ptt;
int hwfn_index;
u16 rel_sb_id;
u8 n_hwfns;
@@ -1093,8 +1094,18 @@ static u32 qed_sb_init(struct qed_dev *cdev,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
hwfn_index, rel_sb_id, sb_id);
rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
sb_virt_addr, sb_phy_addr, rel_sb_id);
if (IS_PF(p_hwfn->cdev)) {
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
sb_phy_addr, rel_sb_id);
qed_ptt_release(p_hwfn, p_ptt);
} else {
rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
sb_phy_addr, rel_sb_id);
}
return rc;
}
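For illustration only, a small userspace analog (a trylock standing in for qed_ptt_acquire(); hypothetical names, not the driver's code) of the pattern this hunk introduces on the PF path: grab the access window, do the work, always release it, and bail out with -EBUSY if it cannot be obtained:

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptt_window = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for the PF-only path: acquire the window,
 * program the status block, release the window no matter what.
 */
static int sb_init_pf(void)
{
	int rc;

	if (pthread_mutex_trylock(&ptt_window))	/* qed_ptt_acquire() failed */
		return -EBUSY;

	rc = 0;					/* qed_int_sb_init() would run here */
	pthread_mutex_unlock(&ptt_window);	/* qed_ptt_release() */
	return rc;
}

int main(void)
{
	printf("sb_init_pf() -> %d\n", sb_init_pf());
	return 0;
}
```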
@@ -1135,12 +1146,18 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
if (!cdev)
return -ENODEV;
if (IS_VF(cdev))
return 0;
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
/* When VF wants to set link, force it to read the bulletin instead.
* This mimics the PF behavior, where a notification [both immediate
* and possible later] would be generated when changing properties.
*/
if (IS_VF(cdev)) {
qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
return 0;
}
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EBUSY;
......
@@ -192,6 +192,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Initialize the MFW spinlock */
spin_lock_init(&p_info->lock);
spin_lock_init(&p_info->link_lock);
return 0;
@@ -610,6 +611,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
u8 max_bw, min_bw;
u32 status = 0;
/* Prevent SW/attentions from doing this at the same time */
spin_lock_bh(&p_hwfn->mcp_info->link_lock);
p_link = &p_hwfn->mcp_info->link_output;
memset(p_link, 0, sizeof(*p_link));
if (!b_reset) {
@@ -624,7 +628,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link indications\n");
return;
goto out;
}
if (p_hwfn->b_drv_link_init)
@@ -731,6 +735,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
qed_link_update(p_hwfn);
out:
spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
@@ -780,9 +786,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return rc;
}
/* Reset the link status if needed */
if (!b_up)
qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
/* Mimic link-change attention, done for several reasons:
* - On reset, there's no guarantee MFW would trigger
* an attention.
* - On initialization, older MFWs might not indicate link change
* during LFA, so we'll never get an UP indication.
*/
qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
return 0;
}
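To make the locking intent concrete, here is a minimal userspace sketch (pthreads in place of the spinlock, hypothetical field names; not the driver's code) of what the new link_lock serializes: the same link-output structure is rewritten both from the SW path (qed_mcp_set_link) and from attention context, so the memset-and-fill sequence has to be atomic with respect to the other path:

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical miniature: both the "SW" path (set_link) and the "attention"
 * path (MFW notification) rewrite the same link_output; a single lock keeps
 * the memset+fill sequence atomic with respect to the other path.
 */
static pthread_mutex_t link_lock = PTHREAD_MUTEX_INITIALIZER;
static struct { int up; int speed; } link_output;

static void handle_link_change(int up, int speed)
{
	pthread_mutex_lock(&link_lock);
	memset(&link_output, 0, sizeof(link_output));
	link_output.up = up;
	link_output.speed = speed;
	pthread_mutex_unlock(&link_lock);
}

static void *attention_ctx(void *arg)
{
	handle_link_change(1, 25000);	/* notification from firmware */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, attention_ctx, NULL);
	handle_link_change(0, 0);	/* SW-initiated reset of the link */
	pthread_join(t, NULL);
	printf("up=%d speed=%d\n", link_output.up, link_output.speed);
	return 0;
}
```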
......
@@ -485,7 +485,13 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->cdev->num_ports_in_engines * 2))
struct qed_mcp_info {
/* Spinlock used for protecting the access to the MFW mailbox */
spinlock_t lock;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
spinlock_t link_lock;
bool block_mb_sending;
u32 public_base;
u32 drv_mb_addr;
......
@@ -948,7 +948,9 @@ static int qed_rdma_create_cq(void *rdma_cxt,
err:
/* release allocated icid */
spin_lock_bh(&p_info->lock);
qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
spin_unlock_bh(&p_info->lock);
DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
return rc;
@@ -1766,13 +1768,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (rc)
goto err_resp;
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
p_resp_ramrod_res, resp_ramrod_res_phys);
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
p_resp_ramrod_res, resp_ramrod_res_phys);
if (!(qp->req_offloaded)) {
/* Don't send query qp for the requester */
out_params->sq_psn = qp->sq_psn;
@@ -1813,9 +1815,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (rc)
goto err_req;
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
p_req_ramrod_res, req_ramrod_res_phys);
out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
@@ -1823,6 +1822,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
p_req_ramrod_res, req_ramrod_res_phys);
out_params->draining = false;
if (rq_err_state)
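A minimal userspace sketch of the ordering bug these query-QP hunks fix (plain calloc/free standing in for dma_alloc_coherent/dma_free_coherent, with a hypothetical result struct): the ramrod result buffer has to be read before it is handed back, not after:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the ramrod result buffer. */
struct query_result {
	unsigned int psn;
	unsigned int flags;
};

int main(void)
{
	struct query_result *res = calloc(1, sizeof(*res));
	unsigned int psn, flags;

	if (!res)
		return 1;
	res->psn = 42;
	res->flags = 1;

	/* Correct order: copy everything we need out of the buffer first... */
	psn = res->psn;
	flags = res->flags;

	/* ...and only then release it (dma_free_coherent() in the driver). */
	free(res);

	printf("psn=%u flags=%u\n", psn, flags);
	return 0;
}
```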
@@ -1847,6 +1849,7 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
u32 start_cid;
@@ -1861,35 +1864,39 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return -EINVAL;
}
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
if (rc)
return rc;
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
&num_invalidated_mw);
if (rc)
return rc;
/* Send destroy requester ramrod */
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
if (rc)
return rc;
/* Send destroy requester ramrod */
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
if (rc)
return rc;
if (num_invalidated_mw != num_bound_mw) {
DP_NOTICE(p_hwfn,
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
if (num_invalidated_mw != num_bound_mw) {
DP_NOTICE(p_hwfn,
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
spin_lock_bh(&p_rdma_info->lock);
start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
p_hwfn->p_rdma_info->proto);
start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
p_rdma_info->proto);
/* Release responder's icid */
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
qp->icid - start_cid);
/* Release responder's icid */
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
qp->icid - start_cid);
/* Release requester's icid */
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
qp->icid + 1 - start_cid);
/* Release requester's icid */
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
qp->icid + 1 - start_cid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
spin_unlock_bh(&p_rdma_info->lock);
}
return 0;
}
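In the same spirit, a tiny sketch (hypothetical state names, printf in place of the ramrods; a reading of the hunk above, not the driver's code) of the guarded teardown introduced here: as these hunks read, the destroy ramrods and the icid release are only issued for a QP that actually left the RESET state:

```c
#include <stdio.h>

enum qp_state { QP_RESET, QP_RTS };

/* Hypothetical miniature of the fixed flow: skip the whole teardown
 * sequence for a QP that was never moved out of RESET.
 */
static int destroy_qp(enum qp_state cur_state)
{
	if (cur_state != QP_RESET) {
		printf("destroy responder, destroy requester\n");
		printf("release responder/requester icids\n");
	}
	return 0;
}

int main(void)
{
	destroy_qp(QP_RESET);	/* nothing was offloaded: no ramrods */
	destroy_qp(QP_RTS);	/* active QP: full teardown */
	return 0;
}
```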
......
@@ -831,10 +831,52 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
vf->num_sbs = 0;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
u16 vfid,
struct qed_mcp_link_params *params,
struct qed_mcp_link_state *link,
struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
false);
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
return;
p_bulletin = p_vf->bulletin.p_virt;
p_bulletin->req_autoneg = params->speed.autoneg;
p_bulletin->req_adv_speed = params->speed.advertised_speeds;
p_bulletin->req_forced_speed = params->speed.forced_speed;
p_bulletin->req_autoneg_pause = params->pause.autoneg;
p_bulletin->req_forced_rx = params->pause.forced_rx;
p_bulletin->req_forced_tx = params->pause.forced_tx;
p_bulletin->req_loopback = params->loopback_mode;
p_bulletin->link_up = link->link_up;
p_bulletin->speed = link->speed;
p_bulletin->full_duplex = link->full_duplex;
p_bulletin->autoneg = link->an;
p_bulletin->autoneg_complete = link->an_complete;
p_bulletin->parallel_detection = link->parallel_detection;
p_bulletin->pfc_enabled = link->pfc_enabled;
p_bulletin->partner_adv_speed = link->partner_adv_speed;
p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
p_bulletin->partner_adv_pause = link->partner_adv_pause;
p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_iov_vf_init_params *p_params)
{
struct qed_mcp_link_capabilities link_caps;
struct qed_mcp_link_params link_params;
struct qed_mcp_link_state link_state;
u8 num_of_vf_avaiable_chains = 0;
struct qed_vf_info *vf = NULL;
u16 qid, num_irqs;
@@ -923,6 +965,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
p_queue->fw_tx_qid, p_queue->fw_cid);
}
/* Update the link configuration in bulletin */
memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
sizeof(link_params));
memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
sizeof(link_caps));
qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
&link_params, &link_state, &link_caps);
rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (!rc) {
vf->b_init = true;
@@ -934,45 +985,6 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
return rc;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
u16 vfid,
struct qed_mcp_link_params *params,
struct qed_mcp_link_state *link,
struct qed_mcp_link_capabilities *p_caps)
{
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
vfid,
false);
struct qed_bulletin_content *p_bulletin;
if (!p_vf)
return;
p_bulletin = p_vf->bulletin.p_virt;
p_bulletin->req_autoneg = params->speed.autoneg;
p_bulletin->req_adv_speed = params->speed.advertised_speeds;
p_bulletin->req_forced_speed = params->speed.forced_speed;
p_bulletin->req_autoneg_pause = params->pause.autoneg;
p_bulletin->req_forced_rx = params->pause.forced_rx;
p_bulletin->req_forced_tx = params->pause.forced_tx;
p_bulletin->req_loopback = params->loopback_mode;
p_bulletin->link_up = link->link_up;
p_bulletin->speed = link->speed;
p_bulletin->full_duplex = link->full_duplex;
p_bulletin->autoneg = link->an;
p_bulletin->autoneg_complete = link->an_complete;
p_bulletin->parallel_detection = link->parallel_detection;
p_bulletin->pfc_enabled = link->pfc_enabled;
p_bulletin->partner_adv_speed = link->partner_adv_speed;
p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
p_bulletin->partner_adv_pause = link->partner_adv_pause;
p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 rel_vf_id)
{
......
@@ -254,6 +254,7 @@ enum qed_iov_wq_flag {
QED_IOV_WQ_STOP_WQ_FLAG,
QED_IOV_WQ_FLR_FLAG,
QED_IOV_WQ_TRUST_FLAG,
QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};
#ifdef CONFIG_QED_SRIOV
......
@@ -1285,6 +1285,9 @@ void qed_iov_vf_task(struct work_struct *work)
/* Handle bulletin board changes */
qed_vf_read_bulletin(hwfn, &change);
if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
&hwfn->iov_task_flags))
change = 1;
if (change)
qed_handle_bulletin_change(hwfn);
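For context, a small userspace sketch (C11 atomics in place of set_bit/test_and_clear_bit, hypothetical names; not the driver's code) of the handoff added here: the setter raises a flag and kicks the worker, and the worker consumes the flag exactly once and treats it as a bulletin change:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool force_link_query;

/* Hypothetical stand-in for qed_schedule_iov(..., FORCE_LINK_QUERY_FLAG). */
static void schedule_force_link_query(void)
{
	atomic_store(&force_link_query, true);
}

/* Hypothetical stand-in for qed_iov_vf_task(): consume the flag once. */
static void vf_task(void)
{
	int change = 0;		/* what qed_vf_read_bulletin() reported */

	if (atomic_exchange(&force_link_query, false))
		change = 1;

	if (change)
		printf("handling bulletin change\n");
}

int main(void)
{
	schedule_force_link_query();
	vf_task();		/* handles the forced query */
	vf_task();		/* flag already consumed: nothing to do */
	return 0;
}
```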
......
@@ -1333,7 +1333,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
int i, rc = 0;
int i, iter, rc = 0;
u8 *data_ptr;
for_each_queue(i) {
@@ -1352,7 +1352,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* enabled. This is because the queue 0 is configured as the default
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
if (!qede_has_rx_work(rxq)) {
usleep_range(100, 200);
continue;
@@ -1399,7 +1399,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
if (i == QEDE_SELFTEST_POLL_COUNT) {
if (iter == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
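For illustration, a minimal userspace sketch (not from the driver) of the bug class this selftest fix addresses: reusing the outer loop counter in an inner polling loop makes the post-loop timeout check compare the wrong variable, so a dedicated iter counter is needed:

```c
#include <stdio.h>

#define POLL_COUNT 10

int main(void)
{
	int i, iter;

	/* 'i' walks the queues, as for_each_queue() does in the driver. */
	for (i = 0; i < 4; i++)
		;

	/* A separate 'iter' drives the poll loop; reusing 'i' here would
	 * corrupt the queue index and the timeout test below.
	 */
	for (iter = 0; iter < POLL_COUNT; iter++) {
		if (iter == 3)		/* pretend traffic arrived on iteration 3 */
			break;
	}

	if (iter == POLL_COUNT)		/* correct: checks the poll counter */
		printf("timed out\n");
	else
		printf("got traffic after %d polls\n", iter);
	return 0;
}
```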
......
@@ -854,6 +854,13 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (rc)
goto err3;
/* Prepare the lock prior to the registration of the netdev,
* as once it's registered we might reach flows requiring it
* [it's even possible to reach a flow needing it directly
* from there, although it's unlikely].
*/
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
rc = register_netdev(edev->ndev);
if (rc) {
DP_NOTICE(edev, "Cannot register net-device\n");
@@ -878,8 +885,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_set_dcbnl_ops(edev->ndev);
#endif
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
DP_INFO(edev, "Ending successfully qede probe\n");
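A minimal sketch (userspace analog with pthreads, hypothetical names; not the driver's code) of the ordering rule behind moving the lock and work-item initialization above register_netdev(): anything a callback might touch must exist before the object is published:

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical miniature of the probe path: the lock must exist before
 * "registration" makes the device visible to other contexts, because a
 * callback may fire immediately afterwards and take the lock.
 */
struct edev {
	pthread_mutex_t lock;
	int registered;
};

static void callback_that_may_run_right_after_register(struct edev *e)
{
	pthread_mutex_lock(&e->lock);	/* undefined if init came later */
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	struct edev e = { .registered = 0 };

	pthread_mutex_init(&e.lock, NULL);	/* 1: prepare the lock ... */
	e.registered = 1;			/* 2: ... then publish the device */
	callback_that_may_run_right_after_register(&e);
	printf("ok\n");
	return 0;
}
```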
@@ -951,14 +956,20 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
if (edev->xdp_prog)
bpf_prog_put(edev->xdp_prog);
free_netdev(ndev);
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
/* Since this can happen out-of-sync with other flows,
* don't release the netdevice until after slowpath stop
* has been called to guarantee various other contexts
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
free_netdev(ndev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -1861,7 +1872,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
struct qed_link_output link_output;
int rc;
DP_INFO(edev, "Starting qede load\n");
@@ -1913,11 +1923,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
/* Query whether link is already-up */
memset(&link_output, 0, sizeof(link_output));
edev->ops->common->get_link(edev->cdev, &link_output);
qede_roce_dev_event_open(edev);
qede_link_update(edev, &link_output);
qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
......