Commit 291d57f6 authored by Michal Kalderon's avatar Michal Kalderon Committed by David S. Miller

qed: Fix rdma_info structure allocation

Certain flows need to access the rdma-info structure, for example dcbx
update flows. In some cases there can be a race between the allocation or
deallocation of the structure which was done in roce start / roce stop and
an asynchronous dcbx event that tries to access the structure.
For this reason, we move the allocation of the rdma_info structure to be
similar to the iscsi/fcoe info structures which are allocated during device
setup.
We add a new "active" field to the struct to indicate whether roce has
already been started, and this is checked instead of checking whether the
pointer to the info structure is set.

Fixes: 51ff1725 ("qed: Add support for RoCE hw init")
Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
Signed-off-by: Denis Bolotin <denis.bolotin@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e90202ed
...@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
qed_iscsi_free(p_hwfn); qed_iscsi_free(p_hwfn);
qed_ooo_free(p_hwfn); qed_ooo_free(p_hwfn);
} }
if (QED_IS_RDMA_PERSONALITY(p_hwfn))
qed_rdma_info_free(p_hwfn);
qed_iov_free(p_hwfn); qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn); qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn); qed_dmae_info_free(p_hwfn);
...@@ -1081,6 +1085,12 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -1081,6 +1085,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err; goto alloc_err;
} }
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
rc = qed_rdma_info_alloc(p_hwfn);
if (rc)
goto alloc_err;
}
/* DMA info initialization */ /* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn); rc = qed_dmae_info_alloc(p_hwfn);
if (rc) if (rc)
...@@ -2102,11 +2112,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) ...@@ -2102,11 +2112,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
if (!p_ptt) if (!p_ptt)
return -EAGAIN; return -EAGAIN;
/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
*/
if (p_hwfn->p_rdma_info && if (p_hwfn->p_rdma_info &&
p_hwfn->b_rdma_enabled_in_prs) p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
/* Re-open incoming traffic */ /* Re-open incoming traffic */
......
...@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) ...@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
} }
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
{ {
struct qed_rdma_info *p_rdma_info; struct qed_rdma_info *p_rdma_info;
u32 num_cons, num_tasks;
int rc = -ENOMEM;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
/* Allocate a struct with current pf rdma info */
p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
if (!p_rdma_info) if (!p_rdma_info)
return rc; return -ENOMEM;
spin_lock_init(&p_rdma_info->lock);
p_hwfn->p_rdma_info = p_rdma_info; p_hwfn->p_rdma_info = p_rdma_info;
return 0;
}
/* Free the per-hwfn rdma_info structure and clear the pointer so any stale
 * access fails fast. kfree(NULL) is a no-op, so calling this when allocation
 * never happened is safe.
 */
void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_rdma_info);
p_hwfn->p_rdma_info = NULL;
}
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_cons, num_tasks;
int rc = -ENOMEM;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) if (QED_IS_IWARP_PERSONALITY(p_hwfn))
p_rdma_info->proto = PROTOCOLID_IWARP; p_rdma_info->proto = PROTOCOLID_IWARP;
else else
...@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, ...@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate a struct with device params and fill it */ /* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
if (!p_rdma_info->dev) if (!p_rdma_info->dev)
goto free_rdma_info; return rc;
/* Allocate a struct with port params and fill it */ /* Allocate a struct with port params and fill it */
p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
...@@ -298,8 +310,6 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, ...@@ -298,8 +310,6 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
kfree(p_rdma_info->port); kfree(p_rdma_info->port);
free_rdma_dev: free_rdma_dev:
kfree(p_rdma_info->dev); kfree(p_rdma_info->dev);
free_rdma_info:
kfree(p_rdma_info);
return rc; return rc;
} }
...@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) ...@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
kfree(p_rdma_info->port); kfree(p_rdma_info->port);
kfree(p_rdma_info->dev); kfree(p_rdma_info->dev);
kfree(p_rdma_info);
} }
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
...@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, ...@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
spin_lock_init(&p_hwfn->p_rdma_info->lock);
qed_rdma_init_devinfo(p_hwfn, params); qed_rdma_init_devinfo(p_hwfn, params);
qed_rdma_init_port(p_hwfn); qed_rdma_init_port(p_hwfn);
qed_rdma_init_events(p_hwfn, params); qed_rdma_init_events(p_hwfn, params);
...@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt) ...@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
/* Disable RoCE search */ /* Disable RoCE search */
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
p_hwfn->b_rdma_enabled_in_prs = false; p_hwfn->b_rdma_enabled_in_prs = false;
p_hwfn->p_rdma_info->active = 0;
qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
...@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt, ...@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
u8 max_stats_queues; u8 max_stats_queues;
int rc; int rc;
if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) {
DP_ERR(p_hwfn->cdev, DP_ERR(p_hwfn->cdev,
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params); rdma_cxt, in_params, out_params);
...@@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) ...@@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{ {
bool result; bool result;
/* if rdma info has not been allocated, naturally there are no qps */ /* if rdma wasn't activated yet, naturally there are no qps */
if (!p_hwfn->p_rdma_info) if (!p_hwfn->p_rdma_info->active)
return false; return false;
spin_lock_bh(&p_hwfn->p_rdma_info->lock); spin_lock_bh(&p_hwfn->p_rdma_info->lock);
...@@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt, ...@@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
if (!p_ptt) if (!p_ptt)
goto err; goto err;
rc = qed_rdma_alloc(p_hwfn, p_ptt, params); rc = qed_rdma_alloc(p_hwfn);
if (rc) if (rc)
goto err1; goto err1;
...@@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt, ...@@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
goto err2; goto err2;
qed_ptt_release(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt);
p_hwfn->p_rdma_info->active = 1;
return rc; return rc;
......
...@@ -102,6 +102,7 @@ struct qed_rdma_info { ...@@ -102,6 +102,7 @@ struct qed_rdma_info {
u16 max_queue_zones; u16 max_queue_zones;
enum protocol_type proto; enum protocol_type proto;
struct qed_iwarp_info iwarp; struct qed_iwarp_info iwarp;
u8 active:1;
}; };
struct qed_rdma_qp { struct qed_rdma_qp {
...@@ -176,10 +177,14 @@ struct qed_rdma_qp { ...@@ -176,10 +177,14 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA) #if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else #else
static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) {} struct qed_ptt *p_ptt) {}
static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL}
static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif #endif
int int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment