Commit 6c9e80ea authored by Michal Kalderon, committed by David S. Miller

qed: SPQ async callback registration

Whenever the firmware raises an async indication that needs handling, a
switch-case invokes the right functionality based on the function's
personality and the event information.

Before iWARP is added [as yet another client], switch the SPQ over to a
callback-registration mechanism, allowing the relevant event-processing
logic to be registered based on the function's personality. This allows
us to tidy the code by removing protocol-specifics from a common file.
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 898fff12
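
The diff below replaces the hard-coded switch in qed_async_event_completion() with a per-protocol callback table owned by the SPQ: each protocol client registers a handler at setup and unregisters it at teardown, and the common event-queue completion path dispatches by protocol ID with a table lookup. As a minimal, self-contained sketch of that pattern — standalone C with simplified types and illustrative names, not the driver code itself:

	#include <stdio.h>

	#define MAX_PROTOCOL_TYPE 4	/* illustrative bound, not the qed value */

	enum protocol_type { PROTO_COMMON, PROTO_ISCSI, PROTO_ROCE, PROTO_ETH };

	/* Callback shape mirroring qed_spq_async_comp_cb, with simplified args */
	typedef int (*async_comp_cb)(void *ctx, unsigned char opcode,
				     unsigned short echo, void *data,
				     unsigned char fw_return_code);

	/* One slot per protocol, like the async_comp_cb[] array added to qed_spq */
	static async_comp_cb comp_cb[MAX_PROTOCOL_TYPE];

	static int register_async_cb(enum protocol_type id, async_comp_cb cb)
	{
		if (id >= MAX_PROTOCOL_TYPE)
			return -1;
		comp_cb[id] = cb;
		return 0;
	}

	static void unregister_async_cb(enum protocol_type id)
	{
		if (id < MAX_PROTOCOL_TYPE)
			comp_cb[id] = NULL;
	}

	/* Common dispatch path: no protocol-specific code, just a table lookup */
	static int async_event_completion(enum protocol_type id,
					  unsigned char opcode,
					  unsigned short echo, void *data,
					  unsigned char fw_ret)
	{
		if (id >= MAX_PROTOCOL_TYPE || !comp_cb[id])
			return -1;	/* unknown async completion */
		return comp_cb[id](NULL, opcode, echo, data, fw_ret);
	}

	static int iscsi_handler(void *ctx, unsigned char opcode,
				 unsigned short echo, void *data,
				 unsigned char fw_ret)
	{
		printf("iSCSI async event, opcode %u\n", (unsigned)opcode);
		return 0;
	}

	int main(void)
	{
		register_async_cb(PROTO_ISCSI, iscsi_handler);	/* func-start time */
		async_event_completion(PROTO_ISCSI, 7, 0, NULL, 0);
		unregister_async_cb(PROTO_ISCSI);		/* func-stop time */
		return 0;
	}
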
@@ -62,6 +62,22 @@
 #include "qed_sriov.h"
 #include "qed_reg_addr.h"
 
+static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+		      u8 fw_event_code,
+		      u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+	if (p_hwfn->p_iscsi_info->event_cb) {
+		struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+		return p_iscsi->event_cb(p_iscsi->event_context,
+					 fw_event_code, data);
+	} else {
+		DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+		return -EINVAL;
+	}
+}
+
 struct qed_iscsi_conn {
 	struct list_head list_entry;
 	bool free_on_delete;
@@ -265,6 +281,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_hwfn->p_iscsi_info->event_context = event_context;
 	p_hwfn->p_iscsi_info->event_cb = async_event_cb;
 
+	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+				  qed_iscsi_async_event);
+
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -631,7 +650,10 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
 	p_ramrod = &p_ent->ramrod.iscsi_destroy;
 	p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
 
-	return qed_spq_post(p_hwfn, p_ent, NULL);
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+	return rc;
 }
 
 static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
...
@@ -68,12 +68,14 @@
 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
 
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
-			  u8 fw_event_code, union rdma_eqe_data *rdma_data)
+static int
+qed_roce_async_event(struct qed_hwfn *p_hwfn,
+		     u8 fw_event_code,
+		     u16 echo, union event_ring_data *data, u8 fw_return_code)
 {
 	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
 		u16 icid =
-		    (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+		    (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
 
 		/* icid release in this async event can occur only if the icid
 		 * was offloaded to the FW. In case it wasn't offloaded this is
@@ -85,8 +87,10 @@ void qed_roce_async_event(struct qed_hwfn *p_hwfn,
 		events->affiliated_event(p_hwfn->p_rdma_info->events.context,
 					 fw_event_code,
-					 &rdma_data->async_handle);
+					 (void *)&data->rdma_data.async_handle);
 	}
+
+	return 0;
 }
 
 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -686,6 +690,9 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
+	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
+				  qed_roce_async_event);
+
 	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
 }
@@ -706,6 +713,7 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
 			break;
 		}
 	}
+	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 }
 
 static int qed_rdma_stop(void *rdma_cxt)
...
@@ -169,16 +169,10 @@ struct qed_rdma_qp {
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
-			  u8 fw_event_code, union rdma_eqe_data *rdma_data);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
 static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn,
 				     struct qed_ptt *p_ptt) {}
-static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
-					u8 fw_event_code,
-					union rdma_eqe_data *rdma_data) {}
 #endif
 
 #endif
...
@@ -174,6 +174,22 @@ struct qed_consq {
 	struct qed_chain chain;
 };
 
+typedef int
+(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn,
+			 u8 opcode,
+			 u16 echo,
+			 union event_ring_data *data,
+			 u8 fw_return_code);
+
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+			  enum protocol_type protocol_id,
+			  qed_spq_async_comp_cb cb);
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+			    enum protocol_type protocol_id);
+
 struct qed_spq {
 	spinlock_t lock; /* SPQ lock */
@@ -203,6 +219,7 @@ struct qed_spq {
 	u32 comp_count;
 
 	u32 cid;
+	qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
 };
 
 /**
...
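
The two hunks above define the whole client-side contract: a callback type carrying the EQE fields (opcode, echo, data, FW return code), a register/unregister pair keyed by protocol ID, and one slot per protocol in struct qed_spq. For reference, the calls a client makes — shown here out of diff context, using the iSCSI registration from the hunks in this commit (RoCE and SR-IOV follow the same shape):

	/* At function start (qed_sp_iscsi_func_start): */
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
				  qed_iscsi_async_event);

	/* At function stop (qed_sp_iscsi_func_stop): */
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
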
@@ -302,32 +302,16 @@ static int
 qed_async_event_completion(struct qed_hwfn *p_hwfn,
 			   struct event_ring_entry *p_eqe)
 {
-	switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_QED_RDMA)
-	case PROTOCOLID_ROCE:
-		qed_roce_async_event(p_hwfn, p_eqe->opcode,
-				     &p_eqe->data.rdma_data);
-		return 0;
-#endif
-	case PROTOCOLID_COMMON:
-		return qed_sriov_eqe_event(p_hwfn,
-					   p_eqe->opcode,
-					   p_eqe->echo, &p_eqe->data);
-	case PROTOCOLID_ISCSI:
-		if (!IS_ENABLED(CONFIG_QED_ISCSI))
-			return -EINVAL;
-
-		if (p_hwfn->p_iscsi_info->event_cb) {
-			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
-			return p_iscsi->event_cb(p_iscsi->event_context,
-						 p_eqe->opcode, &p_eqe->data);
-		} else {
-			DP_NOTICE(p_hwfn,
-				  "iSCSI async completion is not set\n");
-			return -EINVAL;
-		}
-	default:
+	qed_spq_async_comp_cb cb;
+
+	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+		return -EINVAL;
+
+	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+	if (cb) {
+		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+			  &p_eqe->data, p_eqe->fw_return_code);
+	} else {
 		DP_NOTICE(p_hwfn,
 			  "Unknown Async completion for protocol: %d\n",
 			  p_eqe->protocol_id);
@@ -335,6 +319,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 	}
 }
 
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+			  enum protocol_type protocol_id,
+			  qed_spq_async_comp_cb cb)
+{
+	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+		return -EINVAL;
+
+	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+	return 0;
+}
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+			    enum protocol_type protocol_id)
+{
+	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+		return;
+
+	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
+}
+
 /***************************************************************************
 * EQ API
 ***************************************************************************/
...
@@ -44,6 +44,11 @@
 #include "qed_sp.h"
 #include "qed_sriov.h"
 #include "qed_vf.h"
 
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+			       u8 opcode,
+			       __le16 echo,
+			       union event_ring_data *data, u8 fw_return_code);
+
 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
 {
@@ -565,6 +570,9 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
 	p_hwfn->pf_iov_info = p_sriov;
 
+	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+				  qed_sriov_eqe_event);
+
 	return qed_iov_allocate_vfdb(p_hwfn);
 }
@@ -578,6 +586,8 @@ void qed_iov_setup(struct qed_hwfn *p_hwfn)
 void qed_iov_free(struct qed_hwfn *p_hwfn)
 {
+	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
 		qed_iov_free_vfdb(p_hwfn);
 		kfree(p_hwfn->pf_iov_info);
@@ -3833,8 +3843,10 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
 	}
 }
 
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-			u8 opcode, __le16 echo, union event_ring_data *data)
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+			       u8 opcode,
+			       __le16 echo,
+			       union event_ring_data *data, u8 fw_return_code)
 {
 	switch (opcode) {
 	case COMMON_EVENT_VF_PF_CHANNEL:
...
@@ -343,17 +343,6 @@ void qed_iov_free(struct qed_hwfn *p_hwfn);
  */
 void qed_iov_free_hw_info(struct qed_dev *cdev);
 
-/**
- * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-			u8 opcode, __le16 echo, union event_ring_data *data);
-
 /**
  * @brief Mark structs of vfs that have been FLR-ed.
  *
@@ -418,13 +407,6 @@ static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
 {
 }
 
-static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-				      u8 opcode,
-				      __le16 echo, union event_ring_data *data)
-{
-	return -EINVAL;
-}
-
 static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
 				       u32 *disabled_vfs)
 {
...