Commit fd1fc79d authored by Ariel Elior's avatar Ariel Elior Committed by David S. Miller

bnx2x: Infrastructure for VF <-> PF request on PF side

Support an interrupt from the device which indicates the VF has placed
a request on the VF <-> PF channel.
The PF driver issues a DMAE to retrieve the request from the VM
memory (the Guest Physical Address of the request is contained
in the interrupt. The PF driver uses the GPA in the DMAE request,
which is translated by the IOMMU to the correct physical address).
The request which arrives is examined to recognize the sending VF.
The PF driver allocates a workitem to handle the VF Operation (vfop).
Signed-off-by: default avatarAriel Elior <ariele@broadcom.com>
Signed-off-by: default avatarEilon Greenstein <eilong@broadcom.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent b56e9670
...@@ -1379,6 +1379,7 @@ struct bnx2x { ...@@ -1379,6 +1379,7 @@ struct bnx2x {
int mrrs; int mrrs;
struct delayed_work sp_task; struct delayed_work sp_task;
atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task; struct delayed_work sp_rtnl_task;
struct delayed_work period_task; struct delayed_work period_task;
...@@ -1870,6 +1871,11 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); ...@@ -1870,6 +1871,11 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type); bool with_comp, u8 comp_type);
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type);
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
u8 bnx2x_is_pcie_pending(struct pci_dev *dev); u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
void bnx2x_calc_fc_adv(struct bnx2x *bp); void bnx2x_calc_fc_adv(struct bnx2x *bp);
......
...@@ -518,6 +518,16 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) ...@@ -518,6 +518,16 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
} }
} }
/* Tear down PF-side SRIOV state on device removal. */
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* the vf database only exists when SRIOV is enabled */
	if (IS_SRIOV(bp))
		__bnx2x_iov_free_vfdb(bp);
}
void bnx2x_iov_free_mem(struct bnx2x *bp) void bnx2x_iov_free_mem(struct bnx2x *bp)
{ {
int i; int i;
...@@ -692,12 +702,241 @@ int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) ...@@ -692,12 +702,241 @@ int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
return line + i; return line + i;
} }
/* Report whether @cid falls inside the connection-id window reserved for
 * VF queues: [BNX2X_FIRST_VF_CID, BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS).
 */
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return cid >= BNX2X_FIRST_VF_CID &&
	       cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS;
}
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
struct bnx2x_vf_queue *vfq,
union event_ring_elem *elem)
{
unsigned long ramrod_flags = 0;
int rc = 0;
/* Always push next commands out, don't wait here */
set_bit(RAMROD_CONT, &ramrod_flags);
switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
&ramrod_flags);
break;
case BNX2X_FILTER_VLAN_PENDING:
rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
&ramrod_flags);
break;
default:
BNX2X_ERR("Unsupported classification command: %d\n",
elem->message.data.eth_event.echo);
return;
}
if (rc < 0)
BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
else if (rc > 0)
DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
/* Completion handler for a VF multicast ramrod: clear the pending state
 * on the VF's mcast object and, if more commands are queued on it, push
 * the next batch out.
 */
static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;

	/* the completed command is no longer pending */
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* nothing further queued on this object - done */
	if (!vf->mcast_obj.check_pending(&vf->mcast_obj))
		return;

	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	if (rc < 0)
		BNX2X_ERR("Failed to send pending mcast commands: %d\n",
			  rc);
}
/* Completion handler for a VF rx-mode (filters) ramrod: clear the
 * rx-mode-pending flag in the VF's filter state. The barriers order the
 * clear_bit against surrounding accesses — presumably another context
 * polls this bit; confirm against the rx-mode request path.
 */
static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
/* PF-side dispatch of event-queue completions that belong to VFs.
 *
 * Returns 1 when the event is not handled here (SRIOV disabled, opcode
 * not of interest, or cid outside the VF range) and 0 when it was
 * consumed on behalf of a VF.
 */
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;	/* stays 0xffff for FLR/MALICIOUS events */

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;
	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		/* for filtering events the cid rides in the echo field */
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		/* FLR/MALICIOUS events carry the abs_vfid directly, so
		 * skip the cid decoding below
		 */
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	/* dispatch per opcode to the matching completion handler */
	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}

	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
/* Resolve the VF that owns @vf_cid. The upper bits of a VF cid hold the
 * true abs_vfid (at most 64 VFs per path), so mask it out and look the
 * VF up by its absolute id.
 */
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) &
		       (BNX2X_MAX_NUM_OF_VFS - 1);

	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
/* Hand back the queue slow-path object for a VF connection id.
 * *q_obj is left untouched when SRIOV is off or no VF matches the cid.
 */
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (!vf) {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
		return;
	}

	/* the low BNX2X_VF_CID_WND bits of the cid select the VF queue */
	*q_obj = &bnx2x_vfq(vf, vf_cid & ((1 << BNX2X_VF_CID_WND) - 1),
			    sp_obj);
}
/* Mark a slow-path operation as in progress for the VF owning @vf_cid,
 * optionally kicking the PF slow-path task to process it.
 */
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* only cids inside the VF window are of interest */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (!vf)
		return;

	/* flag the VF so bnx2x_iov_sp_task() advances its state machine */
	atomic_set(&vf->op_in_progress, 1);
	if (queue_work)
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}
/* PF slow-path worker: advance the state machine of every VF that has a
 * slow-path operation marked 'in progress'.
 */
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* skip VFs with nothing queued or nothing in progress */
		if (list_empty(&vf->op_list_head) ||
		    !atomic_read(&vf->op_in_progress))
			continue;

		DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
		bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
	}
}
...@@ -83,6 +83,84 @@ union bnx2x_vfop_params { ...@@ -83,6 +83,84 @@ union bnx2x_vfop_params {
/* forward */ /* forward */
struct bnx2x_virtf; struct bnx2x_virtf;
/* VFOP definitions */

/* a single state-transition callback of a VF operation */
typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);

/* VFOP queue filters command additional arguments */
struct bnx2x_vfop_filter {
	struct list_head link;
	int type;	/* one of the BNX2X_VFOP_FILTER_* values below */
#define BNX2X_VFOP_FILTER_MAC	1
#define BNX2X_VFOP_FILTER_VLAN	2
	bool add;	/* true - add the filter; false - remove it */
	u8 *mac;
	u16 vid;
};

/* a counted list of queue filters */
struct bnx2x_vfop_filters {
	int add_cnt;
	struct list_head head;
	struct bnx2x_vfop_filter filters[];	/* flexible array member */
};

/* transient list allocated, built and saved until its
 * passed to the SP-VERBs layer.
 */
struct bnx2x_vfop_args_mcast {
	int mc_num;
	struct bnx2x_mcast_list_elem *mc;
};

/* queue constructor (qctor) arguments */
struct bnx2x_vfop_args_qctor {
	int qid;
	u16 sb_idx;
};

/* queue destructor (qdtor) arguments */
struct bnx2x_vfop_args_qdtor {
	int qid;
	struct eth_context *cxt;
};

/* default-vlan configuration arguments */
struct bnx2x_vfop_args_defvlan {
	int qid;
	bool enable;
	u16 vid;
	u8 prio;
};

/* generic per-queue enable/add arguments */
struct bnx2x_vfop_args_qx {
	int qid;
	bool en_add;
};

struct bnx2x_vfop_args_filters {
	struct bnx2x_vfop_filters *multi_filter;
	atomic_t *credit;	/* non NULL means 'don't consume credit' */
};

/* per-operation argument storage - one member per vfop flavor */
union bnx2x_vfop_args {
	struct bnx2x_vfop_args_mcast	mc_list;
	struct bnx2x_vfop_args_qctor	qctor;
	struct bnx2x_vfop_args_qdtor	qdtor;
	struct bnx2x_vfop_args_defvlan	defvlan;
	struct bnx2x_vfop_args_qx	qx;
	struct bnx2x_vfop_args_filters	filters;
};

/* a VF operation: one state-machine instance on the VF's op list */
struct bnx2x_vfop {
	struct list_head link;
	int			rc;	/* return code */
	int			state;	/* next state */
	union bnx2x_vfop_args	args;	/* extra arguments */
	union bnx2x_vfop_params *op_p;	/* ramrod params */

	/* state machine callbacks */
	vfop_handler_t transition;
	vfop_handler_t done;
};
/* vf context */ /* vf context */
struct bnx2x_virtf { struct bnx2x_virtf {
u16 cfg_flags; u16 cfg_flags;
...@@ -281,6 +359,12 @@ struct bnx2x_vfdb { ...@@ -281,6 +359,12 @@ struct bnx2x_vfdb {
u32 flrd_vfs[FLRD_VFS_DWORDS]; u32 flrd_vfs[FLRD_VFS_DWORDS];
}; };
/* queue access: return the VF's queue structure at @index */
static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
{
	return vf->vfqs + index;
}
static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
{ {
return vf->igu_base_id + sb_idx; return vf->igu_base_id + sb_idx;
...@@ -295,7 +379,22 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp); ...@@ -295,7 +379,22 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp);
int bnx2x_iov_nic_init(struct bnx2x *bp); int bnx2x_iov_nic_init(struct bnx2x *bp);
void bnx2x_iov_init_dq(struct bnx2x *bp); void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp); void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_sp_task(struct bnx2x *bp);
/* global vf mailbox routines */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
/* Return the vfop at the head of the VF's pending-operations list.
 * Caller must hold vf->op_mutex and the list must not be empty; both
 * preconditions are only WARNed on (debug failsafe), not enforced.
 */
static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
						struct bnx2x_virtf *vf)
{
	WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
	WARN_ON(list_empty(&vf->op_list_head));
	return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
/* VF FLR helpers */ /* VF FLR helpers */
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
...@@ -305,4 +404,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, ...@@ -305,4 +404,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length); u16 type, u16 length);
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
bool bnx2x_tlv_supported(u16 tlvtype);
#endif /* bnx2x_sriov.h */ #endif /* bnx2x_sriov.h */
...@@ -79,6 +79,24 @@ void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) ...@@ -79,6 +79,24 @@ void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
tlv->type, tlv->length); tlv->type, tlv->length);
} }
/* test whether we support a tlv type: anything strictly between the
 * NONE and MAX sentinels of the channel tlv enum is valid
 */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}
/* Map a kernel error code onto a PF<->VF channel status code. */
static inline int bnx2x_pfvf_status_codes(int rc)
{
	if (rc == 0)
		return PFVF_STATUS_SUCCESS;
	if (rc == -ENOMEM)
		return PFVF_STATUS_NO_RESOURCE;
	return PFVF_STATUS_FAILURE;
}
/* General service functions */ /* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{ {
...@@ -116,3 +134,139 @@ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) ...@@ -116,3 +134,139 @@ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
/* enable the VF access to the mailbox */ /* enable the VF access to the mailbox */
bnx2x_vf_enable_access(bp, abs_vfid); bnx2x_vf_enable_access(bp, abs_vfid);
} }
/* this works only on !E1h */
/* Copy @len32 dwords between PF memory and VF memory over DMAE.
 * @from_vf:   direction - true copies VF -> PF, false copies PF -> VF
 * @pf_addr:   DMA address on the PF side
 * @vfid:      absolute VF id, stamped into the DMAE opcode so the
 *             VF-side (guest physical) address is translated by the
 *             IOMMU (see the VF<->PF channel design)
 * @vf_addr_hi/@vf_addr_lo: VF-side address split into 32-bit halves
 * @len32:     transfer length in 32-bit words
 * Returns 0 on success or a DMAE error code (DMAE_NOT_RDY when the
 * chip has no VF support or DMAE is not ready).
 */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	/* VFs only exist on newer chip revisions */
	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		/* VF is the source, PF memory is the destination */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		/* PF memory is the source, VF is the destination */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;
	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* known tlv: per-opcode request handlers dispatch from
		 * here; none are implemented yet, so this is a no-op
		 */
		return;
	}

	/* unknown TLV - this may belong to a VF driver from the future
	 * - a version written after this PF driver was written, which
	 * supports features unknown as of yet. Too bad since we don't
	 * support them. Or this may be because someone wrote a crappy
	 * VF driver and is sending garbage over the channel.
	 */
	BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
	for (i = 0; i < 20; i++)
		DP_CONT(BNX2X_MSG_IOV, "%x ",
			mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
}
/* handle new vf-pf message */
/* Entry point for a VF->PF mailbox interrupt: validate the originating
 * VF, DMAE the request out of VF (guest) memory into the PF-side
 * mailbox buffer and dispatch it.
 * @vfpf_event: interrupt payload carrying the VF id and the guest
 *              physical address of the request, split hi/lo.
 */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
	struct bnx2x_virtf *vf;
	struct bnx2x_vf_mbx *mbx;
	u8 vf_idx;
	int rc;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks consider removing later */

	/* check if the vf_id is valid: valid relative indices are
	 * 0 .. BNX2X_NR_VIRTFN(bp) - 1, so reject '>=' (using '>' would
	 * let the one-past-the-end index through)
	 */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >=
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		goto mbx_done;
	}
	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
	mbx = BP_VF_MBX(bp, vf_idx);

	/* verify an event is not currently being processed -
	 * debug failsafe only
	 */
	if (mbx->flags & VF_MSG_INPROCESS) {
		BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
			  vfpf_event->vf_id);
		goto mbx_done;
	}
	vf = BP_VF(bp, vf_idx);

	/* save the VF message address */
	mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
	mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* dmae to get the VF request */
	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
				  mbx->vf_addr_hi, mbx->vf_addr_lo,
				  sizeof(union vfpf_tlvs)/4);
	if (rc) {
		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
		goto mbx_error;
	}

	/* process the VF message header */
	mbx->first_tlv = mbx->msg->req.first_tlv;

	/* dispatch the request (will prepare the response) */
	bnx2x_vf_mbx_request(bp, vf, mbx);
	goto mbx_done;

mbx_error:
mbx_done:
	return;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment