Commit 2dc33bbc authored by Yuval Mintz's avatar Yuval Mintz Committed by David S. Miller

bnx2x: Remove the sriov VFOP mechanism

Since we now possess a workqueue dedicated for sriov, the paradigm that sriov-
related tasks cannot sleep is no longer correct.

The VFOP mechanism was the one previously supporting said paradigm - the sriov
related tasks were broken into segments which did not require sleep, and the
mechanism re-scheduled the next segment whenever possible.

This patch removes the VFOP mechanism altogether - the resulting code is much
easier to follow; the segments are gathered into straightforward
functions which sleep whenever necessary.
Signed-off-by: default avatarYuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: default avatarAriel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 370d4a26
......@@ -1414,7 +1414,6 @@ enum sp_rtnl_flag {
enum bnx2x_iov_flag {
BNX2X_IOV_HANDLE_VF_MSG,
BNX2X_IOV_CONT_VFOP,
BNX2X_IOV_HANDLE_FLR,
};
......
......@@ -1857,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
/* SRIOV: reschedule any 'in_progress' operations */
bnx2x_iov_sp_event(bp, cid);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
......
......@@ -117,87 +117,7 @@ static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
return true;
}
/* VFOP - VF slow-path operation support */
#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
BNX2X_VFOP_QCTOR_INIT,
BNX2X_VFOP_QCTOR_SETUP,
BNX2X_VFOP_QCTOR_INT_EN
};
enum bnx2x_vfop_qdtor_state {
BNX2X_VFOP_QDTOR_HALT,
BNX2X_VFOP_QDTOR_TERMINATE,
BNX2X_VFOP_QDTOR_CFCDEL,
BNX2X_VFOP_QDTOR_DONE
};
enum bnx2x_vfop_vlan_mac_state {
BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
BNX2X_VFOP_VLAN_MAC_CLEAR,
BNX2X_VFOP_VLAN_MAC_CHK_DONE,
BNX2X_VFOP_MAC_CONFIG_LIST,
BNX2X_VFOP_VLAN_CONFIG_LIST,
BNX2X_VFOP_VLAN_CONFIG_LIST_0
};
enum bnx2x_vfop_qsetup_state {
BNX2X_VFOP_QSETUP_CTOR,
BNX2X_VFOP_QSETUP_VLAN0,
BNX2X_VFOP_QSETUP_DONE
};
enum bnx2x_vfop_mcast_state {
BNX2X_VFOP_MCAST_DEL,
BNX2X_VFOP_MCAST_ADD,
BNX2X_VFOP_MCAST_CHK_DONE
};
enum bnx2x_vfop_qflr_state {
BNX2X_VFOP_QFLR_CLR_VLAN,
BNX2X_VFOP_QFLR_CLR_MAC,
BNX2X_VFOP_QFLR_TERMINATE,
BNX2X_VFOP_QFLR_DONE
};
enum bnx2x_vfop_flr_state {
BNX2X_VFOP_FLR_QUEUES,
BNX2X_VFOP_FLR_HW
};
enum bnx2x_vfop_close_state {
BNX2X_VFOP_CLOSE_QUEUES,
BNX2X_VFOP_CLOSE_HW
};
enum bnx2x_vfop_rxmode_state {
BNX2X_VFOP_RXMODE_CONFIG,
BNX2X_VFOP_RXMODE_DONE
};
enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_RXMODE,
BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
BNX2X_VFOP_QTEARDOWN_CLR_MAC,
BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
BNX2X_VFOP_QTEARDOWN_QDTOR,
BNX2X_VFOP_QTEARDOWN_DONE
};
enum bnx2x_vfop_rss_state {
BNX2X_VFOP_RSS_CONFIG,
BNX2X_VFOP_RSS_DONE
};
enum bnx2x_vfop_tpa_state {
BNX2X_VFOP_TPA_CONFIG,
BNX2X_VFOP_TPA_DONE
};
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
......@@ -241,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
struct bnx2x_vfop_qctor_params *p,
struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type)
{
struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
......@@ -310,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
}
}
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
static int bnx2x_vf_queue_create(struct bnx2x *bp,
struct bnx2x_virtf *vf, int qid,
struct bnx2x_vf_queue_construct_params *qctor)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
enum bnx2x_vfop_qctor_state state = vfop->state;
bnx2x_vfop_reset_wq(vf);
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
switch (state) {
case BNX2X_VFOP_QCTOR_INIT:
/* has this queue already been opened? */
if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
BNX2X_Q_LOGICAL_STATE_ACTIVE) {
DP(BNX2X_MSG_IOV,
"Entered qctor but queue was already up. Aborting gracefully\n");
goto op_done;
}
/* next state */
vfop->state = BNX2X_VFOP_QCTOR_SETUP;
q_params->cmd = BNX2X_Q_CMD_INIT;
vfop->rc = bnx2x_queue_state_change(bp, q_params);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QCTOR_SETUP:
/* next state */
vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
/* copy pre-prepared setup params to the queue-state params */
vfop->op_p->qctor.qstate.params.setup =
vfop->op_p->qctor.prep_qsetup;
q_params->cmd = BNX2X_Q_CMD_SETUP;
vfop->rc = bnx2x_queue_state_change(bp, q_params);
struct bnx2x_queue_state_params *q_params;
int rc = 0;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
case BNX2X_VFOP_QCTOR_INT_EN:
/* Prepare ramrod information */
q_params = &qctor->qstate;
q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
/* enable interrupts */
bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
USTORM_ID, 0, IGU_INT_ENABLE, 0);
goto op_done;
default:
bnx2x_vfop_default(state);
if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
BNX2X_Q_LOGICAL_STATE_ACTIVE) {
DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
goto out;
}
op_err:
BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
bnx2x_vfop_end(bp, vf, vfop);
op_pending:
return;
}
static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
/* Run Queue 'construction' ramrods */
q_params->cmd = BNX2X_Q_CMD_INIT;
rc = bnx2x_queue_state_change(bp, q_params);
if (rc)
goto out;
vfop->args.qctor.qid = qid;
vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
memcpy(&q_params->params.setup, &qctor->prep_qsetup,
sizeof(struct bnx2x_queue_setup_params));
q_params->cmd = BNX2X_Q_CMD_SETUP;
rc = bnx2x_queue_state_change(bp, q_params);
if (rc)
goto out;
bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
bnx2x_vfop_qctor, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
cmd->block);
}
return -ENOMEM;
/* enable interrupts */
bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
return rc;
}
/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
enum bnx2x_vfop_qdtor_state state = vfop->state;
bnx2x_vfop_reset_wq(vf);
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
switch (state) {
case BNX2X_VFOP_QDTOR_HALT:
/* has this queue already been stopped? */
if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED) {
DP(BNX2X_MSG_IOV,
"Entered qdtor but queue was already stopped. Aborting gracefully\n");
/* next state */
vfop->state = BNX2X_VFOP_QDTOR_DONE;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
}
/* next state */
vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
q_params->cmd = BNX2X_Q_CMD_HALT;
vfop->rc = bnx2x_queue_state_change(bp, q_params);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QDTOR_TERMINATE:
/* next state */
vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
q_params->cmd = BNX2X_Q_CMD_TERMINATE;
vfop->rc = bnx2x_queue_state_change(bp, q_params);
enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
BNX2X_Q_CMD_TERMINATE,
BNX2X_Q_CMD_CFC_DEL};
struct bnx2x_queue_state_params q_params;
int rc, i;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
case BNX2X_VFOP_QDTOR_CFCDEL:
/* next state */
vfop->state = BNX2X_VFOP_QDTOR_DONE;
/* Prepare ramrod information */
memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
vfop->rc = bnx2x_queue_state_change(bp, q_params);
if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED) {
DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
goto out;
}
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
case BNX2X_VFOP_QDTOR_DONE:
/* invalidate the context */
if (qdtor->cxt) {
qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
/* Run Queue 'destruction' ramrods */
for (i = 0; i < ARRAY_SIZE(cmds); i++) {
q_params.cmd = cmds[i];
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
return rc;
}
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
}
op_pending:
return;
}
static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_queue_state_params *qstate =
&vf->op_params.qctor.qstate;
memset(qstate, 0, sizeof(*qstate));
qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
vfop->args.qdtor.qid = qid;
vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
bnx2x_vfop_qdtor, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
} else {
BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
return -ENOMEM;
out:
/* Clean Context */
if (bnx2x_vfq(vf, qid, cxt)) {
bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
}
return 0;
}
static void
......@@ -516,731 +330,291 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
struct bnx2x_vfop *vfop,
struct bnx2x_vlan_mac_obj *obj)
static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *obj,
atomic_t *counter)
{
struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
/* update credit only if there is no error
* and a valid credit counter
*/
if (!vfop->rc && args->credit) {
struct list_head *pos;
int read_lock;
int cnt = 0;
struct list_head *pos;
int read_lock;
int cnt = 0;
read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
if (read_lock)
DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
if (read_lock)
DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
list_for_each(pos, &obj->head)
cnt++;
list_for_each(pos, &obj->head)
cnt++;
if (!read_lock)
bnx2x_vlan_mac_h_read_unlock(bp, obj);
if (!read_lock)
bnx2x_vlan_mac_h_read_unlock(bp, obj);
atomic_set(args->credit, cnt);
}
atomic_set(counter, cnt);
}
static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
struct bnx2x_vfop_filter *pos,
struct bnx2x_vlan_mac_data *user_req)
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, bool drv_only, bool mac)
{
user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL;
switch (pos->type) {
case BNX2X_VFOP_FILTER_MAC:
memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
break;
case BNX2X_VFOP_FILTER_VLAN:
user_req->u.vlan.vlan = pos->vid;
break;
default:
BNX2X_ERR("Invalid filter type, skipping\n");
return 1;
}
return 0;
}
static int bnx2x_vfop_config_list(struct bnx2x *bp,
struct bnx2x_vfop_filters *filters,
struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
struct bnx2x_vfop_filter *pos, *tmp;
struct list_head rollback_list, *filters_list = &filters->head;
struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
int rc = 0, cnt = 0;
INIT_LIST_HEAD(&rollback_list);
list_for_each_entry_safe(pos, tmp, filters_list, link) {
if (bnx2x_vfop_set_user_req(bp, pos, user_req))
continue;
struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc >= 0) {
cnt += pos->add ? 1 : -1;
list_move(&pos->link, &rollback_list);
rc = 0;
} else if (rc == -EEXIST) {
rc = 0;
} else {
BNX2X_ERR("Failed to add a new vlan_mac command\n");
break;
}
}
DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
mac ? "MACs" : "VLANs");
/* rollback if error or too many rules added */
if (rc || cnt > filters->add_cnt) {
BNX2X_ERR("error or too many rules added. Performing rollback\n");
list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
pos->add = !pos->add; /* reverse op */
bnx2x_vfop_set_user_req(bp, pos, user_req);
bnx2x_config_vlan_mac(bp, vlan_mac);
list_del(&pos->link);
}
cnt = 0;
if (!rc)
rc = -EINVAL;
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
if (mac) {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
} else {
set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
}
filters->add_cnt = cnt;
return rc;
}
/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
enum bnx2x_vfop_vlan_mac_state state = vfop->state;
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
bnx2x_vfop_reset_wq(vf);
switch (state) {
case BNX2X_VFOP_VLAN_MAC_CLEAR:
/* next state */
vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
/* do delete */
vfop->rc = obj->delete_all(bp, obj,
&vlan_mac->user_req.vlan_mac_flags,
&vlan_mac->ramrod_flags);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
/* next state */
vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
/* do config */
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (vfop->rc == -EEXIST)
vfop->rc = 0;
ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
vfop->rc = !!obj->raw.check_pending(&obj->raw);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
case BNX2X_VFOP_MAC_CONFIG_LIST:
/* next state */
vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
/* do list config */
vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
if (vfop->rc)
goto op_err;
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_VLAN_CONFIG_LIST:
/* next state */
vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
/* do list config */
vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
if (!vfop->rc) {
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
}
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
else
set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
default:
bnx2x_vfop_default(state);
/* Start deleting */
rc = ramrod.vlan_mac_obj->delete_all(bp,
ramrod.vlan_mac_obj,
&ramrod.user_req.vlan_mac_flags,
&ramrod.ramrod_flags);
if (rc) {
BNX2X_ERR("Failed to delete all %s\n",
mac ? "MACs" : "VLANs");
return rc;
}
op_err:
BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
kfree(filters);
bnx2x_vfop_credit(bp, vfop, obj);
bnx2x_vfop_end(bp, vf, vfop);
op_pending:
return;
}
struct bnx2x_vfop_vlan_mac_flags {
bool drv_only;
bool dont_consume;
bool single_cmd;
bool add;
};
static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
struct bnx2x_vfop_vlan_mac_flags *flags)
{
struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
memset(ramrod, 0, sizeof(*ramrod));
/* Clear the vlan counters */
if (!mac)
atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
/* ramrod flags */
if (flags->drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
if (flags->single_cmd)
set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
/* mac_vlan flags */
if (flags->dont_consume)
set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
/* cmd */
ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}
static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
struct bnx2x_vfop_vlan_mac_flags *flags)
{
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
return 0;
}
static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, bool drv_only)
static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
struct bnx2x_virtf *vf, int qid,
struct bnx2x_vf_mac_vlan_filter *filter,
bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = NULL, /* single */
.credit = NULL, /* consume credit */
};
struct bnx2x_vfop_vlan_mac_flags flags = {
.drv_only = drv_only,
.dont_consume = (filters.credit != NULL),
.single_cmd = true,
.add = false /* don't care */,
};
struct bnx2x_vlan_mac_ramrod_params *ramrod =
&vf->op_params.vlan_mac;
/* set ramrod params */
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
vfop->args.filters = filters;
struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
bnx2x_vfop_vlan_mac, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
cmd->block);
DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
vf->abs_vfid, filter->add ? "Adding" : "Deleting",
filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
if (filter->type == BNX2X_VF_FILTER_VLAN) {
set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
ramrod.user_req.u.vlan.vlan = filter->vid;
} else {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
}
ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL;
/* Verify there are available vlan credits */
if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
(atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
vf_vlan_rules_cnt(vf))) {
BNX2X_ERR("No credits for vlan\n");
return -ENOMEM;
}
return -ENOMEM;
}
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop;
if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
return -EINVAL;
vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = macs,
.credit = NULL, /* consume credit */
};
struct bnx2x_vfop_vlan_mac_flags flags = {
.drv_only = drv_only,
.dont_consume = (filters.credit != NULL),
.single_cmd = false,
.add = false, /* don't care since only the items in the
* filters list affect the sp operation,
* not the list itself
*/
};
struct bnx2x_vlan_mac_ramrod_params *ramrod =
&vf->op_params.vlan_mac;
/* set ramrod params */
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
vfop->args.filters = filters;
bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
bnx2x_vfop_vlan_mac, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
cmd->block);
set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
else
set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
/* Add/Remove the filter */
rc = bnx2x_config_vlan_mac(bp, &ramrod);
if (rc && rc != -EEXIST) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
"VLAN");
return rc;
}
return -ENOMEM;
}
static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop;
if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
return -EINVAL;
/* Update the vlan counters */
if (filter->type == BNX2X_VF_FILTER_VLAN)
bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
&bnx2x_vfq(vf, qid, vlan_count));
vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = NULL, /* single command */
.credit = &bnx2x_vfq(vf, qid, vlan_count),
};
struct bnx2x_vfop_vlan_mac_flags flags = {
.drv_only = false,
.dont_consume = (filters.credit != NULL),
.single_cmd = true,
.add = add,
};
struct bnx2x_vlan_mac_ramrod_params *ramrod =
&vf->op_params.vlan_mac;
/* set ramrod params */
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
vfop->args.filters = filters;
bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
bnx2x_vfop_vlan_mac, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
cmd->block);
}
return -ENOMEM;
return 0;
}
static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, bool drv_only)
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mac_vlan_filters *filters,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = NULL, /* single command */
.credit = &bnx2x_vfq(vf, qid, vlan_count),
};
struct bnx2x_vfop_vlan_mac_flags flags = {
.drv_only = drv_only,
.dont_consume = (filters.credit != NULL),
.single_cmd = true,
.add = false, /* don't care */
};
struct bnx2x_vlan_mac_ramrod_params *ramrod =
&vf->op_params.vlan_mac;
/* set ramrod params */
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
int rc = 0, i;
/* set object */
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
vfop->args.filters = filters;
bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
bnx2x_vfop_vlan_mac, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
cmd->block);
}
return -ENOMEM;
}
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct bnx2x_vfop_filters *vlans,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop;
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
return -EINVAL;
vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = vlans,
.credit = &bnx2x_vfq(vf, qid, vlan_count),
};
struct bnx2x_vfop_vlan_mac_flags flags = {
.drv_only = drv_only,
.dont_consume = (filters.credit != NULL),
.single_cmd = false,
.add = false, /* don't care */
};
struct bnx2x_vlan_mac_ramrod_params *ramrod =
&vf->op_params.vlan_mac;
/* set ramrod params */
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
atomic_read(filters.credit);
vfop->args.filters = filters;
bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
bnx2x_vfop_vlan_mac, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
cmd->block);
/* Prepare ramrod params */
for (i = 0; i < filters->count; i++) {
rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
&filters->filters[i], drv_only);
if (rc)
break;
}
return -ENOMEM;
}
/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
int qid = vfop->args.qctor.qid;
enum bnx2x_vfop_qsetup_state state = vfop->state;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vfop_qsetup,
.block = false,
};
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
switch (state) {
case BNX2X_VFOP_QSETUP_CTOR:
/* init the queue ctor command */
vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
if (vfop->rc)
goto op_err;
return;
case BNX2X_VFOP_QSETUP_VLAN0:
/* skip if non-leading or FPGA/EMU*/
if (qid)
goto op_done;
/* init the queue set-vlan command (for vlan 0) */
vfop->state = BNX2X_VFOP_QSETUP_DONE;
vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
if (vfop->rc)
goto op_err;
return;
op_err:
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QSETUP_DONE:
vf->cfg_flags |= VF_CFG_VLAN;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_MSG_IOV);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
/* Rollback if needed */
if (i != filters->count) {
BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
i, filters->count + 1);
while (--i >= 0) {
filters->filters[i].add = !filters->filters[i].add;
bnx2x_vf_mac_vlan_config(bp, vf, qid,
&filters->filters[i],
drv_only);
}
}
}
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
vfop->args.qctor.qid = qid;
/* It's our responsibility to free the filters */
kfree(filters);
bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
bnx2x_vfop_qsetup, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
cmd->block);
}
return -ENOMEM;
return rc;
}
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
struct bnx2x_vf_queue_construct_params *qctor)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
int qid = vfop->args.qx.qid;
enum bnx2x_vfop_qflr_state state = vfop->state;
struct bnx2x_queue_state_params *qstate;
struct bnx2x_vfop_cmd cmd;
int rc;
bnx2x_vfop_reset_wq(vf);
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
if (vfop->rc < 0)
rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
if (rc)
goto op_err;
DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
cmd.done = bnx2x_vfop_qflr;
cmd.block = false;
switch (state) {
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
/* the vlan_mac vfop will re-schedule us */
vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
if (vfop->rc)
goto op_err;
return;
/* Configure vlan0 for leading queue */
if (!qid) {
struct bnx2x_vf_mac_vlan_filter filter;
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
/* the vlan_mac vfop will re-schedule us */
vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
if (vfop->rc)
memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
filter.type = BNX2X_VF_FILTER_VLAN;
filter.add = true;
filter.vid = 0;
rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
if (rc)
goto op_err;
return;
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
memset(qstate , 0, sizeof(*qstate));
qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
vfop->state = BNX2X_VFOP_QFLR_DONE;
DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
vf->abs_vfid, qstate->q_obj->state);
if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
qstate->cmd = BNX2X_Q_CMD_TERMINATE;
vfop->rc = bnx2x_queue_state_change(bp, qstate);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
} else {
goto op_done;
}
}
/* Schedule the configuration of any pending vlan filters */
vf->cfg_flags |= VF_CFG_VLAN;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_MSG_IOV);
return 0;
op_err:
BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QFLR_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
}
op_pending:
return;
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
return rc;
}
static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
vfop->args.qx.qid = qid;
if ((qid == LEADING_IDX) &&
bnx2x_validate_vf_sp_objs(bp, vf, false))
bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
bnx2x_vfop_qflr, cmd->done);
else
bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
bnx2x_vfop_qflr, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
cmd->block);
}
return -ENOMEM;
}
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
enum bnx2x_vfop_mcast_state state = vfop->state;
int i;
int rc;
bnx2x_vfop_reset_wq(vf);
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
if (vfop->rc < 0)
goto op_err;
/* If needed, clean the filtering data base */
if ((qid == LEADING_IDX) &&
bnx2x_validate_vf_sp_objs(bp, vf, false)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
if (rc)
goto op_err;
}
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
switch (state) {
case BNX2X_VFOP_MCAST_DEL:
/* clear existing mcasts */
vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
: BNX2X_VFOP_MCAST_CHK_DONE;
mcast->mcast_list_len = vf->mcast_list_len;
vf->mcast_list_len = args->mc_num;
vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_MCAST_ADD:
if (raw->check_pending(raw))
goto op_pending;
/* update mcast list on the ramrod params */
INIT_LIST_HEAD(&mcast->mcast_list);
for (i = 0; i < args->mc_num; i++)
list_add_tail(&(args->mc[i].link),
&mcast->mcast_list);
mcast->mcast_list_len = args->mc_num;
/* Terminate queue */
if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
struct bnx2x_queue_state_params qstate;
/* add new mcasts */
vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
vfop->rc = bnx2x_config_mcast(bp, mcast,
BNX2X_MCAST_CMD_ADD);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
case BNX2X_VFOP_MCAST_CHK_DONE:
vfop->rc = raw->check_pending(raw) ? 1 : 0;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
default:
bnx2x_vfop_default(state);
memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
qstate.cmd = BNX2X_Q_CMD_TERMINATE;
set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
rc = bnx2x_queue_state_change(bp, &qstate);
if (rc)
goto op_err;
}
op_err:
BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
kfree(args->mc);
bnx2x_vfop_end(bp, vf, vfop);
op_pending:
return;
}
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
bnx2x_mac_addr_t *mcasts,
int mcast_num, bool drv_only)
{
struct bnx2x_vfop *vfop = NULL;
size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
NULL;
if (!mc_sz || mc) {
vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
int i;
struct bnx2x_mcast_ramrod_params *ramrod =
&vf->op_params.mcast;
/* set ramrod params */
memset(ramrod, 0, sizeof(*ramrod));
ramrod->mcast_obj = &vf->mcast_obj;
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY,
&ramrod->ramrod_flags);
/* copy mcasts pointers */
vfop->args.mc_list.mc_num = mcast_num;
vfop->args.mc_list.mc = mc;
for (i = 0; i < mcast_num; i++)
mc[i].mac = mcasts[i];
bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
bnx2x_vfop_mcast, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
cmd->block);
} else {
kfree(mc);
}
}
return -ENOMEM;
return 0;
op_err:
BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
return rc;
}
/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
enum bnx2x_vfop_rxmode_state state = vfop->state;
struct bnx2x_mcast_list_elem *mc = NULL;
struct bnx2x_mcast_ramrod_params mcast;
int rc, i;
bnx2x_vfop_reset_wq(vf);
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
if (vfop->rc < 0)
goto op_err;
/* Prepare Multicast command */
memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
mcast.mcast_obj = &vf->mcast_obj;
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
else
set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
if (mc_num) {
mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
GFP_KERNEL);
if (!mc) {
BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
return -ENOMEM;
}
}
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
/* clear existing mcasts */
mcast.mcast_list_len = vf->mcast_list_len;
vf->mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
if (rc) {
BNX2X_ERR("Failed to remove multicasts\n");
return rc;
}
switch (state) {
case BNX2X_VFOP_RXMODE_CONFIG:
/* next state */
vfop->state = BNX2X_VFOP_RXMODE_DONE;
/* update mcast list on the ramrod params */
if (mc_num) {
INIT_LIST_HEAD(&mcast.mcast_list);
for (i = 0; i < mc_num; i++) {
mc[i].mac = mcasts[i];
list_add_tail(&mc[i].link,
&mcast.mcast_list);
}
/* record the accept flags in vfdb so hypervisor can modify them
* if necessary
*/
bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
ramrod->rx_accept_flags;
vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
case BNX2X_VFOP_RXMODE_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
/* add new mcasts */
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
if (rc)
BNX2X_ERR("Faled to add multicasts\n");
kfree(mc);
}
op_pending:
return;
return rc;
}
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
......@@ -1268,121 +642,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags)
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, unsigned long accept_flags)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_rx_mode_ramrod_params *ramrod =
&vf->op_params.rx_mode;
struct bnx2x_rx_mode_ramrod_params ramrod;
bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
bnx2x_vfop_rxmode, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
cmd->block);
}
return -ENOMEM;
bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
return bnx2x_config_rx_mode(bp, &ramrod);
}
/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
* queue destructor)
*/
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
int qid = vfop->args.qx.qid;
enum bnx2x_vfop_qteardown_state state = vfop->state;
struct bnx2x_vfop_cmd cmd;
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
cmd.done = bnx2x_vfop_qdown;
cmd.block = false;
switch (state) {
case BNX2X_VFOP_QTEARDOWN_RXMODE:
/* Drop all */
if (bnx2x_validate_vf_sp_objs(bp, vf, true))
vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
else
vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
if (vfop->rc)
goto op_err;
return;
case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
/* vlan-clear-all: don't consume credit */
vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
if (vfop->rc)
goto op_err;
return;
case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
/* mac-clear-all: consume credit */
vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
if (vfop->rc)
goto op_err;
return;
int rc;
case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
if (vfop->rc)
goto op_err;
return;
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
case BNX2X_VFOP_QTEARDOWN_QDTOR:
/* run the queue destruction flow */
DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
DP(BNX2X_MSG_IOV, "returned from cmd\n");
if (vfop->rc)
/* Remove all classification configuration for leading queue */
if (qid == LEADING_IDX) {
rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
if (rc)
goto op_err;
return;
op_err:
BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
vf->abs_vfid, qid, vfop->rc);
case BNX2X_VFOP_QTEARDOWN_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
}
}
int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
/* for non leading queues skip directly to qdown sate */
if (vfop) {
vfop->args.qx.qid = qid;
bnx2x_vfop_opset(qid == LEADING_IDX ?
BNX2X_VFOP_QTEARDOWN_RXMODE :
BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
cmd->block);
/* Remove filtering if feasible */
if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
false, false);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
false, true);
if (rc)
goto op_err;
rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
if (rc)
goto op_err;
}
}
return -ENOMEM;
/* Destroy queue */
rc = bnx2x_vf_queue_destroy(bp, vf, qid);
if (rc)
goto op_err;
return rc;
op_err:
BNX2X_ERR("vf[%d:%d] error: rc %d\n",
vf->abs_vfid, qid, rc);
return rc;
}
/* VF enable primitives
......@@ -1582,120 +891,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
enum bnx2x_vfop_flr_state state = vfop->state;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vfop_flr,
.block = false,
};
if (vfop->rc < 0)
goto op_err;
int rc, i;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
switch (state) {
case BNX2X_VFOP_FLR_QUEUES:
/* the cleanup operations are valid if and only if the VF
* was first acquired.
*/
if (++(qx->qid) < vf_rxq_count(vf)) {
vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
qx->qid);
if (vfop->rc)
goto op_err;
return;
}
/* remove multicasts */
vfop->state = BNX2X_VFOP_FLR_HW;
vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
0, true);
if (vfop->rc)
goto op_err;
return;
case BNX2X_VFOP_FLR_HW:
/* the cleanup operations are valid if and only if the VF
* was first acquired.
*/
for (i = 0; i < vf_rxq_count(vf); i++) {
rc = bnx2x_vf_queue_flr(bp, vf, i);
if (rc)
goto out;
}
/* dispatch final cleanup and wait for HW queues to flush */
bnx2x_vf_flr_clnup_hw(bp, vf);
/* remove multicasts */
bnx2x_vf_mcast(bp, vf, NULL, 0, true);
/* release VF resources */
bnx2x_vf_free_resc(bp, vf);
/* dispatch final cleanup and wait for HW queues to flush */
bnx2x_vf_flr_clnup_hw(bp, vf);
/* re-open the mailbox */
bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
/* release VF resources */
bnx2x_vf_free_resc(bp, vf);
goto op_done;
default:
bnx2x_vfop_default(state);
}
op_err:
BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
vf->flr_clnup_stage = VF_FLR_ACK;
bnx2x_vfop_end(bp, vf, vfop);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
vfop_handler_t done)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
vfop->args.qx.qid = -1; /* loop */
bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
bnx2x_vfop_flr, done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
}
return -ENOMEM;
/* re-open the mailbox */
bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
return;
out:
BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
vf->abs_vfid, i, rc);
}
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
int i = prev_vf ? prev_vf->index + 1 : 0;
struct bnx2x_virtf *vf;
int i;
/* find next VF to cleanup */
next_vf_to_clean:
for (;
i < BNX2X_NR_VIRTFN(bp) &&
(bnx2x_vf(bp, i, state) != VF_RESET ||
bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
i++)
;
for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
/* VF should be RESET & in FLR cleanup states */
if (bnx2x_vf(bp, i, state) != VF_RESET ||
!bnx2x_vf(bp, i, flr_clnup_stage))
continue;
DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
BNX2X_NR_VIRTFN(bp));
DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
i, BNX2X_NR_VIRTFN(bp));
if (i < BNX2X_NR_VIRTFN(bp)) {
vf = BP_VF(bp, i);
/* lock the vf pf channel */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
/* invoke the VF FLR SM */
if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
vf->abs_vfid);
bnx2x_vf_flr(bp, vf);
/* mark the VF to be ACKED and continue */
vf->flr_clnup_stage = VF_FLR_ACK;
goto next_vf_to_clean;
}
return;
}
/* we are done, update vf records */
for_each_vf(bp, i) {
vf = BP_VF(bp, i);
if (vf->flr_clnup_stage != VF_FLR_ACK)
continue;
vf->flr_clnup_stage = VF_FLR_EPILOG;
/* mark the VF to be ACKED and continue */
vf->flr_clnup_stage = false;
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
/* Acknowledge the handled VFs.
......@@ -1745,7 +997,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
if (reset) {
/* set as reset and ready for cleanup */
vf->state = VF_RESET;
vf->flr_clnup_stage = VF_FLR_CLN;
vf->flr_clnup_stage = true;
DP(BNX2X_MSG_IOV,
"Initiating Final cleanup for VF %d\n",
......@@ -1754,7 +1006,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
}
/* do the FLR cleanup for all marked VFs*/
bnx2x_vf_flr_clnup(bp, NULL);
bnx2x_vf_flr_clnup(bp);
}
/* IOV global initialization routines */
......@@ -2021,7 +1273,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
bnx2x_vf(bp, i, index) = i;
bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
bnx2x_vf(bp, i, state) = VF_FREE;
INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
mutex_init(&bnx2x_vf(bp, i, op_mutex));
bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
}
......@@ -2288,7 +1539,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
/* release all the VFs */
for_each_vf(bp, i)
bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
bnx2x_vf_release(bp, BP_VF(bp, i));
return 0;
}
......@@ -2378,6 +1629,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
smp_mb__after_clear_bit();
}
static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
struct bnx2x_virtf *vf;
......@@ -2402,6 +1659,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
case EVENT_RING_OPCODE_MULTICAST_RULES:
case EVENT_RING_OPCODE_FILTERS_RULES:
case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
cid = (elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK);
DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
......@@ -2466,13 +1724,15 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
vf->abs_vfid, qidx);
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
case EVENT_RING_OPCODE_VF_FLR:
case EVENT_RING_OPCODE_MALICIOUS_VF:
/* Do nothing for now */
return 0;
}
/* SRIOV: reschedule any 'in_progress' operations */
bnx2x_iov_sp_event(bp, cid);
return 0;
}
......@@ -2509,22 +1769,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
}
}
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid)
{
struct bnx2x_virtf *vf;
/* check if the cid is the VF range */
if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
return;
vf = bnx2x_vf_by_cid(bp, vf_cid);
if (vf) {
/* set in_progress flag */
atomic_set(&vf->op_in_progress, 1);
bnx2x_schedule_iov_task(bp, BNX2X_IOV_CONT_VFOP);
}
}
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
int i;
......@@ -2606,33 +1850,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
void bnx2x_iov_vfop_cont(struct bnx2x *bp)
{
int i;
if (!IS_SRIOV(bp))
return;
/* Iterate over all VFs and invoke state transition for VFs with
* 'in-progress' slow-path operations
*/
DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
"searching for pending vf operations\n");
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
if (!vf) {
BNX2X_ERR("VF was null! skipping...\n");
continue;
}
if (!list_empty(&vf->op_list_head) &&
atomic_read(&vf->op_in_progress)) {
DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
}
}
}
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
......@@ -2868,52 +2085,26 @@ static void bnx2x_set_vf_state(void *cookie)
p->vf->state = p->state;
}
/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
enum bnx2x_vfop_close_state state = vfop->state;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vfop_close,
.block = false,
};
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
int rc = 0, i;
switch (state) {
case BNX2X_VFOP_CLOSE_QUEUES:
if (++(qx->qid) < vf_rxq_count(vf)) {
vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
if (vfop->rc)
goto op_err;
return;
}
vfop->state = BNX2X_VFOP_CLOSE_HW;
vfop->rc = 0;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
case BNX2X_VFOP_CLOSE_HW:
/* Close all queues */
for (i = 0; i < vf_rxq_count(vf); i++) {
rc = bnx2x_vf_queue_teardown(bp, vf, i);
if (rc)
goto op_err;
}
/* disable the interrupts */
DP(BNX2X_MSG_IOV, "disabling igu\n");
bnx2x_vf_igu_disable(bp, vf);
/* disable the interrupts */
DP(BNX2X_MSG_IOV, "disabling igu\n");
bnx2x_vf_igu_disable(bp, vf);
/* disable the VF */
DP(BNX2X_MSG_IOV, "clearing qtbl\n");
bnx2x_vf_clr_qtbl(bp, vf);
goto op_done;
default:
bnx2x_vfop_default(state);
}
op_err:
BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
/* disable the VF */
DP(BNX2X_MSG_IOV, "clearing qtbl\n");
bnx2x_vf_clr_qtbl(bp, vf);
/* need to make sure there are no outstanding stats ramrods which may
* cause the device to access the VF's stats buffer which it will free
......@@ -2928,43 +2119,20 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
}
DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop);
op_pending:
/* Not supported at the moment; Exists for macros only */
return;
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
vfop->args.qx.qid = -1; /* loop */
bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
bnx2x_vfop_close, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
cmd->block);
}
return -ENOMEM;
return 0;
op_err:
BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
return rc;
}
/* VF release can be called either: 1. The VF was acquired but
* not enabled 2. the vf was enabled or in the process of being
* enabled
*/
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vfop_release,
.block = false,
};
DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
if (vfop->rc < 0)
goto op_err;
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
vf->state == VF_FREE ? "Free" :
......@@ -2975,193 +2143,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (vf->state) {
case VF_ENABLED:
vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
if (vfop->rc)
rc = bnx2x_vf_close(bp, vf);
if (rc)
goto op_err;
return;
/* Fallthrough to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
goto op_done;
break;
case VF_FREE:
case VF_RESET:
/* do nothing */
goto op_done;
default:
bnx2x_vfop_default(vf->state);
}
op_err:
BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
bnx2x_vfop_end(bp, vf, vfop);
}
static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
enum bnx2x_vfop_rss_state state;
if (!vfop) {
BNX2X_ERR("vfop was null\n");
return;
break;
}
state = vfop->state;
bnx2x_vfop_reset_wq(vf);
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
switch (state) {
case BNX2X_VFOP_RSS_CONFIG:
/* next state */
vfop->state = BNX2X_VFOP_RSS_DONE;
bnx2x_config_rss(bp, &vfop->op_p->rss);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
return 0;
op_err:
BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
op_done:
case BNX2X_VFOP_RSS_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
}
op_pending:
return;
}
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
bnx2x_vfop_opset(-1, /* use vf->state */
bnx2x_vfop_release, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
cmd->block);
}
return -ENOMEM;
BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
return rc;
}
int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_config_rss_params *rss)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
cmd->block);
}
return -ENOMEM;
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
return bnx2x_config_rss(bp, rss);
}
/* VFOP tpa update, send update on all queues */
static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct vfpf_tpa_tlv *tlv,
struct bnx2x_queue_update_tpa_params *params)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
enum bnx2x_vfop_tpa_state state = vfop->state;
bnx2x_vfop_reset_wq(vf);
aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
struct bnx2x_queue_state_params qstate;
int qid, rc = 0;
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
vf->abs_vfid, tpa_args->qid,
state);
switch (state) {
case BNX2X_VFOP_TPA_CONFIG:
if (tpa_args->qid < vf_rxq_count(vf)) {
struct bnx2x_queue_state_params *qstate =
&vf->op_params.qstate;
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
/* Set ramrod params */
memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
memcpy(&qstate.params.update_tpa, params,
sizeof(struct bnx2x_queue_update_tpa_params));
qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
/* The only thing that changes for the ramrod params
* between calls is the sge_map
*/
qstate->params.update_tpa.sge_map =
tpa_args->sge_map[tpa_args->qid];
DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
tpa_args->qid,
U64_HI(qstate->params.update_tpa.sge_map),
U64_LO(qstate->params.update_tpa.sge_map));
qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
vfop->rc = bnx2x_queue_state_change(bp, qstate);
tpa_args->qid++;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
for (qid = 0; qid < vf_rxq_count(vf); qid++) {
qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
qstate.params.update_tpa.sge_map = sge_addr[qid];
DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
U64_LO(sge_addr[qid]));
rc = bnx2x_queue_state_change(bp, &qstate);
if (rc) {
BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
vf->abs_vfid, qid);
return rc;
}
vfop->state = BNX2X_VFOP_TPA_DONE;
vfop->rc = 0;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
op_done:
case BNX2X_VFOP_TPA_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
}
op_pending:
return;
}
int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct vfpf_tpa_tlv *tpa_tlv)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
vfop->args.qx.qid = 0; /* loop */
memcpy(&vfop->args.tpa.sge_map,
tpa_tlv->tpa_client_info.sge_addr,
sizeof(vfop->args.tpa.sge_map));
bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
bnx2x_vfop_tpa, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
cmd->block);
}
return -ENOMEM;
return rc;
}
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop_cmd cmd = {
.done = NULL,
.block = block,
};
int rc;
DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
rc = bnx2x_vf_free(bp, vf);
if (rc)
WARN(rc,
"VF[%d] Failed to allocate resources for release op- rc=%d\n",
vf->abs_vfid, rc);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
return rc;
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
......@@ -3889,10 +2951,6 @@ void bnx2x_iov_task(struct work_struct *work)
&bp->iov_task_state))
bnx2x_vf_handle_flr_event(bp);
if (test_and_clear_bit(BNX2X_IOV_CONT_VFOP,
&bp->iov_task_state))
bnx2x_iov_vfop_cont(bp);
if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
&bp->iov_task_state))
bnx2x_vf_mbx(bp);
......
......@@ -88,113 +88,32 @@ struct bnx2x_vf_queue {
bool sp_initialized;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
* q-init, q-setup and SB index
/* struct bnx2x_vf_queue_construct_params - prepare queue construction
* parameters: q-init, q-setup and SB index
*/
struct bnx2x_vfop_qctor_params {
struct bnx2x_vf_queue_construct_params {
struct bnx2x_queue_state_params qstate;
struct bnx2x_queue_setup_params prep_qsetup;
};
/* VFOP parameters (one copy per VF) */
union bnx2x_vfop_params {
struct bnx2x_vlan_mac_ramrod_params vlan_mac;
struct bnx2x_rx_mode_ramrod_params rx_mode;
struct bnx2x_mcast_ramrod_params mcast;
struct bnx2x_config_rss_params rss;
struct bnx2x_vfop_qctor_params qctor;
struct bnx2x_queue_state_params qstate;
};
/* forward */
struct bnx2x_virtf;
/* VFOP definitions */
typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
struct bnx2x_vfop_cmd {
vfop_handler_t done;
bool block;
};
/* VFOP queue filters command additional arguments */
struct bnx2x_vfop_filter {
struct list_head link;
struct bnx2x_vf_mac_vlan_filter {
int type;
#define BNX2X_VFOP_FILTER_MAC 1
#define BNX2X_VFOP_FILTER_VLAN 2
#define BNX2X_VF_FILTER_MAC 1
#define BNX2X_VF_FILTER_VLAN 2
bool add;
u8 *mac;
u16 vid;
};
struct bnx2x_vfop_filters {
int add_cnt;
struct list_head head;
struct bnx2x_vfop_filter filters[];
};
/* transient list allocated, built and saved until its
* passed to the SP-VERBs layer.
*/
struct bnx2x_vfop_args_mcast {
int mc_num;
struct bnx2x_mcast_list_elem *mc;
};
struct bnx2x_vfop_args_qctor {
int qid;
u16 sb_idx;
};
struct bnx2x_vfop_args_qdtor {
int qid;
struct eth_context *cxt;
};
struct bnx2x_vfop_args_defvlan {
int qid;
bool enable;
u16 vid;
u8 prio;
};
struct bnx2x_vfop_args_qx {
int qid;
bool en_add;
};
struct bnx2x_vfop_args_filters {
struct bnx2x_vfop_filters *multi_filter;
atomic_t *credit; /* non NULL means 'don't consume credit' */
};
struct bnx2x_vfop_args_tpa {
int qid;
dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
};
union bnx2x_vfop_args {
struct bnx2x_vfop_args_mcast mc_list;
struct bnx2x_vfop_args_qctor qctor;
struct bnx2x_vfop_args_qdtor qdtor;
struct bnx2x_vfop_args_defvlan defvlan;
struct bnx2x_vfop_args_qx qx;
struct bnx2x_vfop_args_filters filters;
struct bnx2x_vfop_args_tpa tpa;
};
struct bnx2x_vfop {
struct list_head link;
int rc; /* return code */
int state; /* next state */
union bnx2x_vfop_args args; /* extra arguments */
union bnx2x_vfop_params *op_p; /* ramrod params */
/* state machine callbacks */
vfop_handler_t transition;
vfop_handler_t done;
struct bnx2x_vf_mac_vlan_filters {
int count;
struct bnx2x_vf_mac_vlan_filter filters[];
};
/* vf context */
......@@ -214,15 +133,7 @@ struct bnx2x_virtf {
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
/* non 0 during flr cleanup */
u8 flr_clnup_stage;
#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
* sans the end-wait
*/
#define VF_FLR_ACK 2 /* ACK flr notification */
#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
* ~ final cleanup' end wait
*/
bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
......@@ -286,11 +197,6 @@ struct bnx2x_virtf {
struct bnx2x_rss_config_obj rss_conf_obj;
/* slow-path operations */
atomic_t op_in_progress;
int op_rc;
bool op_wait_blocking;
struct list_head op_list_head;
union bnx2x_vfop_params op_params;
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
};
......@@ -477,7 +383,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
......@@ -497,163 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
/* VFOP generic helpers */
#define bnx2x_vfop_default(state) do { \
BNX2X_ERR("Bad state %d\n", (state)); \
vfop->rc = -EINVAL; \
goto op_err; \
} while (0)
enum {
VFOP_DONE,
VFOP_CONT,
VFOP_VERIFY_PEND,
};
#define bnx2x_vfop_finalize(vf, rc, next) do { \
if ((rc) < 0) \
goto op_err; \
else if ((rc) > 0) \
goto op_pending; \
else if ((next) == VFOP_DONE) \
goto op_done; \
else if ((next) == VFOP_VERIFY_PEND) \
BNX2X_ERR("expected pending\n"); \
else { \
DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
atomic_set(&vf->op_in_progress, 1); \
bnx2x_schedule_iov_task(bp, \
BNX2X_IOV_CONT_VFOP); \
return; \
} \
} while (0)
#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
do { \
vfop->state = first_state; \
vfop->op_p = &vf->op_params; \
vfop->transition = trans_hndlr; \
vfop->done = done_hndlr; \
} while (0)
static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
WARN_ON(list_empty(&vf->op_list_head));
return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
}
static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
if (vfop) {
INIT_LIST_HEAD(&vfop->link);
list_add(&vfop->link, &vf->op_list_head);
}
return vfop;
}
static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vfop *vfop)
{
/* rc < 0 - error, otherwise set to 0 */
DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
if (vfop->rc >= 0)
vfop->rc = 0;
DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
/* unlink the current op context and propagate error code
* must be done before invoking the 'done()' handler
*/
WARN(!mutex_is_locked(&vf->op_mutex),
"about to access vf op linked list but mutex was not locked!");
list_del(&vfop->link);
if (list_empty(&vf->op_list_head)) {
DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
vf->op_rc = vfop->rc;
DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
} else {
struct bnx2x_vfop *cur_vfop;
DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
cur_vfop = bnx2x_vfop_cur(bp, vf);
cur_vfop->rc = vfop->rc;
DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
}
/* invoke done handler */
if (vfop->done) {
DP(BNX2X_MSG_IOV, "calling done handler\n");
vfop->done(bp, vf);
} else {
/* there is no done handler for the operation to unlock
* the mutex. Must have gotten here from PF initiated VF RELEASE
*/
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
}
DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
/* if this is the last nested op reset the wait_blocking flag
* to release any blocking wrappers, only after 'done()' is invoked
*/
if (list_empty(&vf->op_list_head)) {
DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
vf->op_wait_blocking = false;
}
kfree(vfop);
}
static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
/* can take a while if any port is running */
int cnt = 5000;
might_sleep();
while (cnt--) {
if (vf->op_wait_blocking == false) {
#ifdef BNX2X_STOP_ON_ERROR
DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
#endif
return 0;
}
usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
}
/* timeout! */
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
#endif
return -EBUSY;
}
static inline int bnx2x_vfop_transition(struct bnx2x *bp,
struct bnx2x_virtf *vf,
vfop_handler_t transition,
bool block)
{
if (block)
vf->op_wait_blocking = true;
transition(bp, vf);
if (block)
return bnx2x_vfop_wait_blocking(bp, vf);
return 0;
}
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
......@@ -668,64 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
struct bnx2x_vfop_qctor_params *p,
struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type);
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only);
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct bnx2x_vfop_filters *vlans,
int qid, bool drv_only);
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid);
int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid);
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
bnx2x_mac_addr_t *mcasts,
int mcast_num, bool drv_only);
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags);
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mac_vlan_filters *filters,
int qid, bool drv_only);
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
struct bnx2x_vf_queue_construct_params *qctor);
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, unsigned long accept_flags);
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_config_rss_params *rss);
int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct vfpf_tpa_tlv *tpa_tlv);
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct vfpf_tpa_tlv *tlv,
struct bnx2x_queue_update_tpa_params *params);
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
......@@ -796,7 +521,6 @@ void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
union event_ring_elem *elem) {return 1; }
......
......@@ -673,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
......@@ -1048,7 +1049,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
}
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
struct bnx2x_virtf *vf)
struct bnx2x_virtf *vf,
int vf_rc)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
......@@ -1060,7 +1062,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
......@@ -1108,14 +1110,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
return;
mbx_error:
bnx2x_vf_release(bp, vf, false); /* non blocking */
bnx2x_vf_release(bp, vf);
}
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
struct bnx2x_virtf *vf)
struct bnx2x_virtf *vf,
int rc)
{
bnx2x_vf_mbx_resp_single_tlv(bp, vf);
bnx2x_vf_mbx_resp_send_msg(bp, vf);
bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}
static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
......@@ -1239,8 +1242,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
sizeof(struct channel_list_end_tlv));
/* send the response */
vf->op_rc = vfop_status;
bnx2x_vf_mbx_resp_send_msg(bp, vf);
bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
......@@ -1272,19 +1274,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_init_tlv *init = &mbx->msg->req.init;
int rc;
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
/* set VF multiqueue statistics collection mode */
if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
vf->cfg_flags |= VF_CFG_STATS_COALESCE;
/* response */
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* convert MBX queue-flags to standard SP queue-flags */
......@@ -1319,16 +1322,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_vf_queue_construct_params qctor;
int rc = 0;
/* verify vf_qid */
if (setup_q->vf_qid >= vf_rxq_count(vf)) {
BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
setup_q->vf_qid, vf_rxq_count(vf));
vf->op_rc = -EINVAL;
rc = -EINVAL;
goto response;
}
......@@ -1346,9 +1347,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_leading_vfq_init(bp, vf, q);
/* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
init_p = &vf->op_params.qctor.qstate.params.init;
memset(&qctor, 0 ,
sizeof(struct bnx2x_vf_queue_construct_params));
setup_p = &qctor.prep_qsetup;
init_p = &qctor.qstate.params.init;
/* activate immediately */
__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
......@@ -1434,44 +1436,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
q->index, q->sb_idx);
}
/* complete the preparations */
bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
if (vf->op_rc)
rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
if (rc)
goto response;
return;
}
response:
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
enum bnx2x_vfop_filters_state {
BNX2X_VFOP_MBX_Q_FILTERS_MACS,
BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct vfpf_set_q_filters_tlv *tlv,
struct bnx2x_vfop_filters **pfl,
struct bnx2x_vf_mac_vlan_filters **pfl,
u32 type_flag)
{
int i, j;
struct bnx2x_vfop_filters *fl = NULL;
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
size_t fsz;
fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
sizeof(struct bnx2x_vfop_filters);
fsz = tlv->n_mac_vlan_filters *
sizeof(struct bnx2x_vf_mac_vlan_filter) +
sizeof(struct bnx2x_vf_mac_vlan_filters);
fl = kzalloc(fsz, GFP_KERNEL);
if (!fl)
return -ENOMEM;
INIT_LIST_HEAD(&fl->head);
for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
......@@ -1479,17 +1471,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
continue;
if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
fl->filters[j].type = BNX2X_VF_FILTER_MAC;
} else {
fl->filters[j].vid = msg_filter->vlan_tag;
fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
}
fl->filters[j].add =
(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
true : false;
list_add_tail(&fl->filters[j++].link, &fl->head);
fl->count++;
}
if (list_empty(&fl->head))
if (!fl->count)
kfree(fl);
else
*pfl = fl;
......@@ -1529,168 +1521,97 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
int rc;
int rc = 0;
struct vfpf_set_q_filters_tlv *msg =
&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
enum bnx2x_vfop_filters_state state = vfop->state;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vfop_mbx_qfilters,
.block = false,
};
/* check for any mac/vlan changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build mac list */
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
if (vfop->rc < 0)
goto op_err;
switch (state) {
case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_MAC_FILTER);
if (rc)
goto op_err;
/* check for any vlan/mac changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build mac list */
struct bnx2x_vfop_filters *fl = NULL;
if (fl) {
vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_MAC_FILTER);
if (vfop->rc)
/* set mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
if (fl) {
/* set mac list */
rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
msg->vf_qid,
false);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
}
/* fall through */
case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
/* check for any vlan/mac changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build vlan list */
struct bnx2x_vfop_filters *fl = NULL;
/* build vlan list */
fl = NULL;
vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_VLAN_FILTER);
if (vfop->rc)
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_VLAN_FILTER);
if (rc)
goto op_err;
if (fl) {
/* set vlan list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
if (fl) {
/* set vlan list */
rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
msg->vf_qid,
false);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
}
/* fall through */
}
case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
struct pf_vf_bulletin_content *bulletin =
BP_VF_BULLETIN(bp, vf->index);
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
struct pf_vf_bulletin_content *bulletin =
BP_VF_BULLETIN(bp, vf->index);
/* covert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
/* covert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
if (msg->rx_mask &
VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
/* A packet arriving the vf's mac should be accepted
* with any vlan, unless a vlan has already been
* configured.
*/
if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
msg->vf_qid, accept);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
/* fall through */
case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
/* next state */
vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
/* set mcasts */
rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
msg->n_multicast, false);
if (rc) {
vfop->rc = rc;
goto op_err;
}
return;
}
/* fall through */
op_done:
case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
op_err:
BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
vf->abs_vfid, msg->vf_qid, vfop->rc);
goto op_done;
/* A packet arriving the vf's mac should be accepted
* with any vlan, unless a vlan has already been
* configured.
*/
if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
default:
bnx2x_vfop_default(state);
/* set rx-mode */
rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
if (rc)
goto op_err;
}
}
static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
bnx2x_vfop_mbx_qfilters, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
cmd->block);
if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
/* set mcasts */
rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
msg->n_multicast, false);
if (rc)
goto op_err;
}
return -ENOMEM;
op_err:
if (rc)
BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
vf->abs_vfid, msg->vf_qid, rc);
return rc;
}
static int bnx2x_filters_validate_mac(struct bnx2x *bp,
......@@ -1710,7 +1631,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
if (filters->n_mac_vlan_filters > 1) {
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
rc = -EPERM;
goto response;
}
......@@ -1721,7 +1641,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
rc = -EPERM;
goto response;
}
......@@ -1748,7 +1667,6 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
VFPF_Q_FILTER_VLAN_TAG_VALID) {
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
rc = -EPERM;
goto response;
}
......@@ -1770,15 +1688,14 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
if (bnx2x_filters_validate_mac(bp, vf, filters))
rc = bnx2x_filters_validate_mac(bp, vf, filters);
if (rc)
goto response;
if (bnx2x_filters_validate_vlan(bp, vf, filters))
rc = bnx2x_filters_validate_vlan(bp, vf, filters);
if (rc)
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
......@@ -1788,125 +1705,105 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* print q_filter message */
bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
if (vf->op_rc)
goto response;
return;
rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
int qid = mbx->msg->req.q_op.vf_qid;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
vf->abs_vfid, qid);
vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
rc = bnx2x_vf_queue_teardown(bp, vf, qid);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
rc = bnx2x_vf_close(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
rc = bnx2x_vf_free(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
struct bnx2x_config_rss_params rss;
struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
int rc = 0;
if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
vf->index);
vf->op_rc = -EINVAL;
rc = -EINVAL;
goto mbx_resp;
}
memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
/* set vfop params according to rss tlv */
memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
memcpy(rss.ind_table, rss_tlv->ind_table,
T_ETH_INDIRECTION_TABLE_SIZE);
memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
sizeof(rss_tlv->rss_key));
vf_op_params->rss_obj = &vf->rss_conf_obj;
vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
rss.rss_obj = &vf->rss_conf_obj;
rss.rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatability */
vf_op_params->rss_flags = 0;
vf_op_params->ramrod_flags = 0;
rss.rss_flags = 0;
rss.ramrod_flags = 0;
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
(!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
BNX2X_ERR("about to hit a FW assert. aborting...\n");
vf->op_rc = -EINVAL;
rc = -EINVAL;
goto mbx_resp;
}
vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
static int bnx2x_validate_tpa_params(struct bnx2x *bp,
......@@ -1935,47 +1832,42 @@ static int bnx2x_validate_tpa_params(struct bnx2x *bp,
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_queue_update_tpa_params *vf_op_params =
&vf->op_params.qstate.params.update_tpa;
struct bnx2x_queue_update_tpa_params vf_op_params;
struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
int rc = 0;
memset(vf_op_params, 0, sizeof(*vf_op_params));
memset(&vf_op_params, 0, sizeof(vf_op_params));
if (bnx2x_validate_tpa_params(bp, tpa_tlv))
goto mbx_resp;
vf_op_params->complete_on_both_clients =
vf_op_params.complete_on_both_clients =
tpa_tlv->tpa_client_info.complete_on_both_clients;
vf_op_params->dont_verify_thr =
vf_op_params.dont_verify_thr =
tpa_tlv->tpa_client_info.dont_verify_thr;
vf_op_params->max_agg_sz =
vf_op_params.max_agg_sz =
tpa_tlv->tpa_client_info.max_agg_size;
vf_op_params->max_sges_pkt =
vf_op_params.max_sges_pkt =
tpa_tlv->tpa_client_info.max_sges_for_packet;
vf_op_params->max_tpa_queues =
vf_op_params.max_tpa_queues =
tpa_tlv->tpa_client_info.max_tpa_queues;
vf_op_params->sge_buff_sz =
vf_op_params.sge_buff_sz =
tpa_tlv->tpa_client_info.sge_buff_size;
vf_op_params->sge_pause_thr_high =
vf_op_params.sge_pause_thr_high =
tpa_tlv->tpa_client_info.sge_pause_thr_high;
vf_op_params->sge_pause_thr_low =
vf_op_params.sge_pause_thr_low =
tpa_tlv->tpa_client_info.sge_pause_thr_low;
vf_op_params->tpa_mode =
vf_op_params.tpa_mode =
tpa_tlv->tpa_client_info.tpa_mode;
vf_op_params->update_ipv4 =
vf_op_params.update_ipv4 =
tpa_tlv->tpa_client_info.update_ipv4;
vf_op_params->update_ipv6 =
vf_op_params.update_ipv6 =
tpa_tlv->tpa_client_info.update_ipv6;
vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
mbx_resp:
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* dispatch request */
......@@ -2039,11 +1931,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* can we respond to VF (do we have an address for it?) */
if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
/* mbx_resp uses the op_rc of the VF */
vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
/* notify the VF that we do not support this request */
bnx2x_vf_mbx_resp(bp, vf);
bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
} else {
/* can't send a response since this VF is unknown to us
* just ack the FW to release the mailbox and unlock
......@@ -2123,7 +2012,7 @@ void bnx2x_vf_mbx(struct bnx2x *bp)
if (rc) {
BNX2X_ERR("Failed to copy request VF %d\n",
vf->abs_vfid);
bnx2x_vf_release(bp, vf, false); /* non blocking */
bnx2x_vf_release(bp, vf);
return;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment