Commit fc85c910 authored by David S. Miller

Merge branch 'qed-vf-xdp'

Yuval Mintz says:

====================
qed*: Support VF XDP attachment

Each driver queue [Rx, Tx, XDP-forwarding] requires an allocated HW/FW
connection + configured queue-zone.

VF handling by the PF has several limitations that prevented adding the
capability to perform XDP at the driver level:

 - The VF assumes a 1-to-1 correspondence between the VF queue and
   the connection used, meaning q<x> is always going to use cid<x>,
   whereas for its own queues the PF acquires a new cid for each new
   queue.

 - There's a 1-to-1 correspondence between the VF-queues and the HW
   queue zones. While this is necessary for Rx-queues [as the
   queue-zone contains the producer], transmission queues can share
   the underlying queue-zone [the only shared configuration is
   coalescing].
   But all VF<->PF communication mechanisms assume there's a single
   identifier that identifies a queue [as queue-zone == queue], while
   sharing queue-zones requires passing additional information.

 - VFs currently don't try mapping a doorbell bar - there's a small
   doorbell window in the regview allowing VFs to doorbell up to 16
   connections, but this window isn't wide enough for the added XDP
   forwarding queues [see the short illustration after this list].
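
[Illustrative sketch - not part of the original cover letter.] With a
separate connection needed per Rx, Tx and XDP-forwarding queue, a VF
with N L2 queues needs roughly 3*N CIDs (e.g. 16 queues -> ~48 CIDs),
far beyond a 16-connection window. The gate below mirrors the
"total_cids >= num_queues * 3" check this series uses before
advertising XDP support on a VF; the function name is made up:

    /* Sketch only: a VF can do driver-level XDP when the PF granted
     * enough connections for Rx + Tx + XDP-Tx on every queue.
     */
    static int vf_xdp_possible(unsigned int num_queues, unsigned int total_cids)
    {
            return total_cids >= num_queues * 3;
    }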

This series adds the necessary infrastructure to finally let our VFs
support XDP, assuming both the PF and VF drivers are sufficiently new
[legacy support would be retained both for older VFs and older PFs,
but both will be needed for this new support to work].
Basically, the various databases the driver maintains for its queue-cids
would be revised, and queue-cids would be identified using the
(queue-zone, unique index) pair. The TLV mechanism would then be
extended to allow VFs to communicate that unique index as well as the
already provided queue-zone. Finally, the VFs would try to map their
doorbell bar and inform their PF that they're using it.
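
[Illustrative sketch - not part of the original cover letter; names are
made up.] The (queue-zone, unique index) identification can be pictured
as a small per-queue-zone bitmap on the PF side: the first free bit
becomes the queue-cid's in-zone index, and that index is what the
extended TLVs carry alongside the queue-zone:

    #include <linux/bitmap.h>

    #define MAX_QIDS_PER_QZONE (sizeof(unsigned long) * 8)

    struct qzone_usage {
            unsigned long bitmap;   /* one bit per in-zone qid index */
    };

    /* Returns the qid-usage index to report in the QID TLV, or -1 if the
     * queue-zone already holds the maximum number of queue-cids.
     */
    static int qzone_acquire_idx(struct qzone_usage *z)
    {
            unsigned long idx;

            idx = find_first_zero_bit(&z->bitmap, MAX_QIDS_PER_QZONE);
            if (idx >= MAX_QIDS_PER_QZONE)
                    return -1;

            __set_bit(idx, &z->bitmap);
            return idx;
    }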

Almost all the changes are in qed, with the exception of #3 [which does
some cleanup in qede as well] and #11, which actually enables the feature.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a619cc8b e7b80dec
......@@ -412,6 +412,11 @@ struct qed_fw_data {
u32 init_ops_size;
};
enum BAR_ID {
BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */
};
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
......@@ -533,6 +538,9 @@ struct qed_hwfn {
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
/* L2-related */
struct qed_l2_info *p_l2_info;
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
......
......@@ -135,7 +135,6 @@ struct qed_tid_seg {
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
u32 cids_per_vf;
struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
......@@ -222,6 +221,9 @@ struct qed_cxt_mngr {
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
struct qed_cid_acquired_map
acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
......@@ -1121,45 +1123,76 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 type;
u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
kfree(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
kfree(p_mngr->acquired_vf[type][vf].cid_map);
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
}
}
static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
u32 type,
u32 cid_start,
u32 cid_count, struct qed_cid_acquired_map *p_map)
{
u32 size;
if (!cid_count)
return 0;
size = DIV_ROUND_UP(cid_count,
sizeof(unsigned long) * BITS_PER_BYTE) *
sizeof(unsigned long);
p_map->cid_map = kzalloc(size, GFP_KERNEL);
if (!p_map->cid_map)
return -ENOMEM;
p_map->max_count = cid_count;
p_map->start_cid = cid_start;
DP_VERBOSE(p_hwfn, QED_MSG_CXT,
"Type %08x start: %08x count %08x\n",
type, p_map->start_cid, p_map->max_count);
return 0;
}
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 start_cid = 0;
u32 type;
u32 start_cid = 0, vf_start_cid = 0;
u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
u32 size;
if (cid_cnt == 0)
continue;
struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
struct qed_cid_acquired_map *p_map;
size = DIV_ROUND_UP(cid_cnt,
sizeof(unsigned long) * BITS_PER_BYTE) *
sizeof(unsigned long);
p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
if (!p_mngr->acquired[type].cid_map)
/* Handle PF maps */
p_map = &p_mngr->acquired[type];
if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
p_cfg->cid_count, p_map))
goto cid_map_fail;
p_mngr->acquired[type].max_count = cid_cnt;
p_mngr->acquired[type].start_cid = start_cid;
p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
/* Handle VF maps */
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
p_map = &p_mngr->acquired_vf[type][vf];
if (qed_cid_map_alloc_single(p_hwfn, type,
vf_start_cid,
p_cfg->cids_per_vf, p_map))
goto cid_map_fail;
}
DP_VERBOSE(p_hwfn, QED_MSG_CXT,
"Type %08x start: %08x count %08x\n",
type, p_mngr->acquired[type].start_cid,
p_mngr->acquired[type].max_count);
start_cid += cid_cnt;
start_cid += p_cfg->cid_count;
vf_start_cid += p_cfg->cids_per_vf;
}
return 0;
......@@ -1265,19 +1298,36 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
struct qed_conn_type_cfg *p_cfg;
int type;
u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
u32 vf;
p_cfg = &p_mngr->conn_cfg[type];
if (p_cfg->cid_count) {
p_map = &p_mngr->acquired[type];
len = DIV_ROUND_UP(p_map->max_count,
sizeof(unsigned long) *
BITS_PER_BYTE) *
sizeof(unsigned long);
memset(p_map->cid_map, 0, len);
}
if (cid_cnt == 0)
if (!p_cfg->cids_per_vf)
continue;
memset(p_mngr->acquired[type].cid_map, 0,
DIV_ROUND_UP(cid_cnt,
sizeof(unsigned long) * BITS_PER_BYTE) *
sizeof(unsigned long));
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
p_map = &p_mngr->acquired_vf[type][vf];
len = DIV_ROUND_UP(p_map->max_count,
sizeof(unsigned long) *
BITS_PER_BYTE) *
sizeof(unsigned long);
memset(p_map->cid_map, 0, len);
}
}
}
......@@ -1841,91 +1891,145 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_prs_init_pf(p_hwfn);
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid)
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid, u8 vfid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
u32 rel_cid;
if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
if (type >= MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
return -EINVAL;
}
rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
p_mngr->acquired[type].max_count);
if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
return -EINVAL;
}
if (rel_cid >= p_mngr->acquired[type].max_count) {
/* Determine the right map to take this CID from */
if (vfid == QED_CXT_PF_CID)
p_map = &p_mngr->acquired[type];
else
p_map = &p_mngr->acquired_vf[type][vfid];
if (!p_map->cid_map) {
DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
return -EINVAL;
}
rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
if (rel_cid >= p_map->max_count) {
DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
return -EINVAL;
}
__set_bit(rel_cid, p_mngr->acquired[type].cid_map);
__set_bit(rel_cid, p_map->cid_map);
*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
*p_cid = rel_cid + p_map->start_cid;
DP_VERBOSE(p_hwfn, QED_MSG_CXT,
"Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
*p_cid, rel_cid, vfid, type);
return 0;
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid)
{
return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
u32 cid, enum protocol_type *p_type)
u32 cid,
u8 vfid,
enum protocol_type *p_type,
struct qed_cid_acquired_map **pp_map)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
for (p = 0; p < MAX_CONN_TYPES; p++) {
p_map = &p_mngr->acquired[p];
for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
if (vfid == QED_CXT_PF_CID)
*pp_map = &p_mngr->acquired[*p_type];
else
*pp_map = &p_mngr->acquired_vf[*p_type][vfid];
if (!p_map->cid_map)
if (!((*pp_map)->cid_map))
continue;
if (cid >= p_map->start_cid &&
cid < p_map->start_cid + p_map->max_count)
if (cid >= (*pp_map)->start_cid &&
cid < (*pp_map)->start_cid + (*pp_map)->max_count)
break;
}
*p_type = p;
if (p == MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
return false;
if (*p_type == MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
goto fail;
}
rel_cid = cid - p_map->start_cid;
if (!test_bit(rel_cid, p_map->cid_map)) {
DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
return false;
rel_cid = cid - (*pp_map)->start_cid;
if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
DP_NOTICE(p_hwfn, "CID %d [vifd %02x] not acquired",
cid, vfid);
goto fail;
}
return true;
fail:
*p_type = MAX_CONN_TYPES;
*pp_map = NULL;
return false;
}
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map = NULL;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
if (vfid != QED_CXT_PF_CID && vfid > MAX_NUM_VFS) {
DP_NOTICE(p_hwfn,
"Trying to return incorrect CID belonging to VF %02x\n",
vfid);
return;
}
/* Test acquired and find matching per-protocol map */
b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
&type, &p_map);
if (!b_acquired)
return;
rel_cid = cid - p_mngr->acquired[type].start_cid;
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
rel_cid = cid - p_map->start_cid;
clear_bit(rel_cid, p_map->cid_map);
DP_VERBOSE(p_hwfn, QED_MSG_CXT,
"Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
cid, rel_cid, vfid, type);
}
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
_qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
}
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map = NULL;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
QED_CXT_PF_CID, &type, &p_map);
if (!b_acquired)
return -EINVAL;
......@@ -2012,8 +2116,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
struct qed_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons, 1);
if (!p_params->num_vf_cons)
p_params->num_vf_cons =
ETH_PF_PARAMS_VF_CONS_DEFAULT;
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons,
p_params->num_vf_cons);
p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
......
......@@ -53,19 +53,6 @@ struct qed_tid_mem {
u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
};
/**
* @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
*
* @param p_hwfn
* @param type
* @param p_cid
*
* @return int
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid);
/**
* @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
*
......@@ -195,14 +182,51 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_CXT_PF_CID (0xff)
/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
* @param cid
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
/**
* @brief qed_cxt_release - Release a cid belonging to a vf-queue
*
* @param p_hwfn
* @param cid
* @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
*/
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
/**
* @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
*
* @param p_hwfn
* @param type
* @param p_cid
*
* @return int
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid);
/**
* @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
* for a vf-queue
*
* @param p_hwfn
* @param type
* @param p_cid
* @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
*
* @return int
*/
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid, u8 vfid);
int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type elem_type, u32 iid);
u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
......
......@@ -69,12 +69,6 @@ static DEFINE_SPINLOCK(qm_lock);
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
/* API common to all protocols */
enum BAR_ID {
BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */
};
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
......@@ -83,7 +77,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
u32 val;
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
return qed_vf_hw_bar_size(p_hwfn, bar_id);
val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
......@@ -154,8 +148,11 @@ void qed_resc_free(struct qed_dev *cdev)
{
int i;
if (IS_VF(cdev))
if (IS_VF(cdev)) {
for_each_hwfn(cdev, i)
qed_l2_free(&cdev->hwfns[i]);
return;
}
kfree(cdev->fw_data);
cdev->fw_data = NULL;
......@@ -183,6 +180,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_ooo_free(p_hwfn);
}
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
}
......@@ -848,8 +846,14 @@ int qed_resc_alloc(struct qed_dev *cdev)
u32 line_count;
int i, rc = 0;
if (IS_VF(cdev))
if (IS_VF(cdev)) {
for_each_hwfn(cdev, i) {
rc = qed_l2_alloc(&cdev->hwfns[i]);
if (rc)
return rc;
}
return rc;
}
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
......@@ -960,6 +964,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
rc = qed_l2_alloc(p_hwfn);
if (rc)
goto alloc_err;
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2) {
rc = qed_ll2_alloc(p_hwfn);
......@@ -1011,8 +1019,11 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
if (IS_VF(cdev))
if (IS_VF(cdev)) {
for_each_hwfn(cdev, i)
qed_l2_setup(&cdev->hwfns[i]);
return;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
......@@ -1030,6 +1041,7 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_l2_setup(p_hwfn);
qed_iov_setup(p_hwfn);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
......
......@@ -65,26 +65,162 @@
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
struct qed_l2_info {
u32 queues;
unsigned long **pp_qid_usage;
/* The lock is meant to synchronize access to the qid usage */
struct mutex lock;
};
int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_l2_info *p_l2_info;
unsigned long **pp_qids;
u32 i;
if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
return 0;
p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
if (!p_l2_info)
return -ENOMEM;
p_hwfn->p_l2_info = p_l2_info;
if (IS_PF(p_hwfn->cdev)) {
p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
} else {
u8 rx = 0, tx = 0;
qed_vf_get_num_rxqs(p_hwfn, &rx);
qed_vf_get_num_txqs(p_hwfn, &tx);
p_l2_info->queues = max_t(u8, rx, tx);
}
pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
GFP_KERNEL);
if (!pp_qids)
return -ENOMEM;
p_l2_info->pp_qid_usage = pp_qids;
for (i = 0; i < p_l2_info->queues; i++) {
pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
if (!pp_qids[i])
return -ENOMEM;
}
return 0;
}
void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
return;
mutex_init(&p_hwfn->p_l2_info->lock);
}
void qed_l2_free(struct qed_hwfn *p_hwfn)
{
u32 i;
if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
return;
if (!p_hwfn->p_l2_info)
return;
if (!p_hwfn->p_l2_info->pp_qid_usage)
goto out_l2_info;
/* Free until we hit the first uninitialized entry */
for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
if (!p_hwfn->p_l2_info->pp_qid_usage[i])
break;
kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
}
kfree(p_hwfn->p_l2_info->pp_qid_usage);
out_l2_info:
kfree(p_hwfn->p_l2_info);
p_hwfn->p_l2_info = NULL;
}
static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
u16 queue_id = p_cid->rel.queue_id;
bool b_rc = true;
u8 first;
mutex_lock(&p_l2_info->lock);
if (queue_id > p_l2_info->queues) {
DP_NOTICE(p_hwfn,
"Requested to increase usage for qzone %04x out of %08x\n",
queue_id, p_l2_info->queues);
b_rc = false;
goto out;
}
first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
MAX_QUEUES_PER_QZONE);
if (first >= MAX_QUEUES_PER_QZONE) {
b_rc = false;
goto out;
}
__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
p_cid->qid_usage_idx = first;
out:
mutex_unlock(&p_l2_info->lock);
return b_rc;
}
static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
mutex_lock(&p_hwfn->p_l2_info->lock);
clear_bit(p_cid->qid_usage_idx,
p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
mutex_unlock(&p_hwfn->p_l2_info->lock);
}
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
qed_cxt_release_cid(p_hwfn, p_cid->cid);
bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
/* For PF's VFs we maintain the index inside queue-zone in IOV */
if (p_cid->vfid == QED_QUEUE_CID_SELF)
qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
vfree(p_cid);
}
/* The internal version is only meant to be directly called by PFs
* initializing CIDs for their VFs.
*/
struct qed_queue_cid *
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
u8 vf_qid,
struct qed_queue_start_common_params *p_params)
struct qed_queue_start_common_params *p_params,
bool b_is_rx,
struct qed_queue_cid_vf_params *p_vf_params)
{
bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
struct qed_queue_cid *p_cid;
int rc;
......@@ -95,10 +231,25 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
p_cid->vf_qid = vf_qid;
p_cid->rel = *p_params;
p_cid->p_owner = p_hwfn;
/* Fill in parameters */
p_cid->rel.vport_id = p_params->vport_id;
p_cid->rel.queue_id = p_params->queue_id;
p_cid->rel.stats_id = p_params->stats_id;
p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
p_cid->b_is_rx = b_is_rx;
p_cid->sb_idx = p_params->sb_idx;
/* Fill-in bits related to VFs' queues if information was provided */
if (p_vf_params) {
p_cid->vfid = p_vf_params->vfid;
p_cid->vf_qid = p_vf_params->vf_qid;
p_cid->vf_legacy = p_vf_params->vf_legacy;
} else {
p_cid->vfid = QED_QUEUE_CID_SELF;
}
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->cdev)) {
p_cid->abs = p_cid->rel;
......@@ -120,7 +271,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
/* In case of a PF configuring its VF's queues, the stats-id is already
* absolute [since there's a single index that's suitable per-VF].
*/
if (b_is_same) {
if (p_cid->vfid == QED_QUEUE_CID_SELF) {
rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
&p_cid->abs.stats_id);
if (rc)
......@@ -129,27 +280,29 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->abs.stats_id = p_cid->rel.stats_id;
}
/* SBs relevant information was already provided as absolute */
p_cid->abs.sb = p_cid->rel.sb;
p_cid->abs.sb_idx = p_cid->rel.sb_idx;
/* This is tricky - we're actually interested in whether this is a PF
* entry meant for the VF.
*/
if (!b_is_same)
p_cid->is_vf = true;
out:
/* VF-images have provided the qid_usage_idx on their own.
* Otherwise, we need to allocate a unique one.
*/
if (!p_vf_params) {
if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
goto fail;
} else {
p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
}
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
"opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
p_cid->opaque_fid,
p_cid->cid,
p_cid->rel.vport_id,
p_cid->abs.vport_id,
p_cid->rel.queue_id,
p_cid->qid_usage_idx,
p_cid->abs.queue_id,
p_cid->rel.stats_id,
p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);
p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
return p_cid;
......@@ -158,32 +311,61 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
return NULL;
}
static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid, struct
qed_queue_start_common_params
*p_params)
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
bool b_is_rx,
struct qed_queue_cid_vf_params *p_vf_params)
{
struct qed_queue_cid *p_cid;
u8 vfid = QED_CXT_PF_CID;
bool b_legacy_vf = false;
u32 cid = 0;
/* In case of legacy VFs, the CID can be derived from the additional
* VF parameters - the VF assumes queue X uses CID X, so we can simply
* use the vf_qid for this purpose as well.
*/
if (p_vf_params) {
vfid = p_vf_params->vfid;
if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
b_legacy_vf = true;
cid = p_vf_params->vf_qid;
}
}
/* Get a unique firmware CID for this queue, in case it's a PF.
* VFs don't need a CID as the queue configuration will be done
* by PF.
*/
if (IS_PF(p_hwfn->cdev)) {
if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
&cid, vfid)) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return NULL;
}
}
p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
if (!p_cid && IS_PF(p_hwfn->cdev))
qed_cxt_release_cid(p_hwfn, cid);
p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
p_params, b_is_rx, p_vf_params);
if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
_qed_cxt_release_cid(p_hwfn, cid, vfid);
return p_cid;
}
static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
bool b_is_rx,
struct qed_queue_start_common_params *p_params)
{
return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
NULL);
}
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
......@@ -681,7 +863,7 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
p_cid->opaque_fid, p_cid->cid,
p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);
p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
......@@ -697,8 +879,8 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
p_ramrod->sb_index = p_cid->abs.sb_idx;
p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
......@@ -711,13 +893,15 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
if (p_cid->is_vf) {
if (p_cid->vfid != QED_QUEUE_CID_SELF) {
bool b_legacy_vf = !!(p_cid->vf_legacy &
QED_QCID_LEGACY_VF_RX_PROD);
p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
!!p_cid->b_legacy_vf ? " [legacy]" : "",
p_cid->vf_qid);
p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
}
return qed_spq_post(p_hwfn, p_ent, NULL);
......@@ -761,7 +945,7 @@ qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
int rc;
/* Allocate a CID for the queue */
p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (!p_cid)
return -ENOMEM;
......@@ -863,10 +1047,11 @@ qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
!b_eq_completion_only) ||
b_cqe_completion;
p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
b_eq_completion_only;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
......@@ -915,8 +1100,8 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
p_ramrod->sb_index = p_cid->abs.sb_idx;
p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
......@@ -965,7 +1150,7 @@ qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid;
int rc;
p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (!p_cid)
return -EINVAL;
......@@ -1934,15 +2119,26 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
info->xdp_supported = true;
} else {
qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
if (cdev->num_hwfns > 1) {
u8 queues = 0;
u16 total_cids = 0;
qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
/* Determine queues & XDP support */
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u8 queues, cids;
qed_vf_get_num_cids(p_hwfn, &cids);
qed_vf_get_num_rxqs(p_hwfn, &queues);
info->num_queues += queues;
total_cids += cids;
}
/* Enable VF XDP in case PF guarantees sufficient connections */
if (total_cids >= info->num_queues * 3)
info->xdp_supported = true;
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
(u8 *)&info->num_vlan_filters);
qed_vf_get_num_mac_filters(&cdev->hwfns[0],
......@@ -2193,9 +2389,9 @@ static int qed_start_rxq(struct qed_dev *cdev,
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
"Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
p_params->sb);
p_params->p_sb->igu_sb_id);
return 0;
}
......@@ -2243,9 +2439,9 @@ static int qed_start_txq(struct qed_dev *cdev,
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
"Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
p_params->queue_id, rss_num, p_params->vport_id,
p_params->sb);
p_params->p_sb->igu_sb_id);
return 0;
}
......
......@@ -277,40 +277,87 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
struct qed_queue_cid {
/* 'Relative' is a relative term ;-). Usually the indices [not counting
* SBs] would be PF-relative, but there are some cases where that isn't
* the case - specifically for a PF configuring its VF indices it's
* possible some fields [E.g., stats-id] in 'rel' would already be abs.
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF (0xff)
/* Almost identical to the qed_queue_start_common_params,
* but here we maintain the SB index in IGU CAM.
*/
struct qed_queue_cid_params {
u8 vport_id;
u16 queue_id;
u8 stats_id;
};
/* Additional parameters required for initialization of the queue_cid
* and are relevant only for a PF initializing one for its VFs.
*/
struct qed_queue_cid_vf_params {
/* Should match the VF's relative index */
u8 vfid;
/* 0-based queue index. Should reflect the relative qzone the
* VF thinks is associated with it [in its range].
*/
u8 vf_qid;
/* Indicates a VF is legacy, making it differ in several things:
* - Producers would be placed in a different place.
* - Makes assumptions regarding the CIDs.
*/
struct qed_queue_start_common_params rel;
struct qed_queue_start_common_params abs;
u8 vf_legacy;
u8 qid_usage_idx;
};
struct qed_queue_cid {
/* For stats-id, the `rel' is actually absolute as well */
struct qed_queue_cid_params rel;
struct qed_queue_cid_params abs;
/* These have no 'relative' meaning */
u16 sb_igu_id;
u8 sb_idx;
u32 cid;
u16 opaque_fid;
bool b_is_rx;
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
* and not on the VF itself.
*/
bool is_vf;
u8 vfid;
u8 vf_qid;
/* Legacy VFs might have Rx producer located elsewhere */
bool b_legacy_vf;
/* We need an additional index to differentiate between queues opened
* for same queue-zone, as VFs would have to communicate the info
* to the PF [otherwise PF has no way to differentiate].
*/
u8 qid_usage_idx;
u8 vf_legacy;
#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0))
#define QED_QCID_LEGACY_VF_CID (BIT(1))
struct qed_hwfn *p_owner;
};
int qed_l2_alloc(struct qed_hwfn *p_hwfn);
void qed_l2_setup(struct qed_hwfn *p_hwfn);
void qed_l2_free(struct qed_hwfn *p_hwfn);
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid);
struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
u8 vf_qid,
struct qed_queue_start_common_params
*p_params);
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
bool b_is_rx,
struct qed_queue_cid_vf_params *p_vf_params);
int
qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
......
......@@ -122,7 +122,7 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
if (cdev->doorbells)
if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
iounmap(cdev->regview);
......@@ -206,16 +206,24 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
goto err2;
}
if (IS_PF(cdev)) {
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->db_size = pci_resource_len(cdev->pdev, 2);
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
if (!cdev->doorbells) {
DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM;
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->db_size = pci_resource_len(cdev->pdev, 2);
if (!cdev->db_size) {
if (IS_PF(cdev)) {
DP_NOTICE(cdev, "No Doorbell bar available\n");
return -EINVAL;
} else {
return 0;
}
}
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
if (!cdev->doorbells) {
DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM;
}
return 0;
err2:
......
......@@ -560,6 +560,7 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
......
......@@ -45,6 +45,21 @@
#include "qed_sriov.h"
#include "qed_vf.h"
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
u8 legacy = 0;
if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN)
legacy |= QED_QCID_LEGACY_VF_RX_PROD;
if (!(p_vf->acquire.vfdev_info.capabilities &
VFPF_ACQUIRE_CAP_QUEUE_QIDS))
legacy |= QED_QCID_LEGACY_VF_CID;
return legacy;
}
/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
......@@ -178,6 +193,19 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
int i;
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
return p_queue->cids[i].p_cid;
}
return NULL;
}
enum qed_iov_validate_q_mode {
QED_IOV_VALIDATE_Q_NA,
QED_IOV_VALIDATE_Q_ENABLE,
......@@ -190,12 +218,24 @@ static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
enum qed_iov_validate_q_mode mode,
bool b_is_tx)
{
int i;
if (mode == QED_IOV_VALIDATE_Q_NA)
return true;
if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
(!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
struct qed_vf_queue_cid *p_qcid;
p_qcid = &p_vf->vf_queues[qid].cids[i];
if (!p_qcid->p_cid)
continue;
if (p_qcid->b_is_tx != b_is_tx)
continue;
return mode == QED_IOV_VALIDATE_Q_ENABLE;
}
/* In case we haven't found any valid cid, then it's disabled */
return mode == QED_IOV_VALIDATE_Q_DISABLE;
......@@ -1019,20 +1059,15 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf->num_txqs = num_of_vf_avaiable_chains;
for (i = 0; i < vf->num_rxqs; i++) {
struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
struct qed_vf_queue *p_queue = &vf->vf_queues[i];
p_queue->fw_rx_qid = p_params->req_rx_queue[i];
p_queue->fw_tx_qid = p_params->req_tx_queue[i];
/* CIDs are per-VF, so no problem having them 0-based. */
p_queue->fw_cid = i;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
vf->relative_vf_id,
i, vf->igu_sbs[i],
p_queue->fw_rx_qid,
p_queue->fw_tx_qid, p_queue->fw_cid);
"VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
vf->relative_vf_id, i, vf->igu_sbs[i],
p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
/* Update the link configuration in bulletin */
......@@ -1319,7 +1354,7 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
u32 i;
u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
......@@ -1332,16 +1367,15 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
if (p_queue->p_rx_cid) {
qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
p_queue->p_rx_cid = NULL;
}
for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
if (!p_queue->cids[j].p_cid)
continue;
if (p_queue->p_tx_cid) {
qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
p_queue->p_tx_cid = NULL;
qed_eth_queue_cid_release(p_hwfn,
p_queue->cids[j].p_cid);
p_queue->cids[j].p_cid = NULL;
}
}
......@@ -1350,13 +1384,67 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
if (val)
return val + 11;
return 0;
}
static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
qed_db_addr_vf(0, DQ_DEMS_LEGACY);
u32 bar_size;
p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
/* If VF didn't bother asking for QIDs then don't bother limiting
* number of CIDs. The VF doesn't care about the number, and this
* has the likely result of causing an additional acquisition.
*/
if (!(p_vf->acquire.vfdev_info.capabilities &
VFPF_ACQUIRE_CAP_QUEUE_QIDS))
return;
/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
* that would make sure doorbells for all CIDs fall within the bar.
* If it doesn't, make sure regview window is sufficient.
*/
if (p_vf->acquire.vfdev_info.capabilities &
VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
if (bar_size)
bar_size = 1 << bar_size;
if (p_hwfn->cdev->num_hwfns > 1)
bar_size /= 2;
} else {
bar_size = PXP_VF_BAR0_DQ_LENGTH;
}
if (bar_size / db_size < 256)
p_resp->num_cids = min_t(u8, p_resp->num_cids,
(u8)(bar_size / db_size));
}
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
int i;
u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
......@@ -1374,7 +1462,7 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
for (i = 0; i < p_resp->num_rxqs; i++) {
qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
p_resp->cid[i] = i;
}
/* Filter related information */
......@@ -1383,6 +1471,8 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
*/
......@@ -1394,10 +1484,11 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_resp->num_sbs < p_req->num_sbs ||
p_resp->num_mac_filters < p_req->num_mac_filters ||
p_resp->num_vlan_filters < p_req->num_vlan_filters ||
p_resp->num_mc_filters < p_req->num_mc_filters) {
p_resp->num_mc_filters < p_req->num_mc_filters ||
p_resp->num_cids < p_req->num_cids) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
"VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
p_vf->abs_vf_id,
p_req->num_rxqs,
p_resp->num_rxqs,
......@@ -1409,7 +1500,9 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
p_req->num_mc_filters,
p_resp->num_mc_filters,
p_req->num_cids, p_resp->num_cids);
/* Some legacy OSes are incapable of correctly handling this
* failure.
......@@ -1525,6 +1618,15 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
/* Share our ability to use multiple queue-ids only with VFs
* that request it.
*/
if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
/* Share the sizes of the bars with VF */
resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
......@@ -1749,9 +1851,11 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
/* Update all the Rx queues */
for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
struct qed_queue_cid *p_cid;
struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
struct qed_queue_cid *p_cid = NULL;
p_cid = p_vf->vf_queues[i].p_rx_cid;
/* There can be at most 1 Rx queue on qzone. Find it */
p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
if (!p_cid)
continue;
......@@ -1942,16 +2046,55 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, bool b_is_tx)
{
struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
struct vfpf_qid_tlv *p_qid_tlv;
/* Search for the qid if the VF published it's going to provide it */
if (!(p_vf->acquire.vfdev_info.capabilities &
VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
if (b_is_tx)
return QED_IOV_LEGACY_QID_TX;
else
return QED_IOV_LEGACY_QID_RX;
}
p_qid_tlv = (struct vfpf_qid_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
CHANNEL_TLV_QID);
if (!p_qid_tlv) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%2x]: Failed to provide qid\n",
p_vf->relative_vf_id);
return QED_IOV_QID_INVALID;
}
if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%02x]: Provided qid out-of-bounds %02x\n",
p_vf->relative_vf_id, p_qid_tlv->qid);
return QED_IOV_QID_INVALID;
}
return p_qid_tlv->qid;
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct qed_vf_q_info *p_queue;
u8 qid_usage_idx, vf_legacy = 0;
struct vfpf_start_rxq_tlv *req;
bool b_legacy_vf = false;
struct qed_vf_queue *p_queue;
struct qed_queue_cid *p_cid;
struct qed_sb_info sb_dummy;
int rc;
req = &mbx->req_virt->start_rxq;
......@@ -1961,53 +2104,64 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
/* Acquire a new queue-cid */
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
if (qid_usage_idx == QED_IOV_QID_INVALID)
goto out;
p_queue = &vf->vf_queues[req->rx_qid];
if (p_queue->cids[qid_usage_idx].p_cid)
goto out;
vf_legacy = qed_vf_calculate_legacy(vf);
/* Acquire a new queue-cid */
memset(&params, 0, sizeof(params));
params.queue_id = p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
params.sb = req->hw_sb;
/* Since IGU index is passed via sb_info, construct a dummy one */
memset(&sb_dummy, 0, sizeof(sb_dummy));
sb_dummy.igu_sb_id = req->hw_sb;
params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
vf->opaque_fid,
p_queue->fw_cid,
req->rx_qid, &params);
if (!p_queue->p_rx_cid)
memset(&vf_params, 0, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->rx_qid;
vf_params.vf_legacy = vf_legacy;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, true, &vf_params);
if (!p_cid)
goto out;
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN) {
b_legacy_vf = true;
} else {
if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
}
p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
rc = qed_eth_rxq_start_ramrod(p_hwfn,
p_queue->p_rx_cid,
rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr, req->cqe_pbl_size);
if (rc) {
status = PFVF_STATUS_FAILURE;
qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
p_queue->p_rx_cid = NULL;
qed_eth_queue_cid_release(p_hwfn, p_cid);
} else {
p_queue->cids[qid_usage_idx].p_cid = p_cid;
p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
vf->num_active_rxqs++;
}
out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
!!(vf_legacy &
QED_QCID_LEGACY_VF_RX_PROD));
}
static void
......@@ -2226,7 +2380,8 @@ static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf, u8 status)
struct qed_vf_info *p_vf,
u32 cid, u8 status)
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_start_queue_resp_tlv *p_tlv;
......@@ -2254,12 +2409,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
u16 qid = mbx->req_virt->start_txq.tx_qid;
p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
DQ_DEMS_LEGACY);
}
if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
......@@ -2269,10 +2420,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_txq_tlv *req;
struct qed_vf_q_info *p_queue;
struct qed_vf_queue *p_queue;
struct qed_queue_cid *p_cid;
struct qed_sb_info sb_dummy;
u8 qid_usage_idx, vf_legacy;
u32 cid = 0;
int rc;
u16 pq;
......@@ -2280,89 +2436,126 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
req = &mbx->req_virt->start_txq;
if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
QED_IOV_VALIDATE_Q_DISABLE) ||
QED_IOV_VALIDATE_Q_NA) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
/* Acquire a new queue-cid */
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
if (qid_usage_idx == QED_IOV_QID_INVALID)
goto out;
p_queue = &vf->vf_queues[req->tx_qid];
if (p_queue->cids[qid_usage_idx].p_cid)
goto out;
vf_legacy = qed_vf_calculate_legacy(vf);
/* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
params.sb = req->hw_sb;
/* Since IGU index is passed via sb_info, construct a dummy one */
memset(&sb_dummy, 0, sizeof(sb_dummy));
sb_dummy.igu_sb_id = req->hw_sb;
params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
vf->opaque_fid,
p_queue->fw_cid,
req->tx_qid, &params);
if (!p_queue->p_tx_cid)
memset(&vf_params, 0, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->tx_qid;
vf_params.vf_legacy = vf_legacy;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, false, &vf_params);
if (!p_cid)
goto out;
pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
req->pbl_addr, req->pbl_size, pq);
if (rc) {
status = PFVF_STATUS_FAILURE;
qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
p_queue->p_tx_cid = NULL;
qed_eth_queue_cid_release(p_hwfn, p_cid);
} else {
status = PFVF_STATUS_SUCCESS;
p_queue->cids[qid_usage_idx].p_cid = p_cid;
p_queue->cids[qid_usage_idx].b_is_tx = true;
cid = p_cid->cid;
}
out:
qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
u16 rxq_id, bool cqe_completion)
u16 rxq_id,
u8 qid_usage_idx, bool cqe_completion)
{
struct qed_vf_q_info *p_queue;
struct qed_vf_queue *p_queue;
int rc = 0;
if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
QED_IOV_VALIDATE_Q_ENABLE)) {
if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
vf->relative_vf_id, rxq_id);
"VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
vf->relative_vf_id, rxq_id, qid_usage_idx);
return -EINVAL;
}
p_queue = &vf->vf_queues[rxq_id];
/* We've validated the index and the existence of the active RXQ -
* now we need to make sure that it's using the correct qid.
*/
if (!p_queue->cids[qid_usage_idx].p_cid ||
p_queue->cids[qid_usage_idx].b_is_tx) {
struct qed_queue_cid *p_cid;
p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
vf->relative_vf_id,
rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
return -EINVAL;
}
/* Now that we know we have a valid Rx-queue - close it */
rc = qed_eth_rx_queue_stop(p_hwfn,
p_queue->p_rx_cid,
p_queue->cids[qid_usage_idx].p_cid,
false, cqe_completion);
if (rc)
return rc;
p_queue->p_rx_cid = NULL;
p_queue->cids[qid_usage_idx].p_cid = NULL;
vf->num_active_rxqs--;
return 0;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf, u16 txq_id)
struct qed_vf_info *vf,
u16 txq_id, u8 qid_usage_idx)
{
struct qed_vf_q_info *p_queue;
struct qed_vf_queue *p_queue;
int rc = 0;
if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
QED_IOV_VALIDATE_Q_ENABLE))
if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
return -EINVAL;
p_queue = &vf->vf_queues[txq_id];
if (!p_queue->cids[qid_usage_idx].p_cid ||
!p_queue->cids[qid_usage_idx].b_is_tx)
return -EINVAL;
rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
if (rc)
return rc;
p_queue->p_tx_cid = NULL;
p_queue->cids[qid_usage_idx].p_cid = NULL;
return 0;
}
......@@ -2374,6 +2567,7 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
u8 qid_usage_idx;
int rc;
/* There has never been an official driver that used this interface
......@@ -2389,8 +2583,13 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
goto out;
}
/* Find which qid-index is associated with the queue */
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
if (qid_usage_idx == QED_IOV_QID_INVALID)
goto out;
rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
req->cqe_completion);
qid_usage_idx, req->cqe_completion);
if (!rc)
status = PFVF_STATUS_SUCCESS;
out:
......@@ -2406,6 +2605,7 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
u8 qid_usage_idx;
int rc;
/* There has never been an official driver that used this interface
......@@ -2420,7 +2620,13 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
/* Find which qid-index is associated with the queue */
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
if (qid_usage_idx == QED_IOV_QID_INVALID)
goto out;
rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
if (!rc)
status = PFVF_STATUS_SUCCESS;
......@@ -2440,7 +2646,7 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
u16 qid;
u8 qid_usage_idx;
int rc;
u8 i;
......@@ -2448,19 +2654,42 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
/* Validate inputs */
for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
if (qid_usage_idx == QED_IOV_QID_INVALID)
goto out;
/* There shouldn't exist a VF that uses queue-qids yet uses this
* API with multiple Rx queues. Validate this.
*/
if ((vf->acquire.vfdev_info.capabilities &
VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d] supports QIDs but sends multiple queues\n",
vf->relative_vf_id);
goto out;
}
/* Validate inputs - for the legacy case this is still true since
* qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
*/
for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
if (!qed_iov_validate_rxq(p_hwfn, vf, i,
QED_IOV_VALIDATE_Q_ENABLE)) {
DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
vf->relative_vf_id, req->rx_qid, req->num_rxqs);
QED_IOV_VALIDATE_Q_NA) ||
!vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
vf->relative_vf_id, req->rx_qid,
req->num_rxqs);
goto out;
}
}
/* Prepare the handlers */
for (i = 0; i < req->num_rxqs; i++) {
qid = req->rx_qid + i;
handlers[i] = vf->vf_queues[qid].p_rx_cid;
u16 qid = req->rx_qid + i;
handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
}
rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
......@@ -2674,6 +2903,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
(1 << p_rss_tlv->rss_table_size_log));
for (i = 0; i < table_size; i++) {
struct qed_queue_cid *p_cid;
q_idx = p_rss_tlv->rss_ind_table[i];
if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
QED_IOV_VALIDATE_Q_ENABLE)) {
......@@ -2685,7 +2916,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
goto out;
}
p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
......
......@@ -149,12 +149,21 @@ struct qed_iov_vf_mbx {
struct vfpf_first_tlv first_tlv;
};
struct qed_vf_q_info {
#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)
struct qed_vf_queue_cid {
bool b_is_tx;
struct qed_queue_cid *p_cid;
};
/* Describes a qzone associated with the VF */
struct qed_vf_queue {
u16 fw_rx_qid;
struct qed_queue_cid *p_rx_cid;
u16 fw_tx_qid;
struct qed_queue_cid *p_tx_cid;
u8 fw_cid;
struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
enum vf_state {
......@@ -212,7 +221,8 @@ struct qed_vf_info {
u8 num_mac_filters;
u8 num_vlan_filters;
struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
u8 num_active_rxqs;
struct qed_public_vf_info p_vf_info;
......
......@@ -153,6 +153,77 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
return rc;
}
static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_qid_tlv *p_qid_tlv;
/* Only add QIDs for the queue if it was negotiated with PF */
if (!(p_iov->acquire_resp.pfdev_info.capabilities &
PFVF_ACQUIRE_CAP_QUEUE_QIDS))
return;
p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
p_qid_tlv->qid = p_cid->qid_usage_idx;
}
int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
u32 size;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
qed_vf_pf_req_end(p_hwfn, rc);
if (!b_final)
return rc;
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
p_iov->vf2pf_request,
p_iov->vf2pf_request_phys);
if (p_iov->pf2vf_reply)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
if (p_iov->bulletin.p_virt) {
size = sizeof(struct qed_bulletin_content);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
size,
p_iov->bulletin.p_virt, p_iov->bulletin.phys);
}
kfree(p_hwfn->vf_iov_info);
p_hwfn->vf_iov_info = NULL;
return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
return _qed_vf_pf_release(p_hwfn, true);
}
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
......@@ -160,7 +231,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
{
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
"PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
p_req->num_rxqs,
p_resp->num_rxqs,
p_req->num_rxqs,
......@@ -171,7 +242,8 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
p_resp->num_mac_filters,
p_req->num_vlan_filters,
p_resp->num_vlan_filters,
p_req->num_mc_filters, p_resp->num_mc_filters);
p_req->num_mc_filters,
p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
/* humble our request */
p_req->num_txqs = p_resp->num_txqs;
......@@ -180,6 +252,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
p_req->num_cids = p_resp->num_cids;
}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
......@@ -204,6 +277,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
......@@ -216,6 +290,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
/* If we've mapped the doorbell bar, try using queue qids */
if (p_iov->b_doorbell_bar) {
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
VFPF_ACQUIRE_CAP_QUEUE_QIDS;
p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
}
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
req->bulletin_size = p_iov->bulletin.size;
......@@ -307,6 +388,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
p_iov->b_pre_fp_hsi = true;
/* In case PF doesn't support multi-queue Tx, update the number of
* CIDs to reflect the number of queues [older PFs didn't fill that
* field].
*/
if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
/* Update bulletin board size with response from PF */
p_iov->bulletin.size = resp->bulletin_size;
......@@ -338,10 +426,27 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
return rc;
}
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
u32 bar_size;
/* Regview size is fixed */
if (bar_id == BAR_ID_0)
return 1 << 17;
/* Doorbell is received from PF */
bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
if (bar_size)
return 1 << bar_size;
return 0;
}
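As a hedged illustration (not in-tree code) of why a zero return matters for 100g VFs: assuming, as on the PF side, that each hw-function owns half of the mapped doorbell bar, a second hwfn can only compute its doorbell window if the PF actually reported bar_size:

/* Hypothetical helper - a hwfn's share of the mapped doorbell bar */
static u32 example_vf_db_bar_per_hwfn(struct qed_hwfn *p_hwfn)
{
	u32 bar_size = qed_vf_hw_bar_size(p_hwfn, BAR_ID_1);

	if (!bar_size)
		return 0;	/* legacy PF didn't report it - can't split */

	/* Assumption: the bar is split evenly between hw-functions */
	return bar_size / p_hwfn->cdev->num_hwfns;
}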
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
struct qed_vf_iov *p_iov;
u32 reg;
int rc;
/* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
......@@ -349,10 +454,6 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->cdev->num_hwfns = 1;
/* Set the doorbell bar. Assumption: regview is set */
p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_DQ;
reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
......@@ -364,6 +465,30 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
if (!p_iov)
return -ENOMEM;
/* Doorbells are tricky; the upper layer has already set the hwfn doorbell
* value, but there are several incompatibility scenarios where that
* would be incorrect and we'd need to override it.
*/
if (!p_hwfn->doorbells) {
p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_DQ;
} else if (p_hwfn == p_lead) {
/* For leading hw-function, value is always correct, but need
* to handle scenario where legacy PF would not support 100g
* mapped bars later.
*/
p_iov->b_doorbell_bar = true;
} else {
/* here, value would be correct ONLY if the leading hwfn
* received indication that mapped-bars are supported.
*/
if (p_lead->vf_iov_info->b_doorbell_bar)
p_iov->b_doorbell_bar = true;
else
p_hwfn->doorbells = (u8 __iomem *)
p_hwfn->regview + PXP_VF_BAR0_START_DQ;
}
/* Allocate vf2pf msg */
p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
......@@ -403,7 +528,33 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.personality = QED_PCI_ETH;
return qed_vf_pf_acquire(p_hwfn);
rc = qed_vf_pf_acquire(p_hwfn);
/* If the VF is 100g, using a mapped bar, and the PF is too old to support
* that, acquisition would succeed - but the VF would have no way of knowing
* the size of the doorbell bar configured in HW and thus would not
* know how to split it for the 2nd hw-function.
* In this case we re-try without the indication of the mapped
* doorbell.
*/
if (!rc && p_iov->b_doorbell_bar &&
!qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
(p_hwfn->cdev->num_hwfns > 1)) {
rc = _qed_vf_pf_release(p_hwfn, false);
if (rc)
return rc;
p_iov->b_doorbell_bar = false;
p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_DQ;
rc = qed_vf_pf_acquire(p_hwfn);
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);
return rc;
free_vf2pf_request:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
......@@ -588,8 +739,8 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
req->hw_sb = p_cid->rel.sb;
req->sb_index = p_cid->rel.sb_idx;
req->hw_sb = p_cid->sb_igu_id;
req->sb_index = p_cid->sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1;
......@@ -609,6 +760,9 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
}
qed_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
......@@ -657,6 +811,8 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
qed_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
......@@ -697,8 +853,10 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
req->hw_sb = p_cid->rel.sb;
req->sb_index = p_cid->rel.sb_idx;
req->hw_sb = p_cid->sb_igu_id;
req->sb_index = p_cid->sb_idx;
qed_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
......@@ -728,8 +886,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
qid, *pp_doorbell, resp->offset);
"Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
qed_vf_pf_req_end(p_hwfn, rc);
......@@ -749,6 +907,8 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
qed_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
......@@ -1098,54 +1258,6 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
u32 size;
int rc;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
qed_vf_pf_req_end(p_hwfn, rc);
p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union vfpf_tlvs),
p_iov->vf2pf_request,
p_iov->vf2pf_request_phys);
if (p_iov->pf2vf_reply)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(union pfvf_tlvs),
p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
if (p_iov->bulletin.p_virt) {
size = sizeof(struct qed_bulletin_content);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
size,
p_iov->bulletin.p_virt, p_iov->bulletin.phys);
}
kfree(p_hwfn->vf_iov_info);
p_hwfn->vf_iov_info = NULL;
return rc;
}
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd)
{
......@@ -1363,6 +1475,16 @@ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}
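One hedged example of how the new accessor could be consumed (the actual caller is outside this excerpt): a VF can only advertise the xdp_supported capability added later in this diff if it was granted more connections than its Rx and Tx queues already consume, leaving cids free for XDP-forwarding queues:

/* Hypothetical capability derivation - not necessarily the in-tree logic */
static bool example_vf_xdp_possible(struct qed_hwfn *p_hwfn)
{
	u8 num_cids, num_rxqs, num_txqs;

	qed_vf_get_num_cids(p_hwfn, &num_cids);
	qed_vf_get_num_rxqs(p_hwfn, &num_rxqs);
	qed_vf_get_num_txqs(p_hwfn, &num_txqs);

	/* XDP needs an extra Tx cid beyond the regular Rx/Tx pairs */
	return num_cids > num_rxqs + num_txqs;
}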
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
memcpy(port_mac,
......
......@@ -46,7 +46,8 @@ struct vf_pf_resc_request {
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u16 padding;
u8 num_cids;
u8 padding;
};
struct hw_sb_info {
......@@ -113,6 +114,17 @@ struct vfpf_acquire_tlv {
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
/* As a requirement for supporting multi-Tx queues on a single queue-zone,
* the VF would pass qids as additional information whenever passing queue
* references.
*/
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2)
/* The VF is using the physical bar. While this is mostly internal
* to the VF, it might affect the number of CIDs supported assuming
* QUEUE_QIDS is set.
*/
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR BIT(3)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
......@@ -185,6 +197,9 @@ struct pfvf_acquire_resp_tlv {
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3)
u16 db_size;
u8 indices_per_sb;
u8 os_type;
......@@ -193,7 +208,8 @@ struct pfvf_acquire_resp_tlv {
u16 chip_rev;
u8 dev_type;
u8 padding;
/* Doorbell bar size configured in HW: log(size) or 0 */
u8 bar_size;
struct pfvf_stats_info stats_info;
......@@ -221,7 +237,8 @@ struct pfvf_acquire_resp_tlv {
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 padding[2];
u8 num_cids;
u8 padding;
} resc;
u32 bulletin_size;
......@@ -234,6 +251,16 @@ struct pfvf_start_queue_resp_tlv {
u8 padding[4];
};
/* Extended queue information - additional index for reference inside qzone.
* If communicated between VF/PF, each TLV relating to queues should be
* extended by one such [or have a future base TLV that already contains info].
*/
struct vfpf_qid_tlv {
struct channel_tlv tl;
u8 qid;
u8 padding[3];
};
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
......@@ -597,6 +624,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_RESERVED,
CHANNEL_TLV_QID,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
......@@ -605,6 +634,12 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
/* Default number of CIDs [total of both Rx and Tx] to be requested,
* and the maximum possible number.
*/
#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
#define QED_ETH_VF_MAX_NUM_CIDS (250)
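A hedged reading of these two values (the 16-queue figure is an assumption based on QED_MAX_VF_CHAINS_PER_PF): the default of 32 covers one Rx plus one Tx cid for each of a VF's 16 possible queue pairs, while a VF that mapped its physical doorbell bar asks for up to 250 so every queue-zone can also carry an XDP-forwarding Tx cid. A condensed sketch of the choice made in qed_vf_pf_acquire() earlier in this diff:

/* Hedged condensation of the acquire-request logic shown above */
static u8 example_vf_cid_request(struct qed_vf_iov *p_iov)
{
	return p_iov->b_doorbell_bar ? QED_ETH_VF_MAX_NUM_CIDS
				     : QED_ETH_VF_DEFAULT_NUM_CIDS;
}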
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
union vfpf_tlvs *vf2pf_request;
......@@ -635,6 +670,11 @@ struct qed_vf_iov {
* compatibility [with older PFs] we'd still need to store these.
*/
struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
/* Determines whether VF utilizes doorbells via limited register
* bar or via the doorbell bar.
*/
bool b_doorbell_bar;
};
#ifdef CONFIG_QED_SRIOV
......@@ -683,6 +723,22 @@ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
* @brief Get number of Tx queues allocated for VF by qed
*
* @param p_hwfn
* @param num_txqs - allocated TX queues
*/
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
/**
* @brief Get number of available connections [both Rx and Tx] for VF
*
* @param p_hwfn
* @param num_cids - allocated number of connections
*/
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
/**
* @brief Get port mac address for VF
*
......@@ -935,6 +991,8 @@ void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_tunn);
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
......@@ -956,6 +1014,14 @@ static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}
static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
}
static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
}
static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
......@@ -1107,6 +1173,13 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
{
return -EINVAL;
}
static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
return 0;
}
#endif
#endif
......@@ -580,6 +580,24 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_features_check = qede_features_check,
};
static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
};
/* -------------------------------------------------------------------------
* START OF PROBE / REMOVE
* -------------------------------------------------------------------------
......@@ -645,10 +663,14 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->watchdog_timeo = TX_TIMEOUT;
if (IS_VF(edev))
ndev->netdev_ops = &qede_netdev_vf_ops;
else
if (IS_VF(edev)) {
if (edev->dev_info.xdp_supported)
ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
else
ndev->netdev_ops = &qede_netdev_vf_ops;
} else {
ndev->netdev_ops = &qede_netdev_ops;
}
qede_set_ethtool_ops(ndev);
......@@ -846,6 +868,12 @@ static void qede_update_pf_params(struct qed_dev *cdev)
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
/* Same for VFs - make sure they'll have sufficient connections
* to support XDP Tx queues.
*/
pf_params.eth_pf_params.num_vf_cons = 48;
#ifdef CONFIG_RFS_ACCEL
pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
#endif
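A hedged note on the 48 above (an assumption, mirroring the PF-side "64 rx + 64 tx + 64 XDP" sizing earlier in this hunk): it presumably budgets one Rx, one Tx and one XDP-forwarding connection for each of a VF's 16 possible queue pairs, i.e. 48 = 16 x (1 Rx + 1 Tx + 1 XDP).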
......@@ -1770,7 +1798,7 @@ static int qede_start_txq(struct qede_dev *edev,
else
params.queue_id = txq->index;
params.sb = fp->sb_info->igu_sb_id;
params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
......@@ -1849,7 +1877,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
memset(&q_params, 0, sizeof(q_params));
q_params.queue_id = rxq->rxq_id;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
q_params.p_sb = fp->sb_info;
q_params.sb_idx = RX_PI;
p_phys_table =
......
......@@ -47,8 +47,7 @@ struct qed_queue_start_common_params {
/* Relative, but relevant only for PFs */
u8 stats_id;
/* These are always absolute */
u16 sb;
struct qed_sb_info *p_sb;
u8 sb_idx;
};
......@@ -74,6 +73,9 @@ struct qed_dev_eth_info {
/* Legacy VF - this affects the datapath, so qede has to know */
bool is_legacy;
/* Might depend on available resources [in case of VF] */
bool xdp_supported;
};
struct qed_update_vport_rss_params {
......
......@@ -185,6 +185,10 @@ struct qed_eth_pf_params {
*/
u16 num_cons;
/* per-VF number of CIDs */
u8 num_vf_cons;
#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
/* To enable arfs, previous to HW-init a positive number needs to be
* set [as filters require allocated searcher ILT memory].
* This will set the maximal number of configured steering-filters.
......