Commit 08bc8f15 authored by Yuval Mintz, committed by David S. Miller

qed: Multiple qzone queues for VFs

This adds the infrastructure for supporting VFs that want to open
multiple transmission queues on the same queue-zone.
At this point, there are no VFs that actually request this functionality,
but later patches would remedy that.

 a. VF and PF would communicate the capability during ACQUIRE;
    legacy VFs would continue behaving as they do today

 b. PF would communicate the number of supported CIDs to the VF
    and would enforce said limitation

 c. Whenever a VF passes a request for a given queue configuration,
    it would also pass the associated index within said queue-zone
    (see the sketch following this list)
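
To make the handshake concrete, below is a minimal, self-contained C sketch of steps a and b, plus the gate a VF applies before attaching a qid to its queue messages (step c). The struct layouts and helper names here are simplified, hypothetical stand-ins rather than the driver's actual vfpf_acquire_tlv/pfvf_acquire_resp_tlv definitions; only the capability-bit positions mirror this patch.

#include <stdint.h>
#include <stdio.h>

#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1ULL << 2)	/* VF requests multi-qid */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS (1ULL << 3)	/* PF grants multi-qid */

/* Hypothetical, simplified acquire request/response resources */
struct acquire_req {
	uint64_t capabilities;
	uint8_t num_cids;	/* requested CIDs, Rx + Tx combined */
};

struct acquire_resp {
	uint64_t capabilities;
	uint8_t num_cids;	/* CIDs granted by the PF */
};

/* Steps a+b, PF side: echo the capability only to VFs that requested
 * it, and clamp the requested CID count to the per-VF limit.
 */
static void pf_handle_acquire(const struct acquire_req *req,
			      struct acquire_resp *resp, uint8_t num_vf_cons)
{
	if (req->capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		resp->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	resp->num_cids = req->num_cids < num_vf_cons ?
			 req->num_cids : num_vf_cons;
}

/* Step c, VF side: attach the per-queue qid only once the PF has
 * acknowledged the capability; legacy PFs never see the new TLV.
 */
static int vf_should_add_qid(const struct acquire_resp *resp)
{
	return !!(resp->capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS);
}

int main(void)
{
	struct acquire_req req = {
		.capabilities = VFPF_ACQUIRE_CAP_QUEUE_QIDS,
		.num_cids = 32,		/* mirrors the default request of 32 */
	};
	struct acquire_resp resp = { 0 };

	pf_handle_acquire(&req, &resp, 16);	/* assumed PF per-VF limit */
	printf("multi-qid granted: %d, cids granted: %u\n",
	       vf_should_add_qid(&resp), (unsigned int)resp.num_cids);
	return 0;
}

In the patch itself, this gate lives in qed_vf_pf_add_qid(), which appends a CHANNEL_TLV_QID TLV to every queue start/stop/update message once the PF has advertised PFVF_ACQUIRE_CAP_QUEUE_QIDS.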
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 007bc371
@@ -2116,8 +2116,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 		struct qed_eth_pf_params *p_params =
 		    &p_hwfn->pf_params.eth_pf_params;
 
-		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
-					    p_params->num_cons, 1);
+		if (!p_params->num_vf_cons)
+			p_params->num_vf_cons =
+			    ETH_PF_PARAMS_VF_CONS_DEFAULT;
+		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+					    p_params->num_cons,
+					    p_params->num_vf_cons);
 		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
 		break;
 	}
...
@@ -198,10 +198,10 @@ static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
 			       struct qed_queue_cid *p_cid)
 {
-	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
-	if ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
-	    IS_PF(p_hwfn->cdev))
-		qed_cxt_release_cid(p_hwfn, p_cid->cid);
+	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
+
+	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
 
 	/* For PF's VFs we maintain the index inside queue-zone in IOV */
 	if (p_cid->vfid == QED_QUEUE_CID_SELF)
@@ -319,18 +319,30 @@ qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 		     struct qed_queue_cid_vf_params *p_vf_params)
 {
 	struct qed_queue_cid *p_cid;
+	u8 vfid = QED_CXT_PF_CID;
 	bool b_legacy_vf = false;
 	u32 cid = 0;
 
-	/* Currently, PF doesn't need to allocate CIDs for any VF */
-	if (p_vf_params)
-		b_legacy_vf = true;
+	/* In case of legacy VFs, The CID can be derived from the additional
+	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
+	 * use the vf_qid for this purpose as well.
+	 */
+	if (p_vf_params) {
+		vfid = p_vf_params->vfid;
+
+		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
+			b_legacy_vf = true;
+			cid = p_vf_params->vf_qid;
+		}
+	}
 
 	/* Get a unique firmware CID for this queue, in case it's a PF.
 	 * VF's don't need a CID as the queue configuration will be done
 	 * by PF.
 	 */
 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
-		if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+					 &cid, vfid)) {
 			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
 			return NULL;
 		}
@@ -339,7 +351,7 @@ qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
 				      p_params, b_is_rx, p_vf_params);
 	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
-		qed_cxt_release_cid(p_hwfn, cid);
+		_qed_cxt_release_cid(p_hwfn, cid, vfid);
 
 	return p_cid;
 }
...
@@ -47,12 +47,16 @@
 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
 {
-	u8 legacy = QED_QCID_LEGACY_VF_CID;
+	u8 legacy = 0;
 
 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
 		legacy |= QED_QCID_LEGACY_VF_RX_PROD;
 
+	if (!(p_vf->acquire.vfdev_info.capabilities &
+	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+		legacy |= QED_QCID_LEGACY_VF_CID;
+
 	return legacy;
 }
@@ -1413,6 +1417,10 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
 	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
 					 p_req->num_vlan_filters);
 
+	p_resp->num_cids =
+	    min_t(u8, p_req->num_cids,
+		  p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+
 	/* This isn't really needed/enforced, but some legacy VFs might depend
 	 * on the correct filling of this field.
 	 */
@@ -1424,10 +1432,11 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
 	    p_resp->num_sbs < p_req->num_sbs ||
 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
-	    p_resp->num_mc_filters < p_req->num_mc_filters) {
+	    p_resp->num_mc_filters < p_req->num_mc_filters ||
+	    p_resp->num_cids < p_req->num_cids) {
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_IOV,
-			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
 			   p_vf->abs_vf_id,
 			   p_req->num_rxqs,
 			   p_resp->num_rxqs,
@@ -1439,7 +1448,9 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
 			   p_resp->num_mac_filters,
 			   p_req->num_vlan_filters,
 			   p_resp->num_vlan_filters,
-			   p_req->num_mc_filters, p_resp->num_mc_filters);
+			   p_req->num_mc_filters,
+			   p_resp->num_mc_filters,
+			   p_req->num_cids, p_resp->num_cids);
 
 	/* Some legacy OSes are incapable of correctly handling this
 	 * failure.
@@ -1555,6 +1566,12 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	if (p_hwfn->cdev->num_hwfns > 1)
 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 
+	/* Share our ability to use multiple queue-ids only with VFs
+	 * that request it.
+	 */
+	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
 	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
 
 	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
@@ -1977,10 +1994,37 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
 			     struct qed_vf_info *p_vf, bool b_is_tx)
 {
-	if (b_is_tx)
-		return QED_IOV_LEGACY_QID_TX;
-	else
-		return QED_IOV_LEGACY_QID_RX;
+	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+	struct vfpf_qid_tlv *p_qid_tlv;
+
+	/* Search for the qid if the VF published its going to provide it */
+	if (!(p_vf->acquire.vfdev_info.capabilities &
+	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+		if (b_is_tx)
+			return QED_IOV_LEGACY_QID_TX;
+		else
+			return QED_IOV_LEGACY_QID_RX;
+	}
+
+	p_qid_tlv = (struct vfpf_qid_tlv *)
+		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
					     CHANNEL_TLV_QID);
+	if (!p_qid_tlv) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF[%2x]: Failed to provide qid\n",
+			   p_vf->relative_vf_id);
+
+		return QED_IOV_QID_INVALID;
+	}
+
+	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
+			   p_vf->relative_vf_id, p_qid_tlv->qid);
+		return QED_IOV_QID_INVALID;
+	}
+
+	return p_qid_tlv->qid;
 }
 
 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
@@ -2006,7 +2050,12 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 		goto out;
 
 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+	if (qid_usage_idx == QED_IOV_QID_INVALID)
+		goto out;
+
 	p_queue = &vf->vf_queues[req->rx_qid];
+	if (p_queue->cids[qid_usage_idx].p_cid)
+		goto out;
 
 	vf_legacy = qed_vf_calculate_legacy(vf);
@@ -2332,12 +2381,17 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
 	req = &mbx->req_virt->start_txq;
 
 	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
-				  QED_IOV_VALIDATE_Q_DISABLE) ||
+				  QED_IOV_VALIDATE_Q_NA) ||
 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+	if (qid_usage_idx == QED_IOV_QID_INVALID)
+		goto out;
+
 	p_queue = &vf->vf_queues[req->tx_qid];
+	if (p_queue->cids[qid_usage_idx].p_cid)
+		goto out;
 
 	vf_legacy = qed_vf_calculate_legacy(vf);
@@ -2388,17 +2442,33 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
 	struct qed_vf_queue *p_queue;
 	int rc = 0;
 
-	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
-				  QED_IOV_VALIDATE_Q_ENABLE)) {
+	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_IOV,
-			   "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
-			   vf->relative_vf_id, rxq_id);
+			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+			   vf->relative_vf_id, rxq_id, qid_usage_idx);
 		return -EINVAL;
 	}
 
 	p_queue = &vf->vf_queues[rxq_id];
 
+	/* We've validated the index and the existence of the active RXQ -
+	 * now we need to make sure that it's using the correct qid.
+	 */
+	if (!p_queue->cids[qid_usage_idx].p_cid ||
+	    p_queue->cids[qid_usage_idx].b_is_tx) {
+		struct qed_queue_cid *p_cid;
+
+		p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+			   vf->relative_vf_id,
+			   rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
+		return -EINVAL;
+	}
+
+	/* Now that we know we have a valid Rx-queue - close it */
 	rc = qed_eth_rx_queue_stop(p_hwfn,
 				   p_queue->cids[qid_usage_idx].p_cid,
 				   false, cqe_completion);
@@ -2418,11 +2488,13 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
 	struct qed_vf_queue *p_queue;
 	int rc = 0;
 
-	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
-				  QED_IOV_VALIDATE_Q_ENABLE))
+	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
 		return -EINVAL;
 
 	p_queue = &vf->vf_queues[txq_id];
+	if (!p_queue->cids[qid_usage_idx].p_cid ||
+	    !p_queue->cids[qid_usage_idx].b_is_tx)
+		return -EINVAL;
 
 	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
 	if (rc)
@@ -2458,6 +2530,8 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
 	/* Find which qid-index is associated with the queue */
 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+	if (qid_usage_idx == QED_IOV_QID_INVALID)
+		goto out;
 
 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
 				  qid_usage_idx, req->cqe_completion);
@@ -2494,6 +2568,8 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
 	/* Find which qid-index is associated with the queue */
 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+	if (qid_usage_idx == QED_IOV_QID_INVALID)
+		goto out;
 
 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
 	if (!rc)
@@ -2524,15 +2600,35 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+	if (qid_usage_idx == QED_IOV_QID_INVALID)
+		goto out;
 
-	/* Validate inputs */
-	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+	/* There shouldn't exist a VF that uses queue-qids yet uses this
+	 * API with multiple Rx queues. Validate this.
+	 */
+	if ((vf->acquire.vfdev_info.capabilities &
+	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF[%d] supports QIDs but sends multiple queues\n",
+			   vf->relative_vf_id);
+		goto out;
+	}
+
+	/* Validate inputs - for the legacy case this is still true since
+	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+	 */
+	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
 		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
-					  QED_IOV_VALIDATE_Q_ENABLE)) {
-			DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
-				vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+					  QED_IOV_VALIDATE_Q_NA) ||
+		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+				   vf->relative_vf_id, req->rx_qid,
+				   req->num_rxqs);
 			goto out;
 		}
+	}
 
 	/* Prepare the handlers */
 	for (i = 0; i < req->num_rxqs; i++) {
...
@@ -151,6 +151,7 @@ struct qed_iov_vf_mbx {
 #define QED_IOV_LEGACY_QID_RX (0)
 #define QED_IOV_LEGACY_QID_TX (1)
+#define QED_IOV_QID_INVALID (0xFE)
 
 struct qed_vf_queue_cid {
 	bool b_is_tx;
...
@@ -153,6 +153,22 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 	return rc;
 }
 
+static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
+			      struct qed_queue_cid *p_cid)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_qid_tlv *p_qid_tlv;
+
+	/* Only add QIDs for the queue if it was negotiated with PF */
+	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+		return;
+
+	p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+				CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+	p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
 #define VF_ACQUIRE_THRESH 3
 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
 					  struct vf_pf_resc_request *p_req,
@@ -160,7 +176,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
 {
 	DP_VERBOSE(p_hwfn,
 		   QED_MSG_IOV,
-		   "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+		   "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
 		   p_req->num_rxqs,
 		   p_resp->num_rxqs,
 		   p_req->num_rxqs,
@@ -171,7 +187,8 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
 		   p_resp->num_mac_filters,
 		   p_req->num_vlan_filters,
 		   p_resp->num_vlan_filters,
-		   p_req->num_mc_filters, p_resp->num_mc_filters);
+		   p_req->num_mc_filters,
+		   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
 
 	/* humble our request */
 	p_req->num_txqs = p_resp->num_txqs;
@@ -180,6 +197,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
 	p_req->num_mac_filters = p_resp->num_mac_filters;
 	p_req->num_vlan_filters = p_resp->num_vlan_filters;
 	p_req->num_mc_filters = p_resp->num_mc_filters;
+	p_req->num_cids = p_resp->num_cids;
 }
 
 static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
@@ -204,6 +222,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
 	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
 	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+	p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
 
 	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
 	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
@@ -307,6 +326,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
 		p_iov->b_pre_fp_hsi = true;
 
+	/* In case PF doesn't support multi-queue Tx, update the number of
+	 * CIDs to reflect the number of queues [older PFs didn't fill that
+	 * field].
+	 */
+	if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+		resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
+
 	/* Update bulletin board size with response from PF */
 	p_iov->bulletin.size = resp->bulletin_size;
 
@@ -609,6 +635,9 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
 		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
 				  (u32 *)(&init_prod_val));
 	}
+
+	qed_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	qed_add_tlv(p_hwfn, &p_iov->offset,
 		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -657,6 +686,8 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
 	req->num_rxqs = 1;
 	req->cqe_completion = cqe_completion;
 
+	qed_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	qed_add_tlv(p_hwfn, &p_iov->offset,
 		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -700,6 +731,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
 	req->hw_sb = p_cid->sb_igu_id;
 	req->sb_index = p_cid->sb_idx;
 
+	qed_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	qed_add_tlv(p_hwfn, &p_iov->offset,
 		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -749,6 +782,8 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
 	req->tx_qid = p_cid->rel.queue_id;
 	req->num_txqs = 1;
 
+	qed_vf_pf_add_qid(p_hwfn, p_cid);
+
 	/* add list termination tlv */
 	qed_add_tlv(p_hwfn, &p_iov->offset,
 		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
...
@@ -46,7 +46,8 @@ struct vf_pf_resc_request {
 	u8 num_mac_filters;
 	u8 num_vlan_filters;
 	u8 num_mc_filters;
-	u16 padding;
+	u8 num_cids;
+	u8 padding;
 };
 
 struct hw_sb_info {
@@ -113,6 +114,11 @@ struct vfpf_acquire_tlv {
 struct vf_pf_vfdev_info {
 #define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
 #define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+
+	/* A requirement for supporting multi-Tx queues on a single queue-zone,
+	 * VF would pass qids as additional information whenever passing queue
+	 * references.
+	 */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS	BIT(2)
 	u64 capabilities;
 	u8 fw_major;
 	u8 fw_minor;
@@ -185,6 +191,9 @@ struct pfvf_acquire_resp_tlv {
 	 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)
 
+	/* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS		BIT(3)
+
 	u16 db_size;
 	u8 indices_per_sb;
 	u8 os_type;
@@ -221,7 +230,8 @@ struct pfvf_acquire_resp_tlv {
 		u8 num_mac_filters;
 		u8 num_vlan_filters;
 		u8 num_mc_filters;
-		u8 padding[2];
+		u8 num_cids;
+		u8 padding;
 	} resc;
 
 	u32 bulletin_size;
@@ -234,6 +244,16 @@ struct pfvf_start_queue_resp_tlv {
 	u8 padding[4];
 };
 
+/* Extended queue information - additional index for reference inside qzone.
+ * If commmunicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+	struct channel_tlv tl;
+	u8 qid;
+	u8 padding[3];
+};
+
 /* Setup Queue */
 struct vfpf_start_rxq_tlv {
 	struct vfpf_first_tlv first_tlv;
@@ -597,6 +617,8 @@ enum {
 	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
 	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
 	CHANNEL_TLV_UPDATE_TUNN_PARAM,
+	CHANNEL_TLV_RESERVED,
+	CHANNEL_TLV_QID,
 	CHANNEL_TLV_MAX,
 
 	/* Required for iterating over vport-update tlvs.
@@ -605,6 +627,12 @@ enum {
 	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
 };
 
+/* Default number of CIDs [total of both Rx and Tx] to be requested
+ * by default, and maximum possible number.
+ */
+#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
+#define QED_ETH_VF_MAX_NUM_CIDS (250)
+
 /* This data is held in the qed_hwfn structure for VFs only. */
 struct qed_vf_iov {
 	union vfpf_tlvs *vf2pf_request;
...
@@ -185,6 +185,10 @@ struct qed_eth_pf_params {
 	 */
 	u16 num_cons;
 
+	/* per-VF number of CIDs */
+	u8 num_vf_cons;
+#define ETH_PF_PARAMS_VF_CONS_DEFAULT	(32)
+
 	/* To enable arfs, previous to HW-init a positive number needs to be
 	 * set [as filters require allocated searcher ILT memory].
 	 * This will set the maximal number of configured steering-filters.
...