Commit bf5a94bf authored by Rahul Verma, committed by David S. Miller

qed: Read per queue coalesce from hardware

Retrieve the actual coalesce value from hardware for every Rx/Tx
queue, instead of the Rx/Tx coalesce value cached during set coalesce.
Signed-off-by: Rahul Verma <Rahul.Verma@cavium.com>
Signed-off-by: Yuval Mintz <yuval.mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 477f2d14
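Before this change, the ethtool coalesce query was answered from values cached in struct qed_dev at set time; after it, the answer is read back per queue from the CAU and queue-zone registers. For orientation, here is a minimal userspace sketch of the standard ETHTOOL_GCOALESCE query that ultimately lands in the code below. It is illustrative rather than part of the patch, and the interface name "eth0" is an assumption:

/* Userspace sketch: read coalesce settings the way `ethtool -c eth0` does.
 * The interface name "eth0" is an assumption. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecoal;

	/* With this patch, the qed/qede answer reflects what the hardware
	 * is actually using, not a driver-side cache. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-usecs %u, tx-usecs %u\n",
		       ecoal.rx_coalesce_usecs, ecoal.tx_coalesce_usecs);

	close(fd);
	return 0;
}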
@@ -443,13 +443,25 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 		      struct qed_ptt *p_ptt, u16 id, bool is_vf);

 /**
- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx or
- *    Tx queue. We can configure coalescing to up to 511, but on
- *    varying accuracy [the bigger the value the less accurate] up to a mistake
- *    of 3usec for the highest values.
- *    While the API allows setting coalescing per-qid, all queues sharing a SB
- *    should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
- *    otherwise configuration would break.
+ * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - buffer to store the coalesce value read from the hardware.
+ * @param p_handle
+ *
+ * @return int
+ **/
+int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
+
+/**
+ * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
+ *    Tx queues. Coalescing can be configured up to 511 usec, with accuracy
+ *    decreasing as the value grows [an error of up to 3 usec at the highest
+ *    values].
+ *    While the API allows setting coalescing per-qid, all queues sharing a SB
+ *    should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ *    0x100-0x1ff], otherwise the configuration would break.
  *
  * @param rx_coal - Rx Coalesce value in microseconds.
  * @param tx_coal - TX Coalesce value in microseconds.
......
@@ -2047,6 +2047,106 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_queue_cid *p_cid, u16 *p_rx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
int rc;
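/* Pull this queue's CAU status-block entry over DMAE; it carries the timer
 * resolutions the hardware applies to the coalescing timesets. */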
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(uintptr_t)&sb_entry, 2, 0);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
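/* Rx coalescing runs on timer-resolution 0 of the SB entry; the configured
 * timeset itself lives in this queue's USTORM queue-zone RAM. */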
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
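/* A clear VALID bit means coalescing was never set for this queue; otherwise
 * the microsecond value is the stored timeset scaled by the timer resolution. */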
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return -EINVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_rx_coal = (u16)(coalesce << timer_res);
return 0;
}
int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_queue_cid *p_cid, u16 *p_tx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
int rc;
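/* Mirror image of the Rx path: Tx coalescing uses timer-resolution 1 and
 * keeps its timeset in the XSTORM (Tx) queue zone. */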
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(uintptr_t)&sb_entry, 2, 0);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return -EINVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_tx_coal = (u16)(coalesce << timer_res);
return 0;
}
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
struct qed_queue_cid *p_cid = handle;
struct qed_ptt *p_ptt;
int rc = 0;
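/* A VF has no direct register access; forward the read to the PF over the
 * VF->PF channel. */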
if (IS_VF(p_hwfn->cdev)) {
rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
if (rc)
DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
return rc;
}
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
if (p_cid->b_is_rx) {
rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc)
goto out;
} else {
rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc)
goto out;
}
out:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
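The (u16)(coalesce << timer_res) at the end of both getters inverts the encoding used on the set side: a microsecond value is stored as a timeset plus a timer resolution. A self-contained sketch of that encoding follows; mapping the three documented ranges (0-0x7f, 0x80-0xff, 0x100-0x1ff) to resolutions 0, 1 and 2 is an inference from the header comment above, not code taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the range rules quoted in the header
 * comment: pick the smallest timer resolution whose 7-bit timeset can
 * hold the requested value. */
static int coal_encode(uint16_t usecs, uint8_t *timer_res, uint8_t *timeset)
{
	if (usecs <= 0x7f)
		*timer_res = 0;
	else if (usecs <= 0xff)
		*timer_res = 1;
	else if (usecs <= 0x1ff)
		*timer_res = 2;
	else
		return -1;	/* beyond the 511 usec limit */

	*timeset = (uint8_t)(usecs >> *timer_res);
	return 0;
}

int main(void)
{
	uint8_t res, set;
	uint16_t usecs = 300;

	if (coal_encode(usecs, &res, &set))
		return 1;

	/* The getters recover the value as timeset << timer_res, so up to
	 * (1 << timer_res) - 1 usec of precision is lost: at most 3 usec,
	 * matching the "error of up to 3 usec" noted in the API comment. */
	printf("%u usec -> timeset 0x%x, res %u, readback %u usec\n",
	       (unsigned)usecs, (unsigned)set, (unsigned)res,
	       (unsigned)(set << res));
	return 0;
}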
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -2696,6 +2796,20 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
return rc;
}
static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
struct qed_queue_cid *p_cid = handle;
struct qed_hwfn *p_hwfn;
int rc;
p_hwfn = p_cid->p_owner;
rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
if (rc)
DP_NOTICE(p_hwfn, "Unable to read queue calescing\n");
return rc;
}
static int qed_fp_cqe_completion(struct qed_dev *dev,
u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
@@ -2739,6 +2853,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.tunn_config = &qed_tunn_configure,
.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
.configure_arfs_searcher = &qed_configure_arfs_searcher,
.get_coalesce = &qed_get_coalesce,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
......
@@ -407,4 +407,13 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
 int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
 			 struct qed_ptt *p_ptt,
 			 u16 coalesce, struct qed_queue_cid *p_cid);
-#endif /* _QED_L2_H */
+
+int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 struct qed_queue_cid *p_cid, u16 *p_hw_coal);
+
+int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 struct qed_queue_cid *p_cid, u16 *p_hw_coal);
+
+#endif
@@ -1568,12 +1568,6 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
 	return rc;
 }

-static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
-{
-	*rx_coal = cdev->rx_coalesce_usecs;
-	*tx_coal = cdev->tx_coalesce_usecs;
-}
-
 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
 			    void *handle)
 {
@@ -1726,7 +1720,6 @@ const struct qed_common_ops qed_common_ops_pass = {
 	.chain_alloc = &qed_chain_alloc,
 	.chain_free = &qed_chain_free,
 	.nvm_get_image = &qed_nvm_get_image,
-	.get_coalesce = &qed_get_coalesce,
 	.set_coalesce = &qed_set_coalesce,
 	.set_led = &qed_set_led,
 	.update_drv_state = &qed_update_drv_state,
......
@@ -3400,6 +3400,75 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
length, status);
}
static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf)
{
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_read_coal_resp_tlv *p_resp;
struct vfpf_read_coal_req_tlv *req;
u8 status = PFVF_STATUS_FAILURE;
struct qed_vf_queue *p_queue;
struct qed_queue_cid *p_cid;
u16 coal = 0, qid, i;
bool b_is_rx;
int rc = 0;
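/* Flow: parse the VF's request, validate the relative queue index against
 * the VF's resources, read the value back from hardware, and reply with a
 * coalesce-read TLV. */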
mbx->offset = (u8 *)mbx->reply_virt;
req = &mbx->req_virt->read_coal_req;
qid = req->qid;
b_is_rx = req->is_rx ? true : false;
if (b_is_rx) {
if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Invalid Rx queue_id = %d\n",
p_vf->abs_vf_id, qid);
goto send_resp;
}
p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
if (rc)
goto send_resp;
} else {
if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Invalid Tx queue_id = %d\n",
p_vf->abs_vf_id, qid);
goto send_resp;
}
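/* A queue zone may hold several cids; report the first Tx cid found for
 * this qid. */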
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
p_queue = &p_vf->vf_queues[qid];
if ((!p_queue->cids[i].p_cid) ||
(!p_queue->cids[i].b_is_tx))
continue;
p_cid = p_queue->cids[i].p_cid;
rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
if (rc)
goto send_resp;
break;
}
}
status = PFVF_STATUS_SUCCESS;
send_resp:
p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
sizeof(*p_resp));
p_resp->coal = coal;
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -3450,6 +3519,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
goto out;
}
vf->rx_coal = rx_coal;
}
if (tx_coal) {
@@ -3473,6 +3543,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
goto out;
}
}
vf->tx_coal = tx_coal;
}
status = PFVF_STATUS_SUCCESS;
@@ -3808,6 +3879,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_UPDATE:
qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_COALESCE_READ:
qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
......
@@ -217,6 +217,9 @@ struct qed_vf_info {
u8 num_rxqs;
u8 num_txqs;
u16 rx_coal;
u16 tx_coal;
u8 num_sbs;
u8 num_mac_filters;
......
@@ -1343,6 +1343,37 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
return rc;
}
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_read_coal_resp_tlv *resp;
struct vfpf_read_coal_req_tlv *req;
int rc;
/* clear mailbox and prep header tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
req->qid = p_cid->rel.queue_id;
req->is_rx = p_cid->b_is_rx ? 1 : 0;
qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
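/* Post the request and collect the PF's reply from the shared mailbox. */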
resp = &p_iov->pf2vf_reply->read_coal_resp;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
goto exit;
if (resp->hdr.status != PFVF_STATUS_SUCCESS)
goto exit;
*p_coal = resp->coal;
exit:
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
......
@@ -504,6 +504,20 @@ struct vfpf_update_coalesce {
u16 qid;
u8 padding[2];
};
struct vfpf_read_coal_req_tlv {
struct vfpf_first_tlv first_tlv;
u16 qid;
u8 is_rx;
u8 padding[5];
};
struct pfvf_read_coal_resp_tlv {
struct pfvf_tlv hdr;
u16 coal;
u8 padding[6];
};
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -517,7 +531,7 @@ union vfpf_tlvs {
 	struct vfpf_ucast_filter_tlv ucast_filter;
 	struct vfpf_update_tunn_param_tlv tunn_param_update;
 	struct vfpf_update_coalesce update_coalesce;
-	struct channel_list_end_tlv list_end;
+	struct vfpf_read_coal_req_tlv read_coal_req;
 	struct tlv_buffer_size tlv_buf_size;
 };
@@ -527,6 +541,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
struct pfvf_read_coal_resp_tlv read_coal_resp;
};
enum qed_bulletin_bit {
@@ -634,6 +649,7 @@ enum {
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -699,6 +715,17 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal,
u16 tx_coal, struct qed_queue_cid *p_cid);
/**
* @brief VF - Get the coalesce value of a VF's relative queue.
*
* @param p_hwfn
* @param p_coal - coalesce value in microseconds for the VF's queues.
* @param p_cid - queue cid
*
**/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid);
#ifdef CONFIG_QED_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed
......
@@ -702,16 +702,53 @@ static u32 qede_get_link(struct net_device *dev)
 static int qede_get_coalesce(struct net_device *dev,
 			     struct ethtool_coalesce *coal)
 {
+	void *rx_handle = NULL, *tx_handle = NULL;
 	struct qede_dev *edev = netdev_priv(dev);
-	u16 rxc, txc;
+	u16 rx_coal, tx_coal, i, rc = 0;
+	struct qede_fastpath *fp;
+
+	rx_coal = QED_DEFAULT_RX_USECS;
+	tx_coal = QED_DEFAULT_TX_USECS;

 	memset(coal, 0, sizeof(struct ethtool_coalesce));
-	edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
-	coal->rx_coalesce_usecs = rxc;
-	coal->tx_coalesce_usecs = txc;

-	return 0;
+	__qede_lock(edev);
+	if (edev->state == QEDE_STATE_OPEN) {
+		for_each_queue(i) {
+			fp = &edev->fp_array[i];
+
+			if (fp->type & QEDE_FASTPATH_RX) {
+				rx_handle = fp->rxq->handle;
+				break;
+			}
+		}
+
+		rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle);
+		if (rc) {
+			DP_INFO(edev, "Read Rx coalesce error\n");
+			goto out;
+		}
+
+		for_each_queue(i) {
+			fp = &edev->fp_array[i];
+			if (fp->type & QEDE_FASTPATH_TX) {
+				tx_handle = fp->txq->handle;
+				break;
+			}
+		}
+
+		rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle);
+		if (rc)
+			DP_INFO(edev, "Read Tx coalesce error\n");
+	}
+
+out:
+	__qede_unlock(edev);
+
+	coal->rx_coalesce_usecs = rx_coal;
+	coal->tx_coalesce_usecs = tx_coal;
+
+	return rc;
 }
static int qede_set_coalesce(struct net_device *dev,
......
@@ -323,6 +323,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
bool en_searcher);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
......
@@ -186,6 +186,9 @@ enum qed_led_mode {
#define QED_COALESCE_MAX 0xFF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48
/* forward */
struct qed_dev;
@@ -673,16 +674,6 @@ struct qed_common_ops {
 	int (*nvm_get_image)(struct qed_dev *cdev,
 			     enum qed_nvm_images type, u8 *buf, u16 len);

-/**
- * @brief get_coalesce - Get coalesce parameters in usec
- *
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- *
- */
-	void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
-
 /**
  * @brief set_coalesce - Configure Rx coalesce value in usec
  *
......