Commit 722003ac authored by Sudarsana Reddy Kalluru, committed by David S. Miller

qed: Add support for coalescing config read/update.

This patch adds support for configuring the device Tx/Rx coalescing
timeout values in units of microseconds. It also adds APIs that upper-layer
drivers can use to read and update the coalescing values.
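
For illustration, a minimal sketch of how an upper-layer driver could consume
the new common ops; the function and variable names below are placeholders for
this example and are not part of the patch:

static int example_tune_coalesce(struct qed_dev *cdev,
				 const struct qed_common_ops *ops)
{
	u16 rx_usecs, tx_usecs;
	int rc;

	/* Read the currently configured values (in microseconds). */
	ops->get_coalesce(cdev, &rx_usecs, &tx_usecs);

	/* Request new values for queue 0 / status block 0; the device
	 * accepts values up to 0x1FF (511 usec).
	 */
	rc = ops->set_coalesce(cdev, 120, 240, 0, 0);
	if (rc)
		pr_err("coalesce config failed: %d\n", rc);

	return rc;
}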
Signed-off-by: Sudarsana Reddy Kalluru <sudarsana.kalluru@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dc14341e
@@ -2222,6 +2222,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
	return 0;
}
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	p_coal_timeset = p_eth_qzone;
	memset(p_coal_timeset, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
	return rc;
}
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
	return rc;
}
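
As a worked illustration of the timer_res/timeset derivation used in the two
functions above, here is a hypothetical standalone helper (qed_calc_coalesce()
is not part of the patch):

static int qed_calc_coalesce(u16 coalesce, u8 *timer_res, u8 *timeset)
{
	/* Coalesce = (timeset << timer_res), timeset is 7 bits wide */
	if (coalesce <= 0x7F)
		*timer_res = 0;
	else if (coalesce <= 0xFF)
		*timer_res = 1;
	else if (coalesce <= 0x1FF)
		*timer_res = 2;
	else
		return -EINVAL;

	*timeset = (u8)(coalesce >> *timer_res);
	return 0;
}

/* Example: coalesce = 301 usec -> timer_res = 2, timeset = 301 >> 2 = 75;
 * the effective value is 75 << 2 = 300 usec, i.e. a quantization error of
 * 1 usec (at most 3 usec in the 0x100-0x1FF range).
 */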
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
...
@@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		  u32 size_in_dwords,
		  u32 flags);
/**
 * @brief qed_dmae_grc2host - Read data from dmae data offset
 * into a destination address using the given ptt
 *
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		      u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
		      u32 flags);
/**
 * @brief qed_dmae_host2host - copy data from a source address
 * to a destination address (for SRIOV) using the given ptt
@@ -308,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
 * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
 * Coalescing can be configured with values up to 511 usec, but accuracy
 * decreases as the value grows [with an error of up to 3 usec for the
 * highest values].
 *
 * @param p_hwfn
 * @param p_ptt
 * @param coalesce - Coalesce value in microseconds.
 * @param qid - Queue index.
 * @param sb_id - Status Block Id.
 *
 * @return int
 */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id);
/**
 * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
 * While the API allows setting coalescing per qid, all Tx queues sharing an
 * SB should be in the same range [i.e., either 0-0x7F, 0x80-0xFF or
 * 0x100-0x1FF]; otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param coalesce - Coalesce value in microseconds.
 * @param qid - Queue index.
 * @param sb_id - Status Block Id.
 *
 * @return int
 */
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id);
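
A hypothetical illustration of the per-SB constraint described above, written
as comments so it fits the header context (the queue and SB numbers are made
up for the example):

/* Tx queues 0 and 1 share status block 5: both requested values sit in the
 * 0-0x7F range, so they can use the same timer resolution.
 *
 *	qed_set_txq_coalesce(p_hwfn, p_ptt,  96, 0, 5);
 *	qed_set_txq_coalesce(p_hwfn, p_ptt, 120, 1, 5);
 *
 * Requesting 300 usec for queue 1 instead would move it to the 0x100-0x1FF
 * range, change the timer resolution of the shared SB and thereby corrupt
 * the value programmed for queue 0.
 */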
#endif
@@ -768,6 +768,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
	return rc;
}
int
qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
		  dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}
int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
...
@@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cau_state;
	u8 timer_res;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
	}
	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (cdev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (cdev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (cdev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
@@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
-		u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
-			     (QED_CAU_DEF_RX_TIMER_RES + 1);
		u8 timeset, timer_res;
		u8 num_tc = 1, i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE,
				    timeset);

-		timeset = p_hwfn->cdev->tx_coalesce_usecs >>
-			  (QED_CAU_DEF_TX_TIMER_RES + 1);
		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);

		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
@@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	int rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return -EINVAL;
	}

	/* Read the current CAU SB entry for this status block */
	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	/* Write the updated entry back to the CAU SB memory */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)&sb_entry,
			       CAU_REG_SB_VAR_MEMORY +
			       sb_id * sizeof(u64), 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}
@@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   u16 vf_number,
			   u8 vf_valid);

int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u8 timer_res, u16 sb_id, bool tx);

#define QED_MAPPING_MEMORY_SIZE(dev)	(NUM_OF_SBS(dev))
#endif
@@ -1303,6 +1303,38 @@ static int qed_drain(struct qed_dev *cdev)
	return 0;
}
static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	/* Queues are spread across the hwfns; derive the owning hwfn and
	 * the queue index relative to it.
	 */
	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -1349,5 +1381,7 @@ const struct qed_common_ops qed_common_ops_pass = {
	.update_msglvl = &qed_init_dp,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
};
@@ -80,6 +80,8 @@
	0x1f00000UL
#define BAR0_MAP_REG_TSDM_RAM \
	0x1c80000UL
#define BAR0_MAP_REG_XSDM_RAM \
	0x1e00000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
	0x5011f4UL
#define PRS_REG_SEARCH_TCP \
...
@@ -488,6 +488,30 @@ struct qed_common_ops {
	void (*chain_free)(struct qed_dev *cdev,
			   struct qed_chain *p_chain);
/**
* @brief get_coalesce - Get coalesce parameters in usec
*
* @param cdev
* @param rx_coal - Rx coalesce value in usec
* @param tx_coal - Tx coalesce value in usec
*
*/
	void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
/**
 * @brief set_coalesce - Configure Rx and Tx coalesce values in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param qid - Queue index
 * @param sb_id - Status Block Id
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id);
/**
 * @brief set_led - Configure LED mode
 *
...