Commit e4d986a8 authored by David S. Miller

Merge branch 'mlx5-series'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes 2016-08-29

This series contains some bug fixes for the mlx5 core and mlx5
ethernet driver.

From Saeed, Fix UMR to consider hardware translation table field
size limitation when calculating the maximum number of MTTs required
by the driver.  Three patches to speed up netdevice close time by
serializing channel (SQs & RQs) destruction rather than issuing and
waiting for hardware interrupts to free them.

From Eran, Fix ethtool ring parameter reporting for striding RQ layout.
Add error prints on ETS validation failure.

From Kamal, Fix memory leak on error flow.

From Maor, Fix ethtool steering priorities number.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9dbeea7f e5835f28
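
The core of the UMR fix is the MTT accounting added to en.h in the first hunk below: a channel needs rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) MTT entries, and the resulting translation size in octwords (MLX5_MTT_OCTW) must fit a 16-bit hardware field, hence the U16_MAX bound in MLX5E_VALID_NUM_MTTS. A minimal userspace sketch of that bound check, with purely illustrative channel/ring parameters (not driver defaults, and not part of the patch):

/* Standalone sketch of the MTT sizing check introduced by this series.
 * The macros mirror MLX5E_REQUIRED_MTTS/MLX5E_VALID_NUM_MTTS in en.h;
 * the parameter values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)      (((x) + (a) - 1) / (a) * (a))
#define MTT_OCTW(npages) (ALIGN((npages), 8) / 2)
#define REQUIRED_MTTS(rqs, wqes, pages_per_wqe) \
	((uint64_t)(rqs) * (wqes) * ALIGN((pages_per_wqe), 8))
#define VALID_NUM_MTTS(num) (MTT_OCTW(num) <= UINT16_MAX)

int main(void)
{
	unsigned int channels = 32;        /* hypothetical channel count */
	unsigned int wqes_per_rq = 1024;   /* hypothetical RQ size in WQEs */
	unsigned int pages_per_wqe = 16;   /* hypothetical pages per MPWQE */
	uint64_t mtts = REQUIRED_MTTS(channels, wqes_per_rq, pages_per_wqe);

	printf("MTTs %llu -> octwords %llu -> %s\n",
	       (unsigned long long)mtts,
	       (unsigned long long)MTT_OCTW(mtts),
	       VALID_NUM_MTTS(mtts) ? "fits in 16 bits" : "too big, reject request");
	return 0;
}

With these example numbers the octword count overflows the 16-bit field, which is exactly the case the new checks in mlx5e_set_ringparam() and mlx5e_set_channels() now reject.
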
@@ -73,8 +73,12 @@
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
(rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
};
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_FLUSH,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_FLUSH_TIMEOUT,
MLX5E_RQ_STATE_AM,
};
@@ -304,6 +307,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
};
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_FLUSH,
MLX5E_SQ_STATE_BF_ENABLE,
MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
static inline int mlx5e_get_mtt_octw(int npages)
{
return ALIGN(npages, 8) / 2;
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
struct mlx5e_tir *tir;
void *in;
int inlen;
int err;
int err = 0;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
if (err)
return err;
goto out;
}
out:
kvfree(in);
return 0;
return err;
}
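
The en_common.c hunk above is the memory-leak fix: an error from mlx5_core_modify_tir() used to return straight out of the loop, skipping kvfree(in). The change funnels every exit through the out: label. A tiny standalone sketch of the same pattern, with hypothetical names (not driver code):

/* Sketch of the goto-out error flow applied above. */
#include <errno.h>
#include <stdlib.h>

static int modify_all(const int *results, int n)
{
	int *buf = calloc(16, sizeof(*buf)); /* stands in for mlx5_vzalloc(inlen) */
	int err = 0;
	int i;

	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		err = results[i];   /* stands in for mlx5_core_modify_tir() */
		if (err)
			goto out;   /* a bare "return err" here would leak buf */
	}
out:
	free(buf);                  /* stands in for kvfree(in) */
	return err;
}

int main(void)
{
	int results[3] = { 0, -EIO, 0 };  /* second "modify" fails */
	return modify_all(results, 3) ? 1 : 0;
}
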
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
}
static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
struct ieee_ets *ets)
{
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
netdev_err(netdev,
"Failed to validate ETS: priority value greater than max(%d)\n",
MLX5E_MAX_PRIORITY);
return -EINVAL;
}
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
if (!ets->tc_tx_bw[i])
if (!ets->tc_tx_bw[i]) {
netdev_err(netdev,
"Failed to validate ETS: BW 0 is illegal\n");
return -EINVAL;
}
bw_sum += ets->tc_tx_bw[i];
}
}
if (bw_sum != 0 && bw_sum != 100)
if (bw_sum != 0 && bw_sum != 100) {
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
}
return 0;
}
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
err = mlx5e_dbcnl_validate_ets(ets);
err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
return err;
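
With the netdev parameter threaded through, the three rejection cases above now explain themselves in the kernel log: a priority mapped past MLX5E_MAX_PRIORITY, a zero bandwidth share on an ETS TC, or a bandwidth sum other than 0 or 100. A standalone sketch of those checks with an input that trips the sum rule (constants are illustrative stand-ins, not part of the patch):

#include <stdio.h>

#define MAX_TCS      8   /* stand-in for IEEE_8021QAZ_MAX_TCS */
#define MAX_PRIORITY 8   /* stand-in for MLX5E_MAX_PRIORITY */
#define TSA_ETS      2   /* stand-in for IEEE_8021QAZ_TSA_ETS */

struct ets {
	int prio_tc[MAX_TCS];
	int tc_tsa[MAX_TCS];
	int tc_tx_bw[MAX_TCS];
};

static int validate_ets(const struct ets *ets)
{
	int bw_sum = 0;
	int i;

	for (i = 0; i < MAX_TCS; i++)
		if (ets->prio_tc[i] >= MAX_PRIORITY)
			return -1;      /* "priority value greater than max" */

	for (i = 0; i < MAX_TCS; i++) {
		if (ets->tc_tsa[i] != TSA_ETS)
			continue;
		if (!ets->tc_tx_bw[i])
			return -1;      /* "BW 0 is illegal" */
		bw_sum += ets->tc_tx_bw[i];
	}

	return (bw_sum == 0 || bw_sum == 100) ? 0 : -1; /* "BW sum is illegal" */
}

int main(void)
{
	struct ets e = {
		.tc_tsa   = { [0] = TSA_ETS, [1] = TSA_ETS },
		.tc_tx_bw = { [0] = 60, [1] = 30 },  /* sums to 90, not 100 */
	};

	printf("validate: %d\n", validate_ets(&e)); /* prints -1 */
	return 0;
}
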
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
sq_stats_desc, j);
}
static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
int num_wqe)
{
int packets_per_wqe;
int stride_size;
int num_strides;
int wqe_size;
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_wqe;
stride_size = 1 << priv->params.mpwqe_log_stride_sz;
num_strides = 1 << priv->params.mpwqe_log_num_strides;
wqe_size = stride_size * num_strides;
packets_per_wqe = wqe_size /
ALIGN(ETH_DATA_LEN, stride_size);
return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
}
static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
int num_packets)
{
int packets_per_wqe;
int stride_size;
int num_strides;
int wqe_size;
int num_wqes;
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_packets;
stride_size = 1 << priv->params.mpwqe_log_stride_sz;
num_strides = 1 << priv->params.mpwqe_log_num_strides;
wqe_size = stride_size * num_strides;
num_packets = (1 << order_base_2(num_packets));
packets_per_wqe = wqe_size /
ALIGN(ETH_DATA_LEN, stride_size);
num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
return 1 << (order_base_2(num_wqes));
}
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->params.rq_wq_type;
param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->params.log_rq_size;
param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << priv->params.log_rq_size);
param->tx_pending = 1 << priv->params.log_sq_size;
}
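
Since a striding-RQ WQE carries many packets, the ethtool ring size is now reported in packet units: each WQE is stride_size * num_strides bytes and holds wqe_size / ALIGN(ETH_DATA_LEN, stride_size) MTU-sized packets, and the total is reported as a power of two. A worked example with illustrative stride/ring parameters (not necessarily the driver defaults):

/* Worked example of the WQE<->packet conversion used by the new
 * ethtool ring reporting above.  Parameter values are illustrative. */
#include <stdio.h>

#define ETH_DATA_LEN 1500
#define ALIGN(x, a)  (((x) + (a) - 1) / (a) * (a))

/* order_base_2() analogue: smallest n such that 2^n >= v */
static unsigned int order2(unsigned int v)
{
	unsigned int n = 0;

	while ((1u << n) < v)
		n++;
	return n;
}

int main(void)
{
	unsigned int stride_size = 1 << 6;     /* 64 B strides   (example) */
	unsigned int num_strides = 1 << 11;    /* 2048 strides   (example) */
	unsigned int num_wqe     = 1 << 9;     /* 512 WQEs       (example) */

	unsigned int wqe_size = stride_size * num_strides;              /* 131072 B */
	unsigned int ppw = wqe_size / ALIGN(ETH_DATA_LEN, stride_size); /* 85 packets */
	unsigned int rx_pending = 1u << (order2(num_wqe * ppw) - 1);    /* 32768 */

	printf("wqe_size=%u packets_per_wqe=%u rx_pending=%u\n",
	       wqe_size, ppw, rx_pending);
	return 0;
}

mlx5e_packets_to_rx_wqes() runs the same arithmetic in reverse so that a user-supplied packet count from ethtool -G can be mapped back to a WQE ring size.
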
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
int rq_wq_type = priv->params.rq_wq_type;
u32 rx_pending_wqes;
u32 min_rq_size;
u32 max_rq_size;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_min_log_rq_size(rq_wq_type));
max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_max_log_rq_size(rq_wq_type));
rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
param->rx_pending);
if (param->rx_pending < min_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
1 << mlx5_min_log_rq_size(rq_wq_type));
min_rq_size);
return -EINVAL;
}
if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
if (param->rx_pending > max_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
1 << mlx5_max_log_rq_size(rq_wq_type));
max_rq_size);
return -EINVAL;
}
num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
rx_pending_wqes);
if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
__func__, param->rx_pending);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return -EINVAL;
}
log_rq_size = order_base_2(param->rx_pending);
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
u32 num_mtts;
int err = 0;
if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
__func__, count);
return -EINVAL;
}
if (priv->params.num_channels == count)
return 0;
@@ -39,13 +39,6 @@
#include "eswitch.h"
#include "vxlan.h"
enum {
MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
};
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
tx_offload_none += sq_stats->csum_none;
}
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->mpwqe_mtt_offset = c->ix *
MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
return -ETIMEDOUT;
}
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;
/* UMR WQE (if in progress) is always at wq->head */
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
sq->ico_wqe_info[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +581,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
int tout = 0;
int err;
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
cancel_work_sync(&rq->am.work);
mlx5e_disable_rq(rq);
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
goto err_disable_sq;
if (sq->txq) {
set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
}
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
int tout = 0;
int err;
if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
/* prevent netif_tx_wake_queue */
napi_synchronize(&sq->channel->napi);
if (sq->txq) {
netif_tx_disable_queue(sq->txq);
/* ensure hw is notified of all pending wqes */
/* last doorbell out, godspeed .. */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_ERR, false, 0);
if (err)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
/* wait till sq is empty, unless a TX timeout occurred on this SQ */
while (sq->cc != sq->pc &&
!test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_free_tx_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -2796,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
@@ -3233,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_mkey_seg *mkc;
int inlen = sizeof(*in);
u64 npages =
priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
int err;
in = mlx5_vzalloc(inlen);
@@ -3248,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
MLX5_PERM_LOCAL_WRITE |
MLX5_ACCESS_MODE_MTT;
npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
mkc->log2_page_size = PAGE_SHIFT;
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
return rq->mpwqe_mtt_offset +
wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5_wqe_data_seg *dseg = &wqe->data;
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
memset(wqe, 0, sizeof(*wqe));
cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
ucseg->klm_octowords =
cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
ucseg->bsf_octowords =
cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
int mtt_sz = mlx5e_get_wqe_mtt_sz();
u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
int i;
wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +512,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
return;
}
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
rq->stats.mpwqe_frag++;
@@ -595,25 +601,8 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
wi->free_wqe(rq, wi);
}
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
}
#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -916,7 +905,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
return 0;
if (cq->decmprs_left)
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
u64 bytes;
u64 xmit_more;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
@@ -375,6 +375,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.packets++;
sq->stats.bytes += num_bytes;
sq->stats.xmit_more += skb->xmit_more;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return false;
npkts = 0;
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
u16 ci;
int i;
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
skb = sq->skb[ci];
wi = &sq->wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
mlx5e_dma_get(sq, sq->dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
sq->cc += wi->num_wqebbs;
}
}
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
struct mlx5_wq_cyc *wq;
struct mlx5_cqe64 *cqe;
struct mlx5e_sq *sq;
u16 sqcc;
if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return;
cqe = mlx5e_get_cqe(cq);
if (likely(!cqe))
return;
sq = container_of(cq, struct mlx5e_sq, cq);
wq = &sq->wq;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
@@ -80,7 +80,7 @@
LEFTOVERS_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 10
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4