Commit f39c6b29 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-06-01

1) From Tariq, Two patches to fix IPoIB issues introduced in
   "net/mlx5e: TX, Use actual WQE size for SQ edge fill"

2) From Eran, Additional improvements to mlx5e statistics reporting

3) From Maor, Increase aRFS flow table size

4) From Adi, Support MTU change for ethernet representors

5) From Ilan and Adi, Handle QP error events in FPGA

6) From Tariq, The last 10 patches mainly deal with RX buffer scheme improvements for the
   legacy RQ, so that it uses only order-0 pages and fragmented SKBs for large MTUs.

- Tariq starts with some refactoring and with removing HW LRO support from the traditional
  (legacy) RQ, since LRO complicates the buffer scheme and dropping it makes the move to a
  cyclic descriptor buffer for the traditional RQ smoother.

- Use a cyclic WQ in the legacy RQ, replacing the linked-list WQ; this simplifies buffer
  posting and paves the way for fragmented SKBs for large MTUs (a usage sketch of the new
  cyclic-WQ helpers is appended after the wq.h diff at the end).

- Enhance the legacy Receive Queue memory scheme so that only order-0 pages are used.
  Whenever possible, prefer a linear SKB built around the WQE buffer. Otherwise (for
  example, jumbo frames on x86), use a non-linear SKB with as many frags as needed; in
  this case multiple WQE scatter entries are used, up to a maximum of 4 frags and 10KB
  of MTU (an illustrative sketch of this layout decision follows the commit message).

- TX statistics access improvements.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 21ad1173 f65a59ff
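
The fragment-layout decision described in item 6 can be summarized with a small,
self-contained sketch (illustrative only, not the driver code: PAGE_SIZE, RX_HEADROOM,
the build_frag_layout() helper and its return convention are assumptions; the 4-frag
limit and the prefer-a-linear-SKB rule come from the description above):

#include <stdio.h>

#define PAGE_SIZE    4096
#define MAX_RX_FRAGS 4     /* mirrors MLX5E_MAX_RX_FRAGS in the patch */
#define RX_HEADROOM  256   /* illustrative reserve for SKB headroom */

struct frag_info {
	int frag_size;     /* packet bytes carried by this fragment */
	int frag_stride;   /* bytes reserved per fragment */
};

/* Return the number of fragments needed for a packet of byte_count bytes,
 * or -1 if it cannot be served with MAX_RX_FRAGS order-0 pages.
 */
static int build_frag_layout(int byte_count, struct frag_info *arr)
{
	int nfrags = 0;

	if (byte_count + RX_HEADROOM <= PAGE_SIZE) {
		arr[0].frag_size   = byte_count;
		arr[0].frag_stride = PAGE_SIZE;      /* one page, linear SKB */
		return 1;
	}

	while (byte_count > 0 && nfrags < MAX_RX_FRAGS) {
		int sz = byte_count > PAGE_SIZE ? PAGE_SIZE : byte_count;

		arr[nfrags].frag_size   = sz;
		arr[nfrags].frag_stride = PAGE_SIZE; /* each frag gets its own order-0 page */
		byte_count -= sz;
		nfrags++;
	}
	return byte_count ? -1 : nfrags;
}

int main(void)
{
	struct frag_info arr[MAX_RX_FRAGS];
	int i, n = build_frag_layout(9000 + 14, arr); /* jumbo frame on x86 */

	for (i = 0; i < n; i++)
		printf("frag %d: size=%d stride=%d\n", i, arr[i].frag_size, arr[i].frag_stride);
	return 0;
}

In the patch itself the chosen layout is stored per-RQ in struct mlx5e_rq_frags_info
(an array of up to MLX5E_MAX_RX_FRAGS frag_size/frag_stride pairs), and each receive
WQE carries one scatter entry per fragment.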
......@@ -101,18 +101,22 @@ struct page_pool;
(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
(MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
#define MLX5E_RX_MAX_HEAD (256)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
......@@ -186,9 +190,13 @@ struct mlx5e_tx_wqe {
struct mlx5_wqe_data_seg data[0];
};
struct mlx5e_rx_wqe {
struct mlx5e_rx_wqe_ll {
struct mlx5_wqe_srq_next_seg next;
struct mlx5_wqe_data_seg data;
struct mlx5_wqe_data_seg data[0];
};
struct mlx5e_rx_wqe_cyc {
struct mlx5_wqe_data_seg data[0];
};
struct mlx5e_umr_wqe {
......@@ -458,8 +466,9 @@ struct mlx5e_dma_info {
};
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info di;
struct mlx5e_dma_info *di;
u32 offset;
bool last_in_page;
};
struct mlx5e_umr_dma_info {
......@@ -472,6 +481,8 @@ struct mlx5e_mpw_info {
DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
#define MLX5E_MAX_RX_FRAGS 4
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
......@@ -489,6 +500,9 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
......@@ -496,19 +510,30 @@ enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};
struct mlx5e_rq_frag_info {
int frag_size;
int frag_stride;
};
struct mlx5e_rq_frags_info {
struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
u8 num_frags;
u8 log_num_frags;
u8 wqe_bulk;
};
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
union {
struct {
struct mlx5e_wqe_frag_info *frag_info;
u32 frag_sz; /* max possible skb frag_sz */
union {
bool page_reuse;
};
struct mlx5_wq_cyc wq;
struct mlx5e_wqe_frag_info *frags;
struct mlx5e_dma_info *di;
struct mlx5e_rq_frags_info info;
mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
struct {
struct mlx5_wq_ll wq;
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
......@@ -519,7 +544,6 @@ struct mlx5e_rq {
};
struct {
u16 headroom;
u8 page_order;
u8 map_dir; /* dma map direction */
} buff;
......@@ -777,8 +801,6 @@ struct mlx5e_priv {
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
rwlock_t stats_lock; /* Protects channels SW stats updates */
bool channels_active;
struct mlx5e_channels channels;
u32 tisn[MLX5E_MAX_NUM_TC];
struct mlx5e_rqt indir_rqt;
......@@ -877,6 +899,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_update_stats(struct mlx5e_priv *priv);
......@@ -1104,6 +1132,10 @@ void mlx5e_update_stats_work(struct work_struct *work);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
change_hw_mtu_cb set_mtu_cb);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
......
......@@ -213,7 +213,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
}
#define MLX5E_ARFS_NUM_GROUPS 2
#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
#define MLX5E_ARFS_GROUP1_SIZE (BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
MLX5E_ARFS_GROUP2_SIZE)
......
......@@ -1515,6 +1515,9 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EOPNOTSUPP;
if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
return -EINVAL;
} else if (priv->channels.params.lro_en) {
netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n");
return -EINVAL;
}
new_channels.params = priv->channels.params;
......@@ -1589,6 +1592,10 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
out:
mutex_unlock(&priv->state_lock);
/* Need to fix some features.. */
netdev_update_features(netdev);
return err;
}
......
......@@ -130,10 +130,6 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
struct mlx5e_sq_stats *sq_stats;
int i, j;
read_lock(&priv->stats_lock);
if (!priv->channels_active)
goto out;
memset(s, 0, sizeof(*s));
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
......@@ -150,8 +146,6 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
s->tx_bytes += sq_stats->bytes;
}
}
out:
read_unlock(&priv->stats_lock);
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
......@@ -906,6 +900,11 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
{
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_open = mlx5e_rep_open,
.ndo_stop = mlx5e_rep_close,
......@@ -915,6 +914,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
.ndo_change_mtu = mlx5e_change_rep_mtu,
};
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
......@@ -927,7 +927,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->sw_mtu = mtu;
params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
......@@ -941,6 +941,10 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
netdev->watchdog_timeo = 15 * HZ;
......@@ -953,6 +957,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_TC;
eth_hw_addr_random(netdev);
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}
static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
......
......@@ -100,7 +100,7 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
#ifdef CONFIG_INET
/* loopback test */
#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN)
#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
......
......@@ -64,11 +64,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
......@@ -114,9 +114,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
int i;
memset(s, 0, sizeof(*s));
read_lock(&priv->stats_lock);
if (!priv->channels_active)
goto out;
for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
struct mlx5e_channel_stats *channel_stats =
......@@ -177,8 +174,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
}
memcpy(&priv->stats.sw, s, sizeof(*s));
out:
read_unlock(&priv->stats_lock);
}
static const struct counter_desc q_stats_desc[] = {
......@@ -1142,11 +1137,11 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
static const struct counter_desc ch_stats_desc[] = {
......@@ -1161,9 +1156,6 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
int max_nch = priv->profile->max_nch(priv->mdev);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
(NUM_SQ_STATS * max_nch * priv->max_opened_tc);
......@@ -1175,9 +1167,6 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int max_nch = priv->profile->max_nch(priv->mdev);
int i, j, tc;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return idx;
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
......@@ -1187,7 +1176,6 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
/* priv->channel_tc2txq[i][tc] is valid only when device is open */
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
......@@ -1204,9 +1192,6 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int max_nch = priv->profile->max_nch(priv->mdev);
int i, j, tc;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return idx;
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
......
......@@ -75,11 +75,11 @@ struct mlx5e_sw_stats {
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 tx_cqe_err;
u64 tx_recover;
u64 tx_queue_wake;
u64 tx_cqe_err;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_buff_alloc_err;
......@@ -203,10 +203,11 @@ struct mlx5e_sq_stats {
/* less likely accessed in data path */
u64 csum_none;
u64 stopped;
u64 wake;
u64 dropped;
u64 cqe_err;
u64 recover;
/* dirtied @completion */
u64 wake ____cacheline_aligned_in_smp;
u64 cqe_err;
};
struct mlx5e_ch_stats {
......
......@@ -188,28 +188,16 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
return min_t(u16, hlen, skb_headlen(skb));
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
unsigned int *skb_len,
unsigned int len)
{
*skb_len -= len;
*skb_data += len;
}
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
unsigned char **skb_data,
unsigned int *skb_len)
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
int cpy1_sz = 2 * ETH_ALEN;
int cpy2_sz = ihs - cpy1_sz;
memcpy(vhdr, *skb_data, cpy1_sz);
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
memcpy(vhdr, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
static inline void
......@@ -357,8 +345,6 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
unsigned char *skb_data = skb->data;
unsigned int skb_len = skb->len;
u16 ds_cnt, ds_cnt_inl = 0;
u16 headlen, ihs, frag_pi;
u8 num_wqebbs, opcode;
......@@ -385,7 +371,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->bytes += num_bytes;
stats->xmit_more += skb->xmit_more;
headlen = skb_len - ihs - skb->data_len;
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
ds_cnt += skb_shinfo(skb)->nr_frags;
......@@ -414,15 +400,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = mss;
if (ihs) {
eseg->inline_hdr.sz = cpu_to_be16(ihs);
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(eseg->inline_hdr.start, skb,
ihs - VLAN_HLEN, &skb_data, &skb_len);
ihs -= VLAN_HLEN;
mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
stats->added_vlan_packets++;
} else {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
memcpy(eseg->inline_hdr.start, skb->data, ihs);
}
eseg->inline_hdr.sz = cpu_to_be16(ihs);
dseg += ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
......@@ -432,7 +417,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->added_vlan_packets++;
}
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
......@@ -644,8 +629,6 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
unsigned char *skb_data = skb->data;
unsigned int skb_len = skb->len;
u16 headlen, ihs, pi, frag_pi;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
......@@ -653,8 +636,6 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
__be16 mss;
mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
/* Calc ihs and ds cnt, no writes to wqe yet */
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
if (skb_is_gso(skb)) {
......@@ -674,7 +655,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->bytes += num_bytes;
stats->xmit_more += skb->xmit_more;
headlen = skb_len - ihs - skb->data_len;
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
ds_cnt += skb_shinfo(skb)->nr_frags;
......@@ -686,10 +667,12 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
}
mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
......@@ -704,12 +687,12 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = mss;
if (ihs) {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
memcpy(eseg->inline_hdr.start, skb->data, ihs);
eseg->inline_hdr.sz = cpu_to_be16(ihs);
dseg += ds_cnt_inl;
}
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
......
......@@ -50,6 +50,11 @@ static const char *const mlx5_fpga_error_strings[] = {
"Temperature Critical",
};
static const char * const mlx5_fpga_qp_error_strings[] = {
"Null Syndrome",
"Retry Counter Expired",
"RNR Expired",
};
static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
{
struct mlx5_fpga_device *fdev = NULL;
......@@ -271,23 +276,38 @@ static const char *mlx5_fpga_syndrome_to_string(u8 syndrome)
return "Unknown";
}
static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome)
{
if (syndrome < ARRAY_SIZE(mlx5_fpga_qp_error_strings))
return mlx5_fpga_qp_error_strings[syndrome];
return "Unknown";
}
void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
const char *event_name;
bool teardown = false;
unsigned long flags;
u32 fpga_qpn;
u8 syndrome;
if (event != MLX5_EVENT_TYPE_FPGA_ERROR) {
switch (event) {
case MLX5_EVENT_TYPE_FPGA_ERROR:
syndrome = MLX5_GET(fpga_error_event, data, syndrome);
event_name = mlx5_fpga_syndrome_to_string(syndrome);
break;
case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome);
event_name = mlx5_fpga_qp_syndrome_to_string(syndrome);
fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
break;
default:
mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
event);
return;
}
syndrome = MLX5_GET(fpga_error_event, data, syndrome);
event_name = mlx5_fpga_syndrome_to_string(syndrome);
spin_lock_irqsave(&fdev->state_lock, flags);
switch (fdev->state) {
case MLX5_FPGA_STATUS_SUCCESS:
......
......@@ -85,6 +85,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
MLX5_GET(wq, wqc, log_wq_sz),
fbc);
wq->sz = wq->fbc.sz_m1 + 1;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
......
......@@ -51,6 +51,9 @@ struct mlx5_wq_ctrl {
struct mlx5_wq_cyc {
struct mlx5_frag_buf_ctrl fbc;
__be32 *db;
u16 sz;
u16 wqe_ctr;
u16 cur_sz;
};
struct mlx5_wq_qp {
......@@ -95,6 +98,43 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
{
return wq->cur_sz == wq->sz;
}
static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq)
{
return wq->sz - wq->cur_sz;
}
static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq)
{
return !wq->cur_sz;
}
static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
{
wq->wqe_ctr++;
wq->cur_sz++;
}
static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
{
wq->wqe_ctr += n;
wq->cur_sz += n;
}
static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq)
{
wq->cur_sz--;
}
static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
{
*wq->db = cpu_to_be32(wq->wqe_ctr);
}
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
return ctr & wq->fbc.sz_m1;
......@@ -105,6 +145,16 @@ static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
return ctr & wq->fbc.frag_sz_m1;
}
static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
{
return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
}
static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq)
{
return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz);
}
static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
{
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
......@@ -179,11 +229,6 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
return !wq->cur_sz;
}
static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr)
{
return ctr & wq->fbc.sz_m1;
}
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
......
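
For reference, a rough usage sketch of the new cyclic-WQ helpers added to wq.h above
(illustrative only, not part of the patch; the mlx5_wq_cyc_example_post() name and the
bulk-posting policy are assumptions, while the helper semantics come straight from the
diff):

static inline void mlx5_wq_cyc_example_post(struct mlx5_wq_cyc *wq, u8 wqe_bulk)
{
	u8 posted;

	if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
		return;                              /* not enough free slots for a bulk */

	for (posted = 0; posted < wqe_bulk; posted++) {
		u16 head = mlx5_wq_cyc_get_head(wq); /* index of the slot to fill */

		(void)head;                          /* driver-specific WQE setup at 'head' */
		mlx5_wq_cyc_push(wq);                /* wqe_ctr++, cur_sz++ */
	}
	mlx5_wq_cyc_update_db_record(wq);            /* publish wqe_ctr via the doorbell record */
}

On the completion path the consumer calls mlx5_wq_cyc_pop() once per consumed WQE, so
mlx5_wq_cyc_missing() and mlx5_wq_cyc_is_full() stay consistent with the number of
posted-but-not-yet-completed entries.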