Commit 501ec187 authored by David S. Miller

Merge tag 'mlx5-updates-2017-01-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-01-31

This series includes some updates to the mlx5 core and ethernet driver.

We got one patch from Or to fix some static checker warnings.

The 2nd patch, from Dan, adds support for a 128B cache line in the HCA:
it configures the hardware to use 128B alignment only on systems with
128B cache lines, and otherwise keeps the current default of 64B.
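
To illustrate the idea (a condensed sketch of the handle_hca_cap() hunk
included further down in this pull), the capability is only turned on when
the host actually runs with 128B cache lines:

    /* Sketch: advertise 128B alignment only when the device exposes the
     * capability and the CPU cache line really is 128B; otherwise the
     * 64B default stays in place.
     */
    if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
            MLX5_SET(cmd_hca_cap, set_hca_cap, cache_line_128byte,
                     cache_line_size() == 128 ? 1 : 0);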

From me, three patches to support no-inline copy on TX on ConnectX-5 and
later HCAs, starting with small infrastructure and refactoring changes,
followed by two patches that add the actual support to both the xmit ndo
and the XDP xmit routines; a condensed sketch of the new TX logic follows.
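
The sketch below is simplified from the mlx5e_sq_xmit() hunk in this pull
(the VLAN-inside-inline branch is omitted for brevity):

    /* With min inline mode NONE (ConnectX-5 and later) nothing is copied
     * into the WQE; a pending VLAN tag is offloaded to HW insertion.
     */
    ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
    if (ihs) {
            /* older devices: copy the headers into the inline segment */
            memcpy(eseg->inline_hdr.start, skb_data, ihs);
            eseg->inline_hdr.sz = cpu_to_be16(ihs);
            ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start),
                                   MLX5_SEND_WQE_DS);
    } else if (skb_vlan_tag_present(skb)) {
            /* no inline copy: let the HCA insert the VLAN tag itself */
            eseg->insert.type     = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
            eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
    }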
The last patch is a simple fix that brings back a pointer in the SQ
structure which was mistakenly removed in the previous mlx5 4K UAR
submission.
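
In practice the fix just re-caches the UAR doorbell mapping in the SQ, as
in the mlx5e_create_sq() hunk below:

    sq->uar_map = sq->bfreg.map;   /* keep the mapping handy on the SQ */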

Saeed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 219189e7 8ca967ab

@@ -2984,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
 	if (wr->opcode == IB_WR_LSO) {
 		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
-		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+		int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
 		u64 left, leftlen, copysz;
 		void *pdata = ud_wr->header;

 		left = ud_wr->hlen;
 		eseg->mss = cpu_to_be16(ud_wr->mss);
-		eseg->inline_hdr_sz = cpu_to_be16(left);
+		eseg->inline_hdr.sz = cpu_to_be16(left);

 		/*
 		 * check if there is space till the end of queue, if yes,
 		 * copy all in one shot, otherwise copy till the end of queue,
 		 * rollback and than the copy the left
 		 */
-		leftlen = qend - (void *)eseg->inline_hdr_start;
+		leftlen = qend - (void *)eseg->inline_hdr.start;
 		copysz = min_t(u64, leftlen, left);

 		memcpy(seg - size_of_inl_hdr_start, pdata, copysz);

@@ -70,8 +70,13 @@
 #define MLX5_RX_HEADROOM NET_SKB_PAD
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE		6  /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS	8  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
+	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
+	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
+#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev)	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
 #define MLX5_MPWRQ_LOG_WQE_SZ			18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
 				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)

@@ -115,8 +120,7 @@
 #define MLX5E_XDP_IHS_DS_COUNT \
 	DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT \
-	(MLX5E_XDP_IHS_DS_COUNT + \
-	 (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
 #define MLX5E_XDP_TX_WQEBBS \
 	DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)

@@ -471,6 +475,7 @@ struct mlx5e_sq {
 	/* read only */
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
+	void __iomem              *uar_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
 	u16                        bf_buf_size;

@@ -827,9 +832,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 	 */
 	wmb();

 	if (bf_sz)
-		__iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz);
+		__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
 	else
-		mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL);
+		mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);

 	/* flush the write-combining mapped buffer */
 	wmb();

@@ -89,8 +89,8 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
 			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
 		priv->params.mpwqe_log_stride_sz =
 			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
-			MLX5_MPWRQ_LOG_STRIDE_SIZE;
+			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
+			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
 		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
 			priv->params.mpwqe_log_stride_sz;
 		break;

@@ -1016,6 +1016,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	if (err)
 		return err;

+	sq->uar_map = sq->bfreg.map;
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,

@@ -1029,9 +1030,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline  = param->max_inline;
-	sq->min_inline_mode =
-		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
-		param->min_inline_mode : 0;
+	sq->min_inline_mode = param->min_inline_mode;

 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
 	if (err)

@@ -1095,7 +1094,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 	MLX5_SET(sqc,  sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
 				       0 : priv->tisn[sq->tc]);
 	MLX5_SET(sqc,  sqc, cqn,       sq->cq.mcq.cqn);
-	MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
+
+	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
+
 	MLX5_SET(sqc,  sqc, state,     MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);

@@ -1805,8 +1807,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
 	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

 	param->max_inline = priv->params.tx_max_inline;
-	/* FOR XDP SQs will support only L2 inline mode */
-	param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+	param->min_inline_mode = priv->params.tx_min_inline_mode;
 	param->type = MLX5E_SQ_XDP;
 }

@@ -3533,6 +3534,10 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
 	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
 	mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+	if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
+	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+		priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+
 	priv->params.num_tc        = 1;
 	priv->params.rss_hfunc     = ETH_RSS_HASH_XOR;

@@ -657,9 +657,10 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
 	struct mlx5_wqe_data_seg *dseg;
+	u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;

 	ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
-	dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
+	dma_addr_t dma_addr  = di->addr + data_offset;
 	unsigned int dma_len = xdp->data_end - xdp->data;

 	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||

@@ -680,17 +681,22 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 		return false;
 	}

-	dma_len -= MLX5E_XDP_MIN_INLINE;
 	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
 				   PCI_DMA_TODEVICE);

 	memset(wqe, 0, sizeof(*wqe));

-	/* copy the inline part */
-	memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE);
-	eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
-
-	dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+	dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+	/* copy the inline part if required */
+	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+		memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
+		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+		dma_len  -= MLX5E_XDP_MIN_INLINE;
+		dma_addr += MLX5E_XDP_MIN_INLINE;
+		ds_cnt   += MLX5E_XDP_IHS_DS_COUNT;
+		dseg++;
+	}

 	/* write the dma part */
 	dseg->addr = cpu_to_be64(dma_addr);

@@ -698,7 +704,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	dseg->lkey = sq->mkey_be;

 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

 	sq->db.xdp.di[pi] = *di;
 	wi->opcode = MLX5_OPCODE_SEND;

@@ -154,6 +154,8 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 	int hlen;

 	switch (mode) {
+	case MLX5_INLINE_MODE_NONE:
+		return 0;
 	case MLX5_INLINE_MODE_TCP_UDP:
 		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
 		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))

@@ -283,21 +285,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	wi->num_bytes = num_bytes;

-	if (skb_vlan_tag_present(skb)) {
-		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
-				  &skb_len);
-		ihs += VLAN_HLEN;
-	} else {
-		memcpy(eseg->inline_hdr_start, skb_data, ihs);
-		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+	if (ihs) {
+		if (skb_vlan_tag_present(skb)) {
+			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
+			ihs += VLAN_HLEN;
+		} else {
+			memcpy(eseg->inline_hdr.start, skb_data, ihs);
+			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+		}
+		eseg->inline_hdr.sz = cpu_to_be16(ihs);
+		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+	} else if (skb_vlan_tag_present(skb)) {
+		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
 	}

-	eseg->inline_hdr_sz = cpu_to_be16(ihs);
-
-	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
-			       MLX5_SEND_WQE_DS);
 	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

 	wi->num_dma = 0;

@@ -543,6 +543,12 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

+	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
+		MLX5_SET(cmd_hca_cap,
+			 set_hca_cap,
+			 cache_line_128byte,
+			 cache_line_size() == 128 ? 1 : 0);
+
 	err = set_caps(dev, set_ctx, set_sz,
 		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

@@ -67,10 +67,11 @@
 /* insert a value to a struct */
 #define MLX5_SET(typ, p, fld, v) do { \
+	u32 _v = v; \
 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
-		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
 		     << __mlx5_dw_bit_off(typ, fld))); \
 } while (0)

@@ -577,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         lro_cap[0x1];
 	u8         lro_psh_flag[0x1];
 	u8         lro_time_stamp[0x1];
-	u8         reserved_at_5[0x3];
+	u8         reserved_at_5[0x2];
+	u8         wqe_vlan_insert[0x1];
 	u8         self_lb_en_modifiable[0x1];
 	u8         reserved_at_9[0x2];
 	u8         max_lso_cap[0x5];

@@ -804,10 +805,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_150[0xa];
 	u8         log_max_ra_res_qp[0x6];

-	u8         pad_cap[0x1];
+	u8         end_pad[0x1];
 	u8         cc_query_allowed[0x1];
 	u8         cc_modify_allowed[0x1];
-	u8         reserved_at_163[0xd];
+	u8         start_pad[0x1];
+	u8         cache_line_128byte[0x1];
+	u8         reserved_at_163[0xb];
 	u8         gid_table_size[0x10];

 	u8         out_of_seq_cnt[0x1];

@@ -221,14 +221,26 @@ enum {
 	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
 };

+enum {
+	MLX5_ETH_WQE_INSERT_VLAN        = 1 << 15,
+};
+
 struct mlx5_wqe_eth_seg {
 	u8              rsvd0[4];
 	u8              cs_flags;
 	u8              rsvd1;
 	__be16          mss;
 	__be32          rsvd2;
-	__be16          inline_hdr_sz;
-	u8              inline_hdr_start[2];
+	union {
+		struct {
+			__be16 sz;
+			u8     start[2];
+		} inline_hdr;
+		struct {
+			__be16 type;
+			__be16 vlan_tci;
+		} insert;
+	};
 };

 struct mlx5_wqe_xrc_seg {