Commit e5a3cc83 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Use runtime page_shift for striding RQ

This commit allows striding RQ to determine MTT page size at runtime,
instead of sticking to the compile-time PAGE_SIZE. This functionality
will be used by a following commit that adjusts the MTT page size to the
XSK frame size.

Stick with PAGE_SIZE for XSK on legacy RQ: frag_stride is not used in the
data path, it only helps calculate how pages are partitioned into
fragments, and PAGE_SIZE ensures that each fragment starts at the
beginning of a new allocation unit (XSK frame).
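
To make the intent concrete, here is a minimal sketch (userspace C with
hypothetical names; the driver's real helper is mlx5e_mpwrq_page_shift(),
declared in en/params.h below, and it also takes the mdev): with no XSK pool
the page shift stays at the CPU's PAGE_SHIFT, and the follow-up commit derives
it from the XSK frame size instead.

	/* sketch_page_shift.c - illustrative only, not mlx5 code. */
	#include <stdio.h>

	#define EXAMPLE_PAGE_SHIFT 12 /* assumption: 4K pages */

	/* Like the kernel's order_base_2(): ceil(log2(n)). */
	static unsigned int order_base_2(unsigned int n)
	{
		unsigned int order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	/* chunk_size == 0 means "no XSK pool". */
	static unsigned int sketch_mpwrq_page_shift(unsigned int chunk_size)
	{
		return chunk_size ? order_base_2(chunk_size) : EXAMPLE_PAGE_SHIFT;
	}

	int main(void)
	{
		printf("regular RQ: %u\n", sketch_mpwrq_page_shift(0));    /* 12 */
		printf("2K frames:  %u\n", sketch_mpwrq_page_shift(2048)); /* 11 */
		return 0;
	}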
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 9ca66afe
@@ -93,28 +93,28 @@ struct page_pool;
 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
 	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_LOG_WQE_SZ		18
-#define MLX5_MPWRQ_WQE_PAGE_ORDER	(MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
-					 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
-#define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+
+/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
+ * These are theoretical maximums, which can be further restricted by
+ * capabilities. These values are used for static resource allocations and
+ * sanity checks.
+ * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
+ * size actually used at runtime, but it's not a problem when calculating static
+ * array sizes.
+ */
+#define MLX5_UMR_MAX_MTT_SPACE \
+	(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
+		    MLX5_UMR_MTT_ALIGNMENT))
+#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
+	rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
 
 #define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
 #define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
 #define MLX5_MTT_OCTW(mtts)		(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
-/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
- * WQEs, This page will absorb write overflow by the hardware, when
- * receiving packets larger than MTU. These oversize packets are
- * dropped by the driver at a later stage.
- */
-#define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_MAX_RQ_NUM_MTTS	\
 	(ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
 #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
-	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
-#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
-	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
-	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
 
 #define MLX5E_MIN_SKB_FRAG_SZ		(MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
 #define MLX5E_LOG_MAX_RX_WQE_BULK	\
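
The new MLX5_MPWRQ_MAX_PAGES_PER_WQE bound can be sanity-checked with a quick
standalone computation (hedged: the struct sizes below are illustrative
assumptions, not values taken from the mlx5 headers):

	/* max_pages_demo.c - back-of-the-envelope for the macro above. */
	#include <stdio.h>

	#define SEND_WQE_MAX_SIZE (16 * 64) /* MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB */
	#define UMR_WQE_HDR	  128	    /* assumed sizeof(struct mlx5e_umr_wqe) */
	#define MTT_ALIGNMENT	  64	    /* assumed MLX5_UMR_MTT_ALIGNMENT */
	#define MTT_SIZE	  8	    /* assumed sizeof(struct mlx5_mtt) */

	static unsigned int align_down(unsigned int x, unsigned int a)
	{
		return x - x % a;
	}

	static unsigned int rounddown_pow_of_two(unsigned int x)
	{
		unsigned int p = 1;

		while (p * 2 <= x)
			p *= 2;
		return p;
	}

	int main(void)
	{
		unsigned int mtt_space = align_down(SEND_WQE_MAX_SIZE - UMR_WQE_HDR,
						    MTT_ALIGNMENT);

		/* With the assumed sizes: 896 bytes of MTT space -> 112 MTTs
		 * -> 64 pages per WQE after rounding down to a power of two.
		 */
		printf("max pages per WQE: %u\n",
		       rounddown_pow_of_two(mtt_space / MTT_SIZE));
		return 0;
	}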
@@ -126,8 +126,7 @@ struct page_pool;
 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
-						MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
 
 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
@@ -613,7 +612,7 @@ struct mlx5e_wqe_frag_info {
 struct mlx5e_mpw_info {
 	u16 consumed_strides;
-	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
 	struct mlx5e_dma_info dma_info[];
 };
@@ -622,8 +621,8 @@ struct mlx5e_mpw_info {
 /* a single cache unit is capable to serve one napi call (for non-striding rq)
  * or a MPWQE (for striding rq).
  */
-#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
-				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+				 MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
 #define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
 struct mlx5e_page_cache {
 	u32 head;
@@ -1008,7 +1007,7 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift);
 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
......
@@ -86,8 +86,13 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
 /* Striding RQ dynamic parameters */
 
-u16 mlx5e_mpwrq_umr_wqe_sz(u8 pages_per_wqe);
-u8 mlx5e_mpwrq_umr_wqebbs(u8 pages_per_wqe);
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift);
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift);
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift);
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift);
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift);
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift);
 
 /* Parameter calculations */
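
These helpers replace the old compile-time macros; the core invariant they
encode is that a WQE of 2^log_wqe_sz bytes split into pages of 2^page_shift
bytes yields their quotient in pages. A hedged one-liner (not the mlx5
implementation, which additionally caps log_wqe_sz by device capabilities):

	/* pages_per_wqe = (WQE bytes) / (page bytes), both powers of two. */
	static inline unsigned int sketch_pages_per_wqe(unsigned int log_wqe_sz,
							unsigned int page_shift)
	{
		return 1u << (log_wqe_sz - page_shift);
	}

For example, with log_wqe_sz = 18 and page_shift = 12, a WQE spans 64 pages,
matching the theoretical maximum computed earlier.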
@@ -106,12 +111,14 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *
 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
 				 struct mlx5e_xsk_param *xsk);
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+			    struct mlx5e_params *params,
 			    struct mlx5e_xsk_param *xsk);
 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 				  struct mlx5e_params *params,
 				  struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+			       struct mlx5e_params *params,
 			       struct mlx5e_xsk_param *xsk);
 u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
 				      struct mlx5e_params *params);
......
@@ -5,20 +5,19 @@
 #include "en/params.h"
 #include "en/txrx.h"
 #include "en/health.h"
+#include <net/xdp_sock_drv.h>
 
-/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
- * change unexpectedly, and mlx5e has a minimum valid stride size for striding
- * RQ, keep this check in the driver.
+/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
+ * stride size of striding RQ.
  */
-#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
+#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)
 
 bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 			      struct mlx5e_xsk_param *xsk,
 			      struct mlx5_core_dev *mdev)
 {
 	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
-	if (xsk->chunk_size > PAGE_SIZE ||
-	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
+	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
 		return false;
 
 	/* frag_sz is different for regular and XSK RQs, so ensure that linear
@@ -28,7 +27,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		return mlx5e_rx_is_linear_skb(params, xsk);
+		return mlx5e_rx_is_linear_skb(mdev, params, xsk);
 	}
 }
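
A note on the MLX5E_MIN_XSK_CHUNK_SIZE change above: max() here is the kernel
macro, and XDP_UMEM_MIN_CHUNK_SIZE (pulled in via the new <net/xdp_sock_drv.h>
include) is 2048 at the time of this commit, so the floor still evaluates to
2048 today; the max() only guards against the core constant growing later. A
hedged userspace rendition of the resulting check:

	/* chunk_check.c - illustrative only. */
	#define XDP_UMEM_MIN_CHUNK_SIZE 2048u /* current uAPI value, may change */
	#define MIN_XSK_CHUNK_SIZE \
		(2048u > XDP_UMEM_MIN_CHUNK_SIZE ? 2048u : XDP_UMEM_MIN_CHUNK_SIZE)

	static int chunk_size_valid(unsigned int chunk_size, unsigned int page_size)
	{
		/* AF_XDP frames must fit in one page and meet the floor. */
		return chunk_size <= page_size && chunk_size >= MIN_XSK_CHUNK_SIZE;
	}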
......
@@ -311,7 +311,13 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
 				 struct ethtool_ringparam *param,
 				 struct kernel_ethtool_ringparam *kernel_param)
 {
-	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+	/* Limitation for regular RQ. XSK RQ may clamp the queue length in
+	 * mlx5e_mpwqe_get_log_rq_size.
+	 */
+	u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev, PAGE_SHIFT);
+
+	param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+					   max_log_mpwrq_pkts);
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
 	param->rx_pending     = 1 << priv->channels.params.log_rq_mtu_frames;
 	param->tx_pending     = 1 << priv->channels.params.log_sq_size;
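
As a worked example with a hypothetical value: if
mlx5e_mpwrq_max_log_rq_pkts(priv->mdev, PAGE_SHIFT) returned 11 on some
device, the reported maximum would become 1 << min(0xd, 11) = 2048 ring
entries, where the old code unconditionally reported 1 << 0xd = 8192.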
......
@@ -68,25 +68,24 @@
 #include "qos.h"
 #include "en/trap.h"
 
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift)
 {
-	bool striding_rq_umr, inline_umr;
-	u16 max_wqebbs;
-	u16 umr_wqebbs;
+	u16 umr_wqebbs, max_wqebbs;
+	bool striding_rq_umr;
 
 	striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
 			  MLX5_CAP_ETH(mdev, reg_umr_sq);
-	max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
-	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(MLX5_MPWRQ_PAGES_PER_WQE);
-	inline_umr = umr_wqebbs <= max_wqebbs;
 	if (!striding_rq_umr)
 		return false;
-	if (!inline_umr) {
-		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%u) exceeds maximum supported (%u).\n",
-			       umr_wqebbs * MLX5_SEND_WQE_BB,
-			       max_wqebbs * MLX5_SEND_WQE_BB);
+
+	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift);
+	max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+	/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
+	 * calculated from mlx5e_get_max_sq_aligned_wqebbs.
+	 */
+	if (WARN_ON(umr_wqebbs > max_wqebbs))
 		return false;
-	}
+
 	return true;
 }
@@ -211,7 +210,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
 	u8 ds_cnt;
 
-	ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mpwqe.pages_per_wqe),
+	ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift),
 			      MLX5_SEND_WQE_DS);
 
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
@@ -591,13 +590,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
-		rq->mpwqe.page_shift = PAGE_SHIFT;
-		rq->mpwqe.pages_per_wqe = MLX5_MPWRQ_PAGES_PER_WQE;
-		rq->mpwqe.umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(rq->mpwqe.pages_per_wqe);
-		rq->mpwqe.mtts_per_wqe = MLX5E_REQUIRED_WQE_MTTS;
+		rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+		rq->mpwqe.pages_per_wqe =
+			mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift);
+		rq->mpwqe.umr_wqebbs =
+			mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift);
+		rq->mpwqe.mtts_per_wqe =
+			mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift);
 
 		pool_size = rq->mpwqe.pages_per_wqe <<
-			mlx5e_mpwqe_get_log_rq_size(params, xsk);
+			mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
 
 		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 		rq->mpwqe.num_strides =
@@ -4030,14 +4032,16 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
 	return true;
 }
 
-static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
+static bool mlx5e_params_validate_xdp(struct net_device *netdev,
+				      struct mlx5_core_dev *mdev,
+				      struct mlx5e_params *params)
 {
 	bool is_linear;
 
 	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
 	 * the XDP program.
 	 */
-	is_linear = mlx5e_rx_is_linear_skb(params, NULL);
+	is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
 
 	if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
 		netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
@@ -4074,7 +4078,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	if (err)
 		goto out;
 
-	if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
+	if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
+							      &new_params)) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -4094,8 +4099,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 		bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
 		bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
 								  &new_params, NULL);
-		u8 sz_old = mlx5e_mpwqe_get_log_rq_size(params, NULL);
-		u8 sz_new = mlx5e_mpwqe_get_log_rq_size(&new_params, NULL);
+		u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
+		u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
 
 		/* Always reset in linear mode - hw_mtu is used in data path.
 		 * Check that the mode was non-linear and didn't change.
@@ -4553,7 +4558,7 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 	new_params = priv->channels.params;
 	new_params.xdp_prog = prog;
 
-	if (!mlx5e_params_validate_xdp(netdev, &new_params))
+	if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
 		return -EINVAL;
 
 	return 0;
@@ -4924,7 +4929,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
 	    !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
 	    !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
-	    mlx5e_check_fragmented_striding_rq_cap(mdev))
+	    mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT))
 		netdev->vlan_features |= NETIF_F_LRO;
 
 	netdev->hw_features = netdev->vlan_features;
......
@@ -2431,7 +2431,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		rq->wqe.skb_from_cqe = xsk ?
 			mlx5e_xsk_skb_from_cqe_linear :
-			mlx5e_rx_is_linear_skb(params, NULL) ?
+			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
 				mlx5e_skb_from_cqe_linear :
 				mlx5e_skb_from_cqe_nonlinear;
 		rq->post_wqes = mlx5e_post_rx_wqes;
@@ -2485,7 +2485,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
 {
-	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
 				       mlx5e_skb_from_cqe_linear :
 				       mlx5e_skb_from_cqe_nonlinear;
 	rq->post_wqes = mlx5e_post_rx_wqes;
......
@@ -162,6 +162,8 @@ enum {
 	MLX5_SEND_WQE_MAX_WQEBBS = 16,
 };
 
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
 enum {
 	MLX5_WQE_FMR_PERM_LOCAL_READ  = 1 << 27,
 	MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
......