Commit 258e655c authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Make dma_info array dynamic in struct mlx5e_mpw_info

This commit moves the dma_info array to the end of struct mlx5e_mpw_info
to make it a flexible array. It also removes the intermediate struct
mlx5e_umr_dma_info, which used to contain only this array. Making
dma_info a flexible array will allow its size to be chosen dynamically
in a following commit.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3904d2af
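
For context, the pattern this commit adopts is the C flexible array member:
sizeof() on such a struct excludes the trailing array, so the allocation must
add room for however many entries are needed at runtime. Below is a minimal
standalone sketch of that pattern; the struct and field names are illustrative
stand-ins, not the mlx5e definitions, and in kernel code the size computation
would be done with the overflow-checked struct_size() helper rather than the
offsetof() arithmetic shown here.

	/* Standalone sketch: a flexible array member sized at allocation time. */
	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct dma_entry {
		void *addr;			/* stand-in for struct mlx5e_dma_info */
	};

	struct mpw_info {
		unsigned short consumed_strides;
		struct dma_entry dma_info[];	/* flexible array member */
	};

	int main(void)
	{
		size_t pages = 8;		/* chosen at runtime */
		/* sizeof(struct mpw_info) excludes dma_info[]; add it explicitly. */
		size_t sz = offsetof(struct mpw_info, dma_info) +
			    pages * sizeof(struct dma_entry);
		struct mpw_info *wi = calloc(1, sz);

		if (!wi)
			return 1;
		printf("allocated %zu bytes for %zu entries\n", sz, pages);
		free(wi);
		return 0;
	}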
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -619,14 +619,10 @@ struct mlx5e_wqe_frag_info {
 	bool last_in_page;
 };
 
-struct mlx5e_umr_dma_info {
-	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
 struct mlx5e_mpw_info {
-	struct mlx5e_umr_dma_info umr;
 	u16 consumed_strides;
 	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+	struct mlx5e_dma_info dma_info[];
 };
 
 #define MLX5E_MAX_RX_FRAGS 4
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -30,7 +30,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 					      u32 head_offset,
 					      u32 page_idx)
 {
-	struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
+	struct xdp_buff *xdp = wi->dma_info[page_idx].xsk;
 	struct bpf_prog *prog;
 
 	/* Check packet size. Note LRO doesn't use linear SKB */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -260,10 +260,12 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
 
 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
 {
 	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+	size_t alloc_size;
 
-	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
-						  sizeof(*rq->mpwqe.info)),
-				       GFP_KERNEL, node);
+	alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, dma_info,
+						   MLX5_MPWRQ_PAGES_PER_WQE));
+
+	rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
 	if (!rq->mpwqe.info)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -75,6 +75,13 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
 	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
 };
 
+static struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
+{
+	size_t isz = struct_size(rq->mpwqe.info, dma_info, MLX5_MPWRQ_PAGES_PER_WQE);
+
+	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
+}
+
 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 {
 	return config->rx_filter == HWTSTAMP_FILTER_ALL;
@@ -478,7 +485,7 @@ static void
 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
 {
 	bool no_xdp_xmit;
-	struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
+	struct mlx5e_dma_info *dma_info = wi->dma_info;
 	int i;
 
 	/* A common case for AF_XDP. */
@@ -660,8 +667,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
 
 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+	struct mlx5e_dma_info *dma_info = &wi->dma_info[0];
 	struct mlx5e_icosq *sq = rq->icosq;
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5e_umr_wqe *umr_wqe;
@@ -768,7 +775,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
 
 	/* Don't recycle, this function is called on rq/netdev close */
 	mlx5e_free_rx_mpwqe(rq, wi, false);
 }
@@ -1795,7 +1802,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
 {
 	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
 	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
 	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
 	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
@@ -1878,7 +1885,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
 {
 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
-	struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+	struct mlx5e_dma_info *di = &wi->dma_info[page_idx];
 	u32 frag_offset = head_offset + headlen;
 	u32 byte_cnt = cqe_bcnt - headlen;
 	struct mlx5e_dma_info *head_di = di;
@@ -1912,7 +1919,7 @@ static struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 				u16 cqe_bcnt, u32 head_offset, u32 page_idx)
 {
-	struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+	struct mlx5e_dma_info *di = &wi->dma_info[page_idx];
 	u16 rx_headroom = rq->buff.headroom;
 	struct bpf_prog *prog;
 	struct sk_buff *skb;
@@ -2078,7 +2085,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	struct mlx5e_mpw_info *wi;
 	struct mlx5_wq_ll *wq;
 
-	wi = &rq->mpwqe.info[wqe_id];
+	wi = mlx5e_get_mpw_info(rq, wqe_id);
 	wi->consumed_strides += cstrides;
 
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -2124,7 +2131,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	}
 
 	if (likely(head_size)) {
-		di = &wi->umr.dma_info[page_idx];
+		di = &wi->dma_info[page_idx];
 		mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
 	}
@@ -2147,7 +2154,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
 {
 	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
 	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
 	u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
 	u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
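
A note on the new mlx5e_get_mpw_info() helper above: once each element of the
info array ends in a runtime-sized flexible array, plain pointer arithmetic
such as &rq->mpwqe.info[ix] would stride by the fixed sizeof() only and land
on the wrong element, so the helper computes the full per-element size and
indexes by byte offset. A minimal standalone sketch of that pattern follows;
the names (mpw_info, dma_entry, get_mpw_info) are illustrative, not the
driver's, and the offsetof() arithmetic stands in for the kernel's
struct_size() and array_size() helpers.

	#include <stddef.h>

	struct dma_entry { void *addr; };

	struct mpw_info {
		unsigned short consumed_strides;
		struct dma_entry dma_info[];	/* flexible array member */
	};

	/*
	 * Return the i-th element of an array whose elements each carry
	 * 'pages' trailing dma_entry records. Plain &base[i] would ignore
	 * the flexible tail, so index by explicit byte stride instead.
	 */
	static struct mpw_info *get_mpw_info(struct mpw_info *base,
					     size_t i, size_t pages)
	{
		size_t isz = offsetof(struct mpw_info, dma_info) +
			     pages * sizeof(struct dma_entry);

		return (struct mpw_info *)((char *)base + i * isz);
	}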