Commit 1c1b5228 authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: Implement Fragmented Work Queue (WQ)

Add a new type, struct mlx5_frag_buf, which is used to allocate
fragmented buffers rather than contiguous ones, and make the
Completion Queues (CQs) use it, as they are big (2MB per CQ by
default in Striding RQ).

This fixes failures of the type:
"mlx5e_open_locked: mlx5e_open_channels failed, -12"
which occur when dma_zalloc_coherent cannot find enough contiguous
coherent memory to satisfy the driver's request, typically when the
user tries to set up more or larger rings.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reported-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6c0c6203
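
Before the diff itself, a back-of-the-envelope sketch of the failure mode
described above (standalone userspace C, not part of the patch; the 4KB
page size is an assumption):

/* Hedged illustration: a 2MB CQ buffer needs one order-9 allocation
 * (512 physically contiguous 4KB pages) under the old scheme, while
 * the fragmented scheme only needs 512 independent order-0 pages.
 */
#include <stdio.h>

/* Minimal stand-in for the kernel's get_order(), assuming 4KB pages. */
static int get_order_4k(unsigned long size)
{
	int order = 0;
	unsigned long pages = (size + 4095) / 4096;

	while ((1ul << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long cq_bytes = 2ul * 1024 * 1024; /* default CQ, Striding RQ */
	int order = get_order_4k(cq_bytes);

	printf("contiguous: one order-%d block (%lu pages in a row)\n",
	       order, 1ul << order);
	printf("fragmented: %lu independent order-0 pages\n", 1ul << order);
	return 0;
}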
@@ -106,6 +106,63 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			     struct mlx5_frag_buf *buf, int node)
+{
+	int i;
+
+	buf->size = size;
+	buf->npages = 1 << get_order(size);
+	buf->page_shift = PAGE_SHIFT;
+	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
+			     GFP_KERNEL);
+	if (!buf->frags)
+		goto err_out;
+
+	for (i = 0; i < buf->npages; i++) {
+		struct mlx5_buf_list *frag = &buf->frags[i];
+		int frag_sz = min_t(int, size, PAGE_SIZE);
+
+		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
+							  &frag->map, node);
+		if (!frag->buf)
+			goto err_free_buf;
+		if (frag->map & ((1 << buf->page_shift) - 1)) {
+			dma_free_coherent(&dev->pdev->dev, frag_sz,
+					  buf->frags[i].buf, buf->frags[i].map);
+			mlx5_core_warn(dev, "unexpected map alignment: 0x%p, page_shift=%d\n",
+				       (void *)frag->map, buf->page_shift);
+			goto err_free_buf;
+		}
+		size -= frag_sz;
+	}
+
+	return 0;
+
+err_free_buf:
+	while (i--)
+		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+				  buf->frags[i].map);
+	kfree(buf->frags);
+err_out:
+	return -ENOMEM;
+}
+
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
+{
+	int size = buf->size;
+	int i;
+
+	for (i = 0; i < buf->npages; i++) {
+		int frag_sz = min_t(int, size, PAGE_SIZE);
+
+		dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+				  buf->frags[i].map);
+		size -= frag_sz;
+	}
+	kfree(buf->frags);
+}
+
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 						 int node)
 {
@@ -230,3 +287,12 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
 	}
 }
 EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+	int i;
+
+	for (i = 0; i < buf->npages; i++)
+		pas[i] = cpu_to_be64(buf->frags[i].map);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
...
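
A minimal usage sketch of the new alloc/fill/free trio (hypothetical
caller, not part of the patch; "dev", "pas" and the 2MB size are
assumptions, following the signatures added in the hunk above):

/* Hypothetical caller (assumed context: "dev" is a live mlx5_core_dev
 * and "pas" points at at least fbuf.npages __be64 slots).
 */
static int example_frag_buf_roundtrip(struct mlx5_core_dev *dev, __be64 *pas)
{
	struct mlx5_frag_buf fbuf;
	int err;

	/* 2MB total, but allocated as independent PAGE_SIZE fragments */
	err = mlx5_frag_buf_alloc_node(dev, 2 * 1024 * 1024, &fbuf,
				       dev_to_node(&dev->pdev->dev));
	if (err)
		return err;	/* -ENOMEM only if single pages ran out too */

	/* One big-endian DMA address per fragment, as the HW expects */
	mlx5_fill_page_frag_array(&fbuf, pas);

	/* ... program the addresses into the device, use the buffer ... */

	mlx5_frag_buf_free(dev, &fbuf);
	return 0;
}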
@@ -286,7 +286,7 @@ struct mlx5e_cq {
 	u16 decmprs_wqe_counter;
 
 	/* control */
-	struct mlx5_wq_ctrl wq_ctrl;
+	struct mlx5_frag_wq_ctrl wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_rq;
...
@@ -1201,7 +1201,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
 
 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 {
-	mlx5_wq_destroy(&cq->wq_ctrl);
+	mlx5_cqwq_destroy(&cq->wq_ctrl);
 }
 
 static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1218,7 +1218,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-		sizeof(u64) * cq->wq_ctrl.buf.npages;
+		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
 	in = mlx5_vzalloc(inlen);
 	if (!in)
 		return -ENOMEM;
@@ -1227,15 +1227,15 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
 	memcpy(cqc, param->cqc, sizeof(param->cqc));
 
-	mlx5_fill_page_array(&cq->wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
 	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
 
 	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
 	MLX5_SET(cqc, cqc, c_eqn, eqn);
 	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
-	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
 					  MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
...
@@ -101,13 +101,15 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_wq_ctrl *wq_ctrl)
+		     struct mlx5_frag_wq_ctrl *wq_ctrl)
 {
 	int err;
 
 	wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
 	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
 	wq->sz_m1 = (1 << wq->log_sz) - 1;
+	wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
+	wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -115,14 +117,16 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}
 
-	err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+				       &wq_ctrl->frag_buf,
+				       param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
+			       err);
 		goto err_db_free;
 	}
 
-	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->frag_buf = wq_ctrl->frag_buf;
 	wq->db = wq_ctrl->db.db;
 
 	wq_ctrl->mdev = mdev;
@@ -184,3 +188,9 @@ void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
 	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
 	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
 }
+
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
+{
+	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
+	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
...
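
To tie the new fields back to the commit message's 2MB figure, a hedged
worked example (standalone userspace C, not part of the patch; the 64B
CQE size and 4KB pages are assumptions):

/* Reproduce the size math in mlx5_cqwq_create: a cqe_sz field of 0
 * means 64B CQEs (log_stride = 6), and log_cq_size = 15 means 32768
 * entries, i.e. 32768 * 64B = 2MB split over 512 page fragments.
 */
#include <stdio.h>

int main(void)
{
	unsigned int log_stride = 6 + 0;	/* cqe_sz field 0 => 64B CQE */
	unsigned int log_sz = 15;		/* log_cq_size */
	unsigned int page_shift = 12;		/* 4KB pages */
	unsigned int log_frag_strides = page_shift - log_stride;

	printf("CQ bytes: %u\n", (1u << log_sz) << log_stride);	/* 2097152 */
	printf("CQEs per fragment: %u\n", 1u << log_frag_strides);	/* 64 */
	printf("page fragments: %u\n",
	       ((1u << log_sz) << log_stride) >> page_shift);		/* 512 */
	return 0;
}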
@@ -47,6 +47,12 @@ struct mlx5_wq_ctrl {
 	struct mlx5_db db;
 };
 
+struct mlx5_frag_wq_ctrl {
+	struct mlx5_core_dev *mdev;
+	struct mlx5_frag_buf frag_buf;
+	struct mlx5_db db;
+};
+
 struct mlx5_wq_cyc {
 	void *buf;
 	__be32 *db;
@@ -55,12 +61,14 @@ struct mlx5_wq_cyc {
 };
 
 struct mlx5_cqwq {
-	void *buf;
+	struct mlx5_frag_buf frag_buf;
 	__be32 *db;
 	u32 sz_m1;
+	u32 frag_sz_m1;
 	u32 cc; /* consumer counter */
 	u8 log_sz;
 	u8 log_stride;
+	u8 log_frag_strides;
 };
 
 struct mlx5_wq_ll {
@@ -81,7 +89,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_wq_ctrl *wq_ctrl);
+		     struct mlx5_frag_wq_ctrl *wq_ctrl);
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
 
 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
@@ -90,6 +98,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
 
 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
 
 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
 {
@@ -116,7 +125,10 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
 
 static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 {
-	return wq->buf + (ix << wq->log_stride);
+	unsigned int frag = (ix >> wq->log_frag_strides);
+
+	return wq->frag_buf.frags[frag].buf +
+	       ((wq->frag_sz_m1 & ix) << wq->log_stride);
 }
 
 static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
...
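
The two-level lookup in mlx5_cqwq_get_wqe can be checked by hand; a
hedged standalone sketch (same 64B-CQE / 4KB-page assumptions as above):

/* The high bits of the CQE index pick the page fragment, the low
 * bits pick the CQE within that fragment.
 */
#include <stdio.h>

int main(void)
{
	unsigned int log_stride = 6;			/* 64B CQE */
	unsigned int log_frag_strides = 12 - log_stride;	/* 6 */
	unsigned int frag_sz_m1 = (1u << log_frag_strides) - 1;	/* 63 */
	unsigned int ix = 100;				/* example CQE index */

	unsigned int frag = ix >> log_frag_strides;	/* fragment 1 */
	unsigned int off = (ix & frag_sz_m1) << log_stride;	/* 2304 bytes */

	printf("ix %u -> fragment %u, offset %u bytes\n", ix, frag, off);
	return 0;
}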
@@ -318,6 +318,13 @@ struct mlx5_buf {
 	u8 page_shift;
 };
 
+struct mlx5_frag_buf {
+	struct mlx5_buf_list *frags;
+	int npages;
+	int size;
+	u8 page_shift;
+};
+
 struct mlx5_eq_tasklet {
 	struct list_head list;
 	struct list_head process_list;
@@ -822,6 +829,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			     struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
 						      gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -866,6 +876,7 @@ void mlx5_unregister_debugfs(void);
 int mlx5_eq_init(struct mlx5_core_dev *dev);
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
...