Commit 0a324f31 authored by Moshe Lazer, committed by David S. Miller

net/mlx5_core: Support MANAGE_PAGES and QUERY_PAGES firmware command changes

In the previous QUERY_PAGES command version we used one command to get the
required amount of boot, init and post init pages.  The new version uses the
op_mod field to specify whether the query is for the required amount of boot,
init or post init pages. In addition the output field size for the required
amount of pages increased from 16 to 32 bits.

In MANAGE_PAGES command the input_num_entries and output_num_entries fields
sizes changed from 16 to 32 bits and the PAS tables offset changed to 0x10.

In the pages request event the num_pages field also changed to 32 bits.

In the HCA-capabilities layout, the size and location of the max_qp_mcg field have
been changed to support 24 bits.

This patch isn't compatible with firmware versions < 5; however, it turns out that the
first GA firmware we will publish will not support previous versions, so this should be OK.
Signed-off-by: Moshe Lazer <moshel@mellanox.com>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 15718ea0
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
#include "mlx5_core.h" #include "mlx5_core.h"
enum { enum {
CMD_IF_REV = 4, CMD_IF_REV = 5,
}; };
enum { enum {
......
...@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) ...@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_EVENT_TYPE_PAGE_REQUEST: case MLX5_EVENT_TYPE_PAGE_REQUEST:
{ {
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages); mlx5_core_req_pages_handler(dev, func_id, npages);
......
...@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, ...@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
caps->log_max_mcg = out->hca_cap.log_max_mcg; caps->log_max_mcg = out->hca_cap.log_max_mcg;
caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
......
...@@ -43,10 +43,16 @@ enum { ...@@ -43,10 +43,16 @@ enum {
MLX5_PAGES_TAKE = 2 MLX5_PAGES_TAKE = 2
}; };
enum {
MLX5_BOOT_PAGES = 1,
MLX5_INIT_PAGES = 2,
MLX5_POST_INIT_PAGES = 3
};
struct mlx5_pages_req { struct mlx5_pages_req {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev;
u32 func_id; u32 func_id;
s16 npages; s32 npages;
struct work_struct work; struct work_struct work;
}; };
...@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { ...@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
struct mlx5_query_pages_outbox { struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr; struct mlx5_outbox_hdr hdr;
__be16 num_boot_pages; __be16 rsvd;
__be16 func_id; __be16 func_id;
__be16 init_pages; __be32 num_pages;
__be16 num_pages;
}; };
struct mlx5_manage_pages_inbox { struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr; struct mlx5_inbox_hdr hdr;
__be16 rsvd0; __be16 rsvd;
__be16 func_id; __be16 func_id;
__be16 rsvd1; __be32 num_entries;
__be16 num_entries;
u8 rsvd2[16];
__be64 pas[0]; __be64 pas[0];
}; };
struct mlx5_manage_pages_outbox { struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr; struct mlx5_outbox_hdr hdr;
u8 rsvd0[2]; __be32 num_entries;
__be16 num_entries; u8 rsvd[4];
u8 rsvd1[20];
__be64 pas[0]; __be64 pas[0];
}; };
...@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) ...@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
} }
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s16 *pages, s16 *init_pages, u16 *boot_pages) s32 *npages, int boot)
{ {
struct mlx5_query_pages_inbox in; struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out; struct mlx5_query_pages_outbox out;
...@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, ...@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
memset(&in, 0, sizeof(in)); memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out)); memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) if (err)
return err; return err;
...@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, ...@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
if (out.hdr.status) if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr); return mlx5_cmd_status_to_err(&out.hdr);
if (pages) *npages = be32_to_cpu(out.num_pages);
*pages = be16_to_cpu(out.num_pages);
if (init_pages)
*init_pages = be16_to_cpu(out.init_pages);
if (boot_pages)
*boot_pages = be16_to_cpu(out.num_boot_pages);
*func_id = be16_to_cpu(out.func_id); *func_id = be16_to_cpu(out.func_id);
return err; return err;
...@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, ...@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id); in->func_id = cpu_to_be16(func_id);
in->num_entries = cpu_to_be16(npages); in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
mlx5_core_dbg(dev, "err %d\n", err); mlx5_core_dbg(dev, "err %d\n", err);
if (err) { if (err) {
...@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, ...@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id); in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be16(npages); in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) { if (err) {
...@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, ...@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free; goto out_free;
} }
num_claimed = be16_to_cpu(out->num_entries); num_claimed = be32_to_cpu(out->num_entries);
if (nclaimed) if (nclaimed)
*nclaimed = num_claimed; *nclaimed = num_claimed;
...@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) ...@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
} }
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s16 npages) s32 npages)
{ {
struct mlx5_pages_req *req; struct mlx5_pages_req *req;
...@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, ...@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{ {
u16 uninitialized_var(boot_pages);
s16 uninitialized_var(init_pages);
u16 uninitialized_var(func_id); u16 uninitialized_var(func_id);
s32 uninitialized_var(npages);
int err; int err;
err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
&boot_pages);
if (err) if (err)
return err; return err;
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", return give_pages(dev, func_id, npages, 0);
init_pages, boot_pages, func_id);
return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
} }
static int optimal_reclaimed_pages(void) static int optimal_reclaimed_pages(void)
......
...@@ -309,21 +309,20 @@ struct mlx5_hca_cap { ...@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
__be16 max_desc_sz_rq; __be16 max_desc_sz_rq;
u8 rsvd21[2]; u8 rsvd21[2];
__be16 max_desc_sz_sq_dc; __be16 max_desc_sz_sq_dc;
u8 rsvd22[4]; __be32 max_qp_mcg;
__be16 max_qp_mcg; u8 rsvd22[3];
u8 rsvd23;
u8 log_max_mcg; u8 log_max_mcg;
u8 rsvd24; u8 rsvd23;
u8 log_max_pd; u8 log_max_pd;
u8 rsvd25; u8 rsvd24;
u8 log_max_xrcd; u8 log_max_xrcd;
u8 rsvd26[42]; u8 rsvd25[42];
__be16 log_uar_page_sz; __be16 log_uar_page_sz;
u8 rsvd27[28]; u8 rsvd26[28];
u8 log_msx_atomic_size_qp; u8 log_msx_atomic_size_qp;
u8 rsvd28[2]; u8 rsvd27[2];
u8 log_msx_atomic_size_dc; u8 log_msx_atomic_size_dc;
u8 rsvd29[76]; u8 rsvd28[76];
}; };
...@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd { ...@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
struct mlx5_eqe_page_req { struct mlx5_eqe_page_req {
u8 rsvd0[2]; u8 rsvd0[2];
__be16 func_id; __be16 func_id;
u8 rsvd1[2]; __be32 num_pages;
__be16 num_pages; __be32 rsvd1[5];
__be32 rsvd2[5];
}; };
union ev_data { union ev_data {
......
...@@ -358,7 +358,7 @@ struct mlx5_caps { ...@@ -358,7 +358,7 @@ struct mlx5_caps {
u32 reserved_lkey; u32 reserved_lkey;
u8 local_ca_ack_delay; u8 local_ca_ack_delay;
u8 log_max_mcg; u8 log_max_mcg;
u16 max_qp_mcg; u32 max_qp_mcg;
int min_page_sz; int min_page_sz;
}; };
...@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); ...@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s16 npages); s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void); void mlx5_register_debugfs(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment