Commit 0cf53c12 authored by Saeed Mahameed

net/mlx5: FWPage, Use async events chain

Remove the explicit call to mlx5_core_req_pages_handler on
MLX5_EVENT_TYPE_PAGE_REQUEST and let the FW page logic register its own
handler when it's ready.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 6933a937
...@@ -398,17 +398,6 @@ static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr) ...@@ -398,17 +398,6 @@ static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
mlx5_eq_cq_event(eq, cqn, eqe->type); mlx5_eq_cq_event(eq, cqn, eqe->type);
break; break;
case MLX5_EVENT_TYPE_PAGE_REQUEST:
{
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
}
break;
case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
mlx5_port_module_event(dev, eqe); mlx5_port_module_event(dev, eqe);
break; break;
......
...@@ -916,16 +916,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, ...@@ -916,16 +916,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages; goto reclaim_boot_pages;
} }
err = mlx5_pagealloc_start(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
goto reclaim_boot_pages;
}
err = mlx5_cmd_init_hca(dev, sw_owner_id); err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) { if (err) {
dev_err(&pdev->dev, "init hca failed\n"); dev_err(&pdev->dev, "init hca failed\n");
goto err_pagealloc_stop; goto reclaim_boot_pages;
} }
mlx5_set_driver_version(dev); mlx5_set_driver_version(dev);
...@@ -953,6 +947,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, ...@@ -953,6 +947,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_get_uars; goto err_get_uars;
} }
mlx5_pagealloc_start(dev);
err = mlx5_eq_table_create(dev); err = mlx5_eq_table_create(dev);
if (err) { if (err) {
dev_err(&pdev->dev, "Failed to create EQs\n"); dev_err(&pdev->dev, "Failed to create EQs\n");
...@@ -1039,6 +1035,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, ...@@ -1039,6 +1035,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_eq_table_destroy(dev); mlx5_eq_table_destroy(dev);
err_eq_table: err_eq_table:
mlx5_pagealloc_stop(dev);
mlx5_put_uars_page(dev, priv->uar); mlx5_put_uars_page(dev, priv->uar);
err_get_uars: err_get_uars:
...@@ -1052,9 +1049,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, ...@@ -1052,9 +1049,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto out_err; goto out_err;
} }
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
reclaim_boot_pages: reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev); mlx5_reclaim_startup_pages(dev);
...@@ -1100,16 +1094,18 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, ...@@ -1100,16 +1094,18 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_fpga_device_stop(dev); mlx5_fpga_device_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer); mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev); mlx5_eq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
mlx5_put_uars_page(dev, priv->uar); mlx5_put_uars_page(dev, priv->uar);
if (cleanup) if (cleanup)
mlx5_cleanup_once(dev); mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev, cleanup); mlx5_stop_health_poll(dev, cleanup);
err = mlx5_cmd_teardown_hca(dev); err = mlx5_cmd_teardown_hca(dev);
if (err) { if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
goto out; goto out;
} }
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev); mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0); mlx5_core_disable_hca(dev, 0);
mlx5_cmd_cleanup(dev); mlx5_cmd_cleanup(dev);
...@@ -1186,12 +1182,14 @@ static int init_one(struct pci_dev *pdev, ...@@ -1186,12 +1182,14 @@ static int init_one(struct pci_dev *pdev,
goto close_pci; goto close_pci;
} }
mlx5_pagealloc_init(dev); err = mlx5_pagealloc_init(dev);
if (err)
goto err_pagealloc_init;
err = mlx5_load_one(dev, priv, true); err = mlx5_load_one(dev, priv, true);
if (err) { if (err) {
dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err); dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
goto clean_health; goto err_load_one;
} }
request_module_nowait(MLX5_IB_MOD); request_module_nowait(MLX5_IB_MOD);
...@@ -1205,8 +1203,9 @@ static int init_one(struct pci_dev *pdev, ...@@ -1205,8 +1203,9 @@ static int init_one(struct pci_dev *pdev,
clean_load: clean_load:
mlx5_unload_one(dev, priv, true); mlx5_unload_one(dev, priv, true);
clean_health: err_load_one:
mlx5_pagealloc_cleanup(dev); mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
mlx5_health_cleanup(dev); mlx5_health_cleanup(dev);
close_pci: close_pci:
mlx5_pci_close(dev, priv); mlx5_pci_close(dev, priv);
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h> #include <linux/mlx5/cmd.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "lib/eq.h"
enum { enum {
MLX5_PAGES_CANT_GIVE = 0, MLX5_PAGES_CANT_GIVE = 0,
...@@ -433,15 +434,28 @@ static void pages_work_handler(struct work_struct *work) ...@@ -433,15 +434,28 @@ static void pages_work_handler(struct work_struct *work)
kfree(req); kfree(req);
} }
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, static int req_pages_handler(struct notifier_block *nb,
s32 npages) unsigned long type, void *data)
{ {
struct mlx5_pages_req *req; struct mlx5_pages_req *req;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
u16 func_id;
s32 npages;
priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
dev = container_of(priv, struct mlx5_core_dev, priv);
eqe = data;
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
func_id, npages);
req = kzalloc(sizeof(*req), GFP_ATOMIC); req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) { if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n"); mlx5_core_warn(dev, "failed to allocate pages request\n");
return; return NOTIFY_DONE;
} }
req->dev = dev; req->dev = dev;
...@@ -449,6 +463,7 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, ...@@ -449,6 +463,7 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
req->npages = npages; req->npages = npages;
INIT_WORK(&req->work, pages_work_handler); INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work); queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
} }
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
...@@ -524,29 +539,32 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) ...@@ -524,29 +539,32 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
return 0; return 0;
} }
void mlx5_pagealloc_init(struct mlx5_core_dev *dev) int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{ {
dev->priv.page_root = RB_ROOT; dev->priv.page_root = RB_ROOT;
INIT_LIST_HEAD(&dev->priv.free_list); INIT_LIST_HEAD(&dev->priv.free_list);
dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
if (!dev->priv.pg_wq)
return -ENOMEM;
return 0;
} }
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{ {
/* nothing */ destroy_workqueue(dev->priv.pg_wq);
} }
int mlx5_pagealloc_start(struct mlx5_core_dev *dev) void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{ {
dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
if (!dev->priv.pg_wq) mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
return -ENOMEM;
return 0;
} }
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{ {
destroy_workqueue(dev->priv.pg_wq); mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
flush_workqueue(dev->priv.pg_wq);
} }
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
......
...@@ -564,6 +564,7 @@ struct mlx5_priv { ...@@ -564,6 +564,7 @@ struct mlx5_priv {
struct mlx5_eq_table *eq_table; struct mlx5_eq_table *eq_table;
/* pages stuff */ /* pages stuff */
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq; struct workqueue_struct *pg_wq;
struct rb_root page_root; struct rb_root page_root;
int fw_pages; int fw_pages;
...@@ -962,9 +963,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); ...@@ -962,9 +963,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port); u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev); int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages); s32 npages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment