Commit 185b9826 authored by Aharon Landau, committed by Jason Gunthorpe

RDMA/mlx5: Remove redundant work in struct mlx5_cache_ent

delayed_cache_work_func() and cache_work_func() are both wrappers around
__cache_work_func(). Instead of keeping a separate, non-delayed work item,
use the delayed work with delay = 0.

Link: https://lore.kernel.org/r/18b6ae205e75f087aa4a2a05c81ea8b66d8d88dc.1644947594.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 748663c8
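
The idea behind the change, sketched outside of this patch: a single delayed_work can cover both the "run now" and "run later" cases, because a delay of 0 queues the work for immediate execution. Below is a minimal, self-contained sketch of that pattern as a hypothetical module; the names (demo_ent, demo_wq, demo_work_func) are illustrative and not part of this commit, only the <linux/workqueue.h> calls are the ones the patch relies on.

	/* Hypothetical sketch: one delayed_work replaces work_struct + delayed_work. */
	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct demo_ent {
		struct delayed_work dwork;	/* single work item for both cases */
	};

	static struct demo_ent demo_ent;
	static struct workqueue_struct *demo_wq;

	static void demo_work_func(struct work_struct *work)
	{
		struct demo_ent *ent =
			container_of(to_delayed_work(work), struct demo_ent, dwork);

		/* ... do the cache maintenance work for ent ... */
		(void)ent;
	}

	static int __init demo_init(void)
	{
		demo_wq = alloc_ordered_workqueue("demo_wq", 0);
		if (!demo_wq)
			return -ENOMEM;

		INIT_DELAYED_WORK(&demo_ent.dwork, demo_work_func);

		/* "Run later": arm the work with a real delay, as the shrink path does. */
		queue_delayed_work(demo_wq, &demo_ent.dwork, msecs_to_jiffies(1000));

		/*
		 * "Run now": mod_delayed_work() with delay 0 queues the work for
		 * immediate execution and pulls forward the pending delayed run
		 * armed above, so no separate work_struct is needed.
		 */
		mod_delayed_work(demo_wq, &demo_ent.dwork, 0);

		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* One cancel covers both cases; no separate cancel_work_sync() needed. */
		cancel_delayed_work_sync(&demo_ent.dwork);
		destroy_workqueue(demo_wq);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

mod_delayed_work() queues the work if it is not pending and otherwise updates its timer, so passing 0 queues it for immediate execution much like queue_work() would, which is why the separate work_struct, cache_work_func() and the extra cancel_work_sync() call become redundant.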
@@ -788,7 +788,6 @@ struct mlx5_cache_ent {
 	u32 miss;
 
 	struct mlx5_ib_dev *dev;
-	struct work_struct work;
 	struct delayed_work dwork;
 };
@@ -465,14 +465,14 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 		return;
 	if (ent->available_mrs < ent->limit) {
 		ent->fill_to_high_water = true;
-		queue_work(ent->dev->cache.wq, &ent->work);
+		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
 	} else if (ent->fill_to_high_water &&
 		   ent->available_mrs + ent->pending < 2 * ent->limit) {
 		/*
 		 * Once we start populating due to hitting a low water mark
 		 * continue until we pass the high water mark.
 		 */
-		queue_work(ent->dev->cache.wq, &ent->work);
+		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
 	} else if (ent->available_mrs == 2 * ent->limit) {
 		ent->fill_to_high_water = false;
 	} else if (ent->available_mrs > 2 * ent->limit) {
@@ -482,7 +482,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
 					   msecs_to_jiffies(1000));
 		else
-			queue_work(ent->dev->cache.wq, &ent->work);
+			mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
 	}
 }
@@ -558,14 +558,6 @@ static void delayed_cache_work_func(struct work_struct *work)
 	__cache_work_func(ent);
 }
 
-static void cache_work_func(struct work_struct *work)
-{
-	struct mlx5_cache_ent *ent;
-
-	ent = container_of(work, struct mlx5_cache_ent, work);
-	__cache_work_func(ent);
-}
-
 /* Allocate a special entry from the cache */
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 				       unsigned int entry, int access_flags)
@@ -726,7 +718,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		ent->dev = dev;
 		ent->limit = 0;
 
-		INIT_WORK(&ent->work, cache_work_func);
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 
 		if (i > MR_CACHE_LAST_STD_ENTRY) {
@@ -770,7 +761,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		spin_lock_irq(&ent->lock);
 		ent->disabled = true;
 		spin_unlock_irq(&ent->lock);
-		cancel_work_sync(&ent->work);
 		cancel_delayed_work_sync(&ent->dwork);
 	}