Commit 7c8691a3 authored by Jason Gunthorpe

RDMA/mlx5: Rename the tracking variables for the MR cache

The old names do not clearly indicate the intent.

Link: https://lore.kernel.org/r/20200310082238.239865-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f743ff3b
...@@ -699,15 +699,26 @@ struct mlx5_cache_ent { ...@@ -699,15 +699,26 @@ struct mlx5_cache_ent {
u32 access_mode; u32 access_mode;
u32 page; u32 page;
u32 size; /*
u32 cur; * - available_mrs is the length of list head, ie the number of MRs
* available for immediate allocation.
* - total_mrs is available_mrs plus all in use MRs that could be
* returned to the cache.
* - limit is the low water mark for available_mrs, 2* limit is the
* upper water mark.
* - pending is the number of MRs currently being created
*/
u32 total_mrs;
u32 available_mrs;
u32 limit;
u32 pending;
/* Statistics */
u32 miss; u32 miss;
u32 limit;
struct mlx5_ib_dev *dev; struct mlx5_ib_dev *dev;
struct work_struct work; struct work_struct work;
struct delayed_work dwork; struct delayed_work dwork;
int pending;
struct completion compl; struct completion compl;
}; };
......
...@@ -144,8 +144,8 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) ...@@ -144,8 +144,8 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
spin_lock_irqsave(&ent->lock, flags); spin_lock_irqsave(&ent->lock, flags);
list_add_tail(&mr->list, &ent->head); list_add_tail(&mr->list, &ent->head);
ent->cur++; ent->available_mrs++;
ent->size++; ent->total_mrs++;
spin_unlock_irqrestore(&ent->lock, flags); spin_unlock_irqrestore(&ent->lock, flags);
if (!completion_done(&ent->compl)) if (!completion_done(&ent->compl))
...@@ -231,8 +231,8 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) ...@@ -231,8 +231,8 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
} }
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_move(&mr->list, &del_list); list_move(&mr->list, &del_list);
ent->cur--; ent->available_mrs--;
ent->size--; ent->total_mrs--;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
} }
...@@ -265,16 +265,16 @@ static ssize_t size_write(struct file *filp, const char __user *buf, ...@@ -265,16 +265,16 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
if (var < ent->limit) if (var < ent->limit)
return -EINVAL; return -EINVAL;
if (var > ent->size) { if (var > ent->total_mrs) {
do { do {
err = add_keys(dev, c, var - ent->size); err = add_keys(dev, c, var - ent->total_mrs);
if (err && err != -EAGAIN) if (err && err != -EAGAIN)
return err; return err;
usleep_range(3000, 5000); usleep_range(3000, 5000);
} while (err); } while (err);
} else if (var < ent->size) { } else if (var < ent->total_mrs) {
remove_keys(dev, c, ent->size - var); remove_keys(dev, c, ent->total_mrs - var);
} }
return count; return count;
...@@ -287,7 +287,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count, ...@@ -287,7 +287,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20]; char lbuf[20];
int err; int err;
err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
if (err < 0) if (err < 0)
return err; return err;
...@@ -320,13 +320,13 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, ...@@ -320,13 +320,13 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
if (sscanf(lbuf, "%u", &var) != 1) if (sscanf(lbuf, "%u", &var) != 1)
return -EINVAL; return -EINVAL;
if (var > ent->size) if (var > ent->total_mrs)
return -EINVAL; return -EINVAL;
ent->limit = var; ent->limit = var;
if (ent->cur < ent->limit) { if (ent->available_mrs < ent->limit) {
err = add_keys(dev, c, 2 * ent->limit - ent->cur); err = add_keys(dev, c, 2 * ent->limit - ent->available_mrs);
if (err) if (err)
return err; return err;
} }
...@@ -360,7 +360,7 @@ static int someone_adding(struct mlx5_mr_cache *cache) ...@@ -360,7 +360,7 @@ static int someone_adding(struct mlx5_mr_cache *cache)
int i; int i;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
if (cache->ent[i].cur < cache->ent[i].limit) if (cache->ent[i].available_mrs < cache->ent[i].limit)
return 1; return 1;
} }
...@@ -378,9 +378,9 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) ...@@ -378,9 +378,9 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
return; return;
ent = &dev->cache.ent[i]; ent = &dev->cache.ent[i];
if (ent->cur < 2 * ent->limit && !dev->fill_delay) { if (ent->available_mrs < 2 * ent->limit && !dev->fill_delay) {
err = add_keys(dev, i, 1); err = add_keys(dev, i, 1);
if (ent->cur < 2 * ent->limit) { if (ent->available_mrs < 2 * ent->limit) {
if (err == -EAGAIN) { if (err == -EAGAIN) {
mlx5_ib_dbg(dev, "returned eagain, order %d\n", mlx5_ib_dbg(dev, "returned eagain, order %d\n",
i + 2); i + 2);
...@@ -395,7 +395,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) ...@@ -395,7 +395,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
queue_work(cache->wq, &ent->work); queue_work(cache->wq, &ent->work);
} }
} }
} else if (ent->cur > 2 * ent->limit) { } else if (ent->available_mrs > 2 * ent->limit) {
/* /*
* The remove_keys() logic is performed as garbage collection * The remove_keys() logic is performed as garbage collection
* task. Such task is intended to be run when no other active * task. Such task is intended to be run when no other active
...@@ -411,7 +411,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) ...@@ -411,7 +411,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
if (!need_resched() && !someone_adding(cache) && if (!need_resched() && !someone_adding(cache) &&
time_after(jiffies, cache->last_add + 300 * HZ)) { time_after(jiffies, cache->last_add + 300 * HZ)) {
remove_keys(dev, i, 1); remove_keys(dev, i, 1);
if (ent->cur > ent->limit) if (ent->available_mrs > ent->limit)
queue_work(cache->wq, &ent->work); queue_work(cache->wq, &ent->work);
} else { } else {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
...@@ -462,9 +462,9 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) ...@@ -462,9 +462,9 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
list); list);
list_del(&mr->list); list_del(&mr->list);
ent->cur--; ent->available_mrs--;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
if (ent->cur < ent->limit) if (ent->available_mrs < ent->limit)
queue_work(cache->wq, &ent->work); queue_work(cache->wq, &ent->work);
return mr; return mr;
} }
...@@ -497,9 +497,9 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) ...@@ -497,9 +497,9 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
list); list);
list_del(&mr->list); list_del(&mr->list);
ent->cur--; ent->available_mrs--;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
if (ent->cur < ent->limit) if (ent->available_mrs < ent->limit)
queue_work(cache->wq, &ent->work); queue_work(cache->wq, &ent->work);
break; break;
} }
...@@ -531,7 +531,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) ...@@ -531,7 +531,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->allocated_from_cache = false; mr->allocated_from_cache = false;
destroy_mkey(dev, mr); destroy_mkey(dev, mr);
ent = &cache->ent[c]; ent = &cache->ent[c];
if (ent->cur < ent->limit) if (ent->available_mrs < ent->limit)
queue_work(cache->wq, &ent->work); queue_work(cache->wq, &ent->work);
return; return;
} }
...@@ -539,8 +539,8 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) ...@@ -539,8 +539,8 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
ent = &cache->ent[c]; ent = &cache->ent[c];
spin_lock_irq(&ent->lock); spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head); list_add_tail(&mr->list, &ent->head);
ent->cur++; ent->available_mrs++;
if (ent->cur > 2 * ent->limit) if (ent->available_mrs > 2 * ent->limit)
shrink = 1; shrink = 1;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
...@@ -565,8 +565,8 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) ...@@ -565,8 +565,8 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
} }
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_move(&mr->list, &del_list); list_move(&mr->list, &del_list);
ent->cur--; ent->available_mrs--;
ent->size--; ent->total_mrs--;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
} }
...@@ -604,7 +604,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) ...@@ -604,7 +604,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
dir = debugfs_create_dir(ent->name, cache->root); dir = debugfs_create_dir(ent->name, cache->root);
debugfs_create_file("size", 0600, dir, ent, &size_fops); debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops); debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
debugfs_create_u32("cur", 0400, dir, &ent->cur); debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
debugfs_create_u32("miss", 0600, dir, &ent->miss); debugfs_create_u32("miss", 0600, dir, &ent->miss);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment