Commit 65edd0e7 authored by Daniel Jurgens's avatar Daniel Jurgens Committed by Doug Ledford

IB/mlx5: Only synchronize RCU once when removing mkeys

Instead of synchronizing RCU in a loop when removing mkeys in a batch, do it
once at the end before freeing them. The result is waiting for only one
RCU grace period instead of many serially.
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 72f7cc09
...@@ -220,26 +220,32 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) ...@@ -220,26 +220,32 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{ {
struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c]; struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr; struct mlx5_ib_mr *mr;
int err; LIST_HEAD(del_list);
int i; int i;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
spin_lock_irq(&ent->lock); spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) { if (list_empty(&ent->head)) {
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
return; break;
} }
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_del(&mr->list); list_move(&mr->list, &del_list);
ent->cur--; ent->cur--;
ent->size--; ent->size--;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
err = destroy_mkey(dev, mr); mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
if (err) }
mlx5_ib_warn(dev, "failed destroy mkey\n");
else #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
kfree(mr); synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
} }
} }
...@@ -562,26 +568,32 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) ...@@ -562,26 +568,32 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
{ {
struct mlx5_mr_cache *cache = &dev->cache; struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c]; struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_ib_mr *tmp_mr;
struct mlx5_ib_mr *mr; struct mlx5_ib_mr *mr;
int err; LIST_HEAD(del_list);
cancel_delayed_work(&ent->dwork); cancel_delayed_work(&ent->dwork);
while (1) { while (1) {
spin_lock_irq(&ent->lock); spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) { if (list_empty(&ent->head)) {
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
return; break;
} }
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_del(&mr->list); list_move(&mr->list, &del_list);
ent->cur--; ent->cur--;
ent->size--; ent->size--;
spin_unlock_irq(&ent->lock); spin_unlock_irq(&ent->lock);
err = destroy_mkey(dev, mr); mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
if (err) }
mlx5_ib_warn(dev, "failed destroy mkey\n");
else #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
kfree(mr); synchronize_srcu(&dev->mr_srcu);
#endif
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment