Commit 528e083d authored by Christian König, committed by Alex Deucher

drm/amdgpu: rename rmn to amn in the MMU notifier code (v2)

Just a copy&paste leftover from radeon.

v2: rebase (Alex)
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent abea57d7
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -64,7 +64,7 @@ struct amdgpu_mn_node {
 };
 
 /**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the amn
  *
  * @work: previously sheduled work item
  *
@@ -72,26 +72,26 @@ struct amdgpu_mn_node {
  */
 static void amdgpu_mn_destroy(struct work_struct *work)
 {
-	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
-	struct amdgpu_device *adev = rmn->adev;
+	struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+	struct amdgpu_device *adev = amn->adev;
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->lock);
-	hash_del(&rmn->node);
+	down_write(&amn->lock);
+	hash_del(&amn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node,
-					     &rmn->objects.rb_root, it.rb) {
+					     &amn->objects.rb_root, it.rb) {
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 			list_del_init(&bo->mn_list);
 		}
 		kfree(node);
 	}
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
-	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
-	kfree(rmn);
+	mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+	kfree(amn);
 }
 
 /**
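(Editor's note: the teardown above hinges on container_of(), which recovers the enclosing structure from a pointer to one of its embedded members; amdgpu_mn_release() below queues amdgpu_mn_destroy() as deferred work for the same reason, since a notifier cannot free itself from inside its own callback. A minimal user-space sketch of the container_of() pattern, with illustrative names only:

```c
#include <stddef.h>
#include <stdio.h>

/* Same definition the kernel uses, reduced to its core. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct notifier {
	int id;
	struct work_item work;	/* embedded member, like amn->work */
};

static void destroy_handler(struct work_item *w)
{
	/* Recover the enclosing notifier from the embedded work pointer,
	 * as amdgpu_mn_destroy() does. */
	struct notifier *n = container_of(w, struct notifier, work);
	printf("destroying notifier %d\n", n->id);
}

int main(void)
{
	struct notifier n = { .id = 42 };
	destroy_handler(&n.work);	/* stands in for schedule_work() */
	return 0;
}
```
)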
@@ -105,9 +105,9 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 static void amdgpu_mn_release(struct mmu_notifier *mn,
 			      struct mm_struct *mm)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
-	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
-	schedule_work(&rmn->work);
+	INIT_WORK(&amn->work, amdgpu_mn_destroy);
+	schedule_work(&amn->work);
 }
@@ -130,31 +130,31 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
- * amdgpu_mn_read_lock - take the rmn read lock
+ * amdgpu_mn_read_lock - take the amn read lock
  *
- * @rmn: our notifier
+ * @amn: our notifier
  *
- * Take the rmn read side lock.
+ * Take the amn read side lock.
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
 {
-	mutex_lock(&rmn->read_lock);
-	if (atomic_inc_return(&rmn->recursion) == 1)
-		down_read_non_owner(&rmn->lock);
-	mutex_unlock(&rmn->read_lock);
+	mutex_lock(&amn->read_lock);
+	if (atomic_inc_return(&amn->recursion) == 1)
+		down_read_non_owner(&amn->lock);
+	mutex_unlock(&amn->read_lock);
 }
 
 /**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the amn read lock
  *
- * @rmn: our notifier
+ * @amn: our notifier
  *
- * Drop the rmn read side lock.
+ * Drop the amn read side lock.
  */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-	if (atomic_dec_return(&rmn->recursion) == 0)
-		up_read_non_owner(&rmn->lock);
+	if (atomic_dec_return(&amn->recursion) == 0)
+		up_read_non_owner(&amn->lock);
 }
 
 /**
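(Editor's note: the pair above is a recursion-counted read lock. invalidate_range_start/end notifications can nest, so only the outermost start takes the semaphore and only the matching final end releases it; the _non_owner variants are needed because the lock and unlock can run in different contexts. A hedged user-space sketch of the counting logic, using POSIX primitives instead of a kernel rwsem:

```c
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t read_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int recursion;

static void range_start(void)
{
	pthread_mutex_lock(&read_mutex);
	/* Only the first of possibly nested start notifications takes
	 * the shared lock; later ones just bump the counter. */
	if (atomic_fetch_add(&recursion, 1) == 0)
		pthread_rwlock_rdlock(&lock);
	pthread_mutex_unlock(&read_mutex);
}

static void range_end(void)
{
	/* The matching final end notification drops the lock again. */
	if (atomic_fetch_sub(&recursion, 1) == 1)
		pthread_rwlock_unlock(&lock);
}

int main(void)
{
	range_start();
	range_start();	/* nested: no second rdlock taken */
	range_end();
	range_end();	/* last one out releases the lock */
	puts("balanced");
	return 0;
}
```
)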
@@ -202,15 +202,15 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 					 unsigned long start,
 					 unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(rmn);
+	amdgpu_mn_read_lock(amn);
 
-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
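(Editor's note: the `end -= 1` above converts the mmu notifier's half-open [start, end) range into the inclusive [start, last] convention the interval tree uses, as the in-code comment says. A small stand-alone sketch of that overlap test, with illustrative names:

```c
#include <stdbool.h>
#include <stdio.h>

/* Inclusive range, like it->start / it->last in the interval tree. */
struct range { unsigned long start, last; };

static bool overlaps(struct range r, unsigned long start, unsigned long end)
{
	unsigned long last = end - 1;	/* exclusive end -> inclusive last */
	return r.start <= last && start <= r.last;
}

int main(void)
{
	struct range r = { .start = 0x1000, .last = 0x1fff };
	/* [0x2000, 0x3000) does not touch [0x1000, 0x1fff]. */
	printf("%d\n", overlaps(r, 0x2000, 0x3000));	/* prints 0 */
	/* [0x1fff, 0x2000) does. */
	printf("%d\n", overlaps(r, 0x1fff, 0x2000));	/* prints 1 */
	return 0;
}
```
)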
@@ -238,15 +238,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 					 unsigned long start,
 					 unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(rmn);
+	amdgpu_mn_read_lock(amn);
 
-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
@@ -279,9 +279,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
 					   unsigned long start,
 					   unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
-	amdgpu_mn_read_unlock(rmn);
+	amdgpu_mn_read_unlock(amn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +315,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 			       enum amdgpu_mn_type type)
 {
 	struct mm_struct *mm = current->mm;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	unsigned long key = AMDGPU_MN_KEY(mm, type);
 	int r;
@@ -325,41 +325,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 		return ERR_PTR(-EINTR);
 	}
 
-	hash_for_each_possible(adev->mn_hash, rmn, node, key)
-		if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+	hash_for_each_possible(adev->mn_hash, amn, node, key)
+		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
 			goto release_locks;
 
-	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
-	if (!rmn) {
-		rmn = ERR_PTR(-ENOMEM);
+	amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+	if (!amn) {
+		amn = ERR_PTR(-ENOMEM);
 		goto release_locks;
 	}
 
-	rmn->adev = adev;
-	rmn->mm = mm;
-	init_rwsem(&rmn->lock);
-	rmn->type = type;
-	rmn->mn.ops = &amdgpu_mn_ops[type];
-	rmn->objects = RB_ROOT_CACHED;
-	mutex_init(&rmn->read_lock);
-	atomic_set(&rmn->recursion, 0);
+	amn->adev = adev;
+	amn->mm = mm;
+	init_rwsem(&amn->lock);
+	amn->type = type;
+	amn->mn.ops = &amdgpu_mn_ops[type];
+	amn->objects = RB_ROOT_CACHED;
+	mutex_init(&amn->read_lock);
+	atomic_set(&amn->recursion, 0);
 
-	r = __mmu_notifier_register(&rmn->mn, mm);
+	r = __mmu_notifier_register(&amn->mn, mm);
 	if (r)
-		goto free_rmn;
+		goto free_amn;
 
-	hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
 
 release_locks:
 	up_write(&mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
 
-	return rmn;
+	return amn;
 
-free_rmn:
+free_amn:
 	up_write(&mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
-	kfree(rmn);
+	kfree(amn);
 
 	return ERR_PTR(r);
 }
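(Editor's note: amdgpu_mn_get() is a lookup-or-create. Under adev->mn_lock and mmap_sem it searches the adev->mn_hash bucket for a notifier already registered for this (mm, type) key and only allocates and registers a new one on a miss. A simplified user-space sketch using a plain list for the bucket; names are illustrative:

```c
#include <stdlib.h>

enum mn_type { TYPE_GFX, TYPE_HSA };

struct notifier {
	void *mm;		/* stands in for struct mm_struct * */
	enum mn_type type;
	struct notifier *next;
};

static struct notifier *bucket;	/* one hash bucket, for illustration */

static struct notifier *get_notifier(void *mm, enum mn_type type)
{
	struct notifier *n;

	/* Reuse an existing entry with the same (mm, type) key... */
	for (n = bucket; n; n = n->next)
		if (n->mm == mm && n->type == type)
			return n;

	/* ...and only allocate and link a new one on a miss. */
	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->mm = mm;
	n->type = type;
	n->next = bucket;
	bucket = n;
	return n;
}

int main(void)
{
	int mm1;	/* stand-in for an address space */
	struct notifier *a = get_notifier(&mm1, TYPE_GFX);
	struct notifier *b = get_notifier(&mm1, TYPE_GFX);
	return a == b ? 0 : 1;	/* same key -> same notifier */
}
```
)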
@@ -379,14 +379,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	enum amdgpu_mn_type type =
 		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	struct amdgpu_mn_node *node = NULL, *new_node;
 	struct list_head bos;
 	struct interval_tree_node *it;
 
-	rmn = amdgpu_mn_get(adev, type);
-	if (IS_ERR(rmn))
-		return PTR_ERR(rmn);
+	amn = amdgpu_mn_get(adev, type);
+	if (IS_ERR(amn))
+		return PTR_ERR(amn);
 
 	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
 	if (!new_node)
@@ -394,12 +394,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	INIT_LIST_HEAD(&bos);
 
-	down_write(&rmn->lock);
-	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+	down_write(&amn->lock);
+	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
 		kfree(node);
 		node = container_of(it, struct amdgpu_mn_node, it);
-		interval_tree_remove(&node->it, &rmn->objects);
+		interval_tree_remove(&node->it, &amn->objects);
 		addr = min(it->start, addr);
 		end = max(it->last, end);
 		list_splice(&node->bos, &bos);
@@ -410,7 +410,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	else
 		kfree(new_node);
 
-	bo->mn = rmn;
+	bo->mn = amn;
 
 	node->it.start = addr;
 	node->it.last = end;
@@ -418,9 +418,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	list_splice(&bos, &node->bos);
 	list_add(&bo->mn_list, &node->bos);
-	interval_tree_insert(&node->it, &rmn->objects);
+	interval_tree_insert(&node->it, &amn->objects);
 
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 
 	return 0;
 }
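(Editor's note: the registration path keeps the interval tree disjoint. The while loop a few hunks up removes every existing node overlapping the new range, grows the new range to cover each one, and splices their BO lists onto the single surviving node inserted above. A hedged sketch of that merge loop, using a flat array in place of the interval tree:

```c
#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, last; bool used; };

/* Two pre-existing disjoint ranges, standing in for tree nodes. */
static struct range tree[8] = {
	{ 0x1000, 0x1fff, true },
	{ 0x3000, 0x3fff, true },
};

/* Pop one range overlapping [addr, last], or return NULL. */
static struct range *remove_overlap(unsigned long addr, unsigned long last)
{
	for (int i = 0; i < 8; i++) {
		struct range *r = &tree[i];
		if (r->used && r->start <= last && addr <= r->last) {
			r->used = false;	/* like interval_tree_remove() */
			return r;
		}
	}
	return NULL;
}

int main(void)
{
	unsigned long addr = 0x1800, last = 0x30ff;
	struct range *r;

	/* Like the while loop in amdgpu_mn_register(): pop each
	 * overlapping range and grow the new one to swallow it. */
	while ((r = remove_overlap(addr, last))) {
		if (r->start < addr) addr = r->start;
		if (r->last > last) last = r->last;
	}
	printf("merged: [0x%lx, 0x%lx]\n", addr, last);	/* [0x1000, 0x3fff] */
	return 0;
}
```
)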
@@ -435,18 +435,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	struct list_head *head;
 
 	mutex_lock(&adev->mn_lock);
-	rmn = bo->mn;
-	if (rmn == NULL) {
+	amn = bo->mn;
+	if (amn == NULL) {
 		mutex_unlock(&adev->mn_lock);
 		return;
 	}
 
-	down_write(&rmn->lock);
+	down_write(&amn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -457,11 +457,11 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	if (list_empty(head)) {
 		struct amdgpu_mn_node *node;
 
 		node = container_of(head, struct amdgpu_mn_node, bos);
-		interval_tree_remove(&node->it, &rmn->objects);
+		interval_tree_remove(&node->it, &amn->objects);
 		kfree(node);
 	}
 
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
 }