Commit 646b9025 authored by Christian König, committed by Alex Deucher

drm/amdgpu: use a single linked list for amdgpu_vm_bo_base

Use a single linked list instead of a double linked list. This gets the
size of amdgpu_vm_pt down to 64 bytes again.

We could even reduce it down to 32 bytes, but that would require some
rather extreme hacks.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e83dfe4d
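
For context on the size claim: on a 64-bit kernel an embedded struct list_head costs two pointers (16 bytes) per element, while a single forward link costs one pointer (8 bytes). A minimal, compilable sketch of the before/after layouts, with hypothetical struct names standing in for amdgpu_vm_bo_base:

#include <stdio.h>

/* Stand-in for the kernel's struct list_head: two pointers. */
struct list_head { struct list_head *next, *prev; };

/* Hypothetical "before" layout: links embedded as a list_head. */
struct bo_base_double {
	void *vm;
	void *bo;
	struct list_head bo_list;    /* 16 bytes of links on 64-bit */
};

/* Hypothetical "after" layout: a single forward link. */
struct bo_base_single {
	void *vm;
	void *bo;
	struct bo_base_single *next; /* 8 bytes of links on 64-bit */
};

int main(void)
{
	printf("double-linked base: %zu bytes\n", sizeof(struct bo_base_double));
	printf("single-linked base: %zu bytes\n", sizeof(struct bo_base_single));
	return 0;
}

The trade-off is that removing an element now requires walking the list (see the amdgpu_vm_bo_rmv hunk below) instead of an O(1) list_del().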
@@ -448,7 +448,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 		return -ENOMEM;
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
-	INIT_LIST_HEAD(&bo->va);
+	bo->vm_bo = NULL;
 	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 		bp->domain;
 	bo->allowed_domains = bo->preferred_domains;
...
@@ -89,8 +89,8 @@ struct amdgpu_bo {
 	void				*metadata;
 	u32				metadata_size;
 	unsigned			prime_shared_count;
-	/* list of all virtual address to which this bo is associated to */
-	struct list_head		va;
+	/* per VM structure for page tables and with virtual addresses */
+	struct amdgpu_vm_bo_base	*vm_bo;
 	/* Constant after initialization */
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
...
@@ -309,12 +309,13 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 {
 	base->vm = vm;
 	base->bo = bo;
-	INIT_LIST_HEAD(&base->bo_list);
+	base->next = NULL;
 	INIT_LIST_HEAD(&base->vm_status);

 	if (!bo)
 		return;
-	list_add_tail(&base->bo_list, &bo->va);
+	base->next = bo->vm_bo;
+	bo->vm_bo = base;

 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
 		return;
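
The two new assignments in amdgpu_vm_bo_base_init() are a push-front insertion: the new base points at the old head stored in the BO, then becomes the head. The same idiom in isolation, with hypothetical node/push_front names:

#include <stdio.h>

/* Hypothetical stand-in for amdgpu_vm_bo_base in the hunk above. */
struct node {
	int id;
	struct node *next;
};

/* Push-front, mirroring: base->next = bo->vm_bo; bo->vm_bo = base; */
static void push_front(struct node **head, struct node *n)
{
	n->next = *head;  /* new element points at the old head */
	*head = n;        /* and becomes the new head */
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL };
	struct node *head = NULL;

	push_front(&head, &a);
	push_front(&head, &b);  /* list is now 2 -> 1 */

	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->id);
	return 0;
}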
@@ -352,7 +353,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
 	if (!parent)
 		return NULL;

-	return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
+	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
 }

 /**
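
container_of() recovers the enclosing structure from a pointer to one of its members, which is how the head pointer parent->vm_bo is turned back into an amdgpu_vm_pt above. A freestanding sketch using a simplified macro (the kernel's version adds compile-time type checking) and hypothetical struct names:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: step back from a member's address to the
 * start of its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base {           /* stand-in for amdgpu_vm_bo_base */
	struct base *next;
};

struct page_table {     /* stand-in for amdgpu_vm_pt */
	int level;
	struct base base;
};

int main(void)
{
	struct page_table pt = { .level = 2 };
	struct base *b = &pt.base;

	/* Recover the wrapper, as amdgpu_vm_pt_parent() does from vm_bo. */
	struct page_table *p = container_of(b, struct page_table, base);
	printf("level = %d\n", p->level);
	return 0;
}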
@@ -954,7 +955,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,

 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
 		if (entry->base.bo) {
-			list_del(&entry->base.bo_list);
+			entry->base.bo->vm_bo = NULL;
 			list_del(&entry->base.vm_status);
 			amdgpu_bo_unref(&entry->base.bo->shadow);
 			amdgpu_bo_unref(&entry->base.bo);
@@ -1162,12 +1163,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 				       struct amdgpu_bo *bo)
 {
-	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_vm_bo_base *base;

-	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
-		if (bo_va->base.vm == vm) {
-			return bo_va;
-		}
+	for (base = bo->vm_bo; base; base = base->next) {
+		if (base->vm != vm)
+			continue;
+
+		return container_of(base, struct amdgpu_bo_va, base);
 	}
 	return NULL;
 }
@@ -2728,11 +2730,21 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping, *next;
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
+	struct amdgpu_vm_bo_base **base;

-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)
-		vm->bulk_moveable = false;
+	if (bo) {
+		if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+			vm->bulk_moveable = false;

-	list_del(&bo_va->base.bo_list);
+		for (base = &bo_va->base.bo->vm_bo; *base;
+		     base = &(*base)->next) {
+			if (*base != &bo_va->base)
+				continue;
+
+			*base = bo_va->base.next;
+			break;
+		}
+	}

 	spin_lock(&vm->invalidated_lock);
 	list_del(&bo_va->base.vm_status);
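
The new removal loop walks the address of each next link rather than the nodes themselves, so unlinking the head and unlinking a middle element are the same assignment. The same pointer-to-pointer idiom in isolation, with hypothetical node/unlink_node names:

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Unlink victim without a prev pointer: walk the *address* of each next
 * link, so overwriting *link works identically for the head and for any
 * middle element, matching the loop over &bo_va->base.bo->vm_bo above. */
static void unlink_node(struct node **head, struct node *victim)
{
	struct node **link;

	for (link = head; *link; link = &(*link)->next) {
		if (*link != victim)
			continue;

		*link = victim->next;
		break;
	}
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	unlink_node(&head, &b);
	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->id);  /* prints 1, then 3 */
	return 0;
}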
@@ -2774,7 +2786,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	if (bo->parent && bo->parent->shadow == bo)
 		bo = bo->parent;

-	list_for_each_entry(bo_base, &bo->va, bo_list) {
+	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;

 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
...
@@ -129,7 +129,7 @@ struct amdgpu_vm_bo_base {
 	struct amdgpu_bo		*bo;

 	/* protected by bo being reserved */
-	struct list_head		bo_list;
+	struct amdgpu_vm_bo_base	*next;

 	/* protected by spinlock */
 	struct list_head		vm_status;