Commit 3f4299be authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: abstract bo_base init function

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7e4237db
@@ -94,6 +94,36 @@ struct amdgpu_prt_cb {
	struct dma_fence_cb cb;
};
/**
 * amdgpu_vm_bo_base_init - initialize a VM BO tracking structure
 * @base: tracking structure embedded in the caller's object
 * @vm: VM the BO belongs to
 * @bo: BO being tracked, may be NULL
 *
 * Links @base to @vm and @bo. If the BO is a per-VM BO (it shares the
 * reservation object of the VM's root page directory) but is currently not
 * placed in one of its preferred domains, queue it on the VM's evicted list
 * so it gets validated again on the next VM use.
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	/* Self-link both nodes so the list_move_tail() below is valid even
	 * though base was never added to any list.
	 */
	INIT_LIST_HEAD(&base->bo_list);
	INIT_LIST_HEAD(&base->vm_status);

	/* Nothing more to track without a BO */
	if (!bo)
		return;

	list_add_tail(&base->bo_list, &bo->va);

	/* Only per-VM BOs, sharing the root PD's reservation object, are
	 * tracked on the VM status lists.
	 */
	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return;

	/* BO already resides in one of its preferred domains, nothing to do */
	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per-VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure
	 * it is validated on next VM use to avoid a fault.
	 */
	spin_lock(&vm->status_lock);
	list_move_tail(&base->vm_status, &vm->evicted);
	spin_unlock(&vm->status_lock);
}
/** /**
* amdgpu_vm_level_shift - return the addr shift for each level * amdgpu_vm_level_shift - return the addr shift for each level
* *
...@@ -446,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, ...@@ -446,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
*/ */
pt->parent = amdgpu_bo_ref(parent->base.bo); pt->parent = amdgpu_bo_ref(parent->base.bo);
entry->base.vm = vm; amdgpu_vm_bo_base_init(&entry->base, vm, pt);
entry->base.bo = pt;
list_add_tail(&entry->base.bo_list, &pt->va);
spin_lock(&vm->status_lock); spin_lock(&vm->status_lock);
list_add(&entry->base.vm_status, &vm->relocated); list_move(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock); spin_unlock(&vm->status_lock);
} }
...@@ -1841,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, ...@@ -1841,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) { if (bo_va == NULL) {
return NULL; return NULL;
} }
bo_va->base.vm = vm; amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
bo_va->base.bo = bo;
INIT_LIST_HEAD(&bo_va->base.bo_list);
INIT_LIST_HEAD(&bo_va->base.vm_status);
bo_va->ref_count = 1; bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids); INIT_LIST_HEAD(&bo_va->invalids);
if (!bo)
return bo_va;
list_add_tail(&bo_va->base.bo_list, &bo->va);
if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return bo_va;
if (bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
return bo_va;
/*
* We checked all the prerequisites, but it looks like this per VM BO
* is currently evicted. add the BO to the evicted list to make sure it
* is validated on next VM use to avoid fault.
* */
spin_lock(&vm->status_lock);
list_move_tail(&bo_va->base.vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);
return bo_va; return bo_va;
} }
...@@ -2370,6 +2374,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -2370,6 +2374,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid) int vm_context, unsigned int pasid)
{ {
struct amdgpu_bo_param bp; struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8); AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance; unsigned ring_instance;
...@@ -2431,23 +2436,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -2431,23 +2436,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp.flags = flags; bp.flags = flags;
bp.type = ttm_bo_type_kernel; bp.type = ttm_bo_type_kernel;
bp.resv = NULL; bp.resv = NULL;
r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo); r = amdgpu_bo_create(adev, &bp, &root);
if (r) if (r)
goto error_free_sched_entity; goto error_free_sched_entity;
r = amdgpu_bo_reserve(vm->root.base.bo, true); r = amdgpu_bo_reserve(root, true);
if (r) if (r)
goto error_free_root; goto error_free_root;
r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level, adev->vm_manager.root_level,
vm->pte_support_ats); vm->pte_support_ats);
if (r) if (r)
goto error_unreserve; goto error_unreserve;
vm->root.base.vm = vm; amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
list_add_tail(&vm->root.base.vm_status, &vm->evicted);
amdgpu_bo_unreserve(vm->root.base.bo); amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) { if (pasid) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment