Commit 56467ebf authored by Christian König, committed by Alex Deucher

drm/amdgpu: split VM PD and PT handling during CS

This way we avoid the extra allocation for the page directory entry.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 3c0eea6c
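
For orientation, a minimal sketch of the two-step flow this patch introduces on the caller side, condensed from the CS and GEM VA hunks below. The ticket, vm and error labels are placeholders assumed for illustration; only the amdgpu_vm_get_pd_bo()/amdgpu_vm_get_pt_bos() and ttm_eu_reserve_buffers() calls are taken from the diff.

	struct amdgpu_bo_list_entry pd_entry;	 /* PD entry now lives in the caller, no allocation needed */
	struct amdgpu_bo_list_entry *pt_entries; /* only the page tables still need a heap array */
	struct list_head validated, duplicates;
	struct ww_acquire_ctx ticket;
	int r;

	INIT_LIST_HEAD(&validated);
	INIT_LIST_HEAD(&duplicates);

	/* Step 1: queue the page directory on the validation list before reserving. */
	amdgpu_vm_get_pd_bo(vm, &validated, &pd_entry);

	r = ttm_eu_reserve_buffers(&ticket, &validated, true, &duplicates);
	if (r)
		goto error;

	/* Step 2: after reservation, collect the page tables on the duplicates list;
	 * this is the only remaining drm_malloc_ab() allocation. */
	pt_entries = amdgpu_vm_get_pt_bos(vm, &duplicates);
	if (!pt_entries) {
		r = -ENOMEM;
		goto error_unreserve;
	}
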
@@ -980,9 +980,10 @@ struct amdgpu_vm_manager {
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
struct list_head *duplicates);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync);
@@ -1253,6 +1254,7 @@ struct amdgpu_cs_parser {
unsigned nchunks;
struct amdgpu_cs_chunk *chunks;
/* relocations */
struct amdgpu_bo_list_entry vm_pd;
struct amdgpu_bo_list_entry *vm_bos;
struct list_head validated;
struct fence *fence;
......
@@ -387,8 +387,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
}
INIT_LIST_HEAD(&duplicates);
p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
&p->validated, &duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
@@ -397,6 +396,12 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
if (unlikely(r != 0))
goto error_reserve;
p->vm_bos = amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
if (!p->vm_bos) {
r = -ENOMEM;
goto error_validate;
}
r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
if (r)
goto error_validate;
......
@@ -448,6 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
struct amdgpu_bo_list_entry vm_pd;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
unsigned domain;
@@ -460,14 +461,18 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
tv.shared = true;
list_add(&tv.head, &list);
vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list, &duplicates);
if (!vm_bos)
return;
amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
/* Provide duplicates to avoid -EALREADY */
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_free;
goto error_print;
vm_bos = amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
if (!vm_bos) {
r = -ENOMEM;
goto error_unreserve;
}
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
@@ -489,10 +494,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);
error_free:
drm_free_large(vm_bos);
error_print:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
......
@@ -75,39 +75,50 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
}
/**
* amdgpu_vm_get_bos - add the vm BOs to a validation list
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
*
* @vm: vm providing the BOs
* @validated: head of validation list
* @entry: entry to add
*
* Add the page directory to the list of BOs to
* validate for command submission.
*/
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
entry->robj = vm->page_directory;
entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &vm->page_directory->tbo;
entry->tv.shared = true;
list_add(&entry->tv.head, validated);
}
/**
* amdgpu_vm_get_bos - add the vm BOs to a validation list
*
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
* Add the page directory to the list of BOs to
* validate for command submission (cayman+).
*/
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
struct list_head *duplicates)
{
struct amdgpu_bo_list_entry *list;
unsigned i, idx;
list = drm_malloc_ab(vm->max_pde_used + 2,
list = drm_malloc_ab(vm->max_pde_used + 1,
sizeof(struct amdgpu_bo_list_entry));
if (!list) {
if (!list)
return NULL;
}
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[0].priority = 0;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
list_add(&list[0].tv.head, validated);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
for (i = 0, idx = 0; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
continue;
......
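
The last hunk is cut off inside the page-table loop. Judging from the entry fields shown for the page directory above, the remainder of amdgpu_vm_get_pt_bos() presumably fills each existing page-table BO the same way and queues it on the duplicates list, roughly:

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].priority = 0;
		list[idx].tv.bo = &vm->page_tables[i].bo->tbo;
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, duplicates);
	}

	return list;

Callers would then release the returned array with drm_free_large(), the counterpart of the drm_malloc_ab() used for the allocation.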