Commit 5b21d3e5 authored by Felix Kuehling, committed by Oded Gabbay

drm/amdgpu: Move KFD-specific fields into struct amdgpu_vm

Remove struct amdkfd_vm and move the fields into struct amdgpu_vm.
This will allow turning a VM created by a DRM render node into a
KFD VM.

v2: Removed vm_context field
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 48a44387
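
The access-pattern change can be sketched roughly as follows. This is a simplified illustration, not code from the patch; the helpers kfd_info_old()/kfd_info_new() are hypothetical and only contrast the old container_of() access with the new direct field access.

/* Before: the KFD state lived in a wrapper struct and was reached via
 * container_of() on the embedded amdgpu_vm.
 */
static struct amdkfd_process_info *kfd_info_old(struct amdgpu_vm *vm)
{
	struct amdkfd_vm *kvm = container_of(vm, struct amdkfd_vm, base);

	return kvm->process_info;
}

/* After: process_info, vm_list_node and pd_phys_addr are members of
 * struct amdgpu_vm itself, so any amdgpu_vm (including one created by
 * a DRM render node) can carry the KFD-specific state directly.
 */
static struct amdkfd_process_info *kfd_info_new(struct amdgpu_vm *vm)
{
	return vm->process_info;
}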
@@ -93,27 +93,6 @@ struct amdkfd_process_info {
 	struct amdgpu_amdkfd_fence *eviction_fence;
 };
-/* struct amdkfd_vm -
- * For Memory Eviction KGD requires a mechanism to keep track of all KFD BOs
- * belonging to a KFD process. All the VMs belonging to the same process point
- * to the same amdkfd_process_info.
- */
-struct amdkfd_vm {
-	/* Keep base as the first parameter for pointer compatibility between
-	 * amdkfd_vm and amdgpu_vm.
-	 */
-	struct amdgpu_vm base;
-	/* List node in amdkfd_process_info.vm_list_head*/
-	struct list_head vm_list_node;
-	struct amdgpu_device *adev;
-	/* Points to the KFD process VM info*/
-	struct amdkfd_process_info *process_info;
-	uint64_t pd_phys_addr;
-};
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
...
@@ -333,9 +333,9 @@ static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
  * again. Page directories are only updated after updating page
  * tables.
  */
-static int vm_validate_pt_pd_bos(struct amdkfd_vm *vm)
+static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 {
-	struct amdgpu_bo *pd = vm->base.root.base.bo;
+	struct amdgpu_bo *pd = vm->root.base.bo;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
 	struct amdgpu_vm_parser param;
 	uint64_t addr, flags = AMDGPU_PTE_VALID;
@@ -344,7 +344,7 @@ static int vm_validate_pt_pd_bos(struct amdkfd_vm *vm)
 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
 	param.wait = false;
-	ret = amdgpu_vm_validate_pt_bos(adev, &vm->base, amdgpu_amdkfd_validate,
+	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
 	if (ret) {
 		pr_err("amdgpu: failed to validate PT BOs\n");
@@ -357,11 +357,11 @@ static int vm_validate_pt_pd_bos(struct amdkfd_vm *vm)
 		return ret;
 	}
-	addr = amdgpu_bo_gpu_offset(vm->base.root.base.bo);
+	addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
 	amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
 	vm->pd_phys_addr = addr;
-	if (vm->base.use_cpu_for_update) {
+	if (vm->use_cpu_for_update) {
 		ret = amdgpu_bo_kmap(pd, NULL);
 		if (ret) {
 			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
@@ -415,14 +415,12 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
  * 4a. Validate new page tables and directories
  */
 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
-		struct amdgpu_vm *avm, bool is_aql,
+		struct amdgpu_vm *vm, bool is_aql,
 		struct kfd_bo_va_list **p_bo_va_entry)
 {
 	int ret;
 	struct kfd_bo_va_list *bo_va_entry;
-	struct amdkfd_vm *kvm = container_of(avm,
-					     struct amdkfd_vm, base);
-	struct amdgpu_bo *pd = avm->root.base.bo;
+	struct amdgpu_bo *pd = vm->root.base.bo;
 	struct amdgpu_bo *bo = mem->bo;
 	uint64_t va = mem->va;
 	struct list_head *list_bo_va = &mem->bo_va_list;
@@ -441,10 +439,10 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
 		return -ENOMEM;
 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
-			va + bo_size, avm);
+			va + bo_size, vm);
 	/* Add BO to VM internal data structures*/
-	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, avm, bo);
+	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 	if (!bo_va_entry->bo_va) {
 		ret = -EINVAL;
 		pr_err("Failed to add BO object to VM. ret == %d\n",
@@ -467,28 +465,28 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
 	 * fence, so remove it temporarily.
 	 */
 	amdgpu_amdkfd_remove_eviction_fence(pd,
-					kvm->process_info->eviction_fence,
+					vm->process_info->eviction_fence,
 					NULL, NULL);
-	ret = amdgpu_vm_alloc_pts(adev, avm, va, amdgpu_bo_size(bo));
+	ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
 	if (ret) {
 		pr_err("Failed to allocate pts, err=%d\n", ret);
 		goto err_alloc_pts;
 	}
-	ret = vm_validate_pt_pd_bos(kvm);
+	ret = vm_validate_pt_pd_bos(vm);
 	if (ret) {
 		pr_err("validate_pt_pd_bos() failed\n");
 		goto err_alloc_pts;
 	}
 	/* Add the eviction fence back */
-	amdgpu_bo_fence(pd, &kvm->process_info->eviction_fence->base, true);
+	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
 	return 0;
 err_alloc_pts:
-	amdgpu_bo_fence(pd, &kvm->process_info->eviction_fence->base, true);
+	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
 	list_del(&bo_va_entry->bo_list);
 err_vmadd:
@@ -703,7 +701,6 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
 	struct amdgpu_vm *vm = bo_va->base.vm;
-	struct amdkfd_vm *kvm = container_of(vm, struct amdkfd_vm, base);
 	struct amdgpu_bo *pd = vm->root.base.bo;
 	/* Remove eviction fence from PD (and thereby from PTs too as
@@ -713,14 +710,14 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 	 * trigger the eviction fence.
 	 */
 	amdgpu_amdkfd_remove_eviction_fence(pd,
-					kvm->process_info->eviction_fence,
+					vm->process_info->eviction_fence,
 					NULL, NULL);
 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 	/* Add the eviction fence back */
-	amdgpu_bo_fence(pd, &kvm->process_info->eviction_fence->base, true);
+	amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
 	sync_vm_fence(adev, sync, bo_va->last_pt_update);
@@ -780,7 +777,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 static int process_validate_vms(struct amdkfd_process_info *process_info)
 {
-	struct amdkfd_vm *peer_vm;
+	struct amdgpu_vm *peer_vm;
 	int ret;
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
@@ -796,12 +793,12 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
 static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
 {
-	struct amdkfd_vm *peer_vm;
+	struct amdgpu_vm *peer_vm;
 	int ret;
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
-		ret = vm_update_pds(&peer_vm->base, sync);
+		ret = vm_update_pds(peer_vm, sync);
 		if (ret)
 			return ret;
 	}
@@ -814,7 +811,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
					  struct dma_fence **ef)
 {
 	int ret;
-	struct amdkfd_vm *new_vm;
+	struct amdgpu_vm *new_vm;
 	struct amdkfd_process_info *info;
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -823,12 +820,11 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
 		return -ENOMEM;
 	/* Initialize the VM context, allocate the page directory and zero it */
-	ret = amdgpu_vm_init(adev, &new_vm->base, AMDGPU_VM_CONTEXT_COMPUTE, 0);
+	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
 	if (ret) {
 		pr_err("Failed init vm ret %d\n", ret);
 		goto vm_init_fail;
 	}
-	new_vm->adev = adev;
 	if (!*process_info) {
 		info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -871,7 +867,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
 	mutex_destroy(&info->lock);
 	kfree(info);
 alloc_process_info_fail:
-	amdgpu_vm_fini(adev, &new_vm->base);
+	amdgpu_vm_fini(adev, new_vm);
 vm_init_fail:
 	kfree(new_vm);
 	return ret;
@@ -881,8 +877,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	struct amdkfd_vm *kfd_vm = (struct amdkfd_vm *) vm;
-	struct amdgpu_vm *avm = &kfd_vm->base;
+	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	struct amdgpu_bo *pd;
 	struct amdkfd_process_info *process_info;
@@ -896,11 +891,11 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
 	amdgpu_bo_fence(pd, NULL, false);
 	amdgpu_bo_unreserve(pd);
-	process_info = kfd_vm->process_info;
+	process_info = avm->process_info;
 	mutex_lock(&process_info->lock);
 	process_info->n_vms--;
-	list_del(&kfd_vm->vm_list_node);
+	list_del(&avm->vm_list_node);
 	mutex_unlock(&process_info->lock);
 	/* Release per-process resources */
@@ -919,7 +914,7 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
 uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
 {
-	struct amdkfd_vm *avm = (struct amdkfd_vm *)vm;
+	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
 }
@@ -930,7 +925,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		uint64_t *offset, uint32_t flags)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	struct amdkfd_vm *kfd_vm = (struct amdkfd_vm *)vm;
+	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	struct amdgpu_bo *bo;
 	int byte_align;
 	u32 alloc_domain;
@@ -1010,8 +1005,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	(*mem)->va = va;
 	(*mem)->domain = alloc_domain;
 	(*mem)->mapped_to_gpu_memory = 0;
-	(*mem)->process_info = kfd_vm->process_info;
-	add_kgd_mem_to_kfd_bo_list(*mem, kfd_vm->process_info);
+	(*mem)->process_info = avm->process_info;
+	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info);
 	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);
@@ -1092,7 +1087,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	struct amdkfd_vm *kfd_vm = (struct amdkfd_vm *)vm;
+	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	int ret;
 	struct amdgpu_bo *bo;
 	uint32_t domain;
@@ -1128,19 +1123,19 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	if (unlikely(ret))
		goto out;
-	if (check_if_add_bo_to_vm((struct amdgpu_vm *)vm, mem)) {
-		ret = add_bo_to_vm(adev, mem, (struct amdgpu_vm *)vm, false,
+	if (check_if_add_bo_to_vm(avm, mem)) {
+		ret = add_bo_to_vm(adev, mem, avm, false,
				   &bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
-			ret = add_bo_to_vm(adev, mem, (struct amdgpu_vm *)vm,
+			ret = add_bo_to_vm(adev, mem, avm,
					   true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
 	} else {
-		ret = vm_validate_pt_pd_bos((struct amdkfd_vm *)vm);
+		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
 	}
@@ -1184,7 +1179,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
-			&kfd_vm->process_info->eviction_fence->base,
+			&avm->process_info->eviction_fence->base,
			true);
 	ret = unreserve_bo_and_vms(&ctx, false, false);
@@ -1209,7 +1204,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdkfd_process_info *process_info =
-		((struct amdkfd_vm *)vm)->process_info;
+		((struct amdgpu_vm *)vm)->process_info;
 	unsigned long bo_size = mem->bo->tbo.mem.size;
 	struct kfd_bo_va_list *entry;
 	struct bo_vm_reservation_context ctx;
@@ -1226,7 +1221,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		goto unreserve_out;
 	}
-	ret = vm_validate_pt_pd_bos((struct amdkfd_vm *)vm);
+	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
 	if (unlikely(ret))
		goto unreserve_out;
@@ -1368,7 +1363,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 {
 	struct amdgpu_bo_list_entry *pd_bo_list;
 	struct amdkfd_process_info *process_info = info;
-	struct amdkfd_vm *peer_vm;
+	struct amdgpu_vm *peer_vm;
 	struct kgd_mem *mem;
 	struct bo_vm_reservation_context ctx;
 	struct amdgpu_amdkfd_fence *new_fence;
@@ -1390,8 +1385,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	mutex_lock(&process_info->lock);
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
-		amdgpu_vm_get_pd_bo(&peer_vm->base, &ctx.list,
-				    &pd_bo_list[i++]);
+		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
 	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
@@ -1422,7 +1416,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	/* FIXME: I think this isn't needed */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
-		struct amdgpu_bo *bo = peer_vm->base.root.base.bo;
+		struct amdgpu_bo *bo = peer_vm->root.base.bo;
		ttm_bo_wait(&bo->tbo, false, false);
 	}
@@ -1491,7 +1485,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	/* Attach eviction fence to PD / PT BOs */
 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
-		struct amdgpu_bo *bo = peer_vm->base.root.base.bo;
+		struct amdgpu_bo *bo = peer_vm->root.base.bo;
		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
 	}
...
@@ -207,6 +207,15 @@ struct amdgpu_vm {
 	/* Limit non-retry fault storms */
 	unsigned int fault_credit;
+	/* Points to the KFD process VM info */
+	struct amdkfd_process_info *process_info;
+	/* List node in amdkfd_process_info.vm_list_head */
+	struct list_head vm_list_node;
+	/* Valid while the PD is reserved or fenced */
+	uint64_t pd_phys_addr;
 };
 struct amdgpu_vm_manager {
...