Commit 90b69cdc authored by Christian König, committed by Alex Deucher

drm/amdgpu: stop adding VM updates fences to the resv obj

Don't add the VM update fences to the resv object, and remove the
handling so we stop implicitly syncing to them.

Ongoing updates prevent page tables from being evicted, and we manually
block for all updates to complete before releasing PDs and PTs.

This way we can do updates even without the resv obj locked.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e095fc17
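
In miniature, the new scheme works like this: each VM carries two always-valid fence pointers, one per scheduler entity, initialized to signaled stubs, replaced on every page-table submission, and waited on (then released) at teardown. Below is a minimal userspace sketch of that lifecycle; the fence struct and helpers are stand-ins invented for illustration, not the kernel's struct dma_fence API. The per-submission exchange itself is sketched after the amdgpu_vm_sdma.c hunk further down.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct dma_fence: refcounted, with a signaled flag. */
struct fence {
	int refs;
	int signaled;
};

static void fence_put(struct fence *f)
{
	if (--f->refs == 0)
		free(f);
}

/* Like dma_fence_get_stub(): an already-signaled placeholder, so the
 * pointers are never NULL and waiting on a fresh VM is a no-op. */
static struct fence *fence_stub(void)
{
	struct fence *f = calloc(1, sizeof(*f));

	f->refs = 1;
	f->signaled = 1;
	return f;
}

static void fence_wait(struct fence *f)
{
	/* a real wait would block until the fence signals */
	printf("wait: %s\n", f->signaled ? "already signaled" : "blocking");
}

struct vm {
	struct fence *last_direct;
	struct fence *last_delayed;
};

int main(void)
{
	struct vm vm;

	/* amdgpu_vm_init(): start with signaled stubs */
	vm.last_direct = fence_stub();
	vm.last_delayed = fence_stub();

	/* ... each submission would replace one of the pointers ... */

	/* amdgpu_vm_fini(): block for outstanding updates, then release */
	fence_wait(vm.last_direct);
	fence_put(vm.last_direct);
	fence_wait(vm.last_delayed);
	fence_put(vm.last_delayed);
	return 0;
}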
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -240,13 +240,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 			continue;
 
 		if (amdgpu_sync_same_dev(adev, f)) {
-			/* VM updates are only interesting
-			 * for other VM updates and moves.
+			/* VM updates only sync with moves but not with user
+			 * command submissions or KFD evictions fences
 			 */
-			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
-			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
-			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+			if (owner == AMDGPU_FENCE_OWNER_VM &&
+			    fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 				continue;
 
 			/* Ignore fence from the same owner and explicit one as
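
Spelling out the new predicate: when the party syncing is the VM (owner == AMDGPU_FENCE_OWNER_VM), every fence is skipped except those with an undefined owner, i.e. buffer moves; user command submissions, KFD eviction fences, and other VMs' updates are no longer waited on. A compilable toy rendering of just that check, with made-up enum values standing in for the amdgpu owner constants:

#include <stdio.h>

/* Made-up stand-ins for the amdgpu fence-owner values involved here. */
enum owner { OWNER_UNDEFINED, OWNER_VM, OWNER_KFD, OWNER_CS };

static const char *name(enum owner o)
{
	switch (o) {
	case OWNER_UNDEFINED: return "undefined (move)";
	case OWNER_VM:        return "vm update";
	case OWNER_KFD:       return "kfd eviction";
	default:              return "user cs";
	}
}

/* Mirrors the new check: a VM update skips every fence except those
 * with an undefined owner, i.e. buffer moves. */
static int skipped(enum owner owner, enum owner fence_owner)
{
	return owner == OWNER_VM && fence_owner != OWNER_UNDEFINED;
}

int main(void)
{
	enum owner o;

	for (o = OWNER_UNDEFINED; o <= OWNER_CS; o++)
		printf("vm update vs %-16s -> %s\n", name(o),
		       skipped(OWNER_VM, o) ? "skip" : "sync");
	return 0;
}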
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -562,8 +562,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	/* One for the VM updates, one for TTM and one for the CS job */
-	entry->tv.num_shared = 3;
+	/* One for TTM and one for the CS job */
+	entry->tv.num_shared = 2;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -2518,6 +2518,11 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
 		return false;
 
+	/* Don't evict VM page tables while they are updated */
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+	    !dma_fence_is_signaled(bo_base->vm->last_delayed))
+		return false;
+
 	return true;
 }
 
@@ -2683,8 +2688,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					 true, true, timeout);
+	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+					    true, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
 }
 
 /**
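
The rewritten wait is a chained-timeout pattern: each wait receives whatever budget the previous one left over, and a timeout (0) or error (negative return) short-circuits the chain. A self-contained sketch with a hypothetical wait_one() standing in for the dma_resv/dma_fence waits:

#include <stdio.h>

/* Stand-in for a wait that consumes part of a jiffies budget: returns
 * the remaining budget, or 0 on timeout (a real wait could also return
 * a negative error, which would propagate the same way). */
static long wait_one(long cost, long timeout)
{
	if (cost > timeout)
		return 0;
	return timeout - cost;
}

/* Same shape as the new amdgpu_vm_wait_idle(): resv fences first, then
 * the last direct and last delayed submissions, sharing one budget. */
static long wait_idle(long timeout)
{
	timeout = wait_one(10, timeout);	/* resv object fences */
	if (timeout <= 0)
		return timeout;

	timeout = wait_one(5, timeout);		/* last direct submission */
	if (timeout <= 0)
		return timeout;

	return wait_one(5, timeout);		/* last delayed submission */
}

int main(void)
{
	printf("budget 100 -> %ld left\n", wait_idle(100));	/* 80 */
	printf("budget 12  -> %ld left\n", wait_idle(12));	/* 0: timed out */
	return 0;
}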
@@ -2753,6 +2766,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
+	vm->last_direct = dma_fence_get_stub();
+	vm->last_delayed = dma_fence_get_stub();
 
 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
@@ -2803,6 +2818,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
+	dma_fence_put(vm->last_direct);
+	dma_fence_put(vm->last_delayed);
 	drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -3003,6 +3020,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->pasid = 0;
 	}
 
+	dma_fence_wait(vm->last_direct, false);
+	dma_fence_put(vm->last_direct);
+	dma_fence_wait(vm->last_delayed, false);
+	dma_fence_put(vm->last_delayed);
+
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
 			amdgpu_vm_prt_fini(adev, vm);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -266,6 +266,10 @@ struct amdgpu_vm {
 	struct drm_sched_entity	direct;
 	struct drm_sched_entity	delayed;
 
+	/* Last submission to the scheduler entities */
+	struct dma_fence	*last_direct;
+	struct dma_fence	*last_delayed;
+
 	unsigned int		pasid;
 	/* dedicated to vm */
 	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -95,11 +95,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				 struct dma_fence **fence)
 {
-	struct amdgpu_bo *root = p->vm->root.base.bo;
 	struct amdgpu_ib *ib = p->job->ibs;
 	struct drm_sched_entity *entity;
+	struct dma_fence *f, *tmp;
 	struct amdgpu_ring *ring;
-	struct dma_fence *f;
 	int r;
 
 	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
@@ -112,7 +111,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	if (r)
 		goto error;
 
-	amdgpu_bo_fence(root, f, true);
+	tmp = dma_fence_get(f);
+	if (p->direct)
+		swap(p->vm->last_direct, tmp);
+	else
+		swap(p->vm->last_delayed, tmp);
+	dma_fence_put(tmp);
+
 	if (fence && !p->direct)
 		swap(*fence, f);
 	dma_fence_put(f);
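
The lines replacing amdgpu_bo_fence() use the take-reference/swap/put idiom for exchanging a refcounted pointer: grab a reference for the tracker, exchange it with the previously stored fence, then drop the superseded one. A userspace rendering with a stand-in fence type; the swap() macro mimics the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* mimics the kernel's swap() macro (GCC __typeof__ extension) */
#define swap(a, b) do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)

/* Stand-in for struct dma_fence, refcount only. */
struct fence { int refs; };

static struct fence *fence_get(struct fence *f) { f->refs++; return f; }

static void fence_put(struct fence *f)
{
	if (--f->refs == 0)
		free(f);
}

int main(void)
{
	struct fence *last = fence_get(calloc(1, sizeof(*last)));	/* stub */
	struct fence *f = fence_get(calloc(1, sizeof(*f)));	/* new submission */
	struct fence *tmp;

	/* The idiom from amdgpu_vm_sdma_commit(): after these three lines
	 * the tracker holds one reference to f, and the superseded fence
	 * has had its reference dropped. */
	tmp = fence_get(f);
	swap(last, tmp);
	fence_put(tmp);

	fence_put(f);					/* submitter's reference */
	printf("tracked fence refs: %d\n", last->refs);	/* 1 */
	fence_put(last);
	return 0;
}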