Commit a2cf3247 authored by Christian König, committed by Alex Deucher

drm/amdgpu: split the VM entity into direct and delayed

For page fault handling we need to use a direct update which can't be
blocked by ongoing user CS.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3084cf46
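
The change keeps all existing page-table work on the old path, now called the
"delayed" entity, and adds a second, "direct" entity for updates that must not
wait behind user command submissions (page fault handling). A minimal sketch of
how a caller could choose between the two, assuming a hypothetical "immediate"
flag and helper name; the actual selection logic is not part of this commit:

/*
 * Hypothetical helper (not from this commit): pick the scheduler entity
 * for a page-table update.  Page-fault handling must not be blocked by
 * ongoing user CS, so it would use the direct entity; everything else
 * stays on the delayed entity, as before the split.
 */
static struct drm_sched_entity *
amdgpu_vm_pick_entity(struct amdgpu_vm *vm, bool immediate)
{
	return immediate ? &vm->direct : &vm->delayed;
}
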
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
 		updates = NULL;
 
-	if ((*id)->owner != vm->entity.fence_context ||
+	if ((*id)->owner != vm->direct.fence_context ||
 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
 	    updates || !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
-		if ((*id)->owner != vm->entity.fence_context)
+		if ((*id)->owner != vm->direct.fence_context)
 			continue;
 
 		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->entity.fence_context;
+	id->owner = vm->direct.fence_context;
 
 	if (job->vm_needs_flush) {
 		dma_fence_put(id->last_flush);
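
The VMID code above uses a fence context as the per-VM owner token when
deciding whether a VMID can be reused. Every drm_sched_entity is assigned its
own unique fence context at init time, so with two entities the token simply
moves from vm->entity.fence_context to vm->direct.fence_context. A small sketch
of the ownership check, with a hypothetical helper name:

/*
 * Sketch only (helper name is hypothetical): a VMID may be reused by
 * this VM if its recorded owner matches the unique fence context of
 * the VM's direct entity.
 */
static bool amdgpu_vmid_owned_by(struct amdgpu_vmid *id, struct amdgpu_vm *vm)
{
	return id->owner == vm->direct.fence_context;
}
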
@@ -2671,12 +2671,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
-	/* create scheduler entity for page table updates */
-	r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+	/* create scheduler entities for page table updates */
+	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
 				  adev->vm_manager.vm_pte_num_rqs, NULL);
 	if (r)
 		return r;
 
+	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	if (r)
+		goto error_free_direct;
+
 	vm->pte_support_ats = false;
 
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2705,7 +2710,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
 	r = amdgpu_bo_create(adev, &bp, &root);
 	if (r)
-		goto error_free_sched_entity;
+		goto error_free_delayed;
 
 	r = amdgpu_bo_reserve(root, true);
 	if (r)
@@ -2748,8 +2753,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	amdgpu_bo_unref(&vm->root.base.bo);
 	vm->root.base.bo = NULL;
 
-error_free_sched_entity:
-	drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+	drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+	drm_sched_entity_destroy(&vm->direct);
 
 	return r;
 }
@@ -2938,7 +2946,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(&vm->entity);
+	drm_sched_entity_destroy(&vm->direct);
+	drm_sched_entity_destroy(&vm->delayed);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
@@ -257,8 +257,9 @@ struct amdgpu_vm {
 	struct amdgpu_vm_pt	root;
 	struct dma_fence	*last_update;
 
-	/* Scheduler entity for page table updates */
-	struct drm_sched_entity	entity;
+	/* Scheduler entities for page table updates */
+	struct drm_sched_entity	direct;
+	struct drm_sched_entity	delayed;
 
 	unsigned int		pasid;
 	/* dedicated to vm */
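
After this patch the relevant part of struct amdgpu_vm carries both entities
side by side; roughly as sketched below (the per-field comments are added here
for illustration and are not in the header):

struct amdgpu_vm {
	...
	/* Scheduler entities for page table updates */
	struct drm_sched_entity	direct;		/* updates that must not wait on user CS */
	struct drm_sched_entity	delayed;	/* regular updates, as before the split */
	...
};
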
@@ -99,12 +99,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	struct dma_fence *f;
 	int r;
 
-	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+			    sched);
 
 	WARN_ON(ib->length_dw == 0);
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > p->num_dw_left);
-	r = amdgpu_job_submit(p->job, &p->vm->entity,
+	r = amdgpu_job_submit(p->job, &p->vm->delayed,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error;
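
Note that amdgpu_vm_sdma_commit() still submits every update through the
delayed entity; this commit only introduces the split. A hypothetical sketch of
what a submission path that can pick either entity might look like (the
"direct" parameter and function name are illustrative, not from this commit):

/*
 * Hypothetical sketch: submit a page-table update job to either the
 * direct or the delayed entity of the VM.  amdgpu_vm_sdma_commit()
 * above still hard-codes the delayed entity at this point.
 */
static int amdgpu_vm_sdma_submit(struct amdgpu_vm_update_params *p,
				 bool direct, struct dma_fence **fence)
{
	struct drm_sched_entity *entity = direct ? &p->vm->direct
						 : &p->vm->delayed;

	return amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, fence);
}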