Commit 126be9b2 authored by Christian König, committed by Alex Deucher

drm/amdgpu: sync to KFD fences before clearing PTEs

When clearing PTEs we also need to sync to the KFD fences of the BO,
because otherwise the PTEs can be cleared while the KFD queues are
still running.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4771d2ec
@@ -260,6 +260,36 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	return 0;
 }
 
+/**
+ * amdgpu_sync_kfd - sync to KFD fences
+ *
+ * @sync: sync object to add KFD fences to
+ * @resv: reservation object with KFD fences
+ *
+ * Extract all KFD fences and add them to the sync object.
+ */
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
+{
+	struct dma_resv_iter cursor;
+	struct dma_fence *f;
+	int r = 0;
+
+	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+	dma_resv_for_each_fence_unlocked(&cursor, f) {
+		void *fence_owner = amdgpu_sync_get_owner(f);
+
+		if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
+			continue;
+
+		r = amdgpu_sync_fence(sync, f);
+		if (r)
+			break;
+	}
+	dma_resv_iter_end(&cursor);
+
+	return r;
+}
+
 /* Free the entry back to the slab */
 static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
 {
...
@@ -51,6 +51,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
 		     void *owner);
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 					 struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
...
@@ -1169,6 +1169,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 				     AMDGPU_SYNC_EQ_OWNER, vm);
 		if (r)
 			goto error_free;
+
+		if (bo) {
+			r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
+			if (r)
+				goto error_free;
+		}
 	} else {
 		struct drm_gem_object *obj = &bo->tbo.base;
...
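
Editor's note: to make the intent of the change easier to follow, below is a minimal,
illustrative sketch (not part of the commit) of how fences are gathered before a
page-table update once amdgpu_sync_kfd() exists. It relies on the existing amdgpu_sync
helpers (amdgpu_sync_create(), amdgpu_sync_resv(), amdgpu_sync_wait(), amdgpu_sync_free());
the function name and the simplified error handling are hypothetical and only for
illustration, and real callers (such as amdgpu_vm_bo_update() above) wire the sync
object into the page-table update instead of waiting on the CPU.

/*
 * Illustrative sketch only, not part of the commit.  Collect the fences a
 * PTE clear has to wait for: the VM's own pending updates plus, new with
 * this patch, the KFD fences of the BO.  The name example_sync_before_clear()
 * is hypothetical; the amdgpu_sync_* calls are the existing helpers.
 */
static int example_sync_before_clear(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm,
				     struct amdgpu_bo *bo,
				     struct dma_resv *resv)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	/* Wait for previous updates submitted with the same owner (the VM). */
	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_EQ_OWNER, vm);
	if (r)
		goto out;

	/* New with this patch: also wait for the KFD fences of the BO so
	 * the user queues are done before the mapping's PTEs go away. */
	r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
	if (r)
		goto out;

	/* For the example, simply wait on the CPU (interruptible). */
	r = amdgpu_sync_wait(&sync, true);

out:
	amdgpu_sync_free(&sync);
	return r;
}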