Commit 5d319660 authored by Christian König, committed by Alex Deucher

drm/amdgpu: rework job synchronization v2

For unlocked page table updates we need to be able
to sync to fences of a specific VM.

v2: use SYNC_ALWAYS in the UVD code
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 114fbc31
......@@ -847,9 +847,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.base.bo;
ret = amdgpu_sync_resv(NULL,
sync, pd->tbo.base.resv,
AMDGPU_FENCE_OWNER_KFD, false);
ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
AMDGPU_SYNC_NE_OWNER,
AMDGPU_FENCE_OWNER_KFD);
if (ret)
return ret;
}
......
......@@ -658,10 +658,12 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
enum amdgpu_sync_mode sync_mode;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, &fpriv->vm,
amdgpu_bo_explicit_sync(bo));
sync_mode = amdgpu_bo_explicit_sync(bo) ?
AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
&fpriv->vm);
if (r)
return r;
}
......
......@@ -1419,7 +1419,8 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
int r;
amdgpu_sync_create(&sync);
amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
AMDGPU_SYNC_NE_OWNER, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
......
......@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
* @explicit_sync: true if we should only sync to the exclusive fence
* @mode: how owner affects which fences we sync to
* @owner: owner of the planned job submission
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct dma_resv *resv,
void *owner, bool explicit_sync)
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner)
{
struct dma_resv_list *flist;
struct dma_fence *f;
void *fence_owner;
unsigned i;
int r = 0;
......@@ -229,6 +228,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
for (i = 0; i < flist->shared_count; ++i) {
void *fence_owner;
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
/* We only want to trigger KFD eviction fences on
......@@ -239,20 +240,34 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
if (amdgpu_sync_same_dev(adev, f)) {
/* VM updates only sync with moves but not with user
* command submissions or KFD evictions fences
*/
if (owner == AMDGPU_FENCE_OWNER_VM &&
fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
if (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
owner == AMDGPU_FENCE_OWNER_VM)
continue;
/* Ignore fence from the same owner and explicit one as
* long as it isn't undefined.
*/
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
(fence_owner == owner || explicit_sync))
/* Ignore fences depending on the sync mode */
switch (mode) {
case AMDGPU_SYNC_ALWAYS:
break;
case AMDGPU_SYNC_NE_OWNER:
if (amdgpu_sync_same_dev(adev, f) &&
fence_owner == owner)
continue;
break;
case AMDGPU_SYNC_EQ_OWNER:
if (amdgpu_sync_same_dev(adev, f) &&
fence_owner != owner)
continue;
break;
case AMDGPU_SYNC_EXPLICIT:
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
break;
}
r = amdgpu_sync_fence(sync, f, false);
......
......@@ -31,6 +31,13 @@ struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
enum amdgpu_sync_mode {
AMDGPU_SYNC_ALWAYS,
AMDGPU_SYNC_NE_OWNER,
AMDGPU_SYNC_EQ_OWNER,
AMDGPU_SYNC_EXPLICIT
};
/*
* Container for fences used to sync command submissions.
*/
......@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
bool explicit);
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct dma_resv *resv,
void *owner,
bool explicit_sync);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
......
......@@ -2113,8 +2113,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
}
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED,
false);
AMDGPU_SYNC_ALWAYS,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
......@@ -2198,7 +2198,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
AMDGPU_SYNC_ALWAYS,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
......
......@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
AMDGPU_SYNC_ALWAYS,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r)
goto err_free;
......
......@@ -80,7 +80,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
return 0;
return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
owner, false);
AMDGPU_SYNC_NE_OWNER, owner);
}
/**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment