Commit 4b559c90 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: make sure the fence is emitted before ring to get it.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
parent b43a9a7e
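In short: amdgpu_ctx_get_fence() can now be asked for a sequence number whose fence has not been emitted to the ring yet, so it first waits on the scheduler entity until that sequence is marked as emitted, and the scheduler's run_job path publishes the emitted sequence and wakes the waiters. The sketch below illustrates that handshake with plain kernel primitives; it is not the in-tree implementation, and the names sched_entity_sketch, wait_emitted and signal_emitted are invented for illustration, while last_emitted_v_seq and wait_emit are the real entity fields the diff touches.

/*
 * Hypothetical sketch of the wait-for-emit handshake this patch relies on.
 * Not the in-tree code: only last_emitted_v_seq and wait_emit correspond to
 * fields of the scheduler entity touched by the diff below.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

struct sched_entity_sketch {
        atomic64_t              last_emitted_v_seq;     /* last sequence emitted to the ring */
        wait_queue_head_t       wait_emit;              /* woken after every emission */
};

/* Consumer side (amdgpu_ctx_get_fence-like): block until @seq is emitted. */
static int wait_emitted(struct sched_entity_sketch *e, u64 seq,
                        unsigned int timeout_ms)
{
        long r = wait_event_interruptible_timeout(e->wait_emit,
                        atomic64_read(&e->last_emitted_v_seq) >= seq,
                        msecs_to_jiffies(timeout_ms));

        if (r < 0)
                return r;               /* interrupted by a signal */
        return r ? 0 : -ETIME;          /* 0 means the timeout expired */
}

/* Producer side (scheduler run_job-like): publish @seq and wake waiters. */
static void signal_emitted(struct sched_entity_sketch *e, u64 seq)
{
        atomic64_set(&e->last_emitted_v_seq, seq);
        wake_up_all(&e->wait_emit);
}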
@@ -81,6 +81,7 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_enable_scheduler;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS  3000
 #define AMDGPU_MAX_USEC_TIMEOUT         100000  /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT    (HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */

@@ -1239,6 +1240,7 @@ struct amdgpu_cs_parser {
         /* user fence */
         struct amdgpu_user_fence uf;
 
+        struct amdgpu_ring *ring;
         struct mutex job_lock;
         struct work_struct job_work;
         int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
@@ -915,7 +915,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                                 goto out;
                 } else
                         parser->prepare_job = amdgpu_cs_parser_prepare_job;
-
+                parser->ring = ring;
                 parser->run_job = amdgpu_cs_parser_run_job;
                 parser->free_job = amdgpu_cs_parser_free_job;
                 amd_sched_push_job(ring->scheduler,
@@ -965,24 +965,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
         if (ctx == NULL)
                 return -EINVAL;
-        if (amdgpu_enable_scheduler) {
-                r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity,
-                                      wait->in.handle, true, timeout);
-                if (r)
-                        return r;
-                r = 1;
-        } else {
-                fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
-                if (IS_ERR(fence))
-                        r = PTR_ERR(fence);
-                else if (fence) {
-                        r = fence_wait_timeout(fence, true, timeout);
-                        fence_put(fence);
-                } else
-                        r = 1;
-        }
+        fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+        if (IS_ERR(fence))
+                r = PTR_ERR(fence);
+        else if (fence) {
+                r = fence_wait_timeout(fence, true, timeout);
+                fence_put(fence);
+        } else
+                r = 1;
         amdgpu_ctx_put(ctx);
         if (r < 0)
                 return r;
@@ -261,6 +261,16 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
         struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
         struct fence *fence;
         uint64_t queued_seq;
+        int r;
+
+        if (amdgpu_enable_scheduler) {
+                r = amd_sched_wait_emit(&cring->c_entity,
+                                        seq,
+                                        true,
+                                        AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS);
+                if (r)
+                        return NULL;
+        }
 
         spin_lock(&ctx->ring_lock);
         if (amdgpu_enable_scheduler)
@@ -56,12 +56,15 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                                  sched_job->filp);
         if (r)
                 goto err;
         if (sched_job->run_job) {
                 r = sched_job->run_job(sched_job);
                 if (r)
                         goto err;
         }
+        atomic64_set(&c_entity->last_emitted_v_seq,
+                     sched_job->uf.sequence);
+        wake_up_all(&c_entity->wait_emit);
         mutex_unlock(&sched_job->job_lock);
         return;
 err:
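Taken together, the last two hunks are the two halves of that handshake: amdgpu_ctx_get_fence() blocks in amd_sched_wait_emit() for up to AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS (3000 ms) and returns NULL if the sequence is never emitted, while amdgpu_sched_run_job() publishes sched_job->uf.sequence as last_emitted_v_seq and wakes wait_emit once the job's fence has actually been emitted to the ring.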