Commit 916044fa authored by Daniel Vetter

drm/v3d: Move drm_sched_job_init to v3d_job_init

Prep work for using the scheduler's dependency handling. We need to call
drm_sched_job_init earlier so that we can use the new drm_sched_job_await*
functions to track this job's dependencies.
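
As an illustration of the flow this enables, here is a minimal sketch of a
submit path that inits the scheduler job first, attaches a dependency, then
arms and pushes it. This is not the driver's actual code: example_submit and
the drm_sched_job_await_fence helper name are assumptions based on the await*
helpers referred to above, error handling is reduced to the essentials, and
the v3d types (struct v3d_job, struct v3d_file_priv, enum v3d_queue) are used
as they already exist in the driver.

/*
 * Illustrative sketch only, not part of this patch. Assumes the
 * drm_sched_job_await_* helpers from this series; depending on the kernel
 * version, drm_sched_entity_push_job() may also take the scheduler entity
 * as a second argument.
 */
static int example_submit(struct v3d_file_priv *v3d_priv, struct v3d_job *job,
			  enum v3d_queue queue, struct dma_fence *in_fence)
{
	int ret;

	/* Init early so dependencies can be attached to the scheduler job. */
	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 v3d_priv);
	if (ret)
		return ret;

	/* Record the input fence as a dependency (assumed helper name). */
	ret = drm_sched_job_await_fence(&job->base, in_fence);
	if (ret)
		goto fail;

	/* Arm and push only once submission can no longer fail. */
	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);
	return 0;

fail:
	drm_sched_job_cleanup(&job->base);
	return ret;
}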

v2: Slightly better commit message and rebase to include the
drm_sched_job_arm() call (Emma).

v3: Clean up jobs under construction correctly (Emma)

v4: Rebase over perfmon patch

Reviewed-by: Melissa Wen <mwen@igalia.com> (v3)
Acked-by: Emma Anholt <emma@anholt.net>
Cc: Melissa Wen <melissa.srw@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Emma Anholt <emma@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20210805104705.862416-10-daniel.vetter@ffwll.ch
parent c79a4487
@@ -379,6 +379,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
@@ -397,6 +397,12 @@ v3d_render_job_free(struct kref *ref)
v3d_job_free(ref);
}
void v3d_job_cleanup(struct v3d_job *job)
{
drm_sched_job_cleanup(&job->base);
v3d_job_put(job);
}
void v3d_job_put(struct v3d_job *job)
{
kref_put(&job->refcount, job->free);
@@ -438,9 +444,10 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
struct v3d_job *job, void (*free)(struct kref *ref),
u32 in_sync)
u32 in_sync, enum v3d_queue queue)
{
struct dma_fence *in_fence = NULL;
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
int ret;
job->v3d = v3d;
@@ -451,35 +458,33 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
return ret;
xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
goto fail;
ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
if (ret == -EINVAL)
goto fail;
goto fail_job;
ret = drm_gem_fence_array_add(&job->deps, in_fence);
if (ret)
goto fail;
goto fail_job;
kref_init(&job->refcount);
return 0;
fail_job:
drm_sched_job_cleanup(&job->base);
fail:
xa_destroy(&job->deps);
pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
static int
v3d_push_job(struct v3d_file_priv *v3d_priv,
struct v3d_job *job, enum v3d_queue queue)
static void
v3d_push_job(struct v3d_job *job)
{
int ret;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
return ret;
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
@@ -488,8 +493,6 @@ v3d_push_job(struct v3d_file_priv *v3d_priv,
kref_get(&job->refcount);
drm_sched_entity_push_job(&job->base);
return 0;
}
static void
@@ -564,7 +567,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
INIT_LIST_HEAD(&render->unref_list);
ret = v3d_job_init(v3d, file_priv, &render->base,
v3d_render_job_free, args->in_sync_rcl);
v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
if (ret) {
kfree(render);
return ret;
@@ -578,7 +581,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
v3d_job_free, args->in_sync_bcl, V3D_BIN);
if (ret) {
v3d_job_put(&render->base);
kfree(bin);
@@ -600,7 +603,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail;
}
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
if (ret) {
kfree(clean_job);
clean_job = NULL;
@@ -635,9 +638,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (bin) {
bin->base.perfmon = render->base.perfmon;
v3d_perfmon_get(bin->base.perfmon);
ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
if (ret)
goto fail_unreserve;
v3d_push_job(&bin->base);
ret = drm_gem_fence_array_add(&render->base.deps,
dma_fence_get(bin->base.done_fence));
@@ -645,9 +646,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail_unreserve;
}
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
v3d_push_job(&render->base);
if (clean_job) {
struct dma_fence *render_fence =
@@ -657,9 +656,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
v3d_perfmon_get(clean_job->perfmon);
ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
if (ret)
goto fail_unreserve;
v3d_push_job(clean_job);
}
mutex_unlock(&v3d->sched_lock);
@@ -684,10 +681,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
last_job->bo_count, &acquire_ctx);
fail:
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
v3d_job_cleanup(&bin->base);
v3d_job_cleanup(&render->base);
if (clean_job)
v3d_job_put(clean_job);
v3d_job_cleanup(clean_job);
return ret;
}
@@ -706,7 +703,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_tfu *args = data;
struct v3d_tfu_job *job;
struct ww_acquire_ctx acquire_ctx;
@@ -719,7 +715,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync);
v3d_job_free, args->in_sync, V3D_TFU);
if (ret) {
kfree(job);
return ret;
@@ -763,9 +759,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
goto fail;
mutex_lock(&v3d->sched_lock);
ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
if (ret)
goto fail_unreserve;
v3d_push_job(&job->base);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
@@ -777,12 +771,8 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
&acquire_ctx);
fail:
v3d_job_put(&job->base);
v3d_job_cleanup(&job->base);
return ret;
}
@@ -820,7 +810,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync);
v3d_job_free, args->in_sync, V3D_CSD);
if (ret) {
kfree(job);
return ret;
@@ -833,7 +823,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
if (ret) {
v3d_job_put(&job->base);
kfree(clean_job);
@@ -861,18 +851,14 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&v3d->sched_lock);
ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
if (ret)
goto fail_unreserve;
v3d_push_job(&job->base);
ret = drm_gem_fence_array_add(&clean_job->deps,
dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
if (ret)
goto fail_unreserve;
v3d_push_job(clean_job);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
@@ -891,8 +877,8 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
v3d_job_put(&job->base);
v3d_job_put(clean_job);
v3d_job_cleanup(&job->base);
v3d_job_cleanup(clean_job);
return ret;
}
@@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job)
}
static void
v3d_job_free(struct drm_sched_job *sched_job)
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
drm_sched_job_cleanup(sched_job);
v3d_job_put(job);
v3d_job_cleanup(job);
}
static void
@@ -376,35 +375,35 @@ static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_bin_job_run,
.timedout_job = v3d_bin_job_timedout,
.free_job = v3d_job_free,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_render_job_run,
.timedout_job = v3d_render_job_timedout,
.free_job = v3d_job_free,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_tfu_job_run,
.timedout_job = v3d_generic_job_timedout,
.free_job = v3d_job_free,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_csd_job_run,
.timedout_job = v3d_csd_job_timedout,
.free_job = v3d_job_free
.free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_cache_clean_job_run,
.timedout_job = v3d_generic_job_timedout,
.free_job = v3d_job_free
.free_job = v3d_sched_job_free
};
int