Commit 7d7a0fc4 authored by Daniel Vetter

drm/panfrost: Use xarray and helpers for dependency tracking

More consistency and prep work for the next patch.

Aside: I wonder whether we shouldn't just move this entire xarray
business into the scheduler so that not everyone has to reinvent the
same wheels. Cc'ing some scheduler people for this too.

v2: Correctly handle sched_lock since Lucas pointed out it's needed.

v3: Rebase; dma_resv_get_excl_rcu got renamed to dma_resv_get_excl_unlocked.

v4: Don't leak job references on failure (Steven).
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Luben Tuikov <luben.tuikov@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Lee Jones <lee.jones@linaro.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210622165511.3169559-6-daniel.vetter@ffwll.ch
parent 94dd80fe
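
Note on the pattern: the lifecycle of the deps xarray that this patch introduces is, in rough outline, the following. This is a minimal sketch with error handling trimmed, not the literal driver code; next_index stands in for job->last_dep:

        struct xarray deps;
        struct dma_fence *fence;
        unsigned long index, next_index = 0;

        /* at submit: collect explicit (syncobj) and implicit (dma_resv) fences */
        xa_init_flags(&deps, XA_FLAGS_ALLOC);
        drm_gem_fence_array_add(&deps, fence);  /* consumes the fence reference */

        /* in the scheduler's dependency callback: hand fences out in index order;
         * the scheduler waits on each returned fence and puts it when done */
        while (!xa_empty(&deps))
                fence = xa_erase(&deps, next_index++);

        /* at job cleanup: drop anything that was never handed out */
        xa_for_each(&deps, index, fence)
                dma_fence_put(fence);
        xa_destroy(&deps);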
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -138,12 +138,6 @@ panfrost_lookup_bos(struct drm_device *dev,
         if (!job->bo_count)
                 return 0;
 
-        job->implicit_fences = kvmalloc_array(job->bo_count,
-                                  sizeof(struct dma_fence *),
-                                  GFP_KERNEL | __GFP_ZERO);
-        if (!job->implicit_fences)
-                return -ENOMEM;
-
         ret = drm_gem_objects_lookup(file_priv,
                                      (void __user *)(uintptr_t)args->bo_handles,
                                      job->bo_count, &job->bos);
@@ -174,7 +168,7 @@ panfrost_lookup_bos(struct drm_device *dev,
 }
 
 /**
- * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
+ * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
  * referenced by the job.
  * @dev: DRM device
  * @file_priv: DRM file for this fd
@@ -194,22 +188,14 @@ panfrost_copy_in_sync(struct drm_device *dev,
 {
         u32 *handles;
         int ret = 0;
-        int i;
+        int i, in_fence_count;
 
-        job->in_fence_count = args->in_sync_count;
+        in_fence_count = args->in_sync_count;
 
-        if (!job->in_fence_count)
+        if (!in_fence_count)
                 return 0;
 
-        job->in_fences = kvmalloc_array(job->in_fence_count,
-                                        sizeof(struct dma_fence *),
-                                        GFP_KERNEL | __GFP_ZERO);
-        if (!job->in_fences) {
-                DRM_DEBUG("Failed to allocate job in fences\n");
-                return -ENOMEM;
-        }
-
-        handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
+        handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
         if (!handles) {
                 ret = -ENOMEM;
                 DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
@@ -218,16 +204,23 @@ panfrost_copy_in_sync(struct drm_device *dev,
 
         if (copy_from_user(handles,
                            (void __user *)(uintptr_t)args->in_syncs,
-                           job->in_fence_count * sizeof(u32))) {
+                           in_fence_count * sizeof(u32))) {
                 ret = -EFAULT;
                 DRM_DEBUG("Failed to copy in syncobj handles\n");
                 goto fail;
         }
 
-        for (i = 0; i < job->in_fence_count; i++) {
+        for (i = 0; i < in_fence_count; i++) {
+                struct dma_fence *fence;
+
                 ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
-                                             &job->in_fences[i]);
-                if (ret == -EINVAL)
+                                             &fence);
+                if (ret)
+                        goto fail;
+
+                ret = drm_gem_fence_array_add(&job->deps, fence);
+
+                if (ret)
                         goto fail;
         }
 
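
Why the new error path needs no dma_fence_put(): drm_gem_fence_array_add() takes ownership of the fence reference in all cases, including failure, and it deduplicates by fence context so that at most one fence per context is kept. A condensed sketch of what the DRM core helper does at this point in time (simplified, not verbatim):

        static int fence_array_add(struct xarray *array, struct dma_fence *fence)
        {
                struct dma_fence *entry;
                unsigned long index;
                u32 id = 0;
                int ret;

                if (!fence)
                        return 0;

                /* dedup: keep only the later fence of any given context */
                xa_for_each(array, index, entry) {
                        if (entry->context != fence->context)
                                continue;
                        if (dma_fence_is_later(fence, entry)) {
                                dma_fence_put(entry);
                                xa_store(array, index, fence, GFP_KERNEL);
                        } else {
                                dma_fence_put(fence);
                        }
                        return 0;
                }

                /* xa_alloc() hands out the lowest free index, keeping 0..n-1 dense */
                ret = xa_alloc(array, &id, fence, xa_limit_32b, GFP_KERNEL);
                if (ret)
                        dma_fence_put(fence);  /* reference consumed even on failure */
                return ret;
        }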
@@ -265,6 +258,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 
         kref_init(&job->refcount);
 
+        xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
+
         job->pfdev = pfdev;
         job->jc = args->jc;
         job->requirements = args->requirements;
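
XA_FLAGS_ALLOC is what permits xa_alloc() on the array, which drm_gem_fence_array_add() uses internally: xa_alloc() always hands out the lowest unused index, so live entries stay densely numbered from 0. The drain in panfrost_job_dependency() further down relies on exactly that property. A tiny illustration (fence_a and fence_b are hypothetical, not driver code):

        struct xarray deps;
        u32 id;

        xa_init_flags(&deps, XA_FLAGS_ALLOC);
        xa_alloc(&deps, &id, fence_a, xa_limit_32b, GFP_KERNEL);  /* id == 0 */
        xa_alloc(&deps, &id, fence_b, xa_limit_32b, GFP_KERNEL);  /* id == 1 */
        xa_erase(&deps, 0);  /* walking 0, 1, ... visits every entry once */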
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -196,14 +196,21 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
         job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
 }
 
-static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
-                                           int bo_count,
-                                           struct dma_fence **implicit_fences)
+static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
+                                          int bo_count,
+                                          struct xarray *deps)
 {
-        int i;
+        int i, ret;
 
-        for (i = 0; i < bo_count; i++)
-                implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
+        for (i = 0; i < bo_count; i++) {
+                struct dma_fence *fence = dma_resv_get_excl_unlocked(bos[i]->resv);
+
+                ret = drm_gem_fence_array_add(deps, fence);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -240,10 +247,14 @@ int panfrost_job_push(struct panfrost_job *job)
 
         job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
 
-        kref_get(&job->refcount); /* put by scheduler job completion */
+        ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
+                                             &job->deps);
+        if (ret) {
+                mutex_unlock(&pfdev->sched_lock);
+                goto unlock;
+        }
 
-        panfrost_acquire_object_fences(job->bos, job->bo_count,
-                                       job->implicit_fences);
+        kref_get(&job->refcount); /* put by scheduler job completion */
 
         drm_sched_entity_push_job(&job->base, entity);
 
@@ -262,18 +273,15 @@ static void panfrost_job_cleanup(struct kref *ref)
 {
         struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                 refcount);
+        struct dma_fence *fence;
+        unsigned long index;
         unsigned int i;
 
-        if (job->in_fences) {
-                for (i = 0; i < job->in_fence_count; i++)
-                        dma_fence_put(job->in_fences[i]);
-                kvfree(job->in_fences);
-        }
-        if (job->implicit_fences) {
-                for (i = 0; i < job->bo_count; i++)
-                        dma_fence_put(job->implicit_fences[i]);
-                kvfree(job->implicit_fences);
+        xa_for_each(&job->deps, index, fence) {
+                dma_fence_put(fence);
         }
+
+        xa_destroy(&job->deps);
 
         dma_fence_put(job->done_fence);
         dma_fence_put(job->render_done_fence);
@@ -316,26 +324,9 @@ static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job
                                                  struct drm_sched_entity *s_entity)
 {
         struct panfrost_job *job = to_panfrost_job(sched_job);
-        struct dma_fence *fence;
-        unsigned int i;
-
-        /* Explicit fences */
-        for (i = 0; i < job->in_fence_count; i++) {
-                if (job->in_fences[i]) {
-                        fence = job->in_fences[i];
-                        job->in_fences[i] = NULL;
-                        return fence;
-                }
-        }
 
-        /* Implicit fences, max. one per BO */
-        for (i = 0; i < job->bo_count; i++) {
-                if (job->implicit_fences[i]) {
-                        fence = job->implicit_fences[i];
-                        job->implicit_fences[i] = NULL;
-                        return fence;
-                }
-        }
+        if (!xa_empty(&job->deps))
+                return xa_erase(&job->deps, job->last_dep++);
 
         return NULL;
 }
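
This drain works because the scheduler core calls the .dependency hook repeatedly, waiting for each returned fence to signal before asking again, until the hook returns NULL; each returned reference is transferred to the scheduler. Since xa_alloc() numbered the entries densely from 0, erasing at job->last_dep++ hands out every fence exactly once. A simplified model of the drm_sched side (illustrative only; the real code uses async fence callbacks rather than blocking waits):

        struct dma_fence *fence;

        while ((fence = sched->ops->dependency(sched_job, entity))) {
                if (!dma_fence_is_signaled(fence))
                        dma_fence_wait(fence, false);  /* really: dma_fence_add_callback() */
                dma_fence_put(fence);  /* the hook transferred this reference to us */
        }
        /* NULL: all dependencies resolved, the job can be run */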
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -19,9 +19,9 @@ struct panfrost_job {
         struct panfrost_device *pfdev;
         struct panfrost_file_priv *file_priv;
 
-        /* Optional fences userspace can pass in for the job to depend on. */
-        struct dma_fence **in_fences;
-        u32 in_fence_count;
+        /* Contains both explicit and implicit fences */
+        struct xarray deps;
+        unsigned long last_dep;
 
         /* Fence to be signaled by IRQ handler when the job is complete. */
         struct dma_fence *done_fence;
@@ -30,8 +30,6 @@ struct panfrost_job {
         __u32 requirements;
         __u32 flush_id;
 
-        /* Exclusive fences we have taken from the BOs to wait for */
-        struct dma_fence **implicit_fences;
         struct panfrost_gem_mapping **mappings;
         struct drm_gem_object **bos;
         u32 bo_count;