Commit 68002469 authored by Rob Clark

drm/msm: One sched entity per process per priority

Some userspace apps assume that rendering against multiple contexts
within the same process (from the same thread, with appropriate
MakeCurrent() calls) stays ordered without any external synchronization
(ie. glFenceSync()/glWaitSync()).  Since a submitqueue maps to a gl/vk
context, having multiple sched entities of the same priority only
preserves that ordering when implicit sync is enabled.

To fix this, limit things to a single sched entity per priority level
per process.

An alternative would be sharing submitqueues between contexts in
userspace, but tracking of per-context faults (ie. GL_EXT_robustness)
is already done at the submitqueue level, so this is not an option.
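
As a rough illustration of the pattern (a standalone userspace sketch with
hypothetical names; "struct entity", NUM_RINGS and NUM_PRIORITIES merely stand
in for the kernel-side drm_sched_entity table and its dimensions), two queues
that request the same (ring, priority) slot get back the same lazily created
object, so their jobs funnel through a single FIFO:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_RINGS       4   /* assumed ring count for this sketch */
#define NUM_PRIORITIES  3   /* assumed priority levels for this sketch */

struct entity {             /* stand-in for struct drm_sched_entity */
	int ring;
	int prio;
};

/* One table per "process": at most one entity per (ring, priority) slot. */
static struct entity *entities[NUM_RINGS * NUM_PRIORITIES];
static pthread_mutex_t entity_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the shared entity for this (ring, prio), creating it on first use. */
static struct entity *get_entity(int ring, int prio)
{
	unsigned idx = ring * NUM_PRIORITIES + prio;
	struct entity *e;

	pthread_mutex_lock(&entity_lock);
	if (!entities[idx]) {
		e = calloc(1, sizeof(*e));
		if (!e) {
			pthread_mutex_unlock(&entity_lock);
			return NULL;
		}
		e->ring = ring;
		e->prio = prio;
		entities[idx] = e;
	}
	e = entities[idx];
	pthread_mutex_unlock(&entity_lock);

	return e;
}

int main(void)
{
	/* Two "contexts" at the same priority share one entity, so their
	 * jobs are serialized in submission order with no extra fencing. */
	struct entity *ctx_a = get_entity(0, 1);
	struct entity *ctx_b = get_entity(0, 1);

	printf("shared entity: %s\n", ctx_a == ctx_b ? "yes" : "no");
	return 0;
}

The kernel-side table in the diff below is indexed the same way,
(ring_nr * NR_SCHED_PRIORITIES) + sched_prio.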
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 4cd82aa3
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	if (!submit)
 		return ERR_PTR(-ENOMEM);
 
-	ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
 	if (ret) {
 		kfree(submit);
 		return ERR_PTR(ret);
@@ -907,7 +907,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	/* The scheduler owns a ref now: */
 	msm_gem_submit_get(submit);
 
-	drm_sched_entity_push_job(&submit->base, &queue->entity);
+	drm_sched_entity_push_job(&submit->base, queue->entity);
 
 	args->fence = submit->fence_id;
@@ -275,6 +275,19 @@ struct msm_file_private {
 	struct msm_gem_address_space *aspace;
 	struct kref ref;
 	int seqno;
+
+	/**
+	 * entities:
+	 *
+	 * Table of per-priority-level sched entities used by submitqueues
+	 * associated with this &drm_file.  Because some userspace apps
+	 * make assumptions about rendering from multiple gl contexts
+	 * (of the same priority) within the process happening in FIFO
+	 * order without requiring any fencing beyond MakeCurrent(), we
+	 * create at most one &drm_sched_entity per-process per-priority-
+	 * level.
+	 */
+	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
 };
 
 /**
@@ -355,7 +368,7 @@ struct msm_gpu_submitqueue {
 	struct idr fence_idr;
 	struct mutex lock;
 	struct kref ref;
-	struct drm_sched_entity entity;
+	struct drm_sched_entity *entity;
 };
 
 struct msm_gpu_state_bo {
@@ -456,14 +469,7 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
 
 void msm_submitqueue_destroy(struct kref *kref);
 
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
-	struct msm_file_private *ctx = container_of(kref,
-		struct msm_file_private, ref);
-
-	msm_gem_address_space_put(ctx->aspace);
-	kfree(ctx);
-}
+void __msm_file_private_destroy(struct kref *kref);
 
 static inline void msm_file_private_put(struct msm_file_private *ctx)
 {
@@ -7,6 +7,24 @@
 
 #include "msm_gpu.h"
 
+void __msm_file_private_destroy(struct kref *kref)
+{
+	struct msm_file_private *ctx = container_of(kref,
+		struct msm_file_private, ref);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+		if (!ctx->entities[i])
+			continue;
+
+		drm_sched_entity_destroy(ctx->entities[i]);
+		kfree(ctx->entities[i]);
+	}
+
+	msm_gem_address_space_put(ctx->aspace);
+	kfree(ctx);
+}
+
 void msm_submitqueue_destroy(struct kref *kref)
 {
 	struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
 
 	idr_destroy(&queue->fence_idr);
 
-	drm_sched_entity_destroy(&queue->entity);
-
 	msm_file_private_put(queue->ctx);
 
 	kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
 	}
 }
 
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+		 unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+	static DEFINE_MUTEX(entity_lock);
+	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+	/* We should have already validated that the requested priority is
+	 * valid by the time we get here.
+	 */
+	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&entity_lock);
+
+	if (!ctx->entities[idx]) {
+		struct drm_sched_entity *entity;
+		struct drm_gpu_scheduler *sched = &ring->sched;
+		int ret;
+
+		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+
+		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+		if (ret) {
+			mutex_unlock(&entity_lock);
+			kfree(entity);
+			return ERR_PTR(ret);
+		}
+
+		ctx->entities[idx] = entity;
+	}
+
+	mutex_unlock(&entity_lock);
+
+	return ctx->entities[idx];
+}
+
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 		u32 prio, u32 flags, u32 *id)
 {
 	struct msm_drm_private *priv = drm->dev_private;
 	struct msm_gpu_submitqueue *queue;
-	struct msm_ringbuffer *ring;
-	struct drm_gpu_scheduler *sched;
 	enum drm_sched_priority sched_prio;
 	unsigned ring_nr;
 	int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 	queue->flags = flags;
 	queue->ring_nr = ring_nr;
 
-	ring = priv->gpu->rb[ring_nr];
-	sched = &ring->sched;
-
-	ret = drm_sched_entity_init(&queue->entity,
-			sched_prio, &sched, 1, NULL);
-	if (ret) {
+	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+					 ring_nr, sched_prio);
+	if (IS_ERR(queue->entity)) {
+		ret = PTR_ERR(queue->entity);
 		kfree(queue);
 		return ret;
 	}