Commit 266a240b authored by Chris Wilson

drm/i915: Use engine->context_pin() to report the intel_ring

Since unifying ringbuffer/execlist submission to use
engine->context_pin(), we ensure that the intel_ring is available before
we start constructing the request. We can therefore move the assignment
of request->ring to the central i915_gem_request_alloc() rather than
requiring it in every engine->request_alloc() callback. Another small step
towards simplification (of the core, at the cost of handling error
pointers in the less important callers of engine->context_pin()).

v2: Rearrange a few branches to reduce the impact of PTR_ERR() on gcc's code
generation.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170504093308.4137-1-chris@chris-wilson.co.uk
parent c944a308
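
For context on the idiom the diff below relies on, here is a minimal, self-contained userspace sketch of the error-pointer contract that engine->context_pin() now follows: the callback returns either a valid ring pointer or an errno encoded with ERR_PTR(), and callers branch on IS_ERR()/PTR_ERR() instead of checking a separate int return. The ERR_PTR/IS_ERR/PTR_ERR helpers here are simplified stand-ins for the kernel's include/linux/err.h, and context_pin(fail) is a placeholder, not the driver's real signature.

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct intel_ring { int size; };

/* Placeholder for engine->context_pin(): on success it hands back the ring
 * the request will use; on failure it encodes the errno in the pointer. */
static struct intel_ring *context_pin(int fail)
{
	static struct intel_ring ring = { .size = 4096 };

	if (fail)
		return ERR_PTR(-ENOMEM);

	return &ring;
}

int main(void)
{
	struct intel_ring *ring;

	ring = context_pin(0);
	if (IS_ERR(ring))
		return (int)-PTR_ERR(ring);	/* propagate the errno */

	printf("pinned: ring of %d bytes\n", ring->size);
	return 0;
}
```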
@@ -180,6 +180,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_ring *ring;
 	int ret;
 
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -198,8 +199,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
 	 * the guest context, gvt can unpin the shadow_ctx safely.
 	 */
-	ret = engine->context_pin(engine, shadow_ctx);
-	if (ret) {
+	ring = engine->context_pin(engine, shadow_ctx);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
 		workload->status = ret;
 		mutex_unlock(&dev_priv->drm.struct_mutex);
...
@@ -551,6 +551,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *req;
+	struct intel_ring *ring;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -565,9 +566,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ret = engine->context_pin(engine, ctx);
-	if (ret)
-		return ERR_PTR(ret);
+	ring = engine->context_pin(engine, ctx);
+	if (IS_ERR(ring))
+		return ERR_CAST(ring);
+	GEM_BUG_ON(!ring);
 
 	ret = reserve_seqno(engine);
 	if (ret)
@@ -633,6 +635,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->i915 = dev_priv;
 	req->engine = engine;
 	req->ctx = ctx;
+	req->ring = ring;
 
 	/* No zalloc, must clear what we need by hand */
 	req->global_seqno = 0;
...
@@ -744,6 +744,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	struct intel_ring *ring;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -755,9 +756,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	 *
 	 * NB: implied RCS engine...
 	 */
-	ret = engine->context_pin(engine, stream->ctx);
-	if (ret)
-		goto unlock;
+	ring = engine->context_pin(engine, stream->ctx);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+	if (IS_ERR(ring))
+		return PTR_ERR(ring);
 
 	/* Explicitly track the ID (instead of calling i915_ggtt_offset()
 	 * on the fly) considering the difference with gen8+ and
@@ -766,10 +768,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	dev_priv->perf.oa.specific_ctx_id =
 		i915_ggtt_offset(stream->ctx->engine[engine->id].state);
 
-unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	return ret;
+	return 0;
 }
 
 /**
...
@@ -469,6 +469,7 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
  */
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
+	struct intel_ring *ring;
 	int ret;
 
 	engine->set_default_submission(engine);
@@ -480,9 +481,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	 * be available. To avoid this we always pin the default
 	 * context.
 	 */
-	ret = engine->context_pin(engine, engine->i915->kernel_context);
-	if (ret)
-		return ret;
+	ring = engine->context_pin(engine, engine->i915->kernel_context);
+	if (IS_ERR(ring))
+		return PTR_ERR(ring);
 
 	ret = intel_engine_init_breadcrumbs(engine);
 	if (ret)
...
@@ -740,7 +740,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	/* XXX Do we need to preempt to make room for us and our deps? */
 }
 
-static int execlists_context_pin(struct intel_engine_cs *engine,
-				 struct i915_gem_context *ctx)
+static struct intel_ring *
+execlists_context_pin(struct intel_engine_cs *engine,
+		      struct i915_gem_context *ctx)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
@@ -750,8 +751,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 
-	if (ce->pin_count++)
-		return 0;
+	if (likely(ce->pin_count++))
+		goto out;
 	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
 	if (!ce->state) {
@@ -788,7 +789,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 	ce->state->obj->mm.dirty = true;
 
 	i915_gem_context_get(ctx);
-	return 0;
+out:
+	return ce->ring;
 
 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
@@ -796,7 +798,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 	__i915_vma_unpin(ce->state);
 err:
 	ce->pin_count = 0;
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static void execlists_context_unpin(struct intel_engine_cs *engine,
@@ -833,9 +835,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-	GEM_BUG_ON(!ce->ring);
-	request->ring = ce->ring;
-
 	if (i915.enable_guc_submission) {
 		/*
 		 * Check that the GuC has space for the request before
...
@@ -1475,7 +1475,8 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	return vma;
 }
 
-static int intel_ring_context_pin(struct intel_engine_cs *engine,
-				  struct i915_gem_context *ctx)
+static struct intel_ring *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
@@ -1483,8 +1484,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 
-	if (ce->pin_count++)
-		return 0;
+	if (likely(ce->pin_count++))
+		goto out;
 	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
 	if (!ce->state && engine->context_size) {
@@ -1493,7 +1494,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 		vma = alloc_context_vma(engine);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
-			goto error;
+			goto err;
 		}
 
 		ce->state = vma;
@@ -1502,7 +1503,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 	if (ce->state) {
 		ret = context_pin(ctx);
 		if (ret)
-			goto error;
+			goto err;
 
 		ce->state->obj->mm.dirty = true;
 	}
@@ -1518,11 +1519,14 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 	ce->initialised = true;
 
 	i915_gem_context_get(ctx);
-	return 0;
 
-error:
+out:
+	/* One ringbuffer to rule them all */
+	return engine->buffer;
+
+err:
 	ce->pin_count = 0;
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
@@ -1634,9 +1638,6 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	GEM_BUG_ON(!request->engine->buffer);
-	request->ring = request->engine->buffer;
-
 	cs = intel_ring_begin(request, 0);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
...
@@ -271,7 +271,7 @@ struct intel_engine_cs {
 
 	void		(*set_default_submission)(struct intel_engine_cs *engine);
 
-	int		(*context_pin)(struct intel_engine_cs *engine,
-				       struct i915_gem_context *ctx);
+	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
+					  struct i915_gem_context *ctx);
 	void		(*context_unpin)(struct intel_engine_cs *engine,
 					 struct i915_gem_context *ctx);
...
@@ -52,11 +52,12 @@ static void hw_delay_complete(unsigned long data)
 	spin_unlock(&engine->hw_lock);
 }
 
-static int mock_context_pin(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx)
+static struct intel_ring *
+mock_context_pin(struct intel_engine_cs *engine,
+		 struct i915_gem_context *ctx)
 {
 	i915_gem_context_get(ctx);
-	return 0;
+	return engine->buffer;
 }
 
 static void mock_context_unpin(struct intel_engine_cs *engine,
@@ -72,7 +73,6 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
 	INIT_LIST_HEAD(&mock->link);
 	mock->delay = 0;
 
-	request->ring = request->engine->buffer;
 	return 0;
 }
...