Commit 4c60b1aa authored by Chris Wilson's avatar Chris Wilson

drm/i915/gt: Make deferred context allocation explicit

Refactor the backends to handle the deferred context allocation in a
consistent manner, and allow calling it as an explicit first step in
pinning a context for the first time. This should make it easier for
backends to keep track of partially constructed contexts from
initialisation.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809182518.20486-2-chris@chris-wilson.co.uk
parent 72e27775
......@@ -53,6 +53,14 @@ int __intel_context_do_pin(struct intel_context *ce)
if (likely(!atomic_read(&ce->pin_count))) {
intel_wakeref_t wakeref;
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = ce->ops->alloc(ce);
if (unlikely(err))
goto err;
__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
}
err = 0;
with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
err = ce->ops->pin(ce);
......
......@@ -23,6 +23,8 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
int (*alloc)(struct intel_context *ce);
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
......@@ -52,6 +54,9 @@ struct intel_context {
struct i915_vma *state;
struct intel_ring *ring;
unsigned long flags;
#define CONTEXT_ALLOC_BIT 0
u32 *lrc_reg_state;
u64 lrc_desc;
......
......@@ -219,8 +219,9 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
return container_of(engine, struct virtual_engine, base);
}
static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
struct intel_context *ce,
struct intel_engine_cs *engine,
......@@ -1614,9 +1615,6 @@ __execlists_context_pin(struct intel_context *ce,
void *vaddr;
int ret;
ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
ret = intel_context_active_acquire(ce);
......@@ -1655,6 +1653,11 @@ static int execlists_context_pin(struct intel_context *ce)
return __execlists_context_pin(ce, ce->engine);
}
/*
 * Deferred backing-store allocation for an execlists context: forwards to
 * __execlists_context_alloc() using the context's own engine. Installed as
 * the ->alloc hook in execlists_context_ops.
 */
static int execlists_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	return __execlists_context_alloc(ce, engine);
}
static void execlists_context_reset(struct intel_context *ce)
{
/*
......@@ -1678,6 +1681,8 @@ static void execlists_context_reset(struct intel_context *ce)
}
static const struct intel_context_ops execlists_context_ops = {
.alloc = execlists_context_alloc,
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
......@@ -3075,8 +3080,8 @@ get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
return intel_timeline_create(gt, NULL);
}
static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_engine_cs *engine)
static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
......@@ -3085,9 +3090,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_timeline *timeline;
int ret;
if (ce->state)
return 0;
GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
/*
......@@ -3533,6 +3536,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
err = __execlists_context_alloc(&ve->context, siblings[0]);
if (err)
goto err_put;
__set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
return &ve->context;
err_put:
......
......@@ -1480,16 +1480,16 @@ alloc_context_vma(struct intel_engine_cs *engine)
return ERR_PTR(err);
}
static int ring_context_pin(struct intel_context *ce)
static int ring_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
int err;
/* One ringbuffer to rule them all */
GEM_BUG_ON(!engine->buffer);
ce->ring = engine->buffer;
if (!ce->state && engine->context_size) {
GEM_BUG_ON(ce->state);
if (engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
......@@ -1499,6 +1499,13 @@ static int ring_context_pin(struct intel_context *ce)
ce->state = vma;
}
return 0;
}
static int ring_context_pin(struct intel_context *ce)
{
int err;
err = intel_context_active_acquire(ce);
if (err)
return err;
......@@ -1520,6 +1527,8 @@ static void ring_context_reset(struct intel_context *ce)
}
static const struct intel_context_ops ring_context_ops = {
.alloc = ring_context_alloc,
.pin = ring_context_pin,
.unpin = ring_context_unpin,
......
......@@ -147,16 +147,19 @@ static void mock_context_destroy(struct kref *ref)
intel_context_free(ce);
}
/*
 * ->alloc hook for the mock (selftest) backend: create the fake ring for
 * this context. Returns 0 on success, -ENOMEM if mock_ring() fails.
 */
static int mock_context_alloc(struct intel_context *ce)
{
	ce->ring = mock_ring(ce->engine);

	return ce->ring ? 0 : -ENOMEM;
}
static int mock_context_pin(struct intel_context *ce)
{
int ret;
if (!ce->ring) {
ce->ring = mock_ring(ce->engine);
if (!ce->ring)
return -ENOMEM;
}
ret = intel_context_active_acquire(ce);
if (ret)
return ret;
......@@ -166,6 +169,8 @@ static int mock_context_pin(struct intel_context *ce)
}
static const struct intel_context_ops mock_context_ops = {
.alloc = mock_context_alloc,
.pin = mock_context_pin,
.unpin = mock_context_unpin,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment