Commit 38775829 authored by Chris Wilson

drm/i915: Allocate kernel_contexts directly

Ignore the central i915->kernel_context for allocating an engine, as
that GEM context is being phased out. For internal clients, we just need
the per-engine logical state, so allocate it at the point of use.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190808110612.23539-1-chris@chris-wilson.co.uk
parent ca883c30
...@@ -95,8 +95,6 @@ gt-y += \ ...@@ -95,8 +95,6 @@ gt-y += \
gt/gen7_renderstate.o \ gt/gen7_renderstate.o \
gt/gen8_renderstate.o \ gt/gen8_renderstate.o \
gt/gen9_renderstate.o gt/gen9_renderstate.o
gt-$(CONFIG_DRM_I915_SELFTEST) += \
gt/mock_engine.o
i915-y += $(gt-y) i915-y += $(gt-y)
# GEM (Graphics Execution Management) code # GEM (Graphics Execution Management) code
......
...@@ -707,26 +707,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) ...@@ -707,26 +707,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
return dw; return dw;
} }
/*
 * pin_context - look up and pin the per-engine logical context of a GEM context
 * @ctx: GEM context to take the engine state from
 * @engine: engine whose intel_context within @ctx is wanted
 * @out: on success, receives the pinned intel_context
 *
 * Returns 0 on success or a negative errno from the lookup or pin.
 * NOTE(review): the put before the err check is intentional — the lookup
 * reference is dropped immediately because a successful pin holds its own
 * reference that keeps *out alive; presumably callers later unpin to release.
 */
static int pin_context(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context **out)
{
struct intel_context *ce;
int err;
/* Acquire a reference to the engine's logical state within @ctx. */
ce = i915_gem_context_get_engine(ctx, engine->id);
if (IS_ERR(ce))
return PTR_ERR(ce);
/* Pin first, then drop the lookup reference (pin pins its own ref). */
err = intel_context_pin(ce);
intel_context_put(ce);
if (err)
return err;
*out = ce;
return 0;
}
void void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{ {
...@@ -748,6 +728,25 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) ...@@ -748,6 +728,25 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
#endif #endif
} }
/*
 * create_kernel_context - allocate and pin a per-engine kernel context
 * @engine: engine needing its own kernel logical state
 *
 * Allocates an intel_context directly (bypassing the per-engine lookup in
 * the central i915->kernel_context GEM context, which is being phased out)
 * and pins it so the state is resident and ready for use.
 *
 * Returns the pinned context, or an ERR_PTR on allocation/pin failure.
 * NOTE(review): on pin failure the freshly created context is released here,
 * so callers only own the context on success.
 */
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
struct intel_context *ce;
int err;
/* Still parented to the kernel GEM context for now — see commit message. */
ce = intel_context_create(engine->i915->kernel_context, engine);
if (IS_ERR(ce))
return ce;
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
return ERR_PTR(err);
}
return ce;
}
/** /**
* intel_engine_init_common - initialize engine state which might require hw access * intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize. * @engine: Engine to initialize.
...@@ -761,22 +760,24 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) ...@@ -761,22 +760,24 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
*/ */
int intel_engine_init_common(struct intel_engine_cs *engine) int intel_engine_init_common(struct intel_engine_cs *engine)
{ {
struct drm_i915_private *i915 = engine->i915; struct intel_context *ce;
int ret; int ret;
engine->set_default_submission(engine); engine->set_default_submission(engine);
/* We may need to do things with the shrinker which /*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default * require us to immediately switch back to the default
* context. This can cause a problem as pinning the * context. This can cause a problem as pinning the
* default context also requires GTT space which may not * default context also requires GTT space which may not
* be available. To avoid this we always pin the default * be available. To avoid this we always pin the default
* context. * context.
*/ */
ret = pin_context(i915->kernel_context, engine, ce = create_kernel_context(engine);
&engine->kernel_context); if (IS_ERR(ce))
if (ret) return PTR_ERR(ce);
return ret;
engine->kernel_context = ce;
ret = measure_breadcrumb_dw(engine); ret = measure_breadcrumb_dw(engine);
if (ret < 0) if (ret < 0)
...@@ -787,7 +788,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine) ...@@ -787,7 +788,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
return 0; return 0;
err_unpin: err_unpin:
intel_context_unpin(engine->kernel_context); intel_context_unpin(ce);
intel_context_put(ce);
return ret; return ret;
} }
...@@ -812,6 +814,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) ...@@ -812,6 +814,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
i915_gem_object_put(engine->default_state); i915_gem_object_put(engine->default_state);
intel_context_unpin(engine->kernel_context); intel_context_unpin(engine->kernel_context);
intel_context_put(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list); intel_wa_list_free(&engine->ctx_wa_list);
...@@ -1573,5 +1576,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) ...@@ -1573,5 +1576,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
} }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine_cs.c" #include "selftest_engine_cs.c"
#endif #endif
...@@ -286,8 +286,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, ...@@ -286,8 +286,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
int mock_engine_init(struct intel_engine_cs *engine) int mock_engine_init(struct intel_engine_cs *engine)
{ {
struct drm_i915_private *i915 = engine->i915; struct intel_context *ce;
int err;
intel_engine_init_active(engine, ENGINE_MOCK); intel_engine_init_active(engine, ENGINE_MOCK);
intel_engine_init_breadcrumbs(engine); intel_engine_init_breadcrumbs(engine);
...@@ -295,16 +294,11 @@ int mock_engine_init(struct intel_engine_cs *engine) ...@@ -295,16 +294,11 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init__pm(engine); intel_engine_init__pm(engine);
intel_engine_pool_init(&engine->pool); intel_engine_pool_init(&engine->pool);
engine->kernel_context = ce = create_kernel_context(engine);
i915_gem_context_get_engine(i915->kernel_context, engine->id); if (IS_ERR(ce))
if (IS_ERR(engine->kernel_context))
goto err_breadcrumbs;
err = intel_context_pin(engine->kernel_context);
intel_context_put(engine->kernel_context);
if (err)
goto err_breadcrumbs; goto err_breadcrumbs;
engine->kernel_context = ce;
return 0; return 0;
err_breadcrumbs: err_breadcrumbs:
...@@ -338,6 +332,7 @@ void mock_engine_free(struct intel_engine_cs *engine) ...@@ -338,6 +332,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
GEM_BUG_ON(timer_pending(&mock->hw_delay)); GEM_BUG_ON(timer_pending(&mock->hw_delay));
intel_context_unpin(engine->kernel_context); intel_context_unpin(engine->kernel_context);
intel_context_put(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine); intel_engine_fini_breadcrumbs(engine);
......
...@@ -1229,7 +1229,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1229,7 +1229,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&s->workload_q_head[i]); INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL); s->shadow[i] = ERR_PTR(-EINVAL);
ce = i915_gem_context_get_engine(ctx, i); ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) { if (IS_ERR(ce)) {
ret = PTR_ERR(ce); ret = PTR_ERR(ce);
goto out_shadow_ctx; goto out_shadow_ctx;
...@@ -1270,6 +1270,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ...@@ -1270,6 +1270,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
break; break;
intel_context_unpin(s->shadow[i]); intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]);
} }
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
return ret; return ret;
......
...@@ -1256,9 +1256,8 @@ int i915_gem_init_hw(struct drm_i915_private *i915) ...@@ -1256,9 +1256,8 @@ int i915_gem_init_hw(struct drm_i915_private *i915)
static int __intel_engines_record_defaults(struct drm_i915_private *i915) static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{ {
struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
enum intel_engine_id id; enum intel_engine_id id;
int err = 0; int err = 0;
...@@ -1271,20 +1270,25 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) ...@@ -1271,20 +1270,25 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values. * from the same default HW values.
*/ */
ctx = i915_gem_context_create_kernel(i915, 0);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
e = i915_gem_context_lock_engines(ctx);
for_each_engine(engine, i915, id) { for_each_engine(engine, i915, id) {
struct intel_context *ce = e->engines[id]; struct intel_context *ce;
struct i915_request *rq; struct i915_request *rq;
/* We must be able to switch to something! */
GEM_BUG_ON(!engine->kernel_context);
engine->serial++; /* force the kernel context switch */
ce = intel_context_create(i915->kernel_context, engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
rq = intel_context_create_request(ce); rq = intel_context_create_request(ce);
if (IS_ERR(rq)) { if (IS_ERR(rq)) {
err = PTR_ERR(rq); err = PTR_ERR(rq);
goto err_active; intel_context_put(ce);
goto out;
} }
err = intel_engine_emit_ctx_wa(rq); err = intel_engine_emit_ctx_wa(rq);
...@@ -1305,26 +1309,33 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) ...@@ -1305,26 +1309,33 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_rq; goto err_rq;
err_rq: err_rq:
requests[id] = i915_request_get(rq);
i915_request_add(rq); i915_request_add(rq);
if (err) if (err)
goto err_active; goto out;
} }
/* Flush the default context image to memory, and enable powersaving. */ /* Flush the default context image to memory, and enable powersaving. */
if (!i915_gem_load_power_context(i915)) { if (!i915_gem_load_power_context(i915)) {
err = -EIO; err = -EIO;
goto err_active; goto out;
} }
for_each_engine(engine, i915, id) { for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct intel_context *ce = e->engines[id]; struct i915_request *rq;
struct i915_vma *state = ce->state; struct i915_vma *state;
void *vaddr; void *vaddr;
if (!state) rq = requests[id];
if (!rq)
continue; continue;
GEM_BUG_ON(intel_context_is_pinned(ce)); /* We want to be able to unbind the state from the GGTT */
GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
state = rq->hw_context->state;
if (!state)
continue;
/* /*
* As we will hold a reference to the logical state, it will * As we will hold a reference to the logical state, it will
...@@ -1336,43 +1347,49 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) ...@@ -1336,43 +1347,49 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
*/ */
err = i915_vma_unbind(state); err = i915_vma_unbind(state);
if (err) if (err)
goto err_active; goto out;
i915_gem_object_lock(state->obj); i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false); err = i915_gem_object_set_to_cpu_domain(state->obj, false);
i915_gem_object_unlock(state->obj); i915_gem_object_unlock(state->obj);
if (err) if (err)
goto err_active; goto out;
engine->default_state = i915_gem_object_get(state->obj); i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
i915_gem_object_set_cache_coherency(engine->default_state,
I915_CACHE_LLC);
/* Check we can acquire the image of the context state */ /* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state, vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
I915_MAP_FORCE_WB);
if (IS_ERR(vaddr)) { if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr); err = PTR_ERR(vaddr);
goto err_active; goto out;
} }
i915_gem_object_unpin_map(engine->default_state); rq->engine->default_state = i915_gem_object_get(state->obj);
i915_gem_object_unpin_map(state->obj);
} }
out_ctx: out:
i915_gem_context_unlock_engines(ctx);
i915_gem_context_set_closed(ctx);
i915_gem_context_put(ctx);
return err;
err_active:
/* /*
* If we have to abandon now, we expect the engines to be idle * If we have to abandon now, we expect the engines to be idle
* and ready to be torn-down. The quickest way we can accomplish * and ready to be torn-down. The quickest way we can accomplish
* this is by declaring ourselves wedged. * this is by declaring ourselves wedged.
*/ */
intel_gt_set_wedged(&i915->gt); if (err)
goto out_ctx; intel_gt_set_wedged(&i915->gt);
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct intel_context *ce;
struct i915_request *rq;
rq = requests[id];
if (!rq)
continue;
ce = rq->hw_context;
i915_request_put(rq);
intel_context_put(ce);
}
return err;
} }
static int static int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment