Commit b146e5ef authored by Chris Wilson

drm/i915: Pass around the intel_context

Instead of passing the gem_context and engine to find the instance of
the intel_context to use, pass around the intel_context instead. This is
useful for the next few patches, where the intel_context is no longer a
direct lookup.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190306084704.15755-1-chris@chris-wilson.co.uk
parent 103b76ee
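
The shape of the change: helpers that previously took the (gem_context, engine) pair and repeated the to_intel_context() lookup themselves now take the resolved intel_context directly. A minimal sketch of the pattern, using simplified stand-in types rather than the driver's real definitions:

/* Minimal sketch of the calling-convention change; all types here are
 * simplified stand-ins, not the i915 driver's definitions. */
#include <stdint.h>

struct i915_gem_context;

struct intel_sseu { uint8_t slice_mask; };

struct intel_context {
	struct i915_gem_context *gem_context;	/* back-pointer to owner */
	struct intel_sseu sseu;			/* per-context state */
};

#define NUM_ENGINES 8

struct i915_gem_context {
	struct intel_context engine_ce[NUM_ENGINES];	/* one per engine */
};

/* Before: each helper re-did the (ctx, engine) -> ce lookup itself. */
static uint32_t make_rpcs_before(struct i915_gem_context *ctx,
				 unsigned int engine)
{
	const struct intel_context *ce = &ctx->engine_ce[engine];

	return ce->sseu.slice_mask;
}

/* After: the caller resolves ce once and passes it down, so the helper
 * no longer cares whether ce came from a direct lookup at all. */
static uint32_t make_rpcs_after(const struct intel_context *ce)
{
	return ce->sseu.slice_mask;
}

This is what lets the follow-up patches make the intel_context something other than a direct lookup without touching these helpers again.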
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3112,7 +3112,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx,
+			    struct intel_context *ce,
 			    u32 *reg_state);

 /* i915_gem_evict.c */
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1629,13 +1629,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
  * It's fine to put out-of-date values into these per-context registers
  * in the case that the OA unit has been disabled.
  */
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
-					   u32 *reg_state,
-					   const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+			       u32 *reg_state,
+			       const struct i915_oa_config *oa_config)
 {
-	struct drm_i915_private *dev_priv = ctx->i915;
-	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
-	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	struct drm_i915_private *i915 = ce->gem_context->i915;
+	u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
 	/* The MMIO offsets for Flex EU registers aren't contiguous */
 	i915_reg_t flex_regs[] = {
 		EU_PERF_CNTL0,
@@ -1649,8 +1650,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 	int i;

 	CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
-		(dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
-		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+		(i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+		(i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
 		GEN8_OA_COUNTER_RESUME);

 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1679,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
 		CTX_REG(reg_state, state_offset, flex_regs[i], value);
 	}

-	CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-		gen8_make_rpcs(dev_priv,
-			       &to_intel_context(ctx,
-						 dev_priv->engine[RCS0])->sseu));
+	CTX_REG(reg_state,
+		CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+		gen8_make_rpcs(i915, &ce->sseu));
 }

 /*
@@ -1754,7 +1754,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 		ce->state->obj->mm.dirty = true;
 		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);

-		gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+		gen8_update_reg_state_unlocked(ce, regs, oa_config);

 		i915_gem_object_unpin_map(ce->state->obj);
 	}
@@ -2138,8 +2138,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 }

 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx,
-			    u32 *reg_state)
+			    struct intel_context *ce,
+			    u32 *regs)
 {
 	struct i915_perf_stream *stream;

@@ -2148,7 +2148,7 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,

 	stream = engine->i915->perf.oa.exclusive_stream;
 	if (stream)
-		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+		gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
 }

 /**
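
In the i915_perf.c hunks above, the OA code now derives the device pointer through ce->gem_context->i915 and reads the SSEU state straight from ce->sseu, instead of re-looking up the render engine's context with to_intel_context(ctx, dev_priv->engine[RCS0]). A sketch of the resulting flow, again with stand-in types and a hypothetical perf layout; only the call shape mirrors the driver:

#include <stdint.h>

/* Stand-in types -- illustration only, not the i915 definitions. */
struct i915_oa_config { uint32_t ctx_oactxctrl; };
struct i915_perf_stream { const struct i915_oa_config *oa_config; };
struct drm_i915_private { struct i915_perf_stream *exclusive_stream; };
struct i915_gem_context { struct drm_i915_private *i915; };
struct intel_context { struct i915_gem_context *gem_context; };
struct intel_engine_cs { struct drm_i915_private *i915; };

/* Writes the OA config into a context image; the device pointer is
 * derived from the intel_context's back-pointer, not passed alongside. */
static void update_reg_state_unlocked(struct intel_context *ce, uint32_t *regs,
				      const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *i915 = ce->gem_context->i915;

	(void)i915;			/* would select per-gen offsets */
	regs[0] = oa_config->ctx_oactxctrl;
}

/* Only the exclusive OA stream's config lands in a fresh context image;
 * with no stream open, the registers are left untouched. */
static void oa_init_reg_state(struct intel_engine_cs *engine,
			      struct intel_context *ce, uint32_t *regs)
{
	struct i915_perf_stream *stream = engine->i915->exclusive_stream;

	if (stream)
		update_reg_state_unlocked(ce, regs, stream->oa_config);
}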
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -170,7 +170,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine,
 					    struct intel_context *ce);
 static void execlists_init_reg_state(u32 *reg_state,
-				     struct i915_gem_context *ctx,
+				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring);

@@ -1320,8 +1320,8 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,

 	/* RPCS */
 	if (engine->class == RENDER_CLASS)
-		regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
-							       &ce->sseu);
+		regs[CTX_R_PWR_CLK_STATE + 1] =
+			gen8_make_rpcs(engine->i915, &ce->sseu);
 }

 static struct intel_context *
@@ -2021,7 +2021,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
 	intel_ring_update_space(rq->ring);

-	execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
+	execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
 	__execlists_update_reg_state(engine, rq->hw_context);

 out_unlock:
@@ -2659,13 +2659,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }

 static void execlists_init_reg_state(u32 *regs,
-				     struct i915_gem_context *ctx,
+				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	u32 base = engine->mmio_base;
+	struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
 	bool rcs = engine->class == RENDER_CLASS;
+	u32 base = engine->mmio_base;

 	/* A context is actually a big batch buffer with several
 	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2680,7 +2680,7 @@ static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
 		_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
 		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
-	if (INTEL_GEN(dev_priv) < 11) {
+	if (INTEL_GEN(engine->i915) < 11) {
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
 					    CTX_CTRL_RS_CTX_ENABLE);
@@ -2735,33 +2735,33 @@ static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
 	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);

-	if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
+	if (i915_vm_is_48bit(&ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
 		 * PDP0_DESCRIPTOR contains the base address to PML4 and
 		 * other PDP Descriptors are ignored.
 		 */
-		ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+		ASSIGN_CTX_PML4(ppgtt, regs);
 	} else {
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
-		ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
+		ASSIGN_CTX_PDP(ppgtt, regs, 3);
+		ASSIGN_CTX_PDP(ppgtt, regs, 2);
+		ASSIGN_CTX_PDP(ppgtt, regs, 1);
+		ASSIGN_CTX_PDP(ppgtt, regs, 0);
 	}

 	if (rcs) {
 		regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);

-		i915_oa_init_reg_state(engine, ctx, regs);
+		i915_oa_init_reg_state(engine, ce, regs);
 	}

 	regs[CTX_END] = MI_BATCH_BUFFER_END;
-	if (INTEL_GEN(dev_priv) >= 10)
+	if (INTEL_GEN(engine->i915) >= 10)
 		regs[CTX_END] |= BIT(0);
 }

 static int
-populate_lr_context(struct i915_gem_context *ctx,
+populate_lr_context(struct intel_context *ce,
 		    struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *engine,
 		    struct intel_ring *ring)
@@ -2807,11 +2807,12 @@ populate_lr_context(struct i915_gem_context *ctx,
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
 	regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
-	execlists_init_reg_state(regs, ctx, engine, ring);
+	execlists_init_reg_state(regs, ce, engine, ring);
 	if (!engine->default_state)
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-	if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+	if (ce->gem_context == engine->i915->preempt_context &&
+	    INTEL_GEN(engine->i915) < 11)
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
@@ -2866,7 +2867,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}

-	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+	ret = populate_lr_context(ce, ctx_obj, engine, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
 		goto error_ring_free;
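
The intel_lrc.c hunks follow the same recipe: execlists_init_reg_state() recovers the PPGTT via ce->gem_context->ppgtt, which is what allows execlists_reset() to rebuild the register state from rq->hw_context alone. A stand-in sketch of that derivation (types simplified, not the real driver structs):

#include <stdbool.h>

struct i915_hw_ppgtt { bool is_48bit; };
struct i915_gem_context { struct i915_hw_ppgtt *ppgtt; };
struct intel_context { struct i915_gem_context *gem_context; };

static void init_reg_state(const struct intel_context *ce)
{
	/* Reached through the back-pointer, not a separate argument. */
	struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;

	if (ppgtt->is_48bit) {
		/* 48b PPGTT: program only the PML4 descriptor */
	} else {
		/* 32b PPGTT: program all four PDP descriptors */
	}
}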