Commit 02942b42 authored by John Harrison

drm/i915/guc: Do not conflate lrc_desc with GuC id for registration

The LRC descriptor pool is going away. So, stop using it as a check for
context registration; use the GuC id instead (being the thing that
actually gets registered with the GuC).

Also, rename the set/clear/query helper functions for context id
mappings to better reflect their purpose and to differentiate them from
other registration-related helper functions.
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220302003357.4188363-2-John.C.Harrison@Intel.com
parent b2006061
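Before the diff, a brief illustration of the mapping semantics described above: after this change, "registered" simply means that a guc_id maps to an intel_context in guc->context_lookup, independent of any LRC descriptor pool entry. The following is a minimal, self-contained C sketch (toy code, not i915 driver code); the toy_* names and the fixed-size array standing in for guc->context_lookup are illustrative assumptions, not real driver structures.

/*
 * Toy model of the set/clear/query helpers renamed in the diff below.
 * A plain array stands in for guc->context_lookup.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_GUC_IDS 8

struct toy_context {
        int dummy;
};

/* Stand-in for the guc_id -> context lookup (guc->context_lookup). */
static struct toy_context *toy_context_lookup[TOY_MAX_GUC_IDS];

/* Models set_ctx_id_mapping(): record that this guc_id maps to a context. */
static void toy_set_ctx_id_mapping(unsigned int id, struct toy_context *ce)
{
        toy_context_lookup[id] = ce;
}

/* Models clr_ctx_id_mapping(): drop the mapping when the id is released. */
static void toy_clr_ctx_id_mapping(unsigned int id)
{
        toy_context_lookup[id] = NULL;
}

/* Models ctx_id_mapped(): "registered" simply means the mapping exists. */
static bool toy_ctx_id_mapped(unsigned int id)
{
        return toy_context_lookup[id] != NULL;
}

int main(void)
{
        struct toy_context ce = { 0 };

        toy_set_ctx_id_mapping(3, &ce);
        printf("id 3 mapped: %d\n", toy_ctx_id_mapped(3));     /* prints 1 */
        toy_clr_ctx_id_mapping(3);
        printf("id 3 mapped: %d\n", toy_ctx_id_mapped(3));     /* prints 0 */
        return 0;
}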
@@ -511,30 +511,19 @@ static inline bool guc_submission_initialized(struct intel_guc *guc)
         return !!guc->lrc_desc_pool_vaddr;
 }
 
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
+static inline void _reset_lrc_desc(struct intel_guc *guc, u32 id)
 {
-        if (likely(guc_submission_initialized(guc))) {
-                struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
-                unsigned long flags;
+        struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
 
-                memset(desc, 0, sizeof(*desc));
-
-                /*
-                 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
-                 * the lower level functions directly.
-                 */
-                xa_lock_irqsave(&guc->context_lookup, flags);
-                __xa_erase(&guc->context_lookup, id);
-                xa_unlock_irqrestore(&guc->context_lookup, flags);
-        }
+        memset(desc, 0, sizeof(*desc));
 }
 
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
         return __get_context(guc, id);
 }
 
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
-                                           struct intel_context *ce)
+static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
+                                      struct intel_context *ce)
 {
         unsigned long flags;
@@ -548,6 +537,24 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
         xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
+static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
+{
+        unsigned long flags;
+
+        if (unlikely(!guc_submission_initialized(guc)))
+                return;
+
+        _reset_lrc_desc(guc, id);
+
+        /*
+         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+         * the lower level functions directly.
+         */
+        xa_lock_irqsave(&guc->context_lookup, flags);
+        __xa_erase(&guc->context_lookup, id);
+        xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
 {
         if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
@@ -792,7 +799,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
         GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
         GEM_BUG_ON(context_guc_id_invalid(ce));
         GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
-        GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+        GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
 
         /* Insert NOOP if this work queue item will wrap the tail pointer. */
         if (wqi_size > wq_space_until_wrap(ce)) {
@@ -920,7 +927,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
         if (submit) {
                 struct intel_context *ce = request_to_scheduling_context(last);
 
-                if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+                if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
                              !intel_context_is_banned(ce))) {
                         ret = guc_lrc_desc_pin(ce, false);
                         if (unlikely(ret == -EPIPE)) {
@@ -1884,7 +1891,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
 
         return submission_disabled(guc) || guc->stalled_request ||
                 !i915_sched_engine_is_empty(sched_engine) ||
-                !lrc_desc_registered(guc, ce->guc_id.id);
+                !ctx_id_mapped(guc, ce->guc_id.id);
 }
 
 static void guc_submit_request(struct i915_request *rq)
@@ -1941,7 +1948,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
                 else
                         ida_simple_remove(&guc->submission_state.guc_ids,
                                           ce->guc_id.id);
-                reset_lrc_desc(guc, ce->guc_id.id);
+                clr_ctx_id_mapping(guc, ce->guc_id.id);
                 set_context_guc_id_invalid(ce);
         }
         if (!list_empty(&ce->guc_id.link))
@@ -2237,10 +2244,10 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
         GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
                    i915_gem_object_is_lmem(ce->ring->vma->obj));
 
-        context_registered = lrc_desc_registered(guc, desc_idx);
+        context_registered = ctx_id_mapped(guc, desc_idx);
 
-        reset_lrc_desc(guc, desc_idx);
-        set_lrc_desc_registered(guc, desc_idx, ce);
+        clr_ctx_id_mapping(guc, desc_idx);
+        set_ctx_id_mapping(guc, desc_idx, ce);
 
         desc = __get_lrc_desc(guc, desc_idx);
         desc->engine_class = engine_class_to_guc_class(engine->class);
@@ -2311,7 +2318,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
                 }
                 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
                 if (unlikely(disabled)) {
-                        reset_lrc_desc(guc, desc_idx);
+                        clr_ctx_id_mapping(guc, desc_idx);
                         return 0;       /* Will get registered later */
                 }
 
@@ -2327,9 +2334,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
                 with_intel_runtime_pm(runtime_pm, wakeref)
                         ret = register_context(ce, loop);
                 if (unlikely(ret == -EBUSY)) {
-                        reset_lrc_desc(guc, desc_idx);
+                        clr_ctx_id_mapping(guc, desc_idx);
                 } else if (unlikely(ret == -ENODEV)) {
-                        reset_lrc_desc(guc, desc_idx);
+                        clr_ctx_id_mapping(guc, desc_idx);
                         ret = 0;        /* Will get registered later */
                 }
         }
@@ -2516,7 +2523,7 @@ static bool context_cant_unblock(struct intel_context *ce)
 
         return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
                 context_guc_id_invalid(ce) ||
-                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
                 !intel_context_is_pinned(ce);
 }
 
@@ -2686,7 +2693,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
         bool disabled;
 
         GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
-        GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+        GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
         GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
         GEM_BUG_ON(context_enabled(ce));
 
@@ -2803,7 +2810,7 @@ static void guc_context_destroy(struct kref *kref)
          */
         spin_lock_irqsave(&guc->submission_state.lock, flags);
         destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
-                  !lrc_desc_registered(guc, ce->guc_id.id);
+                  !ctx_id_mapped(guc, ce->guc_id.id);
         if (likely(!destroy)) {
                 if (!list_empty(&ce->guc_id.link))
                         list_del_init(&ce->guc_id.link);
@@ -3046,7 +3053,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
 {
         return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
-                !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
+                !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
                 !submission_disabled(ce_to_guc(ce));
 }