Commit 09c5e3a5 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Assign contexts in parent-child relationship consecutive guc_ids

Assign contexts in parent-child relationship consecutive guc_ids. This
is accomplished by partitioning guc_id space between ones that need to
be consecutive (1/16 available guc_ids) and ones that do not (15/16 of
available guc_ids). The consecutive search is implemented via the bitmap
API.

This is a precursor to the full GuC multi-lrc implementation but aligns
to how the GuC multi-lrc interface is defined - guc_ids must be consecutive
when using the GuC multi-lrc interface.

v2:
 (Daniel Vetter)
  - Explicitly state why we assign consecutive guc_ids
v3:
 (John Harrison)
  - Bring back in spin lock
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-11-matthew.brost@intel.com
parent 44d25fec
...@@ -82,9 +82,13 @@ struct intel_guc { ...@@ -82,9 +82,13 @@ struct intel_guc {
*/ */
spinlock_t lock; spinlock_t lock;
/** /**
* @guc_ids: used to allocate new guc_ids * @guc_ids: used to allocate new guc_ids, single-lrc
*/ */
struct ida guc_ids; struct ida guc_ids;
/**
* @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
*/
unsigned long *guc_ids_bitmap;
/** /**
* @guc_id_list: list of intel_context with valid guc_ids but no * @guc_id_list: list of intel_context with valid guc_ids but no
* refs * refs
......
...@@ -128,6 +128,16 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); ...@@ -128,6 +128,16 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
#define GUC_REQUEST_SIZE 64 /* bytes */ #define GUC_REQUEST_SIZE 64 /* bytes */
/*
* We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
* per the GuC submission interface. A different allocation algorithm is used
* (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
* partition the guc_id space. We believe the number of multi-lrc contexts in
* use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
* multi-lrc.
*/
#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16)
/* /*
* Below is a set of functions which control the GuC scheduling state which * Below is a set of functions which control the GuC scheduling state which
* require a lock. * require a lock.
...@@ -1207,6 +1217,11 @@ int intel_guc_submission_init(struct intel_guc *guc) ...@@ -1207,6 +1217,11 @@ int intel_guc_submission_init(struct intel_guc *guc)
INIT_WORK(&guc->submission_state.destroyed_worker, INIT_WORK(&guc->submission_state.destroyed_worker,
destroyed_worker_func); destroyed_worker_func);
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap)
return -ENOMEM;
return 0; return 0;
} }
...@@ -1218,6 +1233,7 @@ void intel_guc_submission_fini(struct intel_guc *guc) ...@@ -1218,6 +1233,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_flush_destroyed_contexts(guc); guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy(guc); guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine); i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
} }
static inline void queue_request(struct i915_sched_engine *sched_engine, static inline void queue_request(struct i915_sched_engine *sched_engine,
...@@ -1269,16 +1285,41 @@ static void guc_submit_request(struct i915_request *rq) ...@@ -1269,16 +1285,41 @@ static void guc_submit_request(struct i915_request *rq)
spin_unlock_irqrestore(&sched_engine->lock, flags); spin_unlock_irqrestore(&sched_engine->lock, flags);
} }
static int new_guc_id(struct intel_guc *guc) static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{ {
return ida_simple_get(&guc->submission_state.guc_ids, 0, int ret;
GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
__GFP_RETRY_MAYFAIL | __GFP_NOWARN); GEM_BUG_ON(intel_context_is_child(ce));
if (intel_context_is_parent(ce))
ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
NUMBER_MULTI_LRC_GUC_ID,
order_base_2(ce->parallel.number_children
+ 1));
else
ret = ida_simple_get(&guc->submission_state.guc_ids,
NUMBER_MULTI_LRC_GUC_ID,
GUC_MAX_LRC_DESCRIPTORS,
GFP_KERNEL | __GFP_RETRY_MAYFAIL |
__GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
ce->guc_id.id = ret;
return 0;
} }
static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{ {
GEM_BUG_ON(intel_context_is_child(ce));
if (!context_guc_id_invalid(ce)) { if (!context_guc_id_invalid(ce)) {
if (intel_context_is_parent(ce))
bitmap_release_region(guc->submission_state.guc_ids_bitmap,
ce->guc_id.id,
order_base_2(ce->parallel.number_children
+ 1));
else
ida_simple_remove(&guc->submission_state.guc_ids, ida_simple_remove(&guc->submission_state.guc_ids,
ce->guc_id.id); ce->guc_id.id);
reset_lrc_desc(guc, ce->guc_id.id); reset_lrc_desc(guc, ce->guc_id.id);
...@@ -1297,49 +1338,64 @@ static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) ...@@ -1297,49 +1338,64 @@ static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
spin_unlock_irqrestore(&guc->submission_state.lock, flags); spin_unlock_irqrestore(&guc->submission_state.lock, flags);
} }
static int steal_guc_id(struct intel_guc *guc) static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
{ {
struct intel_context *ce; struct intel_context *cn;
int guc_id;
lockdep_assert_held(&guc->submission_state.lock); lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
GEM_BUG_ON(intel_context_is_parent(ce));
if (!list_empty(&guc->submission_state.guc_id_list)) { if (!list_empty(&guc->submission_state.guc_id_list)) {
ce = list_first_entry(&guc->submission_state.guc_id_list, cn = list_first_entry(&guc->submission_state.guc_id_list,
struct intel_context, struct intel_context,
guc_id.link); guc_id.link);
GEM_BUG_ON(atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce)); GEM_BUG_ON(context_guc_id_invalid(cn));
GEM_BUG_ON(intel_context_is_child(cn));
GEM_BUG_ON(intel_context_is_parent(cn));
list_del_init(&ce->guc_id.link); list_del_init(&cn->guc_id.link);
guc_id = ce->guc_id.id; ce->guc_id = cn->guc_id;
spin_lock(&ce->guc_state.lock); spin_lock(&ce->guc_state.lock);
clr_context_registered(ce); clr_context_registered(cn);
spin_unlock(&ce->guc_state.lock); spin_unlock(&ce->guc_state.lock);
set_context_guc_id_invalid(ce); set_context_guc_id_invalid(cn);
return guc_id;
return 0;
} else { } else {
return -EAGAIN; return -EAGAIN;
} }
} }
static int assign_guc_id(struct intel_guc *guc, u16 *out) static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
{ {
int ret; int ret;
lockdep_assert_held(&guc->submission_state.lock); lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
ret = new_guc_id(guc); ret = new_guc_id(guc, ce);
if (unlikely(ret < 0)) { if (unlikely(ret < 0)) {
ret = steal_guc_id(guc); if (intel_context_is_parent(ce))
return -ENOSPC;
ret = steal_guc_id(guc, ce);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
*out = ret; if (intel_context_is_parent(ce)) {
struct intel_context *child;
int i = 1;
for_each_child(ce, child)
child->guc_id.id = ce->guc_id.id + i++;
}
return 0; return 0;
} }
...@@ -1357,7 +1413,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) ...@@ -1357,7 +1413,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
might_lock(&ce->guc_state.lock); might_lock(&ce->guc_state.lock);
if (context_guc_id_invalid(ce)) { if (context_guc_id_invalid(ce)) {
ret = assign_guc_id(guc, &ce->guc_id.id); ret = assign_guc_id(guc, ce);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
ret = 1; /* Indidcates newly assigned guc_id */ ret = 1; /* Indidcates newly assigned guc_id */
...@@ -1399,8 +1455,10 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) ...@@ -1399,8 +1455,10 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
unsigned long flags; unsigned long flags;
GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0); GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
GEM_BUG_ON(intel_context_is_child(ce));
if (unlikely(context_guc_id_invalid(ce))) if (unlikely(context_guc_id_invalid(ce) ||
intel_context_is_parent(ce)))
return; return;
spin_lock_irqsave(&guc->submission_state.lock, flags); spin_lock_irqsave(&guc->submission_state.lock, flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment