Commit e6e7eff6 authored by Michal Wajdeczko

drm/xe/guc: Use GuC ID Manager in submission code

We are ready to replace private guc_ids management code with
separate GuC ID Manager that can be shared with upcoming SR-IOV
PF provisioning code.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240313221112.1089-5-michal.wajdeczko@intel.com
parent f4fb157c
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include "xe_guc.h" #include "xe_guc.h"
#include "xe_guc_ct.h" #include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h" #include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_submit_types.h" #include "xe_guc_submit_types.h"
#include "xe_hw_engine.h" #include "xe_hw_engine.h"
#include "xe_hw_fence.h" #include "xe_hw_fence.h"
...@@ -236,16 +237,10 @@ static void guc_submit_fini(struct drm_device *drm, void *arg) ...@@ -236,16 +237,10 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
struct xe_guc *guc = arg; struct xe_guc *guc = arg;
xa_destroy(&guc->submission_state.exec_queue_lookup); xa_destroy(&guc->submission_state.exec_queue_lookup);
ida_destroy(&guc->submission_state.guc_ids);
bitmap_free(guc->submission_state.guc_ids_bitmap);
free_submit_wq(guc); free_submit_wq(guc);
mutex_destroy(&guc->submission_state.lock); mutex_destroy(&guc->submission_state.lock);
} }
#define GUC_ID_NUMBER_MLRC 4096
#define GUC_ID_NUMBER_SLRC (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
#define GUC_ID_START_MLRC GUC_ID_NUMBER_SLRC
static const struct xe_exec_queue_ops guc_exec_queue_ops; static const struct xe_exec_queue_ops guc_exec_queue_ops;
static void primelockdep(struct xe_guc *guc) static void primelockdep(struct xe_guc *guc)
...@@ -268,22 +263,14 @@ int xe_guc_submit_init(struct xe_guc *guc) ...@@ -268,22 +263,14 @@ int xe_guc_submit_init(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc); struct xe_gt *gt = guc_to_gt(guc);
int err; int err;
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(GUC_ID_NUMBER_MLRC, GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap)
return -ENOMEM;
err = alloc_submit_wq(guc); err = alloc_submit_wq(guc);
if (err) { if (err)
bitmap_free(guc->submission_state.guc_ids_bitmap);
return err; return err;
}
gt->exec_queue_ops = &guc_exec_queue_ops; gt->exec_queue_ops = &guc_exec_queue_ops;
mutex_init(&guc->submission_state.lock); mutex_init(&guc->submission_state.lock);
xa_init(&guc->submission_state.exec_queue_lookup); xa_init(&guc->submission_state.exec_queue_lookup);
ida_init(&guc->submission_state.guc_ids);
spin_lock_init(&guc->submission_state.suspend.lock); spin_lock_init(&guc->submission_state.suspend.lock);
guc->submission_state.suspend.context = dma_fence_context_alloc(1); guc->submission_state.suspend.context = dma_fence_context_alloc(1);
...@@ -294,6 +281,10 @@ int xe_guc_submit_init(struct xe_guc *guc) ...@@ -294,6 +281,10 @@ int xe_guc_submit_init(struct xe_guc *guc)
if (err) if (err)
return err; return err;
err = xe_guc_id_mgr_init(&guc->submission_state.idm, ~0);
if (err)
return err;
return 0; return 0;
} }
...@@ -306,12 +297,8 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa ...@@ -306,12 +297,8 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
for (i = 0; i < xa_count; ++i) for (i = 0; i < xa_count; ++i)
xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
if (xe_exec_queue_is_parallel(q)) xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
bitmap_release_region(guc->submission_state.guc_ids_bitmap, q->guc->id, q->width);
q->guc->id - GUC_ID_START_MLRC,
order_base_2(q->width));
else
ida_free(&guc->submission_state.guc_ids, q->guc->id);
} }
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
...@@ -329,21 +316,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) ...@@ -329,21 +316,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
*/ */
lockdep_assert_held(&guc->submission_state.lock); lockdep_assert_held(&guc->submission_state.lock);
if (xe_exec_queue_is_parallel(q)) { ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
void *bitmap = guc->submission_state.guc_ids_bitmap; q->width);
ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
order_base_2(q->width));
} else {
ret = ida_alloc_max(&guc->submission_state.guc_ids,
GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
}
if (ret < 0) if (ret < 0)
return ret; return ret;
q->guc->id = ret; q->guc->id = ret;
if (xe_exec_queue_is_parallel(q))
q->guc->id += GUC_ID_START_MLRC;
for (i = 0; i < q->width; ++i) { for (i = 0; i < q->width; ++i) {
ptr = xa_store(&guc->submission_state.exec_queue_lookup, ptr = xa_store(&guc->submission_state.exec_queue_lookup,
......
...@@ -68,10 +68,6 @@ struct xe_guc { ...@@ -68,10 +68,6 @@ struct xe_guc {
struct xe_guc_id_mgr idm; struct xe_guc_id_mgr idm;
/** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */ /** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup; struct xarray exec_queue_lookup;
/** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
struct ida guc_ids;
/** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
unsigned long *guc_ids_bitmap;
/** @submission_state.stopped: submissions are stopped */ /** @submission_state.stopped: submissions are stopped */
atomic_t stopped; atomic_t stopped;
/** @submission_state.lock: protects submission state */ /** @submission_state.lock: protects submission state */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment