Commit 71b0846c authored by Chris Wilson

drm/i915/guc: Remove preemption support for current fw

Preemption via GuC submission is not supported in its current legacy
incarnation. The current FW does support a similar preemption flow via
H2G, but it is class-based instead of instance-based, which doesn't fit
well with the i915 tracking. To fix this, the firmware is being updated
to better support our needs with a new flow, so we can safely remove the
old code.

v2 (Daniele): resurrect & rebase, reword commit message, remove
preempt_context as well
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190710005437.3496-2-daniele.ceraolospurio@intel.com
parent bf1315b8
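
For context, the flow being deleted below is instance-based: the driver built a per-engine H2G preemption request and sent it to the GuC. Here is a minimal sketch of that request, condensed from the removed inject_preempt_context() in the diff below; the field layout comes directly from the removed code, while build_preempt_request() is a hypothetical helper name, not an i915 function:

	/*
	 * Illustrative sketch only, condensed from the removed
	 * inject_preempt_context() below. build_preempt_request() is a
	 * hypothetical name, not an i915 function.
	 */
	static int build_preempt_request(struct intel_guc *guc,
					 struct intel_engine_cs *engine)
	{
		struct intel_guc_client *client = guc->preempt_client;
		u32 data[7];

		data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
		data[1] = client->stage_id;
		data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
			  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
		data[3] = engine->guc_id;	/* per engine instance */
		data[4] = guc->execbuf_client->priority;
		data[5] = guc->execbuf_client->stage_id;
		data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

		return intel_guc_send(guc, data, ARRAY_SIZE(data));
	}

The per-instance engine->guc_id in data[3] is exactly the granularity the newer firmware flow drops: it preempts by engine class, which no longer matches the per-instance state i915 keeps.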
@@ -644,18 +644,12 @@ static void init_contexts(struct drm_i915_private *i915)
 	init_llist_head(&i915->contexts.free_list);
 }
 
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
-	return USES_GUC_SUBMISSION(i915);
-}
-
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
 
 	/* Reassure ourselves we are only called once */
 	GEM_BUG_ON(dev_priv->kernel_context);
-	GEM_BUG_ON(dev_priv->preempt_context);
 
 	init_contexts(dev_priv);
@@ -676,15 +670,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
 	dev_priv->kernel_context = ctx;
 
-	/* highest priority; preempting task */
-	if (needs_preempt_context(dev_priv)) {
-		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-		if (!IS_ERR(ctx))
-			dev_priv->preempt_context = ctx;
-		else
-			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
-	}
-
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
 			 "logical" : "fake");
@@ -695,8 +680,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	if (i915->preempt_context)
-		destroy_kernel_context(&i915->preempt_context);
 	destroy_kernel_context(&i915->kernel_context);
 
 	/* Must free all deferred contexts (via flush_workqueue) first */
...
@@ -841,15 +841,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	/*
-	 * Similarly the preempt context must always be available so that
-	 * we can interrupt the engine at any time. However, as preemption
-	 * is optional, we allow it to fail.
-	 */
-	if (i915->preempt_context)
-		pin_context(i915->preempt_context, engine,
-			    &engine->preempt_context);
-
 	ret = measure_breadcrumb_dw(engine);
 	if (ret < 0)
 		goto err_unpin;
@@ -861,8 +852,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	return 0;
 
 err_unpin:
-	if (engine->preempt_context)
-		intel_context_unpin(engine->preempt_context);
 	intel_context_unpin(engine->kernel_context);
 	return ret;
 }
@@ -887,8 +876,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
 
-	if (engine->preempt_context)
-		intel_context_unpin(engine->preempt_context);
 	intel_context_unpin(engine->kernel_context);
 
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
...
@@ -288,7 +288,6 @@ struct intel_engine_cs {
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
-	struct intel_context *preempt_context; /* pinned; optional */
 
 	intel_engine_mask_t saturated; /* submitting semaphores too late? */
...
@@ -145,10 +145,6 @@ int intel_gt_resume(struct intel_gt *gt)
 		if (ce)
 			ce->ops->reset(ce);
 
-		ce = engine->preempt_context;
-		if (ce)
-			ce->ops->reset(ce);
-
 		engine->serial++; /* kernel context lost */
 		err = engine->resume(engine);
...
@@ -2010,11 +2010,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
 
-	if (guc->preempt_client) {
-		seq_printf(m, "\nGuC preempt client @ %p:\n",
-			   guc->preempt_client);
-		i915_guc_client_info(m, dev_priv, guc->preempt_client);
-	}
 
 	/* Add more as required ... */
...
@@ -1378,8 +1378,6 @@ struct drm_i915_private {
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	/* Context used internally to idle the GPU and setup initial state */
 	struct i915_gem_context *kernel_context;
-	/* Context only to be used for injecting preemption commands */
-	struct i915_gem_context *preempt_context;
 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
 					    [MAX_ENGINE_INSTANCE + 1];
...
@@ -101,8 +101,6 @@ void intel_guc_init_early(struct intel_guc *guc)
 
 static int guc_init_wq(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
 	/*
 	 * GuC log buffer flush work item has to do register access to
 	 * send the ack to GuC and this work item, if not synced before
@@ -122,31 +120,6 @@ static int guc_init_wq(struct intel_guc *guc)
 		return -ENOMEM;
 	}
 
-	/*
-	 * Even though both sending GuC action, and adding a new workitem to
-	 * GuC workqueue are serialized (each with its own locking), since
-	 * we're using mutliple engines, it's possible that we're going to
-	 * issue a preempt request with two (or more - each for different
-	 * engine) workitems in GuC queue. In this situation, GuC may submit
-	 * all of them, which will make us very confused.
-	 * Our preemption contexts may even already be complete - before we
-	 * even had the chance to sent the preempt action to GuC!. Rather
-	 * than introducing yet another lock, we can just use ordered workqueue
-	 * to make sure we're always sending a single preemption request with a
-	 * single workitem.
-	 */
-	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-	    USES_GUC_SUBMISSION(dev_priv)) {
-		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
-							  WQ_HIGHPRI);
-		if (!guc->preempt_wq) {
-			destroy_workqueue(guc->log.relay.flush_wq);
-			DRM_ERROR("Couldn't allocate workqueue for GuC "
-				  "preemption\n");
-			return -ENOMEM;
-		}
-	}
-
 	return 0;
 }
 
@@ -154,10 +127,6 @@ static void guc_fini_wq(struct intel_guc *guc)
 {
 	struct workqueue_struct *wq;
 
-	wq = fetch_and_zero(&guc->preempt_wq);
-	if (wq)
-		destroy_workqueue(wq);
-
 	wq = fetch_and_zero(&guc->log.relay.flush_wq);
 	if (wq)
 		destroy_workqueue(wq);
...
@@ -37,11 +37,6 @@
 
 struct __guc_ads_blob;
 
-struct guc_preempt_work {
-	struct work_struct work;
-	struct intel_engine_cs *engine;
-};
-
 /*
  * Top level structure of GuC. It handles firmware loading and manages client
  * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
@@ -76,10 +71,6 @@ struct intel_guc {
 	void *shared_data_vaddr;
 
 	struct intel_guc_client *execbuf_client;
-	struct intel_guc_client *preempt_client;
-
-	struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
-	struct workqueue_struct *preempt_wq;
 
 	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
 	/* Cyclic counter mod pagesize */
...
@@ -46,11 +46,10 @@ enum {
  *
  * GuC client:
  * A intel_guc_client refers to a submission path through GuC. Currently, there
- * are two clients. One of them (the execbuf_client) is charged with all
- * submissions to the GuC, the other one (preempt_client) is responsible for
- * preempting the execbuf_client. This struct is the owner of a doorbell, a
- * process descriptor and a workqueue (all of them inside a single gem object
- * that contains all required pages for these elements).
+ * is only one client, which is charged with all submissions to the GuC. This
+ * struct is the owner of a doorbell, a process descriptor and a workqueue (all
+ * of them inside a single gem object that contains all required pages for these
+ * elements).
  *
  * GuC stage descriptor:
  * During initialization, the driver allocates a static pool of 1024 such
@@ -88,12 +87,6 @@ enum {
  *
  */
 
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
-	return (i915_ggtt_offset(engine->status_page.vma) +
-		I915_GEM_HWS_PREEMPT_ADDR);
-}
-
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
 	return rb_entry(rb, struct i915_priolist, node);
@@ -563,126 +556,6 @@ static void flush_ggtt_writes(struct i915_vma *vma)
 	intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
 }
 
-static void inject_preempt_context(struct work_struct *work)
-{
-	struct guc_preempt_work *preempt_work =
-		container_of(work, typeof(*preempt_work), work);
-	struct intel_engine_cs *engine = preempt_work->engine;
-	struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
-					     preempt_work[engine->id]);
-	struct intel_guc_client *client = guc->preempt_client;
-	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-	struct intel_context *ce = engine->preempt_context;
-	u32 data[7];
-
-	if (!ce->ring->emit) { /* recreate upon load/resume */
-		u32 addr = intel_hws_preempt_done_address(engine);
-		u32 *cs;
-
-		cs = ce->ring->vaddr;
-		if (engine->class == RENDER_CLASS) {
-			cs = gen8_emit_ggtt_write_rcs(cs,
-						      GUC_PREEMPT_FINISHED,
-						      addr,
-						      PIPE_CONTROL_CS_STALL);
-		} else {
-			cs = gen8_emit_ggtt_write(cs,
-						  GUC_PREEMPT_FINISHED,
-						  addr,
-						  0);
-			*cs++ = MI_NOOP;
-			*cs++ = MI_NOOP;
-		}
-		*cs++ = MI_USER_INTERRUPT;
-		*cs++ = MI_NOOP;
-
-		ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
-		GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
-
-		flush_ggtt_writes(ce->ring->vma);
-	}
-
-	spin_lock_irq(&client->wq_lock);
-	guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
-			   GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
-	spin_unlock_irq(&client->wq_lock);
-
-	/*
-	 * If GuC firmware performs an engine reset while that engine had
-	 * a preemption pending, it will set the terminated attribute bit
-	 * on our preemption stage descriptor. GuC firmware retains all
-	 * pending work items for a high-priority GuC client, unlike the
-	 * normal-priority GuC client where work items are dropped. It
-	 * wants to make sure the preempt-to-idle work doesn't run when
-	 * scheduling resumes, and uses this bit to inform its scheduler
-	 * and presumably us as well. Our job is to clear it for the next
-	 * preemption after reset, otherwise that and future preemptions
-	 * will never complete. We'll just clear it every time.
-	 */
-	stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
-
-	data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
-	data[1] = client->stage_id;
-	data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
-		  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
-	data[3] = engine->guc_id;
-	data[4] = guc->execbuf_client->priority;
-	data[5] = guc->execbuf_client->stage_id;
-	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
-
-	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
-		intel_write_status_page(engine,
-					I915_GEM_HWS_PREEMPT,
-					GUC_PREEMPT_NONE);
-		tasklet_schedule(&engine->execlists.tasklet);
-	}
-
-	(void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
-}
-
-/*
- * We're using user interrupt and HWSP value to mark that preemption has
- * finished and GPU is idle. Normally, we could unwind and continue similar to
- * execlists submission path. Unfortunately, with GuC we also need to wait for
- * it to finish its own postprocessing, before attempting to submit. Otherwise
- * GuC may silently ignore our submissions, and thus we risk losing request at
- * best, executing out-of-order and causing kernel panic at worst.
- */
-#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
-static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
-{
-	struct intel_guc *guc = &engine->i915->guc;
-	struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
-	struct guc_ctx_report *report =
-		&data->preempt_ctx_report[engine->guc_id];
-
-	if (wait_for_atomic(report->report_return_status ==
-			    INTEL_GUC_REPORT_STATUS_COMPLETE,
-			    GUC_PREEMPT_POSTPROCESS_DELAY_MS))
-		DRM_ERROR("Timed out waiting for GuC preemption report\n");
-
-	/*
-	 * GuC is expecting that we're also going to clear the affected context
-	 * counter, let's also reset the return status to not depend on GuC
-	 * resetting it after recieving another preempt action
-	 */
-	report->affected_count = 0;
-	report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
-}
-
-static void complete_preempt_context(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-
-	if (inject_preempt_hang(execlists))
-		return;
-
-	execlists_cancel_port_requests(execlists);
-	execlists_unwind_incomplete_requests(execlists);
-
-	wait_for_guc_preempt_report(engine);
-	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_NONE);
-}
-
 static void guc_submit(struct intel_engine_cs *engine,
 		       struct i915_request **out,
 		       struct i915_request **end)
@@ -707,16 +580,6 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static inline int effective_prio(const struct i915_request *rq)
-{
-	int prio = rq_prio(rq);
-
-	if (i915_request_has_nopreempt(rq))
-		prio = I915_PRIORITY_UNPREEMPTABLE;
-
-	return prio;
-}
-
 static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 {
 	trace_i915_request_in(rq, idx);
@@ -752,22 +615,6 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	lockdep_assert_held(&engine->active.lock);
 
 	if (last) {
-		if (intel_engine_has_preemption(engine)) {
-			struct guc_preempt_work *preempt_work =
-				&engine->i915->guc.preempt_work[engine->id];
-			int prio = execlists->queue_priority_hint;
-
-			if (i915_scheduler_need_preempt(prio,
-							effective_prio(last))) {
-				intel_write_status_page(engine,
-							I915_GEM_HWS_PREEMPT,
-							GUC_PREEMPT_INPROGRESS);
-				queue_work(engine->i915->guc.preempt_wq,
-					   &preempt_work->work);
-				return;
-			}
-		}
-
 		if (*++first)
 			return;
@@ -831,12 +678,7 @@ static void guc_submission_tasklet(unsigned long data)
 		memmove(execlists->inflight, port, rem * sizeof(*port));
 	}
 
-	if (intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
-	    GUC_PREEMPT_FINISHED)
-		complete_preempt_context(engine);
-
-	if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT))
-		__guc_dequeue(engine);
+	__guc_dequeue(engine);
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -857,16 +699,6 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	 * prevents the race.
 	 */
 	__tasklet_disable_sync_once(&execlists->tasklet);
-
-	/*
-	 * We're using worker to queue preemption requests from the tasklet in
-	 * GuC submission mode.
-	 * Even though tasklet was disabled, we may still have a worker queued.
-	 * Let's make sure that all workers scheduled before disabling the
-	 * tasklet are completed before continuing with the reset.
-	 */
-	if (engine->i915->guc.preempt_wq)
-		flush_workqueue(engine->i915->guc.preempt_wq);
 }
 
 static void guc_reset(struct intel_engine_cs *engine, bool stalled)
@@ -1123,7 +955,6 @@ static int guc_clients_create(struct intel_guc *guc)
 	struct intel_guc_client *client;
 
 	GEM_BUG_ON(guc->execbuf_client);
-	GEM_BUG_ON(guc->preempt_client);
 
 	client = guc_client_alloc(dev_priv,
 				  INTEL_INFO(dev_priv)->engine_mask,
@@ -1135,20 +966,6 @@ static int guc_clients_create(struct intel_guc *guc)
 	}
 	guc->execbuf_client = client;
 
-	if (dev_priv->preempt_context) {
-		client = guc_client_alloc(dev_priv,
-					  INTEL_INFO(dev_priv)->engine_mask,
-					  GUC_CLIENT_PRIORITY_KMD_HIGH,
-					  dev_priv->preempt_context);
-		if (IS_ERR(client)) {
-			DRM_ERROR("Failed to create GuC client for preemption!\n");
-			guc_client_free(guc->execbuf_client);
-			guc->execbuf_client = NULL;
-			return PTR_ERR(client);
-		}
-
-		guc->preempt_client = client;
-	}
-
 	return 0;
 }
@@ -1156,10 +973,6 @@ static void guc_clients_destroy(struct intel_guc *guc)
 {
 	struct intel_guc_client *client;
 
-	client = fetch_and_zero(&guc->preempt_client);
-	if (client)
-		guc_client_free(client);
-
 	client = fetch_and_zero(&guc->execbuf_client);
 	if (client)
 		guc_client_free(client);
@@ -1202,28 +1015,11 @@ static void __guc_client_disable(struct intel_guc_client *client)
 
 static int guc_clients_enable(struct intel_guc *guc)
 {
-	int ret;
-
-	ret = __guc_client_enable(guc->execbuf_client);
-	if (ret)
-		return ret;
-
-	if (guc->preempt_client) {
-		ret = __guc_client_enable(guc->preempt_client);
-		if (ret) {
-			__guc_client_disable(guc->execbuf_client);
-			return ret;
-		}
-	}
-
-	return 0;
+	return __guc_client_enable(guc->execbuf_client);
 }
 
 static void guc_clients_disable(struct intel_guc *guc)
 {
-	if (guc->preempt_client)
-		__guc_client_disable(guc->preempt_client);
-
 	if (guc->execbuf_client)
 		__guc_client_disable(guc->execbuf_client);
 }
@@ -1234,9 +1030,6 @@ static void guc_clients_disable(struct intel_guc *guc)
  */
 int intel_guc_submission_init(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	int ret;
 
 	if (guc->stage_desc_pool)
@@ -1256,11 +1049,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	if (ret)
 		goto err_pool;
 
-	for_each_engine(engine, dev_priv, id) {
-		guc->preempt_work[id].engine = engine;
-		INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
-	}
-
 	return 0;
 
 err_pool:
@@ -1270,13 +1058,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
 void intel_guc_submission_fini(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, dev_priv, id)
-		cancel_work_sync(&guc->preempt_work[id].work);
-
 	guc_clients_destroy(guc);
 	WARN_ON(!guc_verify_doorbells(guc));
...
@@ -103,13 +103,10 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
 /*
  * Basic client sanity check, handy to validate create_clients.
  */
-static int validate_client(struct intel_guc_client *client,
-			   int client_priority,
-			   bool is_preempt_client)
+static int validate_client(struct intel_guc_client *client, int client_priority)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
-	struct i915_gem_context *ctx_owner = is_preempt_client ?
-			dev_priv->preempt_context : dev_priv->kernel_context;
+	struct i915_gem_context *ctx_owner = dev_priv->kernel_context;
 
 	if (client->owner != ctx_owner ||
 	    client->engines != INTEL_INFO(dev_priv)->engine_mask ||
@@ -163,7 +160,7 @@ static int igt_guc_clients(void *args)
 	 */
 	guc_clients_disable(guc);
 	guc_clients_destroy(guc);
-	if (guc->execbuf_client || guc->preempt_client) {
+	if (guc->execbuf_client) {
 		pr_err("guc_clients_destroy lied!\n");
 		err = -EINVAL;
 		goto unlock;
@@ -177,24 +174,14 @@ static int igt_guc_clients(void *args)
 	GEM_BUG_ON(!guc->execbuf_client);
 
 	err = validate_client(guc->execbuf_client,
-			      GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+			      GUC_CLIENT_PRIORITY_KMD_NORMAL);
 	if (err) {
 		pr_err("execbug client validation failed\n");
 		goto out;
 	}
 
-	if (guc->preempt_client) {
-		err = validate_client(guc->preempt_client,
-				      GUC_CLIENT_PRIORITY_KMD_HIGH, true);
-		if (err) {
-			pr_err("preempt client validation failed\n");
-			goto out;
-		}
-	}
-
-	/* each client should now have reserved a doorbell */
-	if (!has_doorbell(guc->execbuf_client) ||
-	    (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
+	/* the client should now have reserved a doorbell */
+	if (!has_doorbell(guc->execbuf_client)) {
 		pr_err("guc_clients_create didn't reserve doorbells\n");
 		err = -EINVAL;
 		goto out;
@@ -204,8 +191,7 @@ static int igt_guc_clients(void *args)
 	guc_clients_enable(guc);
 
 	/* each client should now have received a doorbell */
-	if (!client_doorbell_in_sync(guc->execbuf_client) ||
-	    !client_doorbell_in_sync(guc->preempt_client)) {
+	if (!client_doorbell_in_sync(guc->execbuf_client)) {
 		pr_err("failed to initialize the doorbells\n");
 		err = -EINVAL;
 		goto out;
@@ -300,8 +286,7 @@ static int igt_guc_doorbells(void *arg)
 			goto out;
 		}
 
-		err = validate_client(clients[i],
-				      i % GUC_CLIENT_PRIORITY_NUM, false);
+		err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
 		if (err) {
 			pr_err("[%d] client_alloc sanity check failed!\n", i);
 			err = -EINVAL;
...
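
After the patch, the submission tasklet no longer polls the preemption marker in the hardware status page before dequeuing. Below is a condensed sketch of the resulting tasklet body, assembled from the new (right-hand) side of the guc_submission_tasklet hunk above; the inflight-port bookkeeping between the lock and the dequeue is elided:

	/*
	 * Condensed sketch of the post-patch tasklet, from the new side of
	 * the guc_submission_tasklet hunk above; not the complete function.
	 */
	static void guc_submission_tasklet(unsigned long data)
	{
		struct intel_engine_cs * const engine =
			(struct intel_engine_cs *)data;
		unsigned long flags;

		spin_lock_irqsave(&engine->active.lock, flags);
		/* ... retire completed inflight requests (elided) ... */
		__guc_dequeue(engine);	/* no HWS preempt-state checks anymore */
		spin_unlock_irqrestore(&engine->active.lock, flags);
	}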