Commit 2913fa4d authored by Emil Renner Berthing, committed by Daniel Vetter

drm/i915/gt: use new tasklet API for execution list

This converts the driver to use the new tasklet API introduced in
commit 12cc923f ("tasklet: Introduce new initialization API")

v2: Fix up selftests/execlists.
Signed-off-by: Emil Renner Berthing <kernel@esmil.dk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20210126150155.1617-1-kernel@esmil.dk
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent a29a2291
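
For context, this is the conversion pattern applied throughout the diff below: the old API handed the callback an opaque unsigned long cookie, while the new API passes the tasklet_struct itself and the containing object is recovered with from_tasklet(). A minimal sketch of the two styles, assuming a hypothetical my_engine structure (illustrative only, not the actual i915 types):

#include <linux/interrupt.h>

/* Illustrative driver object embedding a tasklet (not an i915 struct). */
struct my_engine {
	struct tasklet_struct tasklet;
	/* ... other driver state ... */
};

/* Old-style callback: the opaque cookie is cast back to the object. */
static void my_callback_old(unsigned long data)
{
	struct my_engine *engine = (struct my_engine *)data;
	/* ... process engine ... */
}

/* New-style callback: the tasklet itself is passed in, and from_tasklet()
 * recovers the object that embeds it. */
static void my_callback_new(struct tasklet_struct *t)
{
	struct my_engine *engine = from_tasklet(engine, t, tasklet);
	/* ... process engine ... */
}

static void my_engine_init(struct my_engine *engine)
{
	/* Old: tasklet_init(&engine->tasklet, my_callback_old, (unsigned long)engine); */
	tasklet_setup(&engine->tasklet, my_callback_new);
}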
@@ -1220,14 +1220,14 @@ void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
 
-	if (!t->func)
+	if (!t->callback)
 		return;
 
 	local_bh_disable();
 	if (tasklet_trylock(t)) {
 		/* Must wait for any GPU reset in progress. */
 		if (__tasklet_is_enabled(t))
-			t->func(t->data);
+			t->callback(t);
 		tasklet_unlock(t);
 	}
 	local_bh_enable();
...
@@ -2334,9 +2334,10 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-static void execlists_submission_tasklet(unsigned long data)
+static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs * const engine =
+		from_tasklet(engine, t, execlists.tasklet);
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -2914,9 +2915,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	rcu_read_unlock();
 }
 
-static void nop_submission_tasklet(unsigned long data)
+static void nop_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs * const engine =
+		from_tasklet(engine, t, execlists.tasklet);
 
 	/* The driver is wedged; don't process any more events. */
 	WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
@@ -3000,7 +3002,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	execlists->queue = RB_ROOT_CACHED;
 
 	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
-	execlists->tasklet.func = nop_submission_tasklet;
+	execlists->tasklet.callback = nop_submission_tasklet;
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 	rcu_read_unlock();
@@ -3061,7 +3063,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = execlists_submit_request;
 	engine->schedule = i915_schedule;
-	engine->execlists.tasklet.func = execlists_submission_tasklet;
+	engine->execlists.tasklet.callback = execlists_submission_tasklet;
 
 	engine->reset.prepare = execlists_reset_prepare;
 	engine->reset.rewind = execlists_reset_rewind;
@@ -3184,8 +3186,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	struct intel_uncore *uncore = engine->uncore;
 	u32 base = engine->mmio_base;
 
-	tasklet_init(&engine->execlists.tasklet,
-		     execlists_submission_tasklet, (unsigned long)engine);
+	tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
 	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
 
@@ -3427,9 +3428,10 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 	return mask;
 }
 
-static void virtual_submission_tasklet(unsigned long data)
+static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
-	struct virtual_engine * const ve = (struct virtual_engine *)data;
+	struct virtual_engine * const ve =
+		from_tasklet(ve, t, base.execlists.tasklet);
 	const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
 	intel_engine_mask_t mask;
 	unsigned int n;
@@ -3639,9 +3641,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 	INIT_LIST_HEAD(virtual_queue(ve));
 	ve->base.execlists.queue_priority_hint = INT_MIN;
-	tasklet_init(&ve->base.execlists.tasklet,
-		     virtual_submission_tasklet,
-		     (unsigned long)ve);
+	tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
 
 	intel_context_init(&ve->context, &ve->base);
 
@@ -3669,7 +3669,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		 * layering if we handle cloning of the requests and
 		 * submitting a copy into each backend.
 		 */
-		if (sibling->execlists.tasklet.func !=
+		if (sibling->execlists.tasklet.callback !=
 		    execlists_submission_tasklet) {
 			err = -ENODEV;
 			goto err_put;
...
@@ -608,7 +608,7 @@ static int live_hold_reset(void *arg)
 	}
 
 	tasklet_disable(&engine->execlists.tasklet);
-	engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+	engine->execlists.tasklet.callback(&engine->execlists.tasklet);
 	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 	i915_request_get(rq);
@@ -4594,7 +4594,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
 	}
 
 	tasklet_disable(&engine->execlists.tasklet);
-	engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+	engine->execlists.tasklet.callback(&engine->execlists.tasklet);
 	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 	/* Fake a preemption event; failed of course */
...
@@ -237,9 +237,10 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	execlists->active = execlists->inflight;
 }
 
-static void guc_submission_tasklet(unsigned long data)
+static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs * const engine =
+		from_tasklet(engine, t, execlists.tasklet);
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
@@ -608,7 +609,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = guc_submit_request;
 	engine->schedule = i915_schedule;
-	engine->execlists.tasklet.func = guc_submission_tasklet;
+	engine->execlists.tasklet.callback = guc_submission_tasklet;
 
 	engine->reset.prepare = guc_reset_prepare;
 	engine->reset.rewind = guc_reset_rewind;
@@ -700,8 +701,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 	 */
 	GEM_BUG_ON(INTEL_GEN(i915) < 11);
 
-	tasklet_init(&engine->execlists.tasklet,
-		     guc_submission_tasklet, (unsigned long)engine);
+	tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
 
 	guc_default_vfuncs(engine);
 	guc_default_irqs(engine);
...
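
A note for readers unfamiliar with from_tasklet(), used by all of the converted callbacks above: it is a thin container_of() wrapper from <linux/interrupt.h>, roughly:

#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

So from_tasklet(engine, t, execlists.tasklet) resolves the intel_engine_cs that embeds the tasklet, replacing the old (unsigned long)engine cookie. This is also why the selftests can now invoke the handler directly as t->callback(t) rather than t->func(t->data).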