Commit c6dce8f1 authored by Sagar Arun Kamble, committed by Chris Wilson

drm/i915: Update execlists tasklet naming

intel_lrc_irq_handler and i915_guc_irq_handler are HW submission related
tasklet functions. Name them with "submission_tasklet" suffix and
remove intel/i915 prefix as they are static. Also rename irq_tasklet
as just tasklet for clarity.

v2: s/_bh/_tasklet (Chris)
Suggested-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1510839162-25197-2-git-send-email-sagar.a.kamble@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent d710fc16
...@@ -2933,13 +2933,13 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) ...@@ -2933,13 +2933,13 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Prevent request submission to the hardware until we have * Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request * completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request * is completed by one engine, it may then queue a request
* to a second via its engine->irq_tasklet *just* as we are * to a second via its execlists->tasklet *just* as we are
* calling engine->init_hw() and also writing the ELSP. * calling engine->init_hw() and also writing the ELSP.
* Turning off the engine->irq_tasklet until the reset is over * Turning off the execlists->tasklet until the reset is over
* prevents the race. * prevents the race.
*/ */
tasklet_kill(&engine->execlists.irq_tasklet); tasklet_kill(&engine->execlists.tasklet);
tasklet_disable(&engine->execlists.irq_tasklet); tasklet_disable(&engine->execlists.tasklet);
/* /*
* We're using worker to queue preemption requests from the tasklet in * We're using worker to queue preemption requests from the tasklet in
...@@ -3128,7 +3128,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) ...@@ -3128,7 +3128,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{ {
tasklet_enable(&engine->execlists.irq_tasklet); tasklet_enable(&engine->execlists.tasklet);
kthread_unpark(engine->breadcrumbs.signaler); kthread_unpark(engine->breadcrumbs.signaler);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
......
...@@ -650,7 +650,7 @@ static void inject_preempt_context(struct work_struct *work) ...@@ -650,7 +650,7 @@ static void inject_preempt_context(struct work_struct *work)
if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) { if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
execlists_clear_active(&engine->execlists, execlists_clear_active(&engine->execlists,
EXECLISTS_ACTIVE_PREEMPT); EXECLISTS_ACTIVE_PREEMPT);
tasklet_schedule(&engine->execlists.irq_tasklet); tasklet_schedule(&engine->execlists.tasklet);
} }
} }
...@@ -799,7 +799,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine) ...@@ -799,7 +799,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock); spin_unlock_irq(&engine->timeline->lock);
} }
static void i915_guc_irq_handler(unsigned long data) static void guc_submission_tasklet(unsigned long data)
{ {
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_engine_execlists * const execlists = &engine->execlists;
...@@ -1439,7 +1439,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) ...@@ -1439,7 +1439,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) { for_each_engine(engine, dev_priv, id) {
struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->irq_tasklet.func = i915_guc_irq_handler; execlists->tasklet.func = guc_submission_tasklet;
engine->park = i915_guc_submission_park; engine->park = i915_guc_submission_park;
engine->unpark = i915_guc_submission_unpark; engine->unpark = i915_guc_submission_unpark;
} }
......
...@@ -1404,7 +1404,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) ...@@ -1404,7 +1404,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
} }
if (tasklet) if (tasklet)
tasklet_hi_schedule(&execlists->irq_tasklet); tasklet_hi_schedule(&execlists->tasklet);
} }
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
......
...@@ -1585,7 +1585,7 @@ void intel_engines_park(struct drm_i915_private *i915) ...@@ -1585,7 +1585,7 @@ void intel_engines_park(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) { for_each_engine(engine, i915, id) {
/* Flush the residual irq tasklets first. */ /* Flush the residual irq tasklets first. */
intel_engine_disarm_breadcrumbs(engine); intel_engine_disarm_breadcrumbs(engine);
tasklet_kill(&engine->execlists.irq_tasklet); tasklet_kill(&engine->execlists.tasklet);
/* /*
* We are committed now to parking the engines, make sure there * We are committed now to parking the engines, make sure there
......
...@@ -781,7 +781,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) ...@@ -781,7 +781,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* Check the unread Context Status Buffers and manage the submission of new * Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly. * contexts to the ELSP accordingly.
*/ */
static void intel_lrc_irq_handler(unsigned long data) static void execlists_submission_tasklet(unsigned long data)
{ {
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_engine_execlists * const execlists = &engine->execlists;
...@@ -947,7 +947,7 @@ static void insert_request(struct intel_engine_cs *engine, ...@@ -947,7 +947,7 @@ static void insert_request(struct intel_engine_cs *engine,
list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests); list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
if (ptr_unmask_bits(p, 1)) if (ptr_unmask_bits(p, 1))
tasklet_hi_schedule(&engine->execlists.irq_tasklet); tasklet_hi_schedule(&engine->execlists.tasklet);
} }
static void execlists_submit_request(struct drm_i915_gem_request *request) static void execlists_submit_request(struct drm_i915_gem_request *request)
...@@ -1503,7 +1503,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) ...@@ -1503,7 +1503,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
/* After a GPU reset, we may have requests to replay */ /* After a GPU reset, we may have requests to replay */
if (execlists->first) if (execlists->first)
tasklet_schedule(&execlists->irq_tasklet); tasklet_schedule(&execlists->tasklet);
return 0; return 0;
} }
...@@ -1881,8 +1881,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) ...@@ -1881,8 +1881,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
* Tasklet cannot be active at this point due intel_mark_active/idle * Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation. * so this is just for documentation.
*/ */
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state))) if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
tasklet_kill(&engine->execlists.irq_tasklet); &engine->execlists.tasklet.state)))
tasklet_kill(&engine->execlists.tasklet);
dev_priv = engine->i915; dev_priv = engine->i915;
...@@ -1906,7 +1907,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine) ...@@ -1906,7 +1907,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = execlists_submit_request; engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests; engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule; engine->schedule = execlists_schedule;
engine->execlists.irq_tasklet.func = intel_lrc_irq_handler; engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->park = NULL; engine->park = NULL;
engine->unpark = NULL; engine->unpark = NULL;
...@@ -1968,8 +1969,8 @@ logical_ring_setup(struct intel_engine_cs *engine) ...@@ -1968,8 +1969,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
engine->execlists.fw_domains = fw_domains; engine->execlists.fw_domains = fw_domains;
tasklet_init(&engine->execlists.irq_tasklet, tasklet_init(&engine->execlists.tasklet,
intel_lrc_irq_handler, (unsigned long)engine); execlists_submission_tasklet, (unsigned long)engine);
logical_ring_default_vfuncs(engine); logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine); logical_ring_default_irqs(engine);
......
...@@ -193,9 +193,9 @@ struct i915_priolist { ...@@ -193,9 +193,9 @@ struct i915_priolist {
*/ */
struct intel_engine_execlists { struct intel_engine_execlists {
/** /**
* @irq_tasklet: softirq tasklet for bottom handler * @tasklet: softirq tasklet for bottom handler
*/ */
struct tasklet_struct irq_tasklet; struct tasklet_struct tasklet;
/** /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL * @default_priolist: priority list for I915_PRIORITY_NORMAL
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment