Commit dc93c9b6 authored by Chris Wilson

drm/i915/gt: Schedule request retirement when signaler idles

Very similar to commit 4f88f874 ("drm/i915/gt: Schedule request
retirement when timeline idles"), but this time, instead of coupling into
the execlists CS event interrupt, we couple into the breadcrumb
interrupt and queue a timeline's retirement when its last signaler is
completed. This should allow us to park ringbuffer submission more
rapidly, and so help reduce power consumption on older systems.

v2: Fixup intel_engine_add_retire() to handle concurrent callers

References: 4f88f874 ("drm/i915/gt: Schedule request retirement when timeline idles")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191219124353.8607-1-chris@chris-wilson.co.uk
parent cabeacd4
@@ -29,6 +29,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 
 static void irq_enable(struct intel_engine_cs *engine)
 {
@@ -179,8 +180,11 @@ static void signal_irq_work(struct irq_work *work)
 		if (!list_is_first(pos, &ce->signals)) {
 			/* Advance the list to the first incomplete request */
 			__list_del_many(&ce->signals, pos);
-			if (&ce->signals == pos) /* now empty */
+			if (&ce->signals == pos) { /* now empty */
 				list_del_init(&ce->signal_link);
+				intel_engine_add_retire(ce->engine,
+							ce->timeline);
+			}
 		}
 	}
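Why the hand-off above: signal_irq_work() runs in hard-irq (irq_work)
context, where the mutexes needed to actually retire requests cannot be
taken, so the breadcrumb handler only queues the timeline and lets the
engine's retire worker (engine_retire() in the next hunk) do the part
that may sleep. A minimal sketch of this defer pattern; the struct and
function names here are illustrative, not the kernel's:

#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

struct engine_stub {
	struct work_struct retire_work;	/* process context: may sleep */
	struct irq_work breadcrumbs;	/* hard-irq context: must not */
};

static void retire_worker(struct work_struct *wrk)
{
	/* Safe to take the timeline mutex and free requests here. */
}

static void signal_irq(struct irq_work *wrk)
{
	struct engine_stub *e =
		container_of(wrk, struct engine_stub, breadcrumbs);

	/* ... signal completed requests, drop empty timelines ... */

	/* Defer the sleeping part to process context. */
	schedule_work(&e->retire_work);
}

static void engine_stub_init(struct engine_stub *e)
{
	INIT_WORK(&e->retire_work, retire_worker);
	init_irq_work(&e->breadcrumbs, signal_irq);
}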
@@ -62,19 +62,16 @@ static void engine_retire(struct work_struct *work)
 static bool add_retire(struct intel_engine_cs *engine,
 		       struct intel_timeline *tl)
 {
+#define STUB ((struct intel_timeline *)1)
 	struct intel_timeline *first;
 
 	/*
 	 * We open-code a llist here to include the additional tag [BIT(0)]
 	 * so that we know when the timeline is already on a
 	 * retirement queue: either this engine or another.
-	 *
-	 * However, we rely on that a timeline can only be active on a single
-	 * engine at any one time and that add_retire() is called before the
-	 * engine releases the timeline and transferred to another to retire.
 	 */
-	if (READ_ONCE(tl->retire)) /* already queued */
+	if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
 		return false;
 
 	intel_timeline_get(tl);
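The v2 note is about this cmpxchg: with plain READ_ONCE(), two callers
racing on the same timeline could both observe tl->retire == NULL and
queue it twice; the compare-exchange lets exactly one caller claim the
slot. A standalone userspace analogue of the open-coded, tagged llist,
using C11 atomics in place of the kernel's cmpxchg(); all names here
are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STUB ((struct node *)1)	/* non-NULL tag: claimed, link pending */

struct node {
	_Atomic(struct node *) next;	/* NULL = not queued; BIT(0) = tag */
	int id;
};

struct list {
	_Atomic(struct node *) head;
};

/* Returns true iff this call made the list non-empty (worker needed). */
static bool add_node(struct list *l, struct node *n)
{
	struct node *first, *expected = NULL;

	/* Only one concurrent caller may move next from NULL to STUB. */
	if (!atomic_compare_exchange_strong(&n->next, &expected, STUB))
		return false;	/* already queued */

	/* Lock-free push, keeping the tag bit set in the stored link. */
	first = atomic_load(&l->head);
	do {
		atomic_store(&n->next,
			     (struct node *)((uintptr_t)first | 1));
	} while (!atomic_compare_exchange_weak(&l->head, &first, n));

	return !first;
}

int main(void)
{
	struct list l = { NULL };
	struct node a = { .id = 1 }, b = { .id = 2 };

	printf("%d\n", add_node(&l, &a));	/* 1: list was empty */
	printf("%d\n", add_node(&l, &a));	/* 0: already queued */
	printf("%d\n", add_node(&l, &b));	/* 0: list already non-empty */
	return 0;
}

A consumer takes the whole list by exchanging the head with NULL, then
masks BIT(0) off each link before following it; packing and unpacking
that tag is what the driver's ptr_pack_bits()/ptr_unpack_bits() helpers
are for.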
@@ -4511,8 +4511,8 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
 	intel_engine_init_breadcrumbs(&ve->base);
 	intel_engine_init_execlists(&ve->base);
+	intel_engine_init_retire(&ve->base);
 
 	ve->base.cops = &virtual_context_ops;
 	ve->base.request_alloc = execlists_request_alloc;
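The virtual engine needs this hunk presumably because ce->engine in the
new breadcrumb path can point at a virtual engine, so its retire worker
must now be initialised alongside its breadcrumbs and execlists state.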