Commit e9037e7f authored by Chris Wilson's avatar Chris Wilson

drm/i915: Extend intel_wakeref to support delayed puts

In some cases we want to hold onto the wakeref for a little after the
last user so that we can avoid having to drop and then immediately
reacquire it. Allow the last user to specify if they would like to keep
the wakeref alive for a short hysteresis.

v2: Embrace bitfield.h for adjustable flags.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200323103221.14444-1-chris@chris-wilson.co.uk
parent 45d41739
...@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine) ...@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
intel_wakeref_put_async(&engine->wakeref); intel_wakeref_put_async(&engine->wakeref);
} }
/*
 * Drop the engine's power-management wakeref, but keep it alive for @delay
 * before the release actually happens (delegates to intel_wakeref_put_delay()).
 * NOTE(review): @delay is presumably in jiffies, since it ultimately feeds
 * mod_delayed_work() — confirm against intel_wakeref_put_delay()'s contract.
 */
static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
unsigned long delay)
{
intel_wakeref_put_delay(&engine->wakeref, delay);
}
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine) static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{ {
intel_wakeref_unlock_wait(&engine->wakeref); intel_wakeref_unlock_wait(&engine->wakeref);
......
...@@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt) ...@@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt)
for_each_engine(engine, gt, id) { for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine); intel_engine_flush_submission(engine);
active |= flush_work(&engine->retire_work); active |= flush_work(&engine->retire_work);
active |= flush_work(&engine->wakeref.work); active |= flush_delayed_work(&engine->wakeref.work);
} }
return active; return active;
......
...@@ -70,11 +70,12 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf) ...@@ -70,11 +70,12 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{ {
INTEL_WAKEREF_BUG_ON(work_pending(&wf->work)); INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
/* Assume we are not in process context and so cannot sleep. */ /* Assume we are not in process context and so cannot sleep. */
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) { if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
schedule_work(&wf->work); mod_delayed_work(system_wq, &wf->work,
FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
return; return;
} }
...@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) ...@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
static void __intel_wakeref_put_work(struct work_struct *wrk) static void __intel_wakeref_put_work(struct work_struct *wrk)
{ {
struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work); struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
if (atomic_add_unless(&wf->count, -1, 1)) if (atomic_add_unless(&wf->count, -1, 1))
return; return;
...@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf, ...@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
atomic_set(&wf->count, 0); atomic_set(&wf->count, 0);
wf->wakeref = 0; wf->wakeref = 0;
INIT_WORK(&wf->work, __intel_wakeref_put_work); INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0); lockdep_init_map(&wf->work.work.lockdep_map,
"wakeref.work", &key->work, 0);
} }
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#define INTEL_WAKEREF_H #define INTEL_WAKEREF_H
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h> #include <linux/bits.h>
#include <linux/lockdep.h> #include <linux/lockdep.h>
#include <linux/mutex.h> #include <linux/mutex.h>
...@@ -41,7 +42,7 @@ struct intel_wakeref { ...@@ -41,7 +42,7 @@ struct intel_wakeref {
struct intel_runtime_pm *rpm; struct intel_runtime_pm *rpm;
const struct intel_wakeref_ops *ops; const struct intel_wakeref_ops *ops;
struct work_struct work; struct delayed_work work;
}; };
struct intel_wakeref_lockclass { struct intel_wakeref_lockclass {
...@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) ...@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
return atomic_inc_not_zero(&wf->count); return atomic_inc_not_zero(&wf->count);
} }
/*
 * Bit positions within the flags word passed to __intel_wakeref_put():
 * bit 0 requests an asynchronous put; every bit above it is used as a
 * field holding the requested release delay (see INTEL_WAKEREF_PUT_DELAY).
 */
enum {
INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
/* First bit past the flag bits; marks where the delay field begins. */
__INTEL_WAKEREF_PUT_LAST_BIT__
};
/** /**
* intel_wakeref_put_flags: Release the wakeref * intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref * @wf: the wakeref
...@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) ...@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
*/ */
static inline void static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags) __intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
#define INTEL_WAKEREF_PUT_ASYNC BIT(0) #define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{ {
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_add_unless(&wf->count, -1, 1))) if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
...@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf) ...@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC); __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
} }
/*
 * Release the wakeref asynchronously, deferring the final release work by
 * @delay. The delay is packed into the upper bits of the flags word via
 * FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, ...), and unpacked by the put path
 * with FIELD_GET() before being handed to mod_delayed_work().
 */
static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
__intel_wakeref_put(wf,
INTEL_WAKEREF_PUT_ASYNC |
FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
/** /**
* intel_wakeref_lock: Lock the wakeref (mutex) * intel_wakeref_lock: Lock the wakeref (mutex)
* @wf: the wakeref * @wf: the wakeref
...@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf) ...@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{ {
mutex_lock(&wf->mutex); mutex_lock(&wf->mutex);
mutex_unlock(&wf->mutex); mutex_unlock(&wf->mutex);
flush_work(&wf->work); flush_delayed_work(&wf->work);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment