Commit 538ef96b authored by Chris Wilson

drm/i915/gem: Track the rpm wakerefs

Keep track of the temporary rpm wakerefs used for user access to the
device, so that we can cancel them upon release and clearly identify any
leaks.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-10-chris@chris-wilson.co.uk
parent 3055f0cd
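The diff below applies one mechanical pattern throughout the GEM paths: every intel_runtime_pm_get() now stores the cookie it returns in a local intel_wakeref_t, and the matching release hands that cookie back via intel_runtime_pm_put() instead of calling the unchecked variant. As a minimal sketch of the calling convention (illustrative only; this helper is hypothetical and not part of the patch):

static void example_tracked_access(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;

        /* Take a runtime-pm reference and keep the cookie it returns. */
        wakeref = intel_runtime_pm_get(i915);

        /* ... touch the hardware while the device is held awake ... */

        /*
         * Return the same cookie on release so the wakeref tracking can
         * pair the acquire with the put and report any leak.
         */
        intel_runtime_pm_put(i915, wakeref);
}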
@@ -785,6 +785,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 {
+intel_wakeref_t wakeref;
 /*
 * No actual flushing is required for the GTT write domain for reads
 * from the GTT domain. Writes to it "immediately" go to main memory
@@ -811,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 i915_gem_chipset_flush(dev_priv);
-intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(dev_priv);
 spin_lock_irq(&dev_priv->uncore.lock);
 POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
 spin_unlock_irq(&dev_priv->uncore.lock);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put(dev_priv, wakeref);
 }
 static void
@@ -1069,6 +1071,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 {
 struct drm_i915_private *i915 = to_i915(obj->base.dev);
 struct i915_ggtt *ggtt = &i915->ggtt;
+intel_wakeref_t wakeref;
 struct drm_mm_node node;
 struct i915_vma *vma;
 void __user *user_data;
@@ -1079,7 +1082,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 if (ret)
 return ret;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 PIN_MAPPABLE |
 PIN_NONFAULT |
@@ -1152,7 +1155,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 i915_vma_unpin(vma);
 }
 out_unlock:
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return ret;
@@ -1253,6 +1256,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 {
 struct drm_i915_private *i915 = to_i915(obj->base.dev);
 struct i915_ggtt *ggtt = &i915->ggtt;
+intel_wakeref_t wakeref;
 struct drm_mm_node node;
 struct i915_vma *vma;
 u64 remain, offset;
@@ -1271,13 +1275,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 * This easily dwarfs any performance advantage from
 * using the cache bypass of indirect GGTT access.
 */
-if (!intel_runtime_pm_get_if_in_use(i915)) {
+wakeref = intel_runtime_pm_get_if_in_use(i915);
+if (!wakeref) {
 ret = -EFAULT;
 goto out_unlock;
 }
 } else {
 /* No backing pages, no fallback, we must force GGTT access */
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 }
 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1359,7 +1364,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 i915_vma_unpin(vma);
 }
 out_rpm:
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 out_unlock:
 mutex_unlock(&i915->drm.struct_mutex);
 return ret;
@@ -1864,6 +1869,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 struct drm_i915_private *dev_priv = to_i915(dev);
 struct i915_ggtt *ggtt = &dev_priv->ggtt;
 bool write = area->vm_flags & VM_WRITE;
+intel_wakeref_t wakeref;
 struct i915_vma *vma;
 pgoff_t page_offset;
 int ret;
@@ -1893,7 +1899,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 if (ret)
 goto err;
-intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(dev_priv);
 ret = i915_mutex_lock_interruptible(dev);
 if (ret)
@@ -1971,7 +1977,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 err_unlock:
 mutex_unlock(&dev->struct_mutex);
 err_rpm:
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put(dev_priv, wakeref);
 i915_gem_object_unpin_pages(obj);
 err:
 switch (ret) {
@@ -2044,6 +2050,7 @@ void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
 struct drm_i915_private *i915 = to_i915(obj->base.dev);
+intel_wakeref_t wakeref;
 /* Serialisation between user GTT access and our code depends upon
 * revoking the CPU's PTE whilst the mutex is held. The next user
@@ -2054,7 +2061,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 * wakeref.
 */
 lockdep_assert_held(&i915->drm.struct_mutex);
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 if (!obj->userfault_count)
 goto out;
@@ -2071,7 +2078,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 wmb();
 out:
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 }
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@@ -4706,8 +4713,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 struct llist_node *freed)
 {
 struct drm_i915_gem_object *obj, *on;
+intel_wakeref_t wakeref;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 llist_for_each_entry_safe(obj, on, freed, freed) {
 struct i915_vma *vma, *vn;
@@ -4768,7 +4776,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 if (on)
 cond_resched();
 }
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 }
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@@ -4877,11 +4885,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
+intel_wakeref_t wakeref;
 GEM_TRACE("\n");
 mutex_lock(&i915->drm.struct_mutex);
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
 /*
@@ -4904,7 +4914,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 intel_engines_sanitize(i915, false);
 intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 i915_gem_contexts_lost(i915);
 mutex_unlock(&i915->drm.struct_mutex);
@@ -4912,11 +4922,12 @@
 int i915_gem_suspend(struct drm_i915_private *i915)
 {
+intel_wakeref_t wakeref;
 int ret;
 GEM_TRACE("\n");
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 intel_suspend_gt_powersave(i915);
 mutex_lock(&i915->drm.struct_mutex);
@@ -4968,12 +4979,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 if (WARN_ON(!intel_engines_are_idle(i915)))
 i915_gem_set_wedged(i915); /* no hope, discard everything */
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 return 0;
 err_unlock:
 mutex_unlock(&i915->drm.struct_mutex);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 return ret;
 }
......
@@ -2202,6 +2202,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 struct i915_execbuffer eb;
 struct dma_fence *in_fence = NULL;
 struct sync_file *out_fence = NULL;
+intel_wakeref_t wakeref;
 int out_fence_fd = -1;
 int err;
@@ -2272,7 +2273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 * wakeref that we hold until the GPU has been idle for at least
 * 100ms.
 */
-intel_runtime_pm_get(eb.i915);
+wakeref = intel_runtime_pm_get(eb.i915);
 err = i915_mutex_lock_interruptible(dev);
 if (err)
@@ -2424,7 +2425,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 eb_release_vmas(&eb);
 mutex_unlock(&dev->struct_mutex);
 err_rpm:
-intel_runtime_pm_put_unchecked(eb.i915);
+intel_runtime_pm_put(eb.i915, wakeref);
 i915_gem_context_put(eb.ctx);
 err_destroy:
 eb_destroy(&eb);
......
@@ -209,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
 static int fence_update(struct drm_i915_fence_reg *fence,
 struct i915_vma *vma)
 {
+intel_wakeref_t wakeref;
 int ret;
 if (vma) {
@@ -256,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 * If the device is currently powered down, we will defer the write
 * to the runtime resume, see i915_gem_restore_fences().
 */
-if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+if (wakeref) {
 fence_write(fence, vma);
-intel_runtime_pm_put_unchecked(fence->i915);
+intel_runtime_pm_put(fence->i915, wakeref);
 }
 if (vma) {
......
@@ -2527,6 +2527,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 {
 struct drm_i915_private *i915 = vma->vm->i915;
 struct drm_i915_gem_object *obj = vma->obj;
+intel_wakeref_t wakeref;
 u32 pte_flags;
 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -2534,9 +2535,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 if (i915_gem_object_is_readonly(obj))
 pte_flags |= PTE_READ_ONLY;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -2553,10 +2554,11 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
 struct drm_i915_private *i915 = vma->vm->i915;
+intel_wakeref_t wakeref;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 }
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
@@ -2588,9 +2590,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 }
 if (flags & I915_VMA_GLOBAL_BIND) {
-intel_runtime_pm_get(i915);
+intel_wakeref_t wakeref;
+wakeref = intel_runtime_pm_get(i915);
 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 }
 return 0;
@@ -2601,9 +2605,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 struct drm_i915_private *i915 = vma->vm->i915;
 if (vma->flags & I915_VMA_GLOBAL_BIND) {
-intel_runtime_pm_get(i915);
+intel_wakeref_t wakeref;
+wakeref = intel_runtime_pm_get(i915);
 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 }
 if (vma->flags & I915_VMA_LOCAL_BIND) {
......
@@ -153,6 +153,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 { &i915->mm.bound_list, I915_SHRINK_BOUND },
 { NULL, 0 },
 }, *phase;
+intel_wakeref_t wakeref = 0;
 unsigned long count = 0;
 unsigned long scanned = 0;
 bool unlock;
@@ -182,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
 * device just to recover a little memory. If absolutely necessary,
 * we will force the wake during oom-notifier.
 */
-if ((flags & I915_SHRINK_BOUND) &&
-!intel_runtime_pm_get_if_in_use(i915))
-flags &= ~I915_SHRINK_BOUND;
+if (flags & I915_SHRINK_BOUND) {
+wakeref = intel_runtime_pm_get_if_in_use(i915);
+if (!wakeref)
+flags &= ~I915_SHRINK_BOUND;
+}
 /*
 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -265,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 }
 if (flags & I915_SHRINK_BOUND)
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 i915_retire_requests(i915);
@@ -292,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
 */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 {
+intel_wakeref_t wakeref;
 unsigned long freed;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 freed = i915_gem_shrink(i915, -1UL, NULL,
 I915_SHRINK_BOUND |
 I915_SHRINK_UNBOUND |
 I915_SHRINK_ACTIVE);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 return freed;
 }
@@ -370,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 I915_SHRINK_BOUND |
 I915_SHRINK_UNBOUND);
 if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
-intel_runtime_pm_get(i915);
+intel_wakeref_t wakeref;
+wakeref = intel_runtime_pm_get(i915);
 freed += i915_gem_shrink(i915,
 sc->nr_to_scan - sc->nr_scanned,
 &sc->nr_scanned,
 I915_SHRINK_ACTIVE |
 I915_SHRINK_BOUND |
 I915_SHRINK_UNBOUND);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 }
 shrinker_unlock(i915, unlock);
@@ -392,12 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 container_of(nb, struct drm_i915_private, mm.oom_notifier);
 struct drm_i915_gem_object *obj;
 unsigned long unevictable, bound, unbound, freed_pages;
+intel_wakeref_t wakeref;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 freed_pages = i915_gem_shrink(i915, -1UL, NULL,
 I915_SHRINK_BOUND |
 I915_SHRINK_UNBOUND);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 /* Because we may be allocating inside our own driver, we cannot
 * assert that there are no objects with pinned pages that are not
@@ -435,6 +442,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 container_of(nb, struct drm_i915_private, mm.vmap_notifier);
 struct i915_vma *vma, *next;
 unsigned long freed_pages = 0;
+intel_wakeref_t wakeref;
 bool unlock;
 if (!shrinker_lock(i915, 0, &unlock))
@@ -446,12 +454,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 MAX_SCHEDULE_TIMEOUT))
 goto out;
-intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(i915);
 freed_pages += i915_gem_shrink(i915, -1UL, NULL,
 I915_SHRINK_BOUND |
 I915_SHRINK_UNBOUND |
 I915_SHRINK_VMAPS);
-intel_runtime_pm_put_unchecked(i915);
+intel_runtime_pm_put(i915, wakeref);
 /* We also want to clear any cached iomaps as they wrap vmap */
 list_for_each_entry_safe(vma, next,
......
@@ -913,10 +913,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 static bool ring_is_idle(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *dev_priv = engine->i915;
+intel_wakeref_t wakeref;
 bool idle = true;
 /* If the whole device is asleep, the engine must be idle */
-if (!intel_runtime_pm_get_if_in_use(dev_priv))
+wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+if (!wakeref)
 return true;
 /* First check that no commands are left in the ring */
@@ -928,7 +930,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
 idle = false;
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put(dev_priv, wakeref);
 return idle;
 }
@@ -1425,6 +1427,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 const struct intel_engine_execlists * const execlists = &engine->execlists;
 struct i915_gpu_error * const error = &engine->i915->gpu_error;
 struct i915_request *rq, *last;
+intel_wakeref_t wakeref;
 unsigned long flags;
 struct rb_node *rb;
 int count;
@@ -1483,9 +1486,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 rcu_read_unlock();
-if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+if (wakeref) {
 intel_engine_print_registers(engine, m);
-intel_runtime_pm_put_unchecked(engine->i915);
+intel_runtime_pm_put(engine->i915, wakeref);
 } else {
 drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 }
......
@@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 struct drm_i915_private *dev_priv = to_i915(dev);
 struct drm_i915_reg_read *reg = data;
 struct reg_whitelist const *entry;
+intel_wakeref_t wakeref;
 unsigned int flags;
 int remain;
 int ret = 0;
@@ -1695,7 +1696,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 flags = reg->offset & (entry->size - 1);
-intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(dev_priv);
 if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
 reg->val = I915_READ64_2x32(entry->offset_ldw,
 entry->offset_udw);
@@ -1709,7 +1710,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 reg->val = I915_READ8(entry->offset_ldw);
 else
 ret = -EINVAL;
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put(dev_priv, wakeref);
 return ret;
 }
......
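Several hunks above (fence_update(), i915_gem_shrink(), ring_is_idle(), intel_engine_dump()) use the opportunistic intel_runtime_pm_get_if_in_use() instead, where the returned cookie doubles as the "did we actually take a reference" flag. A sketch of that pattern, again illustrative rather than lifted from the patch:

static bool example_access_if_awake(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get_if_in_use(i915);
        if (!wakeref)
                return false; /* device is suspended; skip the access */

        /* ... read or write registers knowing the device is awake ... */

        intel_runtime_pm_put(i915, wakeref);
        return true;
}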