Commit c0e60347 authored by Chris Wilson

drm/i915/gt: Hold rpm wakeref before taking ggtt->vm.mutex

We need to hold the runtime-pm wakeref to update the global PTEs (as
they exist behind a PCI BAR). However, some systems invoke ACPI during
runtime resume and so require allocations, which is verboten inside the
vm->mutex. Ergo, we must not use intel_runtime_pm_get() inside the
mutex, but lift the call outside.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/958
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200110144418.1415639-1-chris@chris-wilson.co.uk
parent 8cbf89db
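The heart of the change is an ordering rule: acquire the runtime-pm wakeref first, then take vm->mutex. The following is a minimal kernel-style sketch of that ordering, for illustration only; bind_global_sketch() is a hypothetical helper and is not part of the patch, whose real callers are i915_vma_pin() and i915_vma_unbind() in the diff below.

/*
 * Minimal sketch of the locking order this patch enforces.
 * Hypothetical helper for illustration; not code from the patch.
 */
static int bind_global_sketch(struct i915_address_space *vm, bool global)
{
	intel_wakeref_t wakeref = 0;
	int err;

	/*
	 * Runtime resume may invoke ACPI and allocate memory, so take the
	 * wakeref before the mutex.
	 */
	if (global)
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	/* No more allocations allowed once we hold vm->mutex. */
	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		goto out_rpm;

	/* ... update the global PTEs behind the PCI BAR ... */

	mutex_unlock(&vm->mutex);
out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}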
@@ -430,9 +430,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 flags)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
 	struct drm_i915_gem_object *obj = vma->obj;
-	intel_wakeref_t wakeref;
 	u32 pte_flags;
 
 	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -440,7 +438,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	if (i915_gem_object_is_readonly(obj))
 		pte_flags |= PTE_READ_ONLY;
 
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 
 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -457,10 +454,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-	intel_wakeref_t wakeref;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 }
@@ -571,7 +564,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				  enum i915_cache_level cache_level,
 				  u32 flags)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
 	u32 pte_flags;
 	int ret;
@@ -599,27 +591,17 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 					  cache_level, pte_flags);
 	}
 
-	if (flags & I915_VMA_GLOBAL_BIND) {
-		intel_wakeref_t wakeref;
-
-		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-			vma->vm->insert_entries(vma->vm, vma,
-						cache_level, pte_flags);
-		}
-	}
+	if (flags & I915_VMA_GLOBAL_BIND)
+		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 
 	return 0;
 }
 
 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_i915_private *i915 = vma->vm->i915;
-
 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
 		struct i915_address_space *vm = vma->vm;
-		intel_wakeref_t wakeref;
 
-		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-			vm->clear_range(vm, vma->node.start, vma->size);
+		vm->clear_range(vm, vma->node.start, vma->size);
 	}
...
@@ -858,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma)
 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct i915_vma_work *work = NULL;
+	intel_wakeref_t wakeref = 0;
 	unsigned int bound;
 	int err;
@@ -883,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 	}
 
+	if (flags & PIN_GLOBAL)
+		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
 	/* No more allocations allowed once we hold vm->mutex */
 	err = mutex_lock_interruptible(&vma->vm->mutex);
 	if (err)
@@ -946,6 +950,8 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 err_fence:
 	if (work)
 		dma_fence_work_commit(&work->base);
+	if (wakeref)
+		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
 err_pages:
 	vma_put_pages(vma);
 	return err;
@@ -1246,11 +1252,16 @@ int __i915_vma_unbind(struct i915_vma *vma)
 int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct i915_address_space *vm = vma->vm;
+	intel_wakeref_t wakeref = 0;
 	int err;
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
+	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+		/* XXX not always required: nop_clear_range */
+		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
 	err = mutex_lock_interruptible(&vm->mutex);
 	if (err)
 		return err;
@@ -1258,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 	err = __i915_vma_unbind(vma);
 	mutex_unlock(&vm->mutex);
 
+	if (wakeref)
+		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
 	return err;
 }
...