Commit 43a8f684 authored by Chris Wilson

drm/i915: Reorder struct_mutex-vs-reset_lock in i915_gem_fault()

Annoyingly, struct_mutex was not entirely eliminated from the reset
pathway; for reasons of its own, intel_display_resume() requires
struct_mutex to prepare the planes it already captured. To avoid the
immediate problem of a deadlock between the struct_mutex and the reset
srcu, we have to acquire the reset_lock before struct_mutex in
i915_gem_fault(). Now any wait underneath struct_mutex will result in us
having to forcibly reset all in-flight rendering; less than ideal, but
better than a deadlock (and it will do for the short term).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190221102924.13442-1-chris@chris-wilson.co.uk
parent 2a3902bd
@@ -1834,9 +1834,15 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	wakeref = intel_runtime_pm_get(dev_priv);
 
+	srcu = i915_reset_trylock(dev_priv);
+	if (srcu < 0) {
+		ret = srcu;
+		goto err_rpm;
+	}
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		goto err_rpm;
+		goto err_reset;
 
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -1885,12 +1891,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
-	srcu = i915_reset_trylock(dev_priv);
-	if (srcu < 0) {
-		ret = srcu;
-		goto err_fence;
-	}
-
 	/* Finally, remap it using the new GTT offset */
 	ret = remap_io_mapping(area,
 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
@@ -1898,7 +1898,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
 			       &ggtt->iomap);
 	if (ret)
-		goto err_reset;
+		goto err_fence;
 
 	/* Mark as being mmapped into userspace for later revocation */
 	assert_rpm_wakelock_held(dev_priv);
@@ -1908,14 +1908,14 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	i915_vma_set_ggtt_write(vma);
 
-err_reset:
-	i915_reset_unlock(dev_priv, srcu);
 err_fence:
 	i915_vma_unpin_fence(vma);
 err_unpin:
 	__i915_vma_unpin(vma);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
+err_reset:
+	i915_reset_unlock(dev_priv, srcu);
 err_rpm:
 	intel_runtime_pm_put(dev_priv, wakeref);
 	i915_gem_object_unpin_pages(obj);
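
Read end to end, the locking in i915_gem_fault() after this patch follows the order sketched below. This is a condensed reading aid assembled from the diff above, not a complete listing; the fault-handling body between acquisition and release (pinning, fencing and remapping the object) is elided.

	/* Acquire: runtime pm -> reset srcu -> struct_mutex */
	wakeref = intel_runtime_pm_get(dev_priv);

	srcu = i915_reset_trylock(dev_priv);		/* reset_lock is now taken first... */
	if (srcu < 0) {
		ret = srcu;
		goto err_rpm;
	}

	ret = i915_mutex_lock_interruptible(dev);	/* ...so struct_mutex nests inside it */
	if (ret)
		goto err_reset;

	/* ... pin the vma, set the fence and remap the page (elided) ... */

	/* Release in reverse order: struct_mutex -> reset srcu -> runtime pm */
	mutex_unlock(&dev->struct_mutex);
err_reset:
	i915_reset_unlock(dev_priv, srcu);
err_rpm:
	intel_runtime_pm_put(dev_priv, wakeref);

With this nesting, the fault handler never holds struct_mutex while waiting for the reset srcu, which is the deadlock the commit message describes; the trade-off is that a wait under struct_mutex may force all in-flight rendering to be reset.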