Commit aa5e4453 authored by Chris Wilson

drm/i915/gem: Try to flush pending unbind events

If we cannot handle a vma within the unbind loop, try to flush the
pending events (i915_vma_parked, i915_vm_release) and try again. This
avoids a round trip to userspace that is not guaranteed to make forward
progress, as the events we wait upon require being idle.

References: cb6c3d45 ("drm/i915/gem: Avoid parking the vma as we unbind")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191204123556.3740002-1-chris@chris-wilson.co.uk
parent cc662126
@@ -124,7 +124,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	LIST_HEAD(still_in_list);
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
-	int ret = 0;
+	int ret;
 
 	if (!atomic_read(&obj->bind_count))
 		return 0;
@@ -137,6 +137,8 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	 */
 	wakeref = intel_runtime_pm_get(rpm);
 
+try_again:
+	ret = 0;
 	spin_lock(&obj->vma.lock);
 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
 						       struct i915_vma,
@@ -154,6 +156,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 		} else {
 			if (i915_vma_is_closed(vma)) {
 				spin_unlock(&obj->vma.lock);
+				i915_vma_parked(vm->gt);
 				goto err_vm;
 			}
 		}
@@ -175,6 +178,11 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	list_splice(&still_in_list, &obj->vma.list);
 	spin_unlock(&obj->vma.lock);
 
+	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_ACTIVE) {
+		rcu_barrier(); /* flush the i915_vm_release() */
+		goto try_again;
+	}
+
 	intel_runtime_pm_put(rpm, wakeref);
 
 	return ret;
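
For context, a simplified, userspace-style sketch of the retry pattern the patch introduces follows: on -EAGAIN, flush the deferred release work inside the kernel and retry the unbind loop, rather than returning the error to userspace. The helpers try_unbind_all() and flush_pending_releases() are hypothetical stand-ins for the real vma unbind loop and for rcu_barrier()/i915_vma_parked(); this is an illustration of the control flow, not the driver code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the vma unbind loop: returns 0 on success, or
 * -EAGAIN when a vma cannot be handled yet because a deferred release
 * (i915_vm_release) or parking event (i915_vma_parked) is still pending.
 */
static int try_unbind_all(int *pending_releases)
{
	if (*pending_releases > 0)
		return -EAGAIN;
	return 0;
}

/*
 * Hypothetical stand-in for rcu_barrier(): wait for the queued release
 * callbacks to finish so the next unbind attempt can make forward progress.
 */
static void flush_pending_releases(int *pending_releases)
{
	*pending_releases = 0;
}

/*
 * The pattern the patch introduces: on -EAGAIN, flush the pending events
 * in the kernel and retry the loop, instead of bouncing -EAGAIN back to
 * userspace, whose retry is not guaranteed to make forward progress.
 */
static int unbind_with_retry(bool unbind_active)
{
	int pending_releases = 1;	/* pretend one release callback is queued */
	int ret;

try_again:
	ret = try_unbind_all(&pending_releases);
	if (ret == -EAGAIN && unbind_active) {
		flush_pending_releases(&pending_releases);
		goto try_again;
	}
	return ret;
}

int main(void)
{
	printf("unbind returned %d\n", unbind_with_retry(true));
	return 0;
}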