Commit e948761f authored by Chris Wilson

drm/i915/selftests: Make the mman object busy everywhere

Loop over all engines, issuing a request for the object on each in order
to make sure we leave no stone unturned when creating an active ref. The
purpose is to make sure that we can reap a zombie object (one that is
only alive due to an active reference on the GPU) no matter where that
active reference emanates from.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191022101704.5618-1-chris@chris-wilson.co.uk
parent 51757cf4
@@ -515,11 +515,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
struct i915_vma *vma;
int err;
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -527,9 +529,6 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
for_each_engine(engine, i915, id) {
struct i915_request *rq;
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
@@ -544,12 +543,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
i915_vma_unlock(vma);
i915_request_add(rq);
i915_vma_unpin(vma);
if (err)
return err;
}
i915_vma_unpin(vma);
i915_gem_object_put(obj); /* leave it only alive via its active ref */
return err;
return 0;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment