Commit 096a42dd authored by Chris Wilson

drm/i915/gem: Move obj->lut_list under its own lock

The obj->lut_list is traversed when the object is closed, as the file
table is destroyed during process termination. Since this occurs before
we kill any outstanding context, if the closure is blocked due to some
bug or another, we fail to shoot down any in-flight operations,
potentially leaving the GPU spinning forever. As we only need to guard
the list against concurrent closures and insertions, the hold is short
and merits being treated as a simple spinlock.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200701084439.17025-1-chris@chris-wilson.co.uk
parent 0ba7ffea
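The heart of the change is giving obj->lut_list a dedicated spinlock instead of borrowing the object's sleeping lock. A minimal kernel-style sketch of that pattern follows; the example_* names are illustrative stand-ins, not i915 structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_object {
	struct list_head lut_list;	/* handle-lookup entries */
	spinlock_t lut_lock;		/* guards lut_list, nothing else */
};

static void example_object_init(struct example_object *obj)
{
	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);
}

static void example_object_add_lut(struct example_object *obj,
				   struct list_head *link)
{
	spin_lock(&obj->lut_lock);	/* short, non-sleeping hold */
	list_add(link, &obj->lut_list);
	spin_unlock(&obj->lut_lock);
}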
drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -112,8 +112,7 @@ static void lut_close(struct i915_gem_context *ctx)
 		if (!kref_get_unless_zero(&obj->base.refcount))
 			continue;
 
-		rcu_read_unlock();
-		i915_gem_object_lock(obj);
+		spin_lock(&obj->lut_lock);
 		list_for_each_entry(lut, &obj->lut_list, obj_link) {
 			if (lut->ctx != ctx)
 				continue;
@@ -124,8 +123,7 @@ static void lut_close(struct i915_gem_context *ctx)
 			list_del(&lut->obj_link);
 			break;
 		}
-		i915_gem_object_unlock(obj);
-		rcu_read_lock();
+		spin_unlock(&obj->lut_lock);
 
 		if (&lut->obj_link != &obj->lut_list) {
 			i915_lut_handle_free(lut);
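A spinlock, unlike the object's mutex, never sleeps, so it may be taken inside an RCU read-side critical section; that is why the rcu_read_unlock()/rcu_read_lock() pair bracketing the old i915_gem_object_lock() vanishes above. A hedged sketch of the resulting shape, again with illustrative example_* names:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_object {
	struct kref refcount;
	spinlock_t lut_lock;
	struct list_head lut_list;
};

static void example_prune(struct example_object *obj)
{
	rcu_read_lock();
	/* Only touch the object if it is still alive. */
	if (kref_get_unless_zero(&obj->refcount)) {
		spin_lock(&obj->lut_lock);	/* no rcu_read_unlock() needed */
		/* ... find and list_del() the matching entry ... */
		spin_unlock(&obj->lut_lock);
		/* the real code drops this reference once it is done */
	}
	rcu_read_unlock();
}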
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -789,14 +789,14 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
 		if (err == 0) { /* And nor has this handle */
 			struct drm_i915_gem_object *obj = vma->obj;
 
-			i915_gem_object_lock(obj);
+			spin_lock(&obj->lut_lock);
 			if (idr_find(&eb->file->object_idr, handle) == obj) {
 				list_add(&lut->obj_link, &obj->lut_list);
 			} else {
 				radix_tree_delete(&ctx->handles_vma, handle);
 				err = -ENOENT;
 			}
-			i915_gem_object_unlock(obj);
+			spin_unlock(&obj->lut_lock);
 		}
 		mutex_unlock(&ctx->mutex);
 	}
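Note how __eb_add_lut() re-checks idr_find() only after taking the new lut_lock: the handle may have been closed between lookup and insertion, in which case the radix-tree entry is unwound and -ENOENT returned. A sketch of that validate-then-insert pattern, where example_handle_valid() is a hypothetical stand-in for the idr_find() re-check:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_object {
	spinlock_t lut_lock;
	struct list_head lut_list;
};

/* Hypothetical stand-in for the idr_find() re-check; not an i915 call. */
bool example_handle_valid(struct example_object *obj, u32 handle);

static int example_insert(struct example_object *obj, u32 handle,
			  struct list_head *link)
{
	int err = 0;

	spin_lock(&obj->lut_lock);
	/* Revalidate under the lock: a concurrent close may have retired
	 * the handle between lookup and insertion. */
	if (example_handle_valid(obj, handle))
		list_add(link, &obj->lut_list);
	else
		err = -ENOENT;	/* caller unwinds its radix-tree entry */
	spin_unlock(&obj->lut_lock);

	return err;
}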
drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -61,6 +61,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->mm.link);
 
 	INIT_LIST_HEAD(&obj->lut_list);
+	spin_lock_init(&obj->lut_lock);
 
 	spin_lock_init(&obj->mmo.lock);
 	obj->mmo.offsets = RB_ROOT;
@@ -104,21 +105,29 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
 	struct drm_i915_file_private *fpriv = file->driver_priv;
+	struct i915_lut_handle bookmark = {};
 	struct i915_mmap_offset *mmo, *mn;
 	struct i915_lut_handle *lut, *ln;
 	LIST_HEAD(close);
 
-	i915_gem_object_lock(obj);
+	spin_lock(&obj->lut_lock);
 	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 		struct i915_gem_context *ctx = lut->ctx;
 
-		if (ctx->file_priv != fpriv)
-			continue;
+		if (ctx && ctx->file_priv == fpriv) {
+			i915_gem_context_get(ctx);
+			list_move(&lut->obj_link, &close);
+		}
 
-		i915_gem_context_get(ctx);
-		list_move(&lut->obj_link, &close);
+		/* Break long locks, and carefully continue on from this spot */
+		if (&ln->obj_link != &obj->lut_list) {
+			list_add_tail(&bookmark.obj_link, &ln->obj_link);
+			if (cond_resched_lock(&obj->lut_lock))
+				list_safe_reset_next(&bookmark, ln, obj_link);
+			__list_del_entry(&bookmark.obj_link);
+		}
 	}
-	i915_gem_object_unlock(obj);
+	spin_unlock(&obj->lut_lock);
 
 	spin_lock(&obj->mmo.lock);
 	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
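The bookmark above is the subtle part: lut_list can be long, and a spinlock must not be held across a reschedule, so a zero-initialised dummy entry is parked at the current position, cond_resched_lock() drops and retakes the lock if a reschedule is due, and iteration resumes from the bookmark. The new "ctx &&" test likewise lets one walker skip another walker's bookmark, whose ctx is NULL. A self-contained sketch of the pattern under those assumptions, with illustrative types:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct example_entry {
	void *tag;			/* NULL identifies a bookmark */
	struct list_head link;
};

static void example_walk(struct list_head *head, spinlock_t *lock)
{
	struct example_entry bookmark = {};	/* tag == NULL */
	struct example_entry *pos, *next;

	spin_lock(lock);
	list_for_each_entry_safe(pos, next, head, link) {
		if (pos->tag) {
			/* ... process a real entry; skip bookmarks left
			 * by concurrent walkers ... */
		}

		/* Break long lock holds, then resume from this spot. */
		if (&next->link != head) {
			list_add_tail(&bookmark.link, &next->link);
			if (cond_resched_lock(lock))
				/* The lock was dropped: 'next' may be
				 * stale, so re-read it past the bookmark. */
				list_safe_reset_next(&bookmark, next, link);
			__list_del_entry(&bookmark.link);
		}
	}
	spin_unlock(lock);
}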
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -121,6 +121,7 @@ struct drm_i915_gem_object {
 	 * this translation from object to context->handles_vma.
 	 */
 	struct list_head lut_list;
+	spinlock_t lut_lock; /* guards lut_list */
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;