Commit 155ab883 authored by Chris Wilson

drm/i915: Move object close under its own lock

Use i915_gem_object_lock() to guard the LUT and active reference to
allow us to break free of struct_mutex for handling GEM_CLOSE.

Testcase: igt/gem_close_race
Testcase: igt/gem_exec_parallel
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190606112320.9704-1-chris@chris-wilson.co.uk
parent affa22b5
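For illustration only, here is a minimal userspace sketch of the locking shape this patch moves to: each object carries its own lock protecting its handle LUT, so closing a GEM handle no longer has to take the device-wide struct_mutex. The types and functions below (object, lut_entry, object_close_handle, ...) are invented for the example and deliberately simplified; the real driver additionally uses RCU, kref reference counting and the context mutex around the same walk, as the diff below shows.

/*
 * Illustrative sketch, not i915 code: a pthread mutex stands in for the
 * per-object lock that replaces the single device-wide lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lut_entry {
        struct lut_entry *next;
        unsigned int handle;
};

struct object {
        pthread_mutex_t lock;           /* per-object lock */
        struct lut_entry *lut_list;     /* handles referencing this object */
};

static void object_add_handle(struct object *obj, unsigned int handle)
{
        struct lut_entry *lut = malloc(sizeof(*lut));

        if (!lut)
                return;
        lut->handle = handle;

        pthread_mutex_lock(&obj->lock);
        lut->next = obj->lut_list;      /* publish under the object lock */
        obj->lut_list = lut;
        pthread_mutex_unlock(&obj->lock);
}

/* Closing a handle only takes the lock of the object it touches. */
static void object_close_handle(struct object *obj, unsigned int handle)
{
        struct lut_entry **p, *lut = NULL;

        pthread_mutex_lock(&obj->lock);
        for (p = &obj->lut_list; *p; p = &(*p)->next) {
                if ((*p)->handle == handle) {
                        lut = *p;
                        *p = lut->next; /* unlink under the object lock */
                        break;
                }
        }
        pthread_mutex_unlock(&obj->lock);

        free(lut);                      /* free outside the lock */
}

int main(void)
{
        struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER, .lut_list = NULL };

        object_add_handle(&obj, 1);
        object_add_handle(&obj, 2);
        object_close_handle(&obj, 1);
        printf("remaining handle: %u\n", obj.lut_list ? obj.lut_list->handle : 0);
        object_close_handle(&obj, 2);
        return 0;
}

With per-object locks, two threads closing handles on different objects take different locks and never serialize against each other, which is the contention that igt/gem_close_race and igt/gem_exec_parallel exercise.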
@@ -95,24 +95,45 @@ void i915_lut_handle_free(struct i915_lut_handle *lut)
 static void lut_close(struct i915_gem_context *ctx)
 {
-        struct i915_lut_handle *lut, *ln;
         struct radix_tree_iter iter;
         void __rcu **slot;
-        list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
-                list_del(&lut->obj_link);
-                i915_lut_handle_free(lut);
-        }
-        INIT_LIST_HEAD(&ctx->handles_list);
+        lockdep_assert_held(&ctx->mutex);
         rcu_read_lock();
         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
                 struct i915_vma *vma = rcu_dereference_raw(*slot);
+                struct drm_i915_gem_object *obj = vma->obj;
+                struct i915_lut_handle *lut;
-                radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
-                vma->open_count--;
-                i915_vma_put(vma);
+                if (!kref_get_unless_zero(&obj->base.refcount))
+                        continue;
+                rcu_read_unlock();
+                i915_gem_object_lock(obj);
+                list_for_each_entry(lut, &obj->lut_list, obj_link) {
+                        if (lut->ctx != ctx)
+                                continue;
+                        if (lut->handle != iter.index)
+                                continue;
+                        list_del(&lut->obj_link);
+                        break;
+                }
+                i915_gem_object_unlock(obj);
+                rcu_read_lock();
+                if (&lut->obj_link != &obj->lut_list) {
+                        i915_lut_handle_free(lut);
+                        radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+                        if (atomic_dec_and_test(&vma->open_count) &&
+                            !i915_vma_is_ggtt(vma))
+                                i915_vma_close(vma);
+                        i915_gem_object_put(obj);
+                }
+                i915_gem_object_put(obj);
         }
         rcu_read_unlock();
 }
@@ -250,15 +271,9 @@ static void free_engines(struct i915_gem_engines *e)
         __free_engines(e, e->num_engines);
 }
-static void free_engines_rcu(struct work_struct *wrk)
+static void free_engines_rcu(struct rcu_head *rcu)
 {
-        struct i915_gem_engines *e =
-                container_of(wrk, struct i915_gem_engines, rcu.work);
-        struct drm_i915_private *i915 = e->i915;
-        mutex_lock(&i915->drm.struct_mutex);
-        free_engines(e);
-        mutex_unlock(&i915->drm.struct_mutex);
+        free_engines(container_of(rcu, struct i915_gem_engines, rcu));
 }
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
@@ -271,7 +286,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
         if (!e)
                 return ERR_PTR(-ENOMEM);
-        e->i915 = ctx->i915;
+        init_rcu_head(&e->rcu);
         for_each_engine(engine, ctx->i915, id) {
                 struct intel_context *ce;
@@ -359,7 +374,10 @@ void i915_gem_context_release(struct kref *ref)
 static void context_close(struct i915_gem_context *ctx)
 {
+        mutex_lock(&ctx->mutex);
         i915_gem_context_set_closed(ctx);
+        ctx->file_priv = ERR_PTR(-EBADF);
         /*
          * This context will never again be assinged to HW, so we can
@@ -374,7 +392,7 @@ static void context_close(struct i915_gem_context *ctx)
          */
         lut_close(ctx);
-        ctx->file_priv = ERR_PTR(-EBADF);
+        mutex_unlock(&ctx->mutex);
         i915_gem_context_put(ctx);
 }
@@ -429,7 +447,6 @@ __create_context(struct drm_i915_private *dev_priv)
         RCU_INIT_POINTER(ctx->engines, e);
         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-        INIT_LIST_HEAD(&ctx->handles_list);
         INIT_LIST_HEAD(&ctx->hw_id_link);
         /* NB: Mark all slices as needing a remap so that when the context first
@@ -772,9 +789,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
         return 0;
 err_ctx:
-        mutex_lock(&i915->drm.struct_mutex);
         context_close(ctx);
-        mutex_unlock(&i915->drm.struct_mutex);
 err:
         idr_destroy(&file_priv->vm_idr);
         idr_destroy(&file_priv->context_idr);
@@ -787,8 +802,6 @@ void i915_gem_context_close(struct drm_file *file)
 {
         struct drm_i915_file_private *file_priv = file->driver_priv;
-        lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
         idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
         idr_destroy(&file_priv->context_idr);
         mutex_destroy(&file_priv->context_idr_lock);
@@ -1093,7 +1106,9 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
                 goto unlock;
         /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+        mutex_lock(&ctx->mutex);
         lut_close(ctx);
+        mutex_unlock(&ctx->mutex);
         old = __set_ppgtt(ctx, ppgtt);
@@ -1612,7 +1627,7 @@ set_engines(struct i915_gem_context *ctx,
         if (!set.engines)
                 return -ENOMEM;
-        set.engines->i915 = ctx->i915;
+        init_rcu_head(&set.engines->rcu);
         for (n = 0; n < num_engines; n++) {
                 struct i915_engine_class_instance ci;
                 struct intel_engine_cs *engine;
@@ -1666,8 +1681,7 @@ set_engines(struct i915_gem_context *ctx,
         rcu_swap_protected(ctx->engines, set.engines, 1);
         mutex_unlock(&ctx->engines_mutex);
-        INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
-        queue_rcu_work(system_wq, &set.engines->rcu);
+        call_rcu(&set.engines->rcu, free_engines_rcu);
         return 0;
 }
@@ -1682,7 +1696,7 @@ __copy_engines(struct i915_gem_engines *e)
         if (!copy)
                 return ERR_PTR(-ENOMEM);
-        copy->i915 = e->i915;
+        init_rcu_head(&copy->rcu);
         for (n = 0; n < e->num_engines; n++) {
                 if (e->engines[n])
                         copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1769,8 +1783,7 @@ get_engines(struct i915_gem_context *ctx,
         args->size = size;
 err_free:
-        INIT_RCU_WORK(&e->rcu, free_engines_rcu);
-        queue_rcu_work(system_wq, &e->rcu);
+        free_engines(e);
         return err;
 }
@@ -1891,7 +1904,7 @@ static int clone_engines(struct i915_gem_context *dst,
         if (!clone)
                 goto err_unlock;
-        clone->i915 = dst->i915;
+        init_rcu_head(&clone->rcu);
         for (n = 0; n < e->num_engines; n++) {
                 struct intel_engine_cs *engine;
@@ -2163,9 +2176,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
         return 0;
 err_ctx:
-        mutex_lock(&dev->struct_mutex);
         context_close(ext_data.ctx);
-        mutex_unlock(&dev->struct_mutex);
         return ret;
 }
@@ -2190,10 +2201,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
         if (!ctx)
                 return -ENOENT;
-        mutex_lock(&dev->struct_mutex);
         context_close(ctx);
-        mutex_unlock(&dev->struct_mutex);
         return 0;
 }
......
@@ -30,8 +30,7 @@ struct i915_timeline;
 struct intel_ring;
 struct i915_gem_engines {
-        struct rcu_work rcu;
-        struct drm_i915_private *i915;
+        struct rcu_head rcu;
         unsigned int num_engines;
         struct intel_context *engines[];
 };
@@ -192,17 +191,12 @@ struct i915_gem_context {
         /** remap_slice: Bitmask of cache lines that need remapping */
         u8 remap_slice;
-        /** handles_vma: rbtree to look up our context specific obj/vma for
+        /**
+         * handles_vma: rbtree to look up our context specific obj/vma for
          * the user handle. (user handles are per fd, but the binding is
          * per vm, which may be one per context or shared with the global GTT)
          */
         struct radix_tree_root handles_vma;
-        /** handles_list: reverse list of all the rbtree entries in use for
-         * this context, which allows us to free all the allocations on
-         * context close.
-         */
-        struct list_head handles_list;
 };
 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
@@ -801,9 +801,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
         unsigned int i, batch;
         int err;
-        if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
-                return -ENOENT;
         if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
                 return -EIO;
@@ -812,6 +809,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
         batch = eb_batch_index(eb);
+        mutex_lock(&eb->gem_context->mutex);
+        if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
+                err = -ENOENT;
+                goto err_ctx;
+        }
         for (i = 0; i < eb->buffer_count; i++) {
                 u32 handle = eb->exec[i].handle;
                 struct i915_lut_handle *lut;
@@ -845,13 +848,15 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                         goto err_obj;
                 }
-                /* transfer ref to ctx */
-                if (!vma->open_count++)
+                /* transfer ref to lut */
+                if (!atomic_fetch_inc(&vma->open_count))
                         i915_vma_reopen(vma);
-                list_add(&lut->obj_link, &obj->lut_list);
-                list_add(&lut->ctx_link, &eb->gem_context->handles_list);
-                lut->ctx = eb->gem_context;
                 lut->handle = handle;
+                lut->ctx = eb->gem_context;
+                i915_gem_object_lock(obj);
+                list_add(&lut->obj_link, &obj->lut_list);
+                i915_gem_object_unlock(obj);
 add_vma:
                 err = eb_add_vma(eb, i, batch, vma);
@@ -864,6 +869,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                            eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
         }
+        mutex_unlock(&eb->gem_context->mutex);
         eb->args->flags |= __EXEC_VALIDATED;
         return eb_reserve(eb);
@@ -871,6 +878,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
         i915_gem_object_put(obj);
 err_vma:
         eb->vma[i] = NULL;
+err_ctx:
+        mutex_unlock(&eb->gem_context->mutex);
         return err;
 }
......
@@ -105,39 +105,47 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
-        struct drm_i915_private *i915 = to_i915(gem->dev);
         struct drm_i915_gem_object *obj = to_intel_bo(gem);
         struct drm_i915_file_private *fpriv = file->driver_priv;
         struct i915_lut_handle *lut, *ln;
+        LIST_HEAD(close);
-        mutex_lock(&i915->drm.struct_mutex);
+        i915_gem_object_lock(obj);
         list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
                 struct i915_gem_context *ctx = lut->ctx;
-                struct i915_vma *vma;
-                GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
                 if (ctx->file_priv != fpriv)
                         continue;
-                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
-                GEM_BUG_ON(vma->obj != obj);
+                i915_gem_context_get(ctx);
+                list_move(&lut->obj_link, &close);
+        }
+        i915_gem_object_unlock(obj);
+        list_for_each_entry_safe(lut, ln, &close, obj_link) {
+                struct i915_gem_context *ctx = lut->ctx;
+                struct i915_vma *vma;
-                /* We allow the process to have multiple handles to the same
+                /*
+                 * We allow the process to have multiple handles to the same
                  * vma, in the same fd namespace, by virtue of flink/open.
                  */
-                GEM_BUG_ON(!vma->open_count);
-                if (!--vma->open_count && !i915_vma_is_ggtt(vma))
-                        i915_vma_close(vma);
-                list_del(&lut->obj_link);
-                list_del(&lut->ctx_link);
+                mutex_lock(&ctx->mutex);
+                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
+                if (vma) {
+                        GEM_BUG_ON(vma->obj != obj);
+                        GEM_BUG_ON(!atomic_read(&vma->open_count));
+                        if (atomic_dec_and_test(&vma->open_count) &&
+                            !i915_vma_is_ggtt(vma))
+                                i915_vma_close(vma);
+                }
+                mutex_unlock(&ctx->mutex);
+                i915_gem_context_put(lut->ctx);
                 i915_lut_handle_free(lut);
                 i915_gem_object_put(obj);
         }
-        mutex_unlock(&i915->drm.struct_mutex);
 }
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
......
@@ -24,7 +24,6 @@ struct drm_i915_gem_object;
  */
 struct i915_lut_handle {
         struct list_head obj_link;
-        struct list_head ctx_link;
         struct i915_gem_context *ctx;
         u32 handle;
 };
......
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
         RCU_INIT_POINTER(ctx->engines, e);
         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-        INIT_LIST_HEAD(&ctx->handles_list);
         INIT_LIST_HEAD(&ctx->hw_id_link);
         mutex_init(&ctx->mutex);
......
@@ -1899,10 +1899,12 @@ struct drm_i915_private {
                 } timelines;
                 struct list_head active_rings;
-                struct list_head closed_vma;
                 struct intel_wakeref wakeref;
+                struct list_head closed_vma;
+                spinlock_t closed_lock; /* guards the list of closed_vma */
                 /**
                  * Is the GPU currently considered idle, or busy executing
                  * userspace requests? Whilst idle, we allow runtime power
......
@@ -1784,6 +1784,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
         INIT_LIST_HEAD(&dev_priv->gt.active_rings);
         INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
+        spin_lock_init(&dev_priv->gt.closed_lock);
         i915_gem_init__mm(dev_priv);
         i915_gem_init__pm(dev_priv);
......
@@ -2074,6 +2074,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
         vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
         INIT_LIST_HEAD(&vma->obj_link);
+        INIT_LIST_HEAD(&vma->closed_link);
         mutex_lock(&vma->vm->mutex);
         list_add(&vma->vm_link, &vma->vm->unbound_list);
......
@@ -61,7 +61,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
         BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
-        spin_lock(&gt->hwsp_lock);
+        spin_lock_irq(&gt->hwsp_lock);
         /* hwsp_free_list only contains HWSP that have available cachelines */
         hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
@@ -69,7 +69,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
         if (!hwsp) {
                 struct i915_vma *vma;
-                spin_unlock(&gt->hwsp_lock);
+                spin_unlock_irq(&gt->hwsp_lock);
                 hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
                 if (!hwsp)
@@ -86,7 +86,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
                 hwsp->free_bitmap = ~0ull;
                 hwsp->gt = gt;
-                spin_lock(&gt->hwsp_lock);
+                spin_lock_irq(&gt->hwsp_lock);
                 list_add(&hwsp->free_link, &gt->hwsp_free_list);
         }
@@ -96,7 +96,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
         if (!hwsp->free_bitmap)
                 list_del(&hwsp->free_link);
-        spin_unlock(&gt->hwsp_lock);
+        spin_unlock_irq(&gt->hwsp_lock);
         GEM_BUG_ON(hwsp->vma->private != hwsp);
         return hwsp->vma;
@@ -105,8 +105,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
 static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
 {
         struct i915_gt_timelines *gt = hwsp->gt;
+        unsigned long flags;
-        spin_lock(&gt->hwsp_lock);
+        spin_lock_irqsave(&gt->hwsp_lock, flags);
         /* As a cacheline becomes available, publish the HWSP on the freelist */
         if (!hwsp->free_bitmap)
@@ -122,7 +123,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
                 kfree(hwsp);
         }
-        spin_unlock(&gt->hwsp_lock);
+        spin_unlock_irqrestore(&gt->hwsp_lock, flags);
 }
 static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
......
@@ -131,9 +131,6 @@ vma_create(struct drm_i915_gem_object *obj,
         if (vma == NULL)
                 return ERR_PTR(-ENOMEM);
-        i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
-        INIT_ACTIVE_REQUEST(&vma->last_fence);
         vma->vm = vm;
         vma->ops = &vm->vma_ops;
         vma->obj = obj;
@@ -141,6 +138,11 @@ vma_create(struct drm_i915_gem_object *obj,
         vma->size = obj->base.size;
         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+        i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+        INIT_ACTIVE_REQUEST(&vma->last_fence);
+        INIT_LIST_HEAD(&vma->closed_link);
         if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                 vma->ggtt_view = *view;
                 if (view->type == I915_GGTT_VIEW_PARTIAL) {
@@ -787,10 +789,10 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 void i915_vma_close(struct i915_vma *vma)
 {
-        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+        struct drm_i915_private *i915 = vma->vm->i915;
+        unsigned long flags;
         GEM_BUG_ON(i915_vma_is_closed(vma));
-        vma->flags |= I915_VMA_CLOSED;
         /*
          * We defer actually closing, unbinding and destroying the VMA until
@@ -804,17 +806,26 @@ void i915_vma_close(struct i915_vma *vma)
          * causing us to rebind the VMA once more. This ends up being a lot
          * of wasted work for the steady state.
          */
-        list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+        spin_lock_irqsave(&i915->gt.closed_lock, flags);
+        list_add(&vma->closed_link, &i915->gt.closed_vma);
+        spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
 }
-void i915_vma_reopen(struct i915_vma *vma)
+static void __i915_vma_remove_closed(struct i915_vma *vma)
 {
-        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-        if (vma->flags & I915_VMA_CLOSED) {
-                vma->flags &= ~I915_VMA_CLOSED;
-                list_del(&vma->closed_link);
-        }
+        struct drm_i915_private *i915 = vma->vm->i915;
+        if (!i915_vma_is_closed(vma))
+                return;
+        spin_lock_irq(&i915->gt.closed_lock);
+        list_del_init(&vma->closed_link);
+        spin_unlock_irq(&i915->gt.closed_lock);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+        __i915_vma_remove_closed(vma);
 }
 static void __i915_vma_destroy(struct i915_vma *vma)
@@ -848,8 +859,7 @@ void i915_vma_destroy(struct i915_vma *vma)
         GEM_BUG_ON(i915_vma_is_pinned(vma));
-        if (i915_vma_is_closed(vma))
-                list_del(&vma->closed_link);
+        __i915_vma_remove_closed(vma);
         WARN_ON(i915_vma_unbind(vma));
         GEM_BUG_ON(i915_vma_is_active(vma));
@@ -861,12 +871,16 @@ void i915_vma_parked(struct drm_i915_private *i915)
 {
         struct i915_vma *vma, *next;
+        spin_lock_irq(&i915->gt.closed_lock);
         list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
-                GEM_BUG_ON(!i915_vma_is_closed(vma));
+                list_del_init(&vma->closed_link);
+                spin_unlock_irq(&i915->gt.closed_lock);
                 i915_vma_destroy(vma);
-        }
-        GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
+                spin_lock_irq(&i915->gt.closed_lock);
+        }
+        spin_unlock_irq(&i915->gt.closed_lock);
 }
 static void __i915_vma_iounmap(struct i915_vma *vma)
......
@@ -71,7 +71,7 @@ struct i915_vma {
          * handles (but same file) for execbuf, i.e. the number of aliases
          * that exist in the ctx->handle_vmas LUT for this vma.
          */
-        unsigned int open_count;
+        atomic_t open_count;
         unsigned long flags;
         /**
          * How many users have pinned this object in GTT space.
@@ -106,10 +106,9 @@ struct i915_vma {
 #define I915_VMA_GGTT BIT(11)
 #define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_CLOSED BIT(13)
-#define I915_VMA_USERFAULT_BIT 14
+#define I915_VMA_USERFAULT_BIT 13
 #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(15)
+#define I915_VMA_GGTT_WRITE BIT(14)
         struct i915_active active;
         struct i915_active_request last_fence;
@@ -192,11 +191,6 @@ static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
         return vma->flags & I915_VMA_CAN_FENCE;
 }
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
-        return vma->flags & I915_VMA_CLOSED;
-}
 static inline bool i915_vma_set_userfault(struct i915_vma *vma)
 {
         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
@@ -213,6 +207,11 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
         return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
 }
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+        return !list_empty(&vma->closed_link);
+}
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
......
@@ -203,6 +203,7 @@ struct drm_i915_private *mock_gem_device(void)
         INIT_LIST_HEAD(&i915->gt.active_rings);
         INIT_LIST_HEAD(&i915->gt.closed_vma);
+        spin_lock_init(&i915->gt.closed_lock);
         mutex_lock(&i915->drm.struct_mutex);
......