Commit d55495b4 authored by Chris Wilson

drm/i915: Use vma->exec_entry as our double-entry placeholder

This has the benefit of not requiring us to manipulate the
vma->exec_link list when tearing down the execbuffer, and is a
marginally cheaper test to detect the user error.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170615081435.17699-2-chris@chris-wilson.co.uk
parent 650bc635
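
The crux of the patch: whether a vma is already part of the current execbuffer is now signalled by vma->exec_entry being non-NULL, rather than by its exec_list node being linked. A standalone sketch of that idea follows, using toy types that merely stand in for the driver's i915_vma and drm_i915_gem_exec_object2 (an illustration under those naming assumptions, not kernel code):

/* Standalone sketch: a single pointer doubles as the "already on the
 * execbuf?" placeholder.  Toy types only. */
#include <assert.h>
#include <stddef.h>

struct exec_slot {                      /* stands in for drm_i915_gem_exec_object2 */
        unsigned long long flags;
};

struct toy_vma {
        struct exec_slot *exec_entry;   /* NULL <=> not part of any execbuf */
};

/* The cheaper double-entry test: one pointer compare, and no list state that
 * every error/teardown path has to keep consistent. */
static int already_in_execbuf(const struct toy_vma *vma)
{
        return vma->exec_entry != NULL;
}

int main(void)
{
        struct exec_slot slot = { .flags = 0 };
        struct toy_vma vma = { .exec_entry = NULL };

        assert(!already_in_execbuf(&vma));

        vma.exec_entry = &slot;         /* "add" the vma to an execbuffer */
        assert(already_in_execbuf(&vma));

        vma.exec_entry = NULL;          /* teardown: clearing the placeholder is enough */
        assert(!already_in_execbuf(&vma));

        return 0;
}

A pointer test needs no bookkeeping on the unwind paths, which is why the hunks below can drop the INIT_LIST_HEAD()/list_del_init() calls.
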
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -59,9 +59,6 @@ mark_free(struct drm_mm_scan *scan,
         if (i915_vma_is_pinned(vma))
                 return false;
 
-        if (WARN_ON(!list_empty(&vma->exec_list)))
-                return false;
-
         if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
                 return false;
 
@@ -160,8 +157,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
         list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
                 ret = drm_mm_scan_remove_block(&scan, &vma->node);
                 BUG_ON(ret);
-
-                INIT_LIST_HEAD(&vma->exec_list);
         }
 
         /* Can we unpin some objects such as idle hw contents,
@@ -209,17 +204,12 @@ i915_gem_evict_something(struct i915_address_space *vm,
                 if (drm_mm_scan_remove_block(&scan, &vma->node))
                         __i915_vma_pin(vma);
                 else
-                        list_del_init(&vma->exec_list);
+                        list_del(&vma->exec_list);
         }
 
         /* Unbinding will emit any required flushes */
         ret = 0;
-        while (!list_empty(&eviction_list)) {
-                vma = list_first_entry(&eviction_list,
-                                       struct i915_vma,
-                                       exec_list);
-
-                list_del_init(&vma->exec_list);
+        list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
                 __i915_vma_unpin(vma);
                 if (ret == 0)
                         ret = i915_vma_unbind(vma);
@@ -315,7 +305,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                 }
 
                 /* Overlap of objects in the same batch? */
-                if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
+                if (i915_vma_is_pinned(vma)) {
                         ret = -ENOSPC;
                         if (vma->exec_entry &&
                             vma->exec_entry->flags & EXEC_OBJECT_PINNED)
@@ -336,7 +326,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
         }
 
         list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
-                list_del_init(&vma->exec_list);
                 __i915_vma_unpin(vma);
                 if (ret == 0)
                         ret = i915_vma_unbind(vma);
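
The i915_gem_evict.c hunks above can drop the per-node list maintenance because the local eviction list is consumed in a single pass and nobody inspects the stale links afterwards. A standalone sketch of that teardown pattern, using a toy intrusive list rather than the kernel's list_head API (field and helper names here are illustrative only):

/* Standalone sketch: a "safe" walk of an eviction list that never
 * re-initialises the nodes it has visited. */
#include <stddef.h>
#include <stdio.h>

struct list_node {
        struct list_node *prev, *next;
};

struct toy_vma {
        int id;
        struct list_node evict_link;    /* plays the role of the vma's exec_list node in the eviction list */
};

static void list_init(struct list_node *head)
{
        head->prev = head->next = head;
}

static void list_add_tail(struct list_node *head, struct list_node *node)
{
        node->prev = head->prev;
        node->next = head;
        head->prev->next = node;
        head->prev = node;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct toy_vma vmas[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct list_node eviction_list;
        struct list_node *pos, *next;

        list_init(&eviction_list);
        for (int i = 0; i < 3; i++)
                list_add_tail(&eviction_list, &vmas[i].evict_link);

        /*
         * The old style popped each element and list_del_init()'d its node so
         * a later !list_empty() check could double as the "am I queued?" test.
         * With membership tracked elsewhere (vma->exec_entry in the driver),
         * a plain safe walk is enough; the stale links are never read again.
         */
        for (pos = eviction_list.next; pos != &eviction_list; pos = next) {
                struct toy_vma *vma = container_of(pos, struct toy_vma, evict_link);

                next = pos->next;       /* grab the successor before touching the element */
                printf("unpin + unbind vma %d\n", vma->id);     /* stands in for __i915_vma_unpin()/i915_vma_unbind() */
        }

        return 0;
}

Caching the next pointer before the body runs is what makes the walk "safe": the body may unbind or otherwise retire the element without breaking the iteration.
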
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -108,13 +108,40 @@ static int eb_create(struct i915_execbuffer *eb)
                 eb->and = -eb->args->buffer_count;
         }
 
-        INIT_LIST_HEAD(&eb->vmas);
         return 0;
 }
 
+static inline void
+__eb_unreserve_vma(struct i915_vma *vma,
+                   const struct drm_i915_gem_exec_object2 *entry)
+{
+        if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
+                i915_vma_unpin_fence(vma);
+
+        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+                __i915_vma_unpin(vma);
+}
+
+static void
+eb_unreserve_vma(struct i915_vma *vma)
+{
+        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+
+        __eb_unreserve_vma(vma, entry);
+        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
 static void
 eb_reset(struct i915_execbuffer *eb)
 {
+        struct i915_vma *vma;
+
+        list_for_each_entry(vma, &eb->vmas, exec_list) {
+                eb_unreserve_vma(vma);
+                i915_vma_put(vma);
+                vma->exec_entry = NULL;
+        }
+
         if (eb->and >= 0)
                 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
@@ -146,6 +173,8 @@ eb_lookup_vmas(struct i915_execbuffer *eb)
         struct list_head objects;
         int i, ret;
 
+        INIT_LIST_HEAD(&eb->vmas);
+
         INIT_LIST_HEAD(&objects);
         spin_lock(&eb->file->table_lock);
         /* Grab a reference to the object and release the lock so we can lookup
@@ -252,40 +281,23 @@ static struct i915_vma *eb_get_vma(struct i915_execbuffer *eb, unsigned long handle)
         }
 }
 
-static void
-eb_unreserve_vma(struct i915_vma *vma)
-{
-        struct drm_i915_gem_exec_object2 *entry;
-
-        if (!drm_mm_node_allocated(&vma->node))
-                return;
-
-        entry = vma->exec_entry;
-
-        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-                i915_vma_unpin_fence(vma);
-
-        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-                __i915_vma_unpin(vma);
-
-        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static void eb_destroy(struct i915_execbuffer *eb)
 {
-        i915_gem_context_put(eb->ctx);
-
-        while (!list_empty(&eb->vmas)) {
-                struct i915_vma *vma;
+        struct i915_vma *vma;
 
-                vma = list_first_entry(&eb->vmas,
-                                       struct i915_vma,
-                                       exec_list);
-                list_del_init(&vma->exec_list);
-                eb_unreserve_vma(vma);
+        list_for_each_entry(vma, &eb->vmas, exec_list) {
+                if (!vma->exec_entry)
+                        continue;
+
+                __eb_unreserve_vma(vma, vma->exec_entry);
                 vma->exec_entry = NULL;
                 i915_vma_put(vma);
         }
+
+        i915_gem_context_put(eb->ctx);
+
+        if (eb->buckets)
+                kfree(eb->buckets);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
@@ -985,13 +997,7 @@ eb_relocate_slow(struct i915_execbuffer *eb)
         int i, total, ret;
 
         /* We may process another execbuffer during the unlock... */
-        while (!list_empty(&eb->vmas)) {
-                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
-                list_del_init(&vma->exec_list);
-                eb_unreserve_vma(vma);
-                i915_vma_put(vma);
-        }
-
+        eb_reset(eb);
         mutex_unlock(&dev->struct_mutex);
 
         total = 0;
@@ -1052,7 +1058,6 @@ eb_relocate_slow(struct i915_execbuffer *eb)
         }
 
         /* reacquire the objects */
-        eb_reset(eb);
         ret = eb_lookup_vmas(eb);
         if (ret)
                 goto err;
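
In the execbuffer teardown above, eb_reset() clears vma->exec_entry after unreserving each vma, and eb_destroy() skips any vma whose placeholder is already NULL. A standalone sketch of that guard, again with toy types (release() merely stands in for __eb_unreserve_vma() followed by i915_vma_put()):

/* Standalone sketch: teardown guarded by the exec_entry placeholder. */
#include <stdio.h>

struct exec_slot {                      /* stands in for drm_i915_gem_exec_object2 */
        unsigned long long flags;
};

struct toy_vma {
        int id;
        struct exec_slot *exec_entry;   /* NULL => already unreserved, nothing to release */
};

static void release(struct toy_vma *vma)
{
        printf("unreserve + put vma %d\n", vma->id);
        vma->exec_entry = NULL;         /* leave the placeholder cleared for any later pass */
}

int main(void)
{
        struct exec_slot slots[2] = { { .flags = 0 }, { .flags = 0 } };
        struct toy_vma vmas[3] = {
                { .id = 0, .exec_entry = &slots[0] },
                { .id = 1, .exec_entry = NULL },        /* e.g. already torn down by an earlier reset */
                { .id = 2, .exec_entry = &slots[1] },
        };

        for (int i = 0; i < 3; i++) {
                if (!vmas[i].exec_entry)        /* the same kind of guard eb_destroy() uses */
                        continue;
                release(&vmas[i]);
        }

        return 0;
}
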
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -85,7 +85,6 @@ vma_create(struct drm_i915_gem_object *obj,
         if (vma == NULL)
                 return ERR_PTR(-ENOMEM);
 
-        INIT_LIST_HEAD(&vma->exec_list);
         for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                 init_request_active(&vma->last_read[i], i915_vma_retire);
         init_request_active(&vma->last_fence, NULL);