Commit 520ea7c5 authored by Chris Wilson

drm/i915: Prepare for non-object vma

In order to allow ourselves to use a VMA to wrap entities other than
GEM objects, we need to allow for the vma->obj backpointer to be NULL.
In most cases, we know we are operating on a GEM object and its vma, but
we need the core code (such as i915_vma_pin/insert/bind/unbind) to work
regardless of the innards.

The remaining eyesore here is vma->obj->cache_level and the related
(but less of an issue) vma->obj->gt_ro. With a bit of care we should
mirror those on the vma itself.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180607154047.9171-1-chris@chris-wilson.co.uk
parent 52137010
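
The recurring pattern in the diff below is to guard every vma->obj dereference and to fall back to a default cache level when the vma has no object. A minimal sketch of that fallback, written as a helper purely for illustration (the commit itself open-codes the ternary at each call site):

	static unsigned int vma_cache_level(const struct i915_vma *vma)
	{
		/*
		 * Sketch only, not part of this commit: with no GEM object
		 * there is no cache_level to inherit, so fall back to 0
		 * (I915_CACHE_NONE) when encoding the PTEs.
		 */
		return vma->obj ? vma->obj->cache_level : 0;
	}
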
@@ -3588,8 +3588,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 		if (!i915_vma_unbind(vma))
 			continue;
 
-		WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
-		WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+		WARN_ON(i915_vma_bind(vma,
+				      obj ? obj->cache_level : 0,
+				      PIN_UPDATE));
+		if (obj)
+			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
 	ggtt->vm.closed = false;
...
@@ -1050,6 +1050,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
 	int i = 0;
 
 	list_for_each_entry(vma, head, vm_link) {
+		if (!vma->obj)
+			continue;
+
 		if (pinned_only && !i915_vma_is_pinned(vma))
 			continue;
...
@@ -345,7 +345,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
 
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
 	GEM_BUG_ON(vma->iomap == NULL);
@@ -365,6 +365,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
 		return;
 
 	obj = vma->obj;
+	GEM_BUG_ON(!obj);
 
 	i915_vma_unpin(vma);
 	i915_vma_close(vma);
@@ -489,7 +490,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_private *dev_priv = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;
+	unsigned int cache_level;
 	u64 start, end;
 	int ret;
@@ -524,16 +525,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-			  size, obj->base.size,
-			  flags & PIN_MAPPABLE ? "mappable" : "total",
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
 		return -ENOSPC;
 	}
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
+	if (vma->obj) {
+		ret = i915_gem_object_pin_pages(vma->obj);
+		if (ret)
+			return ret;
+
+		cache_level = vma->obj->cache_level;
+	} else {
+		cache_level = 0;
+	}
 
 	GEM_BUG_ON(vma->pages);
@@ -550,7 +556,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-					   size, offset, obj->cache_level,
+					   size, offset, cache_level,
 					   flags);
 		if (ret)
 			goto err_clear;
@@ -589,7 +595,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 
 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-					  size, alignment, obj->cache_level,
+					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
 			goto err_clear;
@@ -598,23 +604,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-	obj->bind_count++;
-	spin_unlock(&dev_priv->mm.obj_lock);
+	if (vma->obj) {
+		struct drm_i915_gem_object *obj = vma->obj;
 
-	assert_bind_count(obj);
+		spin_lock(&dev_priv->mm.obj_lock);
+		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+		obj->bind_count++;
+		spin_unlock(&dev_priv->mm.obj_lock);
+
+		assert_bind_count(obj);
+	}
 
 	return 0;
 
 err_clear:
 	vma->vm->clear_pages(vma);
 err_unpin:
-	i915_gem_object_unpin_pages(obj);
+	if (vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
 	return ret;
 }
@@ -622,7 +633,6 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
@@ -632,20 +642,26 @@ i915_vma_remove(struct i915_vma *vma)
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	/* Since the unbound list is global, only move to that list if
+	/*
+	 * Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
-	spin_lock(&i915->mm.obj_lock);
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-	spin_unlock(&i915->mm.obj_lock);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	assert_bind_count(obj);
+	if (vma->obj) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		spin_lock(&i915->mm.obj_lock);
+		if (--obj->bind_count == 0)
+			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+		spin_unlock(&i915->mm.obj_lock);
+
+		/*
+		 * And finally now the object is completely decoupled from this
+		 * vma, we can drop its hold on the backing storage and allow
+		 * it to be reaped by the shrinker.
+		 */
+		i915_gem_object_unpin_pages(obj);
+		assert_bind_count(obj);
+	}
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -670,7 +686,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
 	if (ret)
 		goto err_remove;
@@ -727,6 +743,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	int i;
 
 	GEM_BUG_ON(vma->node.allocated);
@@ -738,12 +755,13 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 	list_del(&vma->vm_link);
-	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+	if (vma->obj)
+		rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+	kmem_cache_free(i915->vmas, vma);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -809,13 +827,13 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 
 int i915_vma_unbind(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
 	unsigned long active;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-	/* First wait upon any activity as retiring the request may
+	/*
+	 * First wait upon any activity as retiring the request may
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
 	might_sleep();
@@ -823,7 +841,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (active) {
 		int idx;
 
-		/* When a closed VMA is retired, it is unbound - eek.
+		/*
+		 * When a closed VMA is retired, it is unbound - eek.
 		 * In order to prevent it from being recursively closed,
 		 * take a pin on the vma so that the second unbind is
 		 * aborted.
@@ -861,9 +880,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
-	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/*
 		 * Check that we have flushed all writes through the GGTT
...
@@ -407,7 +407,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+	/* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
 	if (vma->fence)
 		__i915_vma_unpin_fence(vma);
 }
...
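
The cache_level mirroring suggested in the commit message could look something like the sketch below; the vma->cache_level field and the helper name are hypothetical, not part of this commit, which still reads the value through vma->obj where one exists.

	/*
	 * Hypothetical follow-up sketch, assuming a new cache_level field
	 * is added to struct i915_vma: snapshot the object's cache level
	 * (or I915_CACHE_NONE for object-less vmas) so that core code
	 * such as i915_vma_bind() never reaches through vma->obj.
	 */
	static void i915_vma_mirror_cache_level(struct i915_vma *vma)
	{
		vma->cache_level = vma->obj ? vma->obj->cache_level :
					      I915_CACHE_NONE;
	}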