Commit 3272db53 authored by Chris Wilson

drm/i915: Combine all i915_vma bitfields into a single set of flags

In preparation to perform some magic to speed up i915_vma_pin(), which
is among the hottest of hot paths in execbuf, refactor all the bitfields
accessed by i915_vma_pin() into a single unified set of flags.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-16-git-send-email-chris@chris-wilson.co.uk
parent 59bfa124
...@@ -125,7 +125,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) ...@@ -125,7 +125,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->is_ggtt && drm_mm_node_allocated(&vma->node)) if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
size += vma->node.size; size += vma->node.size;
} }
...@@ -181,9 +181,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -181,9 +181,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
continue; continue;
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
vma->is_ggtt ? "g" : "pp", i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size); vma->node.start, vma->node.size);
if (vma->is_ggtt) if (i915_vma_is_ggtt(vma))
seq_printf(m, ", type: %u", vma->ggtt_view.type); seq_printf(m, ", type: %u", vma->ggtt_view.type);
seq_puts(m, ")"); seq_puts(m, ")");
} }
...@@ -356,7 +356,7 @@ static int per_file_stats(int id, void *ptr, void *data) ...@@ -356,7 +356,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (!drm_mm_node_allocated(&vma->node)) if (!drm_mm_node_allocated(&vma->node))
continue; continue;
if (vma->is_ggtt) { if (i915_vma_is_ggtt(vma)) {
stats->global += vma->node.size; stats->global += vma->node.size;
} else { } else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
......
...@@ -2861,7 +2861,8 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -2861,7 +2861,8 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(obj->bind_count == 0); GEM_BUG_ON(obj->bind_count == 0);
GEM_BUG_ON(!obj->pages); GEM_BUG_ON(!obj->pages);
if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj); i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */ /* release the fence reg _after_ flushing */
...@@ -2876,12 +2877,12 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -2876,12 +2877,12 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma); trace_i915_vma_unbind(vma);
vma->vm->unbind_vma(vma); vma->vm->unbind_vma(vma);
} }
vma->bound = 0; vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
drm_mm_remove_node(&vma->node); drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list); list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
if (vma->is_ggtt) { if (i915_vma_is_ggtt(vma)) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false; obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) { } else if (vma->ggtt_view.pages) {
...@@ -2904,7 +2905,7 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -2904,7 +2905,7 @@ int i915_vma_unbind(struct i915_vma *vma)
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
destroy: destroy:
if (unlikely(vma->closed)) if (unlikely(i915_vma_is_closed(vma)))
i915_vma_destroy(vma); i915_vma_destroy(vma);
return 0; return 0;
...@@ -2985,7 +2986,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -2985,7 +2986,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 min_alignment; u64 min_alignment;
int ret; int ret;
GEM_BUG_ON(vma->bound); GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size); size = max(size, vma->size);
...@@ -3707,13 +3708,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) ...@@ -3707,13 +3708,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
int int
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{ {
unsigned int bound = vma->bound; unsigned int bound;
int ret; int ret;
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt); GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) bound = vma->flags;
if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
return -EBUSY; return -EBUSY;
/* Pin early to prevent the shrinker/eviction logic from destroying /* Pin early to prevent the shrinker/eviction logic from destroying
...@@ -3721,7 +3723,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -3721,7 +3723,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
*/ */
__i915_vma_pin(vma); __i915_vma_pin(vma);
if (!bound) { if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
ret = i915_vma_insert(vma, size, alignment, flags); ret = i915_vma_insert(vma, size, alignment, flags);
if (ret) if (ret)
goto err; goto err;
...@@ -3731,7 +3733,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -3731,7 +3733,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (ret) if (ret)
goto err; goto err;
if ((bound ^ vma->bound) & GLOBAL_BIND) if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
...@@ -4032,9 +4034,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) ...@@ -4032,9 +4034,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* unbound now. * unbound now.
*/ */
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
GEM_BUG_ON(!vma->is_ggtt); GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(i915_vma_is_active(vma)); GEM_BUG_ON(i915_vma_is_active(vma));
vma->pin_count = 0; vma->flags &= ~I915_VMA_PIN_MASK;
i915_vma_close(vma); i915_vma_close(vma);
} }
GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(obj->bind_count);
...@@ -4094,7 +4096,8 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, ...@@ -4094,7 +4096,8 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!view); GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link) list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) if (i915_vma_is_ggtt(vma) &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma; return vma;
return NULL; return NULL;
} }
...@@ -4583,7 +4586,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, ...@@ -4583,7 +4586,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
list_for_each_entry(vma, &o->vma_list, obj_link) { list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt && if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue; continue;
if (vma->vm == vm) if (vma->vm == vm)
...@@ -4601,7 +4604,8 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, ...@@ -4601,7 +4604,8 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) if (i915_vma_is_ggtt(vma) &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start; return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type); WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
...@@ -4614,7 +4618,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, ...@@ -4614,7 +4618,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) { list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt && if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue; continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
...@@ -4630,7 +4634,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, ...@@ -4630,7 +4634,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->is_ggtt && if (i915_vma_is_ggtt(vma) &&
i915_ggtt_view_equal(&vma->ggtt_view, view) && i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node)) drm_mm_node_allocated(&vma->node))
return true; return true;
...@@ -4645,7 +4649,7 @@ unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) ...@@ -4645,7 +4649,7 @@ unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
GEM_BUG_ON(list_empty(&o->vma_list)); GEM_BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) { list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt && if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
return vma->node.size; return vma->node.size;
} }
......
...@@ -219,7 +219,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm) ...@@ -219,7 +219,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm)
struct i915_vma *vma, *vn; struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, *phase, vm_link) list_for_each_entry_safe(vma, vn, *phase, vm_link)
if (!vma->closed) if (!i915_vma_is_closed(vma))
i915_vma_close(vma); i915_vma_close(vma);
} }
} }
......
...@@ -717,7 +717,7 @@ need_reloc_mappable(struct i915_vma *vma) ...@@ -717,7 +717,7 @@ need_reloc_mappable(struct i915_vma *vma)
if (entry->relocation_count == 0) if (entry->relocation_count == 0)
return false; return false;
if (!vma->is_ggtt) if (!i915_vma_is_ggtt(vma))
return false; return false;
/* See also use_cpu_reloc() */ /* See also use_cpu_reloc() */
...@@ -736,7 +736,8 @@ eb_vma_misplaced(struct i915_vma *vma) ...@@ -736,7 +736,8 @@ eb_vma_misplaced(struct i915_vma *vma)
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt); WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
if (entry->alignment && if (entry->alignment &&
vma->node.start & (entry->alignment - 1)) vma->node.start & (entry->alignment - 1))
......
...@@ -2652,7 +2652,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, ...@@ -2652,7 +2652,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding. * upgrade to both bound if we bind either to avoid double-binding.
*/ */
vma->bound |= GLOBAL_BIND | LOCAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
return 0; return 0;
} }
...@@ -2674,14 +2674,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, ...@@ -2674,14 +2674,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
if (flags & GLOBAL_BIND) { if (flags & I915_VMA_GLOBAL_BIND) {
vma->vm->insert_entries(vma->vm, vma->vm->insert_entries(vma->vm,
vma->ggtt_view.pages, vma->ggtt_view.pages,
vma->node.start, vma->node.start,
cache_level, pte_flags); cache_level, pte_flags);
} }
if (flags & LOCAL_BIND) { if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = struct i915_hw_ppgtt *appgtt =
to_i915(vma->vm->dev)->mm.aliasing_ppgtt; to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base, appgtt->base.insert_entries(&appgtt->base,
...@@ -2698,12 +2698,12 @@ static void ggtt_unbind_vma(struct i915_vma *vma) ...@@ -2698,12 +2698,12 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt; struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size); const u64 size = min(vma->size, vma->node.size);
if (vma->bound & GLOBAL_BIND) if (vma->flags & I915_VMA_GLOBAL_BIND)
vma->vm->clear_range(vma->vm, vma->vm->clear_range(vma->vm,
vma->node.start, size, vma->node.start, size,
true); true);
if (vma->bound & LOCAL_BIND && appgtt) if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base, appgtt->base.clear_range(&appgtt->base,
vma->node.start, size, vma->node.start, size,
true); true);
...@@ -3334,7 +3334,7 @@ i915_vma_retire(struct i915_gem_active *active, ...@@ -3334,7 +3334,7 @@ i915_vma_retire(struct i915_gem_active *active,
return; return;
list_move_tail(&vma->vm_link, &vma->vm->inactive_list); list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(vma->closed && !i915_vma_is_pinned(vma))) if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma)); WARN_ON(i915_vma_unbind(vma));
} }
...@@ -3342,10 +3342,10 @@ void i915_vma_destroy(struct i915_vma *vma) ...@@ -3342,10 +3342,10 @@ void i915_vma_destroy(struct i915_vma *vma)
{ {
GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma)); GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(!vma->closed); GEM_BUG_ON(!i915_vma_is_closed(vma));
list_del(&vma->vm_link); list_del(&vma->vm_link);
if (!vma->is_ggtt) if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
...@@ -3353,8 +3353,8 @@ void i915_vma_destroy(struct i915_vma *vma) ...@@ -3353,8 +3353,8 @@ void i915_vma_destroy(struct i915_vma *vma)
void i915_vma_close(struct i915_vma *vma) void i915_vma_close(struct i915_vma *vma)
{ {
GEM_BUG_ON(vma->closed); GEM_BUG_ON(i915_vma_is_closed(vma));
vma->closed = true; vma->flags |= I915_VMA_CLOSED;
list_del_init(&vma->obj_link); list_del_init(&vma->obj_link);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma)) if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
...@@ -3386,9 +3386,9 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj, ...@@ -3386,9 +3386,9 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->vm = vm; vma->vm = vm;
vma->obj = obj; vma->obj = obj;
vma->size = obj->base.size; vma->size = obj->base.size;
vma->is_ggtt = i915_is_ggtt(vm);
if (i915_is_ggtt(vm)) { if (i915_is_ggtt(vm)) {
vma->flags |= I915_VMA_GGTT;
vma->ggtt_view = *view; vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) { if (view->type == I915_GGTT_VIEW_PARTIAL) {
vma->size = view->params.partial.size; vma->size = view->params.partial.size;
...@@ -3433,7 +3433,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, ...@@ -3433,7 +3433,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
if (!vma) if (!vma)
vma = __i915_gem_vma_create(obj, &ggtt->base, view); vma = __i915_gem_vma_create(obj, &ggtt->base, view);
GEM_BUG_ON(vma->closed); GEM_BUG_ON(i915_vma_is_closed(vma));
return vma; return vma;
} }
...@@ -3644,27 +3644,28 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) ...@@ -3644,27 +3644,28 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags) u32 flags)
{ {
int ret;
u32 bind_flags; u32 bind_flags;
u32 vma_flags;
int ret;
if (WARN_ON(flags == 0)) if (WARN_ON(flags == 0))
return -EINVAL; return -EINVAL;
bind_flags = 0; bind_flags = 0;
if (flags & PIN_GLOBAL) if (flags & PIN_GLOBAL)
bind_flags |= GLOBAL_BIND; bind_flags |= I915_VMA_GLOBAL_BIND;
if (flags & PIN_USER) if (flags & PIN_USER)
bind_flags |= LOCAL_BIND; bind_flags |= I915_VMA_LOCAL_BIND;
vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
if (flags & PIN_UPDATE) if (flags & PIN_UPDATE)
bind_flags |= vma->bound; bind_flags |= vma_flags;
else else
bind_flags &= ~vma->bound; bind_flags &= ~vma_flags;
if (bind_flags == 0) if (bind_flags == 0)
return 0; return 0;
if (vma->bound == 0 && vma->vm->allocate_va_range) { if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma); trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm, ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start, vma->node.start,
...@@ -3677,8 +3678,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -3677,8 +3678,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (ret) if (ret)
return ret; return ret;
vma->bound |= bind_flags; vma->flags |= bind_flags;
return 0; return 0;
} }
...@@ -3690,8 +3690,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) ...@@ -3690,8 +3690,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (WARN_ON(!vma->obj->map_and_fenceable)) if (WARN_ON(!vma->obj->map_and_fenceable))
return IO_ERR_PTR(-ENODEV); return IO_ERR_PTR(-ENODEV);
GEM_BUG_ON(!vma->is_ggtt); GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0); GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
ptr = vma->iomap; ptr = vma->iomap;
if (ptr == NULL) { if (ptr == NULL) {
......
...@@ -182,15 +182,28 @@ struct i915_vma { ...@@ -182,15 +182,28 @@ struct i915_vma {
void __iomem *iomap; void __iomem *iomap;
u64 size; u64 size;
unsigned int active; unsigned int flags;
struct i915_gem_active last_read[I915_NUM_ENGINES]; /**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, execbuffer
* (objects are not allowed multiple times for the same batchbuffer),
* and the framebuffer code. When switching/pageflipping, the
* framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits.
*/
#define I915_VMA_PIN_MASK 0xf
/** Flags and address space this VMA is bound to */ /** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0) #define I915_VMA_GLOBAL_BIND BIT(5)
#define LOCAL_BIND (1<<1) #define I915_VMA_LOCAL_BIND BIT(6)
unsigned int bound : 4;
bool is_ggtt : 1; #define I915_VMA_GGTT BIT(7)
bool closed : 1; #define I915_VMA_CLOSED BIT(8)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
/** /**
* Support different GGTT views into the same object. * Support different GGTT views into the same object.
...@@ -215,20 +228,18 @@ struct i915_vma { ...@@ -215,20 +228,18 @@ struct i915_vma {
struct hlist_node exec_node; struct hlist_node exec_node;
unsigned long exec_handle; unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry; struct drm_i915_gem_exec_object2 *exec_entry;
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, execbuffer
* (objects are not allowed multiple times for the same batchbuffer),
* and the framebuffer code. When switching/pageflipping, the
* framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
}; };
/* Report whether this VMA lives in the global GTT (I915_VMA_GGTT set at creation). */
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return (vma->flags & I915_VMA_GGTT) != 0;
}
/* Report whether i915_vma_close() has marked this VMA as closed. */
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return (vma->flags & I915_VMA_CLOSED) != 0;
}
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{ {
return vma->active; return vma->active;
...@@ -625,7 +636,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); ...@@ -625,7 +636,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma) static inline int i915_vma_pin_count(const struct i915_vma *vma)
{ {
return vma->pin_count; return vma->flags & I915_VMA_PIN_MASK;
} }
static inline bool i915_vma_is_pinned(const struct i915_vma *vma) static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
...@@ -635,14 +646,14 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma) ...@@ -635,14 +646,14 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma) static inline void __i915_vma_pin(struct i915_vma *vma)
{ {
vma->pin_count++; vma->flags++;
GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!i915_vma_is_pinned(vma));
} }
static inline void __i915_vma_unpin(struct i915_vma *vma) static inline void __i915_vma_unpin(struct i915_vma *vma)
{ {
GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma->pin_count--; vma->flags--;
} }
static inline void i915_vma_unpin(struct i915_vma *vma) static inline void i915_vma_unpin(struct i915_vma *vma)
......
...@@ -53,7 +53,7 @@ static bool any_vma_pinned(struct drm_i915_gem_object *obj) ...@@ -53,7 +53,7 @@ static bool any_vma_pinned(struct drm_i915_gem_object *obj)
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->pin_count) if (i915_vma_is_pinned(vma))
return true; return true;
return false; return false;
......
...@@ -705,7 +705,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -705,7 +705,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
goto err; goto err;
} }
vma->bound |= GLOBAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
obj->bind_count++; obj->bind_count++;
......
...@@ -669,14 +669,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -669,14 +669,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
if (i915_is_ggtt(vm)) if (i915_is_ggtt(vm))
vma = i915_gem_obj_to_ggtt(src); vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE && use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) && vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end); reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */ /* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) { if (src->stolen) {
use_ggtt = true; use_ggtt = true;
if (!(vma && vma->bound & GLOBAL_BIND)) if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
goto unwind; goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src); reloc_offset = i915_gem_obj_ggtt_offset(src);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment