Commit 7e00897b authored by Maarten Lankhorst

drm/i915: Add object locking to i915_gem_evict_for_node and i915_gem_evict_something, v2.

Because we will start to require the obj->resv lock for unbinding,
ensure these vma eviction utility functions also take the lock.

This requires some function signature changes, to ensure that the
ww context is passed around, but is mostly straightforward.

Previously this was split up into several patches, but reworking
should allow for easier bisection.

Changes since v1:
- Handle evicting dead objects better.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-4-maarten.lankhorst@linux.intel.com
parent 6945c53b
...@@ -506,7 +506,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) ...@@ -506,7 +506,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
size = ggtt->vm.total - GUC_GGTT_TOP; size = ggtt->vm.total - GUC_GGTT_TOP;
ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT); PIN_NOEVICT);
if (ret) if (ret)
......
...@@ -1382,7 +1382,7 @@ static int evict_vma(void *data) ...@@ -1382,7 +1382,7 @@ static int evict_vma(void *data)
complete(&arg->completion); complete(&arg->completion);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
err = i915_gem_evict_for_node(vm, &evict, 0); err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
mutex_unlock(&vm->mutex); mutex_unlock(&vm->mutex);
return err; return err;
......
...@@ -63,7 +63,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) ...@@ -63,7 +63,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
mutex_lock(&gt->ggtt->vm.mutex); mutex_lock(&gt->ggtt->vm.mutex);
mmio_hw_access_pre(gt); mmio_hw_access_pre(gt);
ret = i915_gem_gtt_insert(&gt->ggtt->vm, node, ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE, size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE, I915_COLOR_UNEVICTABLE,
start, end, flags); start, end, flags);
......
...@@ -1735,11 +1735,13 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) ...@@ -1735,11 +1735,13 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
/* i915_gem_evict.c */ /* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm, int __must_check i915_gem_evict_something(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
u64 min_size, u64 alignment, u64 min_size, u64 alignment,
unsigned long color, unsigned long color,
u64 start, u64 end, u64 start, u64 end,
unsigned flags); unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node, struct drm_mm_node *node,
unsigned int flags); unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm, int i915_gem_evict_vm(struct i915_address_space *vm,
......
...@@ -37,6 +37,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl { ...@@ -37,6 +37,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1; bool fail_if_busy:1;
} igt_evict_ctl;) } igt_evict_ctl;)
/*
 * dying_vma - report whether the vma's backing GEM object is already on its
 * way to destruction (object refcount has dropped to zero). Such objects
 * must not be locked or pinned; the eviction paths treat them specially.
 */
static bool dying_vma(struct i915_vma *vma)
{
return !kref_read(&vma->obj->base.refcount);
}
static int ggtt_flush(struct intel_gt *gt) static int ggtt_flush(struct intel_gt *gt)
{ {
/* /*
...@@ -49,8 +54,37 @@ static int ggtt_flush(struct intel_gt *gt) ...@@ -49,8 +54,37 @@ static int ggtt_flush(struct intel_gt *gt)
return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
} }
/*
 * grab_vma - acquire a reference and the object lock needed to evict @vma.
 * @vma: the vma whose backing object must be locked
 * @ww: optional ww acquire context forwarded to the trylock
 *
 * Returns true if the vma may be evicted by the caller. For a live object
 * this means a reference was taken and the reservation lock was acquired
 * via trylock (failure here means a lock conflict, and the caller skips
 * the vma rather than waiting). For a dying object (refcount already zero)
 * no lock is taken; its pin bits are cleared so eviction can proceed.
 * Must be paired with ungrab_vma() on success.
 */
static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
/*
 * We add the extra refcount so the object doesn't drop to zero until
 * after ungrab_vma(), this way trylock is always paired with unlock.
 */
if (i915_gem_object_get_rcu(vma->obj)) {
if (!i915_gem_object_trylock(vma->obj, ww)) {
i915_gem_object_put(vma->obj);
return false;
}
} else {
/* Dead objects don't need pins */
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
}
return true;
}
/*
 * ungrab_vma - undo a successful grab_vma(): drop the object lock and the
 * extra reference. A dying object took neither (see grab_vma()), so it is
 * left untouched.
 */
static void ungrab_vma(struct i915_vma *vma)
{
if (dying_vma(vma))
return;
i915_gem_object_unlock(vma->obj);
i915_gem_object_put(vma->obj);
}
static bool static bool
mark_free(struct drm_mm_scan *scan, mark_free(struct drm_mm_scan *scan,
struct i915_gem_ww_ctx *ww,
struct i915_vma *vma, struct i915_vma *vma,
unsigned int flags, unsigned int flags,
struct list_head *unwind) struct list_head *unwind)
...@@ -58,6 +92,9 @@ mark_free(struct drm_mm_scan *scan, ...@@ -58,6 +92,9 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma)) if (i915_vma_is_pinned(vma))
return false; return false;
if (!grab_vma(vma, ww))
return false;
list_add(&vma->evict_link, unwind); list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node); return drm_mm_scan_add_block(scan, &vma->node);
} }
...@@ -76,6 +113,7 @@ static bool defer_evict(struct i915_vma *vma) ...@@ -76,6 +113,7 @@ static bool defer_evict(struct i915_vma *vma)
/** /**
* i915_gem_evict_something - Evict vmas to make room for binding a new one * i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from * @vm: address space to evict from
* @ww: An optional struct i915_gem_ww_ctx.
* @min_size: size of the desired free space * @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space * @alignment: alignment constraint of the desired free space
* @color: color for the desired space * @color: color for the desired space
...@@ -98,6 +136,7 @@ static bool defer_evict(struct i915_vma *vma) ...@@ -98,6 +136,7 @@ static bool defer_evict(struct i915_vma *vma)
*/ */
int int
i915_gem_evict_something(struct i915_address_space *vm, i915_gem_evict_something(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
u64 min_size, u64 alignment, u64 min_size, u64 alignment,
unsigned long color, unsigned long color,
u64 start, u64 end, u64 start, u64 end,
...@@ -170,7 +209,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -170,7 +209,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
continue; continue;
} }
if (mark_free(&scan, vma, flags, &eviction_list)) if (mark_free(&scan, ww, vma, flags, &eviction_list))
goto found; goto found;
} }
...@@ -178,6 +217,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -178,6 +217,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
ret = drm_mm_scan_remove_block(&scan, &vma->node); ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret); BUG_ON(ret);
ungrab_vma(vma);
} }
/* /*
...@@ -222,10 +262,12 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -222,10 +262,12 @@ i915_gem_evict_something(struct i915_address_space *vm,
* of any of our objects, thus corrupting the list). * of any of our objects, thus corrupting the list).
*/ */
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
if (drm_mm_scan_remove_block(&scan, &vma->node)) if (drm_mm_scan_remove_block(&scan, &vma->node)) {
__i915_vma_pin(vma); __i915_vma_pin(vma);
else } else {
list_del(&vma->evict_link); list_del(&vma->evict_link);
ungrab_vma(vma);
}
} }
/* Unbinding will emit any required flushes */ /* Unbinding will emit any required flushes */
...@@ -234,16 +276,20 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -234,16 +276,20 @@ i915_gem_evict_something(struct i915_address_space *vm,
__i915_vma_unpin(vma); __i915_vma_unpin(vma);
if (ret == 0) if (ret == 0)
ret = __i915_vma_unbind(vma); ret = __i915_vma_unbind(vma);
ungrab_vma(vma);
} }
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) { while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node); vma = container_of(node, struct i915_vma, node);
/* If we find any non-objects (!vma), we cannot evict them */ /* If we find any non-objects (!vma), we cannot evict them */
if (vma->node.color != I915_COLOR_UNEVICTABLE) if (vma->node.color != I915_COLOR_UNEVICTABLE &&
grab_vma(vma, ww)) {
ret = __i915_vma_unbind(vma); ret = __i915_vma_unbind(vma);
else ungrab_vma(vma);
ret = -ENOSPC; /* XXX search failed, try again? */ } else {
ret = -ENOSPC;
}
} }
return ret; return ret;
...@@ -252,6 +298,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -252,6 +298,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
/** /**
* i915_gem_evict_for_node - Evict vmas to make room for binding a new one * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
* @vm: address space to evict from * @vm: address space to evict from
* @ww: An optional struct i915_gem_ww_ctx.
* @target: range (and color) to evict for * @target: range (and color) to evict for
* @flags: additional flags to control the eviction algorithm * @flags: additional flags to control the eviction algorithm
* *
...@@ -261,6 +308,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -261,6 +308,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
* memory in e.g. the shrinker. * memory in e.g. the shrinker.
*/ */
int i915_gem_evict_for_node(struct i915_address_space *vm, int i915_gem_evict_for_node(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
struct drm_mm_node *target, struct drm_mm_node *target,
unsigned int flags) unsigned int flags)
{ {
...@@ -333,6 +381,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, ...@@ -333,6 +381,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break; break;
} }
if (!grab_vma(vma, ww)) {
ret = -ENOSPC;
break;
}
/* /*
* Never show fear in the face of dragons! * Never show fear in the face of dragons!
* *
...@@ -350,6 +403,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, ...@@ -350,6 +403,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
__i915_vma_unpin(vma); __i915_vma_unpin(vma);
if (ret == 0) if (ret == 0)
ret = __i915_vma_unbind(vma); ret = __i915_vma_unbind(vma);
ungrab_vma(vma);
} }
return ret; return ret;
...@@ -401,7 +456,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) ...@@ -401,7 +456,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
* the resv is shared among multiple objects, we still * the resv is shared among multiple objects, we still
* need the object ref. * need the object ref.
*/ */
if (!kref_read(&vma->obj->base.refcount) || if (dying_vma(vma) ||
(ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) { (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
__i915_vma_pin(vma); __i915_vma_pin(vma);
list_add(&vma->evict_link, &locked_eviction_list); list_add(&vma->evict_link, &locked_eviction_list);
......
...@@ -70,6 +70,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, ...@@ -70,6 +70,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/** /**
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT) * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space * @vm: the &struct i915_address_space
* @ww: An optional struct i915_gem_ww_ctx.
* @node: the &struct drm_mm_node (typically i915_vma.mode) * @node: the &struct drm_mm_node (typically i915_vma.mode)
* @size: how much space to allocate inside the GTT, * @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned * must be #I915_GTT_PAGE_SIZE aligned
...@@ -93,6 +94,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, ...@@ -93,6 +94,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
* asked to wait for eviction and interrupted. * asked to wait for eviction and interrupted.
*/ */
int i915_gem_gtt_reserve(struct i915_address_space *vm, int i915_gem_gtt_reserve(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node, struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color, u64 size, u64 offset, unsigned long color,
unsigned int flags) unsigned int flags)
...@@ -117,7 +119,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm, ...@@ -117,7 +119,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
if (flags & PIN_NOEVICT) if (flags & PIN_NOEVICT)
return -ENOSPC; return -ENOSPC;
err = i915_gem_evict_for_node(vm, node, flags); err = i915_gem_evict_for_node(vm, ww, node, flags);
if (err == 0) if (err == 0)
err = drm_mm_reserve_node(&vm->mm, node); err = drm_mm_reserve_node(&vm->mm, node);
...@@ -152,6 +154,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align) ...@@ -152,6 +154,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
/** /**
* i915_gem_gtt_insert - insert a node into an address_space (GTT) * i915_gem_gtt_insert - insert a node into an address_space (GTT)
* @vm: the &struct i915_address_space * @vm: the &struct i915_address_space
* @ww: An optional struct i915_gem_ww_ctx.
* @node: the &struct drm_mm_node (typically i915_vma.node) * @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT, * @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned * must be #I915_GTT_PAGE_SIZE aligned
...@@ -184,6 +187,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align) ...@@ -184,6 +187,7 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
* asked to wait for eviction and interrupted. * asked to wait for eviction and interrupted.
*/ */
int i915_gem_gtt_insert(struct i915_address_space *vm, int i915_gem_gtt_insert(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node, struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color, u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags) u64 start, u64 end, unsigned int flags)
...@@ -269,7 +273,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, ...@@ -269,7 +273,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
*/ */
offset = random_offset(start, end, offset = random_offset(start, end,
size, alignment ?: I915_GTT_MIN_ALIGNMENT); size, alignment ?: I915_GTT_MIN_ALIGNMENT);
err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags); err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
if (err != -ENOSPC) if (err != -ENOSPC)
return err; return err;
...@@ -277,7 +281,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, ...@@ -277,7 +281,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
return -ENOSPC; return -ENOSPC;
/* Randomly selected placement is pinned, do a search */ /* Randomly selected placement is pinned, do a search */
err = i915_gem_evict_something(vm, size, alignment, color, err = i915_gem_evict_something(vm, ww, size, alignment, color,
start, end, flags); start, end, flags);
if (err) if (err)
return err; return err;
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
struct drm_i915_gem_object; struct drm_i915_gem_object;
struct i915_address_space; struct i915_address_space;
struct i915_gem_ww_ctx;
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages); struct sg_table *pages);
...@@ -23,11 +24,13 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, ...@@ -23,11 +24,13 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages); struct sg_table *pages);
int i915_gem_gtt_reserve(struct i915_address_space *vm, int i915_gem_gtt_reserve(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node, struct drm_mm_node *node,
u64 size, u64 offset, unsigned long color, u64 size, u64 offset, unsigned long color,
unsigned int flags); unsigned int flags);
int i915_gem_gtt_insert(struct i915_address_space *vm, int i915_gem_gtt_insert(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
struct drm_mm_node *node, struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color, u64 size, u64 alignment, unsigned long color,
u64 start, u64 end, unsigned int flags); u64 start, u64 end, unsigned int flags);
......
...@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, ...@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
drm_info(&dev_priv->drm, drm_info(&dev_priv->drm,
"balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024); start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->vm, node, ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, node,
size, start, I915_COLOR_UNEVICTABLE, size, start, I915_COLOR_UNEVICTABLE,
0); 0);
if (!ret) if (!ret)
......
...@@ -712,7 +712,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) ...@@ -712,7 +712,8 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
* 0 on success, negative error code otherwise. * 0 on success, negative error code otherwise.
*/ */
static int static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{ {
unsigned long color; unsigned long color;
u64 start, end; u64 start, end;
...@@ -764,7 +765,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -764,7 +765,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
range_overflows(offset, size, end)) range_overflows(offset, size, end))
return -EINVAL; return -EINVAL;
ret = i915_gem_gtt_reserve(vma->vm, &vma->node, ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
size, offset, color, size, offset, color,
flags); flags);
if (ret) if (ret)
...@@ -803,7 +804,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -803,7 +804,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
size = round_up(size, I915_GTT_PAGE_SIZE_2M); size = round_up(size, I915_GTT_PAGE_SIZE_2M);
} }
ret = i915_gem_gtt_insert(vma->vm, &vma->node, ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
size, alignment, color, size, alignment, color,
start, end, flags); start, end, flags);
if (ret) if (ret)
...@@ -1396,7 +1397,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, ...@@ -1396,7 +1397,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
goto err_unlock; goto err_unlock;
if (!(bound & I915_VMA_BIND_MASK)) { if (!(bound & I915_VMA_BIND_MASK)) {
err = i915_vma_insert(vma, size, alignment, flags); err = i915_vma_insert(vma, ww, size, alignment, flags);
if (err) if (err)
goto err_active; goto err_active;
......
...@@ -117,7 +117,7 @@ static int igt_evict_something(void *arg) ...@@ -117,7 +117,7 @@ static int igt_evict_something(void *arg)
/* Everything is pinned, nothing should happen */ /* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm, err = i915_gem_evict_something(&ggtt->vm, NULL,
I915_GTT_PAGE_SIZE, 0, 0, I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX, 0, U64_MAX,
0); 0);
...@@ -132,7 +132,7 @@ static int igt_evict_something(void *arg) ...@@ -132,7 +132,7 @@ static int igt_evict_something(void *arg)
/* Everything is unpinned, we should be able to evict something */ /* Everything is unpinned, we should be able to evict something */
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm, err = i915_gem_evict_something(&ggtt->vm, NULL,
I915_GTT_PAGE_SIZE, 0, 0, I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX, 0, U64_MAX,
0); 0);
...@@ -204,7 +204,7 @@ static int igt_evict_for_vma(void *arg) ...@@ -204,7 +204,7 @@ static int igt_evict_for_vma(void *arg)
/* Everything is pinned, nothing should happen */ /* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex); mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) { if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n", pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
...@@ -216,7 +216,7 @@ static int igt_evict_for_vma(void *arg) ...@@ -216,7 +216,7 @@ static int igt_evict_for_vma(void *arg)
/* Everything is unpinned, we should be able to evict the node */ /* Everything is unpinned, we should be able to evict the node */
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex); mutex_unlock(&ggtt->vm.mutex);
if (err) { if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n", pr_err("i915_gem_evict_for_node returned err=%d\n",
...@@ -297,7 +297,7 @@ static int igt_evict_for_cache_color(void *arg) ...@@ -297,7 +297,7 @@ static int igt_evict_for_cache_color(void *arg)
/* Remove just the second vma */ /* Remove just the second vma */
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex); mutex_unlock(&ggtt->vm.mutex);
if (err) { if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err); pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
...@@ -310,7 +310,7 @@ static int igt_evict_for_cache_color(void *arg) ...@@ -310,7 +310,7 @@ static int igt_evict_for_cache_color(void *arg)
target.color = I915_CACHE_L3_LLC; target.color = I915_CACHE_L3_LLC;
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
mutex_unlock(&ggtt->vm.mutex); mutex_unlock(&ggtt->vm.mutex);
if (!err) { if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err); pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
...@@ -406,7 +406,7 @@ static int igt_evict_contexts(void *arg) ...@@ -406,7 +406,7 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */ /* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole)); memset(&hole, 0, sizeof(hole));
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &hole, err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE, PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->vm.total, 0, ggtt->vm.total,
PIN_NOEVICT); PIN_NOEVICT);
...@@ -426,7 +426,7 @@ static int igt_evict_contexts(void *arg) ...@@ -426,7 +426,7 @@ static int igt_evict_contexts(void *arg)
goto out_locked; goto out_locked;
} }
if (i915_gem_gtt_insert(&ggtt->vm, &r->node, if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE, 1ul << 20, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->vm.total, 0, ggtt->vm.total,
PIN_NOEVICT)) { PIN_NOEVICT)) {
......
...@@ -1350,7 +1350,7 @@ static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset) ...@@ -1350,7 +1350,7 @@ static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
return PTR_ERR(vma_res); return PTR_ERR(vma_res);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
err = i915_gem_gtt_reserve(vm, &vma->node, obj->base.size, err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
offset, offset,
obj->cache_level, obj->cache_level,
0); 0);
...@@ -1531,7 +1531,7 @@ static int insert_gtt_with_resource(struct i915_vma *vma) ...@@ -1531,7 +1531,7 @@ static int insert_gtt_with_resource(struct i915_vma *vma)
return PTR_ERR(vma_res); return PTR_ERR(vma_res);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
err = i915_gem_gtt_insert(vm, &vma->node, obj->base.size, 0, err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
obj->cache_level, 0, vm->total, 0); obj->cache_level, 0, vm->total, 0);
if (!err) { if (!err) {
i915_vma_resource_init_from_vma(vma_res, vma); i915_vma_resource_init_from_vma(vma_res, vma);
...@@ -1587,7 +1587,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1587,7 +1587,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */ /* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) { for (ii = invalid_insert; ii->size; ii++) {
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &tmp, err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
ii->size, ii->alignment, ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE, I915_COLOR_UNEVICTABLE,
ii->start, ii->end, ii->start, ii->end,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment