Commit bffa18dd authored by Chris Wilson

drm/i915/gt: Remove local entries from GGTT on suspend

Across suspend/resume, we clear the entire GGTT and rebuild it from
scratch. In particular, we want to preserve only the global entries for
use by the HW, and to delay reinstating the local binds until required
by the user. This means that we can evict any local binds in the global
GTT, saving the time spent preserving their state, as they will simply
be rebound on demand.

References: https://gitlab.freedesktop.org/drm/intel/-/issues/1947
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200528082427.21402-2-chris@chris-wilson.co.uk
parent dc6cd912
@@ -108,13 +108,32 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
 
 void i915_ggtt_suspend(struct i915_ggtt *ggtt)
 {
-	struct i915_vma *vma;
+	struct i915_vma *vma, *vn;
+	int open;
+
+	mutex_lock(&ggtt->vm.mutex);
 
-	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+	/* Skip rewriting PTE on VMA unbind. */
+	open = atomic_xchg(&ggtt->vm.open, 0);
+
+	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 		i915_vma_wait_for_bind(vma);
 
+		if (i915_vma_is_pinned(vma))
+			continue;
+
+		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+			__i915_vma_evict(vma);
+			drm_mm_remove_node(&vma->node);
+		}
+	}
+
 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 	ggtt->invalidate(ggtt);
+	atomic_set(&ggtt->vm.open, open);
+
+	mutex_unlock(&ggtt->vm.mutex);
 
 	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 }
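Two idioms in this hunk are worth noting. The switch to list_for_each_entry_safe() matters because __i915_vma_evict() plus drm_mm_remove_node() can take the current entry off bound_list, so the walk has to cache its successor ('vn') before the body runs. And vm.open is exchanged to zero so that eviction skips rewriting PTEs per entry (the diff's own comment: "Skip rewriting PTE on VMA unbind"), since the whole range is cleared in one pass afterwards and the value restored. Below is a minimal, self-contained userspace sketch of both idioms; all names and types are hypothetical stand-ins, the kernel uses <linux/list.h> and its own atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int global;		/* stands in for I915_VMA_GLOBAL_BIND */
	struct entry *next;
};

static atomic_int vm_open = 1;	/* plays the role of ggtt->vm.open */

static void evict(struct entry *e)
{
	/* A real unbind would only rewrite PTEs while the vm is "open";
	 * the caller has cleared the flag, so this branch is skipped. */
	if (atomic_load(&vm_open))
		printf("rewriting PTEs for evicted entry\n");
	free(e);
}

int main(void)
{
	struct entry *head = NULL, *e, *vn, **link;

	for (int i = 4; i >= 0; i--) {	/* build entries 0..4 */
		e = malloc(sizeof(*e));
		e->global = i & 1;	/* odd entries are "global binds" */
		e->next = head;
		head = e;
	}

	/* Save and clear, as atomic_xchg(&ggtt->vm.open, 0) does above. */
	int open = atomic_exchange(&vm_open, 0);

	/* The _safe walk: 'vn' is read before the body may free 'e'. */
	link = &head;
	for (e = head; e; e = vn) {
		vn = e->next;
		if (!e->global) {	/* local bind: evict immediately */
			*link = vn;
			evict(e);
		} else {
			link = &e->next;
		}
	}

	atomic_store(&vm_open, open);	/* restore, as atomic_set() does */

	for (e = head; e; e = e->next)
		printf("kept a global entry\n");
	return 0;
}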
@@ -1229,31 +1229,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	return 0;
 }
 
-int __i915_vma_unbind(struct i915_vma *vma)
+void __i915_vma_evict(struct i915_vma *vma)
 {
-	int ret;
-
-	lockdep_assert_held(&vma->vm->mutex);
-
-	if (i915_vma_is_pinned(vma)) {
-		vma_print_allocator(vma, "is pinned");
-		return -EAGAIN;
-	}
-
-	/*
-	 * After confirming that no one else is pinning this vma, wait for
-	 * any laggards who may have crept in during the wait (through
-	 * a residual pin skipping the vm->mutex) to complete.
-	 */
-	ret = i915_vma_sync(vma);
-	if (ret)
-		return ret;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return 0;
-
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
-	GEM_BUG_ON(i915_vma_is_active(vma));
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* Force a pagefault for domain tracking on next user access */
@@ -1292,6 +1270,33 @@ int __i915_vma_unbind(struct i915_vma *vma)
 
 	i915_vma_detach(vma);
 	vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+	int ret;
+
+	lockdep_assert_held(&vma->vm->mutex);
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return 0;
+
+	if (i915_vma_is_pinned(vma)) {
+		vma_print_allocator(vma, "is pinned");
+		return -EAGAIN;
+	}
+
+	/*
+	 * After confirming that no one else is pinning this vma, wait for
+	 * any laggards who may have crept in during the wait (through
+	 * a residual pin skipping the vm->mutex) to complete.
+	 */
+	ret = i915_vma_sync(vma);
+	if (ret)
+		return ret;
+
+	GEM_BUG_ON(i915_vma_is_active(vma));
+	__i915_vma_evict(vma);
 
 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
 	return 0;
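The net effect of these two hunks is a split: __i915_vma_evict() becomes the unconditional teardown (no return value, no error paths), while __i915_vma_unbind() keeps the allocation, pin and idleness checks and then calls it before releasing the drm_mm node. That is what lets i915_ggtt_suspend() above evict entries directly once it has validated them itself. A generic, compilable sketch of the shape of this refactor; the types and names here are hypothetical, not the i915 code:

#include <errno.h>
#include <stdio.h>

struct object { int allocated; int pinned; };

static void object_evict(struct object *obj)
{
	/* Unconditional teardown: must only run on unpinned objects. */
	obj->allocated = 0;
}

static int object_unbind(struct object *obj)
{
	if (!obj->allocated)
		return 0;		/* nothing to do */
	if (obj->pinned)
		return -EAGAIN;		/* try again once unpinned */
	object_evict(obj);
	return 0;
}

int main(void)
{
	struct object obj = { .allocated = 1, .pinned = 1 };

	if (object_unbind(&obj) == -EAGAIN)
		printf("busy: unbind deferred\n");

	obj.pinned = 0;			/* e.g. suspend already idled it */
	object_evict(&obj);		/* direct call, no error handling */
	printf("allocated = %d\n", obj.allocated);
	return 0;
}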
@@ -1303,13 +1308,13 @@ int i915_vma_unbind(struct i915_vma *vma)
 	intel_wakeref_t wakeref = 0;
 	int err;
 
-	if (!drm_mm_node_allocated(&vma->node))
-		return 0;
-
 	/* Optimistic wait before taking the mutex */
 	err = i915_vma_sync(vma);
 	if (err)
-		goto out_rpm;
+		return err;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return 0;
 
 	if (i915_vma_is_pinned(vma)) {
 		vma_print_allocator(vma, "is pinned");
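This hunk reorders the fast path of i915_vma_unbind(): the optimistic i915_vma_sync() now runs before the node-allocated check, and a sync failure returns directly, since at that point wakeref is still 0 and there is nothing for the out_rpm label to release. The overall shape, a cheap wait outside the lock followed by re-validation of state once it is held, is a common pattern; here is a small self-contained sketch with hypothetical names, not the kernel's exact logic:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool allocated = true;	/* stands in for drm_mm_node_allocated() */

static int wait_for_idle(void)
{
	/* Optimistic wait before taking the mutex: cheap and unlocked.
	 * Trivially modeled here; returns 0 on success. */
	return 0;
}

static int unbind(void)
{
	int err = wait_for_idle();
	if (err)
		return err;	/* nothing acquired yet: return directly */

	if (!allocated)		/* early-out once the wait has finished */
		return 0;

	pthread_mutex_lock(&lock);
	/* Re-validate under the lock: state may have changed while
	 * we waited without it. */
	if (allocated)
		allocated = false;	/* the actual teardown */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("unbind: %d\n", unbind());
	printf("unbind again (already torn down): %d\n", unbind());
	return 0;
}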
@@ -203,6 +203,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
 int __i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);