Commit a65adaf8 authored by Chris Wilson

drm/i915: Track user GTT faulting per-vma

We don't wish to refault the entire object (all of its other vma) when
unbinding one partial vma. To do this, track which vma have been faulted
into the user's address space.

v2: Use a local vma_offset to tidy up a multiline unmap_mapping_range().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-3-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 3bd40735
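The mechanism, in brief: each vma gets a USERFAULT flag bit, and the object
keeps a count of how many of its vma are currently faulted into userspace,
sitting on the device-wide userfault list only while that count is non-zero.
The sketch below illustrates the pattern with hypothetical toy_* names — it
is not the i915 code itself; the real helpers are in the diff that follows.
It assumes, as i915 does, that all updates are serialised by one big lock
(struct_mutex), which is why the non-atomic bitops suffice.

#include <linux/bitops.h>
#include <linux/list.h>

#define TOY_VMA_USERFAULT_BIT 11

struct toy_object {
        unsigned int userfault_count;    /* number of vma faulted into userspace */
        struct list_head userfault_link; /* link in the device userfault list */
};

struct toy_vma {
        unsigned long flags;             /* USERFAULT bit lives here */
        struct toy_object *obj;
};

/* Fault handler path: mark this vma; add the object on its first fault. */
static void toy_vma_mark_userfault(struct toy_vma *vma,
                                   struct list_head *userfault_list)
{
        if (!__test_and_set_bit(TOY_VMA_USERFAULT_BIT, &vma->flags) &&
            !vma->obj->userfault_count++)
                list_add(&vma->obj->userfault_link, userfault_list);
}

/* Unbind path: revoke only this vma; drop the object when none remain. */
static void toy_vma_revoke_userfault(struct toy_vma *vma)
{
        if (!__test_and_clear_bit(TOY_VMA_USERFAULT_BIT, &vma->flags))
                return;

        /* ... unmap_mapping_range() over just this vma's span ... */

        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

The payoff is in the revoke path: unbinding one partial vma zaps only that
vma's PTEs instead of calling i915_gem_release_mmap() on the whole object,
so the object's other mappings keep their faulted pages.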
@@ -98,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-        return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+        return obj->userfault_count ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
...
@@ -1914,18 +1914,22 @@ int i915_gem_fault(struct vm_fault *vmf)
         if (ret)
                 goto err_unpin;
 
-        /* Mark as being mmapped into userspace for later revocation */
-        assert_rpm_wakelock_held(dev_priv);
-        if (list_empty(&obj->userfault_link))
-                list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
         /* Finally, remap it using the new GTT offset */
         ret = remap_io_mapping(area,
                                area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                                (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
                                min_t(u64, vma->size, area->vm_end - area->vm_start),
                                &ggtt->mappable);
+        if (ret)
+                goto err_fence;
 
+        /* Mark as being mmapped into userspace for later revocation */
+        assert_rpm_wakelock_held(dev_priv);
+        if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+                list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+        GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
         i915_vma_unpin_fence(vma);
 err_unpin:
         __i915_vma_unpin(vma);
@@ -1978,6 +1982,25 @@ int i915_gem_fault(struct vm_fault *vmf)
         return ret;
 }
 
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+        struct i915_vma *vma;
+
+        GEM_BUG_ON(!obj->userfault_count);
+
+        obj->userfault_count = 0;
+        list_del(&obj->userfault_link);
+        drm_vma_node_unmap(&obj->base.vma_node,
+                           obj->base.dev->anon_inode->i_mapping);
+
+        list_for_each_entry(vma, &obj->vma_list, obj_link) {
+                if (!i915_vma_is_ggtt(vma))
+                        break;
+
+                i915_vma_unset_userfault(vma);
+        }
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -2008,12 +2031,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
         lockdep_assert_held(&i915->drm.struct_mutex);
         intel_runtime_pm_get(i915);
 
-        if (list_empty(&obj->userfault_link))
+        if (!obj->userfault_count)
                 goto out;
 
-        list_del_init(&obj->userfault_link);
-        drm_vma_node_unmap(&obj->base.vma_node,
-                           obj->base.dev->anon_inode->i_mapping);
+        __i915_gem_object_release_mmap(obj);
 
         /* Ensure that the CPU's PTE are revoked and there are not outstanding
          * memory transactions from userspace before we return. The TLB
@@ -2041,11 +2062,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
          */
         list_for_each_entry_safe(obj, on,
-                                 &dev_priv->mm.userfault_list, userfault_link) {
-                list_del_init(&obj->userfault_link);
-                drm_vma_node_unmap(&obj->base.vma_node,
-                                   obj->base.dev->anon_inode->i_mapping);
-        }
+                                 &dev_priv->mm.userfault_list, userfault_link)
+                __i915_gem_object_release_mmap(obj);
 
         /* The fence will be lost when the device powers down. If any were
          * in use by hardware (i.e. they are pinned), we should not be powering
@@ -2068,7 +2086,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
                 if (!reg->vma)
                         continue;
 
-                GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                 reg->dirty = true;
         }
 }
@@ -4276,7 +4294,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
         mutex_init(&obj->mm.lock);
 
         INIT_LIST_HEAD(&obj->global_link);
-        INIT_LIST_HEAD(&obj->userfault_link);
         INIT_LIST_HEAD(&obj->vma_list);
         INIT_LIST_HEAD(&obj->lut_list);
         INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4457,6 +4474,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
         llist_for_each_entry_safe(obj, on, freed, freed) {
                 GEM_BUG_ON(obj->bind_count);
+                GEM_BUG_ON(obj->userfault_count);
                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                 GEM_BUG_ON(!list_empty(&obj->lut_list));
...
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
         if (i915_vma_is_pinned(vma))
                 return false;
 
-        if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+        if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
                 return false;
 
         list_add(&vma->evict_link, unwind);
...
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
                 /* Ensure that all userspace CPU access is completed before
                  * stealing the fence.
                  */
-                i915_gem_release_mmap(fence->vma->obj);
+                GEM_BUG_ON(fence->vma->fence != fence);
+                i915_vma_revoke_mmap(fence->vma);
 
                 fence->vma->fence = NULL;
                 fence->vma = NULL;
@@ -451,7 +452,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
                 GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
 
                 if (fence->vma)
-                        i915_gem_release_mmap(fence->vma->obj);
+                        i915_vma_revoke_mmap(fence->vma);
         }
 }
@@ -479,7 +480,7 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
          */
         if (vma && !i915_gem_object_is_tiled(vma->obj)) {
                 GEM_BUG_ON(!reg->dirty);
-                GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+                GEM_BUG_ON(i915_vma_has_userfault(vma));
 
                 list_move(&reg->link, &dev_priv->mm.fence_list);
                 vma->fence = NULL;
...
@@ -123,6 +123,7 @@ struct drm_i915_gem_object {
         /**
          * Whether the object is currently in the GGTT mmap.
          */
+        unsigned int userfault_count;
         struct list_head userfault_link;
 
         struct list_head batch_pool_link;
...
@@ -690,6 +690,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
         vma->iomap = NULL;
 }
 
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+        struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+        u64 vma_offset;
+
+        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+        if (!i915_vma_has_userfault(vma))
+                return;
+
+        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+        GEM_BUG_ON(!vma->obj->userfault_count);
+
+        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+                            drm_vma_node_offset_addr(node) + vma_offset,
+                            vma->size,
+                            1);
+
+        i915_vma_unset_userfault(vma);
+        if (!--vma->obj->userfault_count)
+                list_del(&vma->obj->userfault_link);
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
         struct drm_i915_gem_object *obj = vma->obj;
@@ -753,11 +777,13 @@ int i915_vma_unbind(struct i915_vma *vma)
                         return ret;
 
                 /* Force a pagefault for domain tracking on next user access */
-                i915_gem_release_mmap(obj);
+                i915_vma_revoke_mmap(vma);
 
                 __i915_vma_iounmap(vma);
                 vma->flags &= ~I915_VMA_CAN_FENCE;
         }
+        GEM_BUG_ON(vma->fence);
+        GEM_BUG_ON(i915_vma_has_userfault(vma));
 
         if (likely(!vma->vm->closed)) {
                 trace_i915_vma_unbind(vma);
...
@@ -66,7 +66,7 @@ struct i915_vma {
          * that exist in the ctx->handle_vmas LUT for this vma.
          */
         unsigned int open_count;
-        unsigned int flags;
+        unsigned long flags;
         /**
          * How many users have pinned this object in GTT space. The following
          * users can each hold at most one reference: pwrite/pread, execbuffer
...@@ -88,6 +88,8 @@ struct i915_vma { ...@@ -88,6 +88,8 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(8) #define I915_VMA_GGTT BIT(8)
#define I915_VMA_CAN_FENCE BIT(9) #define I915_VMA_CAN_FENCE BIT(9)
#define I915_VMA_CLOSED BIT(10) #define I915_VMA_CLOSED BIT(10)
#define I915_VMA_USERFAULT_BIT 11
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
unsigned int active; unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES]; struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -146,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
         return vma->flags & I915_VMA_CLOSED;
 }
 
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+        return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+        return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+        return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
         return vma->active;
@@ -244,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
                         u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
...