Commit 20ee27bd authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Make compilation of userptr code depend on MMU_NOTIFIER.

Now that unsynchronized mappings are removed, userptr only works when
the MMU notifier is enabled. Put all of the userptr code behind an
MMU notifier ifdef.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-16-maarten.lankhorst@linux.intel.com
parent c6bcc0c2
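
For readers unfamiliar with this style of guard, the following stand-alone sketch illustrates the pattern the diff applies. It is not code from the driver; names such as gem_object, object_is_userptr() and userptr_ioctl() are invented for the example. With CONFIG_MMU_NOTIFIER unset, the userptr query collapses to a constant false and the ioctl fails with -ENODEV, which is why per-function stub implementations are no longer needed.

    /* Minimal sketch of the CONFIG_MMU_NOTIFIER guard pattern (illustrative names). */
    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct gem_object {
    #ifdef CONFIG_MMU_NOTIFIER
            void *userptr_mm;       /* field only exists when notifiers are built in */
    #endif
            unsigned long size;
    };

    static inline bool object_is_userptr(const struct gem_object *obj)
    {
    #ifdef CONFIG_MMU_NOTIFIER
            return obj->userptr_mm != NULL;
    #else
            (void)obj;
            return false;           /* constant: userptr-only paths drop out at compile time */
    #endif
    }

    static int userptr_ioctl(struct gem_object *obj, void *user_ptr)
    {
    #ifdef CONFIG_MMU_NOTIFIER
            /* A real driver would pin the mm and register an MMU notifier here. */
            obj->userptr_mm = user_ptr;
            return 0;
    #else
            (void)obj;
            (void)user_ptr;
            return -ENODEV;         /* feature not compiled in */
    #endif
    }

Compared with keeping empty stub functions, the advantage is that any accidental reference to a notifier-only field or helper now fails to compile instead of silently doing nothing.
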
@@ -1978,8 +1978,10 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
err = 0;
}
#ifdef CONFIG_MMU_NOTIFIER
if (!err)
flush_workqueue(eb->i915->mm.userptr_wq);
#endif
err_relock:
i915_gem_ww_ctx_init(&eb->ww, true);
@@ -551,7 +551,11 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_MMU_NOTIFIER
return obj->userptr.mm;
#else
return false;
#endif
}
static inline void
@@ -290,6 +290,7 @@ struct drm_i915_gem_object {
unsigned long *bit_17;
union {
#ifdef CONFIG_MMU_NOTIFIER
struct i915_gem_userptr {
uintptr_t ptr;
@@ -297,6 +298,7 @@ struct drm_i915_gem_object {
struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
#endif
struct drm_mm_node *stolen;
@@ -15,6 +15,8 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#if defined(CONFIG_MMU_NOTIFIER)
struct i915_mm_struct {
struct mm_struct *mm;
struct drm_i915_private *i915;
@@ -24,7 +26,6 @@ struct i915_mm_struct {
struct rcu_work work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
@@ -217,15 +218,11 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
struct i915_mmu_notifier *mn;
struct i915_mmu_object *mo;
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return -ENODEV;
if (GEM_WARN_ON(!obj->userptr.mm))
return -EINVAL;
@@ -258,32 +255,6 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
kfree(mn);
}
#else
static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
{
return -ENODEV;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
struct mm_struct *mm)
{
}
#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
@@ -725,6 +696,8 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.release = i915_gem_userptr_release,
};
#endif
/*
* Creates a new mm object that wraps some normal memory from the process
* context - user memory.
@@ -765,12 +738,12 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
static struct lock_class_key lock_class;
static struct lock_class_key __maybe_unused lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
struct drm_i915_gem_object __maybe_unused *obj;
int __maybe_unused ret;
u32 __maybe_unused handle;
if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
@@ -809,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
return -EFAULT;
if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
return -ENODEV;
if (args->flags & I915_USERPTR_READ_ONLY) {
/*
* On almost all of the older hw, we cannot tell the GPU that
@@ -818,6 +794,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
#ifdef CONFIG_MMU_NOTIFIER
obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
@@ -839,7 +816,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
*/
ret = i915_gem_userptr_init__mm_struct(obj);
if (ret == 0)
ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
ret = i915_gem_userptr_init__mmu_notifier(obj);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -850,10 +827,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
args->handle = handle;
return 0;
#else
return -ENODEV;
#endif
}
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
spin_lock_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
@@ -863,11 +844,14 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
#endif
return 0;
}
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
destroy_workqueue(dev_priv->mm.userptr_wq);
#endif
}
@@ -554,12 +554,14 @@ struct i915_gem_mm {
struct notifier_block vmap_notifier;
struct shrinker shrinker;
#ifdef CONFIG_MMU_NOTIFIER
/**
* Workqueue to fault in userptr pages, flushed by the execbuf
* when required but otherwise left to userspace to try again
* on EAGAIN.
*/
struct workqueue_struct *userptr_wq;
#endif
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;