Commit 89f82cbb authored by Rob Clark

drm/msm: fix use of copy_from_user() while holding spinlock

copy_from_user() can fault and sleep, which is not allowed while holding a
spinlock. Use __copy_from_user_inatomic() instead, and fall back to a slow
path that drops and re-acquires the lock when a fault does occur.

Cc: stable@vger.kernel.org
Reported-by: Vaishali Thakkar <vaishali.thakkar@oracle.com>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 969af80f
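The pattern in this commit is general: copying from user space can trigger a
page fault, and the fault handler may sleep, so a plain copy_from_user() under
a spinlock risks sleeping in atomic context. The fix disables page faults for
a fast-path copy and, if that copy fails, drops the lock, performs the
faulting copy safely, and re-takes the lock. Below is a minimal sketch of the
pattern, not the driver code itself (which follows in the diff); the function
and parameter names here are placeholders:

/*
 * Sketch only: the fast-path/slow-path copy pattern this commit applies.
 * Assumes the caller already validated "src" with access_ok(), took "lock",
 * and called pagefault_disable().
 */
static int copy_one_locked(spinlock_t *lock, void *dst,
		const void __user *src, unsigned long n)
{
	unsigned long ret;

	/* Fast path: with page faults disabled, a faulting copy returns
	 * immediately instead of sleeping under the lock. */
	ret = __copy_from_user_inatomic(dst, src, n);
	if (unlikely(ret)) {
		/*
		 * Slow path: drop the lock, let the copy fault and sleep
		 * as needed, then re-acquire.  The caller must revalidate
		 * anything the lock protected across this window.
		 */
		pagefault_enable();
		spin_unlock(lock);
		ret = copy_from_user(dst, src, n);
		spin_lock(lock);
		pagefault_disable();
	}
	return ret ? -EFAULT : 0;
}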
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
 	kfree(submit);
 }
 
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_from_user_inatomic(to, from, n);
+	return -EFAULT;
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	int ret = 0;
 
 	spin_lock(&file->table_lock);
+	pagefault_disable();
 
 	for (i = 0; i < args->nr_bos; i++) {
 		struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		 */
 		submit->bos[i].flags = 0;
 
-		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-		if (ret) {
-			ret = -EFAULT;
-			goto out_unlock;
+		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+		if (unlikely(ret)) {
+			pagefault_enable();
+			spin_unlock(&file->table_lock);
+			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+			if (ret)
+				goto out;
+			spin_lock(&file->table_lock);
+			pagefault_disable();
 		}
 
 		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	}
 
 out_unlock:
-	submit->nr_bos = i;
+	pagefault_enable();
 	spin_unlock(&file->table_lock);
 
+out:
+	submit->nr_bos = i;
+
 	return ret;
 }