Commit 8cdb878d authored by Christopher Yeoh, committed by Linus Torvalds

Fix race in process_vm_rw_core

This fixes the race in process_vm_rw_core found by Oleg (see

  http://article.gmane.org/gmane.linux.kernel/1235667/

for details).

This has been updated since I last sent it, as the newly created
mm_access() function does almost exactly what parts of the previous
version of this patch did.

In order to use mm_access() even when /proc isn't enabled, we move it
to kernel/fork.c, where other related process mm access functions
already live.
Signed-off-by: Chris Yeoh <yeohc@au1.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 24b36da3
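
For context, the diff below replaces an open-coded task_lock() /
__ptrace_may_access() / task->mm sequence with a single call to the new
mm_access() helper, which performs the permission check and takes the mm
reference under cred_guard_mutex, so the two steps can no longer race with
exec. A minimal sketch of the calling convention this establishes (the
caller function is hypothetical; it mirrors what process_vm_rw_core does
after this patch):

	/* Hypothetical caller: the usage pattern mm_access() is designed for. */
	static int use_target_mm(struct task_struct *task)
	{
		struct mm_struct *mm;

		mm = mm_access(task, PTRACE_MODE_ATTACH);
		if (!mm || IS_ERR(mm))
			return IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;

		/* ... operate on the target task's memory ... */

		mmput(mm);	/* drop the reference mm_access() took */
		return 0;
	}
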
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -198,26 +198,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
 	return result;
 }
 
-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
-{
-	struct mm_struct *mm;
-	int err;
-
-	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-	if (err)
-		return ERR_PTR(err);
-
-	mm = get_task_mm(task);
-	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode)) {
-		mmput(mm);
-		mm = ERR_PTR(-EACCES);
-	}
-	mutex_unlock(&task->signal->cred_guard_mutex);
-
-	return mm;
-}
-
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
 	return mm_access(task, PTRACE_MODE_READ);
...
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2259,6 +2259,12 @@ static inline void mmdrop(struct mm_struct * mm)
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and ptrace_may_access with the mode parameter passed to it
+ * succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
...
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
...
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto free_proc_pages;
 	}
 
-	task_lock(task);
-	if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-		task_unlock(task);
-		rc = -EPERM;
-		goto put_task_struct;
-	}
-	mm = task->mm;
-
-	if (!mm || (task->flags & PF_KTHREAD)) {
-		task_unlock(task);
-		rc = -EINVAL;
+	mm = mm_access(task, PTRACE_MODE_ATTACH);
+	if (!mm || IS_ERR(mm)) {
+		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+		/*
+		 * Explicitly map EACCES to EPERM as EPERM is a more
+		 * appropriate error code for process_vm_readv/writev
+		 */
+		if (rc == -EACCES)
+			rc = -EPERM;
 		goto put_task_struct;
 	}
-
-	atomic_inc(&mm->mm_users);
-	task_unlock(task);
 
 	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
...
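
The error handling in the hunk above relies on the kernel's ERR_PTR idiom,
where a pointer return value encodes either a valid pointer, NULL, or a
negative errno. A small sketch of how a caller distinguishes the three
results mm_access() can produce (the wrapper name is hypothetical):

	#include <linux/err.h>

	/* Hypothetical wrapper: normalize the three possible mm_access() results. */
	static struct mm_struct *get_target_mm(struct task_struct *task)
	{
		struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);

		if (IS_ERR(mm))		/* errno encoded in the pointer, e.g. -EACCES */
			return mm;
		if (!mm)		/* no mm: kernel thread or task already exiting */
			return ERR_PTR(-ESRCH);
		return mm;		/* valid reference; caller must mmput() it */
	}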