Commit 7dbb1d67 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] clarify get_task_mm (mmgrab)

Clarify mmgrab by collapsing it into get_task_mm (in fork.c not inline),
and commenting on the special case it is guarding against: when use_mm in
an AIO daemon temporarily adopts the mm while it's on its way out.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c524e494
@@ -316,11 +316,7 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
 	state = *get_task_state(task);
 	vsize = eip = esp = 0;
-	task_lock(task);
-	mm = task->mm;
-	if(mm)
-		mm = mmgrab(mm);
-	task_unlock(task);
+	mm = get_task_mm(task);
 	if (mm) {
 		down_read(&mm->mmap_sem);
 		vsize = task_vsize(mm);
@@ -788,8 +788,8 @@ static inline void mmdrop(struct mm_struct * mm)
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-/* Grab a reference to the mm if its not already going away */
-extern struct mm_struct *mmgrab(struct mm_struct *);
+/* Grab a reference to a task's mm, if it is not already going away */
+extern struct mm_struct *get_task_mm(struct task_struct *task);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
@@ -890,27 +890,7 @@ static inline void task_unlock(struct task_struct *p)
 {
 	spin_unlock(&p->alloc_lock);
 }
-
-/**
- * get_task_mm - acquire a reference to the task's mm
- *
- * Returns %NULL if the task has no mm. User must release
- * the mm via mmput() after use.
- */
-static inline struct mm_struct * get_task_mm(struct task_struct * task)
-{
-	struct mm_struct * mm;
-
-	task_lock(task);
-	mm = task->mm;
-	if (mm)
-		mm = mmgrab(mm);
-	task_unlock(task);
-	return mm;
-}
-
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -477,20 +477,34 @@ void mmput(struct mm_struct *mm)
 	}
 }
 
-/*
- * Checks if the use count of an mm is non-zero and if so
- * returns a reference to it after bumping up the use count.
- * If the use count is zero, it means this mm is going away,
- * so return NULL.
+/**
+ * get_task_mm - acquire a reference to the task's mm
+ *
+ * Returns %NULL if the task has no mm. Checks if the use count
+ * of the mm is non-zero and if so returns a reference to it, after
+ * bumping up the use count. User must release the mm via mmput()
+ * after use. Typically used by /proc and ptrace.
+ *
+ * If the use count is zero, it means that this mm is going away,
+ * so return %NULL. This only happens in the case of an AIO daemon
+ * which has temporarily adopted an mm (see use_mm), in the course
+ * of its final mmput, before exit_aio has completed.
  */
-struct mm_struct *mmgrab(struct mm_struct *mm)
+struct mm_struct *get_task_mm(struct task_struct *task)
 {
-	spin_lock(&mmlist_lock);
-	if (!atomic_read(&mm->mm_users))
-		mm = NULL;
-	else
-		atomic_inc(&mm->mm_users);
-	spin_unlock(&mmlist_lock);
+	struct mm_struct *mm;
+
+	task_lock(task);
+	mm = task->mm;
+	if (mm) {
+		spin_lock(&mmlist_lock);
+		if (!atomic_read(&mm->mm_users))
+			mm = NULL;
+		else
+			atomic_inc(&mm->mm_users);
+		spin_unlock(&mmlist_lock);
+	}
+	task_unlock(task);
 	return mm;
 }
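For illustration, a minimal caller sketch, not part of this commit: sketch_task_vsize() is a hypothetical helper name, but the pattern is the one proc_pid_stat() follows in the first hunk above, with the calls (get_task_mm, down_read, task_vsize, up_read, mmput) taken from that hunk.

/*
 * Sketch only, assuming the post-patch API: take a counted reference
 * to the task's mm, inspect it under mmap_sem, then drop the
 * reference with mmput().
 */
static unsigned long sketch_task_vsize(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);	/* NULL if no mm, or mm going away */
	unsigned long vsize = 0;

	if (mm) {
		down_read(&mm->mmap_sem);
		vsize = task_vsize(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);	/* release the reference taken by get_task_mm */
	}
	return vsize;
}

Because get_task_mm() refuses to bump mm_users once it has dropped to zero, a caller like this cannot resurrect an mm that an AIO daemon has only temporarily adopted via use_mm() during its final mmput().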