Commit d34bc48f authored by Andrew Morton, committed by Linus Torvalds

include/linux/sched/mm.h: re-inline mmdrop()

As Peter points out, doing a CALL+RET for just the decrement is a bit silly.

Fixes: d70f2a14 ("include/linux/sched/mm.h: uninline mmdrop_async(), etc")
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7ed1c190
include/linux/sched/mm.h
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
 	atomic_inc(&mm->mm_count);
 }
 
-extern void mmdrop(struct mm_struct *mm);
+extern void __mmdrop(struct mm_struct *mm);
+
+static inline void mmdrop(struct mm_struct *mm)
+{
+	/*
+	 * The implicit full barrier implied by atomic_dec_and_test() is
+	 * required by the membarrier system call before returning to
+	 * user-space, after storing to rq->curr.
+	 */
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+		__mmdrop(mm);
+}
 
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
kernel/fork.c
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
  * is dropped: either by a lazy thread or by
  * mmput.  Free the page directory and the mm.
  */
-static void __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
 	put_user_ns(mm->user_ns);
 	free_mm(mm);
 }
-
-void mmdrop(struct mm_struct *mm)
-{
-	/*
-	 * The implicit full barrier implied by atomic_dec_and_test() is
-	 * required by the membarrier system call before returning to
-	 * user-space, after storing to rq->curr.
-	 */
-	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-		__mmdrop(mm);
-}
-EXPORT_SYMBOL_GPL(mmdrop);
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 static void mmdrop_async_fn(struct work_struct *work)
 {
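For context, here is a minimal userspace sketch of the pattern the diff applies: the reference-count decrement stays inline at every call site, and only the rare final drop pays for a call into the out-of-line free routine. It uses C11 <stdatomic.h> rather than the kernel's atomic_t, and the names obj, obj_put() and __obj_free() are hypothetical stand-ins for mm_struct, mmdrop() and __mmdrop().

/*
 * Sketch of the inline-fast-path refcount pattern, assuming C11 atomics.
 * Not kernel code: obj, obj_put() and __obj_free() are illustrative names.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

/* Out-of-line slow path: runs only when the last reference is dropped. */
static void __obj_free(struct obj *o)
{
	printf("freeing object\n");
	free(o);
}

/*
 * Inline fast path: the decrement happens at the call site, so the common
 * case (refcount still > 0) costs no CALL+RET.  atomic_fetch_sub() with
 * default seq_cst ordering stands in for the kernel's atomic_dec_and_test(),
 * which likewise implies a full barrier.
 */
static inline void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		__obj_free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcount, 2);	/* two references held */
	obj_put(o);			/* fast path: nothing freed */
	obj_put(o);			/* final drop: __obj_free() runs */
	return 0;
}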