Commit a1ea1c03 authored by Dave Hansen, committed by Thomas Gleixner

x86: Cleanly separate use of asm-generic/mm_hooks.h

asm-generic/mm_hooks.h provides some generic fillers for the 90%
of architectures that do not need to hook some mmap-manipulation
functions.  A comment inside says:

> Define generic no-op hooks for arch_dup_mmap and
> arch_exit_mmap, to be included in asm-FOO/mmu_context.h
> for any arch FOO which doesn't need to hook these.
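For reference, those generic fillers are just empty static inlines along these lines (a sketch of the two hooks named above only; the real header may carry additional hooks):

	static inline void arch_dup_mmap(struct mm_struct *oldmm,
					 struct mm_struct *mm)
	{
	}

	static inline void arch_exit_mmap(struct mm_struct *mm)
	{
	}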

So, does x86 need to hook these?  It depends on CONFIG_PARAVIRT.
We *conditionally* include this generic header if we have
CONFIG_PARAVIRT=n.  That's madness.

With this patch, x86 stops using asm-generic/mm_hooks.h entirely.
We use our own copies of arch_dup_mmap() and arch_exit_mmap(), which
always call paravirt_arch_dup_mmap() and paravirt_arch_exit_mmap();
the paravirt code provides empty stubs for those when CONFIG_PARAVIRT
is disabled.
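In outline, the result is (condensed from the diff below, not verbatim source):

	/* arch/x86/include/asm/mmu_context.h: always built on x86 */
	static inline void arch_dup_mmap(struct mm_struct *oldmm,
					 struct mm_struct *mm)
	{
		paravirt_arch_dup_mmap(oldmm, mm);  /* PVOP call or empty stub */
	}

	static inline void arch_exit_mmap(struct mm_struct *mm)
	{
		paravirt_arch_exit_mmap(mm);        /* PVOP call or empty stub */
	}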
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/20141118182349.14567FA5@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 68c009c4
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,8 +12,6 @@
 #include <asm/paravirt.h>
 #include <asm/mpx.h>
 #ifndef CONFIG_PARAVIRT
-#include <asm-generic/mm_hooks.h>
-
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
 {
@@ -103,6 +101,17 @@ do { \
 } while (0)
 #endif
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+	paravirt_arch_dup_mmap(oldmm, mm);
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+	paravirt_arch_exit_mmap(mm);
+}
+
 static inline void arch_bprm_mm_init(struct mm_struct *mm,
 		struct vm_area_struct *vma)
 {
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -330,13 +330,13 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
+					  struct mm_struct *mm)
 {
 	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
 }
 
-static inline void arch_exit_mmap(struct mm_struct *mm)
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
 }
@@ -986,5 +986,15 @@ extern void default_banner(void);
 #endif /* __ASSEMBLY__ */
 #else  /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+#ifndef __ASSEMBLY__
+static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
+					  struct mm_struct *mm)
+{
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+}
+#endif /* __ASSEMBLY__ */
 #endif /* !CONFIG_PARAVIRT */
 #endif /* _ASM_X86_PARAVIRT_H */
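
For context, these hooks are invoked from generic mm code roughly as follows (a simplified sketch of the call sites, not the exact kernel source):

	/* kernel/fork.c: duplicating an mm on fork */
	static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
	{
		/* ... copy VMAs and page tables ... */
		arch_dup_mmap(oldmm, mm);
		return 0;
	}

	/* mm/mmap.c: tearing down an address space */
	void exit_mmap(struct mm_struct *mm)
	{
		arch_exit_mmap(mm);
		/* ... unmap and free the address space ... */
	}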