Commit 56f434f4 authored by Jason Gunthorpe

mm/mmu_notifier: define the header pre-processor parts even if disabled

Now that we have KERNEL_HEADER_TEST, all headers are generally compile
tested, so relying on makefile tricks to avoid compiling code that depends
on CONFIG_MMU_NOTIFIER is more annoying.

Instead, follow the usual pattern and provide most of the header
unconditionally, with only the functions stubbed out when
CONFIG_MMU_NOTIFIER is disabled. This ensures the code compiles no matter
what the config setting is.
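
As a minimal sketch of that pattern (illustrative only; the feature and
function names below are hypothetical, not the actual mmu_notifier API):

	/* some_feature.h -- stub pattern for a config-gated API */
	struct mm_struct;

	#ifdef CONFIG_SOME_FEATURE
	/* real implementation is provided by some_feature.c */
	void some_feature_mm_release(struct mm_struct *mm);
	#else /* !CONFIG_SOME_FEATURE */
	/* empty stub so callers compile regardless of the config setting */
	static inline void some_feature_mm_release(struct mm_struct *mm)
	{
	}
	#endif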

While here, move struct mmu_notifier_mm into mmu_notifier.c, since it is
private to that file.
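
The split then looks roughly like this (a sketch drawn from the hunks
below, not the complete definitions):

	/* include/linux/mmu_notifier.h: only an opaque forward declaration */
	struct mmu_notifier_mm;

	/* mm/mmu_notifier.c: the full definition stays private to this file */
	struct mmu_notifier_mm {
		/* all mmu notifiers registered in this mm are queued in this list */
		struct hlist_head list;
		/* to serialize the list modifications and hlist_unhashed */
		spinlock_t lock;
	};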

Link: https://lore.kernel.org/r/20191112202231.3856-2-jgg@ziepe.ca
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 0e64e5b3
@@ -7,8 +7,9 @@
#include <linux/mm_types.h>
#include <linux/srcu.h>
struct mmu_notifier_mm;
struct mmu_notifier;
struct mmu_notifier_ops;
struct mmu_notifier_range;
/**
* enum mmu_notifier_event - reason for the mmu notifier callback
@@ -40,36 +41,8 @@ enum mmu_notifier_event {
MMU_NOTIFY_SOFT_DIRTY,
};
#ifdef CONFIG_MMU_NOTIFIER
#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
/*
* The mmu notifier_mm structure is allocated and installed in
* mm->mmu_notifier_mm inside the mm_take_all_locks() protected
* critical section and it's released only when mm_count reaches zero
* in mmdrop().
*/
struct mmu_notifier_mm {
/* all mmu notifiers registerd in this mm are queued in this list */
struct hlist_head list;
/* to serialize the list modifications and hlist_unhashed */
spinlock_t lock;
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
struct mmu_notifier_range {
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
};
struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
@@ -249,6 +222,21 @@ struct mmu_notifier {
unsigned int users;
};
#ifdef CONFIG_MMU_NOTIFIER
#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
struct mmu_notifier_range {
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return unlikely(mm->mmu_notifier_mm);
......
@@ -27,6 +27,19 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
};
#endif
/*
* The mmu notifier_mm structure is allocated and installed in
* mm->mmu_notifier_mm inside the mm_take_all_locks() protected
* critical section and it's released only when mm_count reaches zero
* in mmdrop().
*/
struct mmu_notifier_mm {
/* all mmu notifiers registered in this mm are queued in this list */
struct hlist_head list;
/* to serialize the list modifications and hlist_unhashed */
spinlock_t lock;
};
/*
* This function can't run concurrently against mmu_notifier_register
* because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
......