Commit af8e3354 authored by Hugh Dickins, committed by Linus Torvalds

mm: CONFIG_MMU for PG_mlocked

Remove three degrees of obfuscation, left over from when we had
CONFIG_UNEVICTABLE_LRU.  MLOCK_PAGES is CONFIG_HAVE_MLOCKED_PAGE_BIT is
CONFIG_HAVE_MLOCK is CONFIG_MMU.  rmap.o (and memory-failure.o) are only
built when CONFIG_MMU, so don't need such conditions at all.

Somehow, I feel no compulsion to remove the CONFIG_HAVE_MLOCK* lines from
169 defconfigs: leave those to evolve in due course.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 53f79acb
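To make the chain concrete before reading the diff: MLOCK_PAGES was 1 exactly when CONFIG_HAVE_MLOCKED_PAGE_BIT was set, which defaulted to y exactly when CONFIG_HAVE_MLOCK was y, which defaulted to y exactly when CONFIG_MMU was y. A stand-alone sketch of that indirection, flattened into the preprocessor for illustration only (in the tree, the first two levels were Kconfig symbols in mm/Kconfig, and MLOCK_PAGES was a macro in include/linux/page-flags.h):

/* sketch.c: the pre-patch aliasing, flattened into cpp for illustration.
 * In the real tree HAVE_MLOCK and HAVE_MLOCKED_PAGE_BIT were Kconfig
 * symbols, and MLOCK_PAGES was a macro in include/linux/page-flags.h.
 */
#ifdef CONFIG_MMU
#define CONFIG_HAVE_MLOCK 1             /* was: default y if MMU=y */
#endif

#ifdef CONFIG_HAVE_MLOCK
#define CONFIG_HAVE_MLOCKED_PAGE_BIT 1  /* was: default y if HAVE_MLOCK=y */
#endif

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
#define MLOCK_PAGES 1
#else
#define MLOCK_PAGES 0
#endif

/* Net effect: MLOCK_PAGES == 1 iff CONFIG_MMU. After this commit, code
 * tests CONFIG_MMU directly and all three intermediaries disappear. */
int mlock_pages = MLOCK_PAGES;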
@@ -99,7 +99,7 @@ enum pageflags {
 	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 	PG_unevictable,		/* Page is "unevictable"  */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
 	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
 #else
-#define MLOCK_PAGES 0
 PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
 	TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
 #endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_MLOCKED		0
 #endif
...
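The point of the PAGEFLAG_FALSE/SETPAGEFLAG_NOOP stubs above is that, without CONFIG_MMU, the accessors become compile-time constants, so callers need no #ifdef or MLOCK_PAGES guard of their own; the compiler simply discards the dead branches. A minimal stand-alone sketch of the pattern (simplified, not the kernel's actual macro bodies):

#include <stdio.h>

struct page { unsigned long flags; };

#ifdef CONFIG_MMU
#define PG_mlocked 20                   /* some bit number */
static inline int PageMlocked(struct page *p)
{
	return !!(p->flags & (1UL << PG_mlocked));
}
#else
/* What PAGEFLAG_FALSE(Mlocked) boils down to: a constant-false stub. */
static inline int PageMlocked(struct page *p)
{
	(void)p;
	return 0;
}
#endif

int main(void)
{
	struct page pg = { .flags = 0 };

	/* On !CONFIG_MMU the compiler sees "if (0)" and drops the branch,
	 * so no runtime or size cost remains at the call site. */
	if (PageMlocked(&pg))
		puts("mlocked");
	else
		puts("not mlocked");
	return 0;
}

Compile the sketch with and without -DCONFIG_MMU to see both shapes.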
@@ -200,14 +200,6 @@ config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS
 
-config HAVE_MLOCK
-	bool
-	default y if MMU=y
-
-config HAVE_MLOCKED_PAGE_BIT
-	bool
-	default y if HAVE_MLOCK=y
-
 config MMU_NOTIFIER
 	bool
...
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 		SetPageUnevictable(new);
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
...
@@ -582,10 +582,8 @@ static struct page_state {
 	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
 	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
 	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },
-#endif
 
 	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
 	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },
...
@@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * free_page_mlock() -- clean up attempts to free and mlocked() page.
  * Page should not be on lru, so no need to fix that up.
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page)
 	__dec_zone_page_state(page, NR_MLOCK);
 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
 
 static inline int free_pages_check(struct page *page)
 {
...
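page_alloc.o, unlike rmap.o, is built for nommu kernels too, so dropping the #else stub here rests on a different point: free_page_mlock()'s body compiles regardless of CONFIG_MMU, and (assuming its only call sites are the __TestClearPageMlocked()-guarded tests in page_alloc.c) the guard folds to a constant 0 without CONFIG_MMU, leaving the call as dead code. A stand-alone sketch of that shape (not kernel code; the helper names mirror the kernel's):

#include <stdio.h>

struct page { unsigned long flags; };

#ifdef CONFIG_MMU
static inline int __TestClearPageMlocked(struct page *p)
{
	int was = !!(p->flags & 1UL);
	p->flags &= ~1UL;
	return was;
}
#else
/* __TESTCLEARFLAG_FALSE: constant 0 when PG_mlocked does not exist. */
static inline int __TestClearPageMlocked(struct page *p)
{
	(void)p;
	return 0;
}
#endif

/* Compiles on both configs because it no longer names PG_mlocked. */
static inline void free_page_mlock(struct page *p)
{
	(void)p;
	puts("uncharging freed mlocked page");
}

static void free_one_page_sketch(struct page *p)
{
	int wasMlocked = __TestClearPageMlocked(p);

	if (wasMlocked)         /* constant-false on !MMU: call eliminated */
		free_page_mlock(p);
}

int main(void)
{
	struct page pg = { .flags = 1UL };
	free_one_page_sketch(&pg);
	return 0;
}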
@@ -788,7 +788,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				ret = SWAP_MLOCK;
 				goto out_unmap;
 			}
-			if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+			if (TTU_ACTION(flags) == TTU_MUNLOCK)
 				goto out_unmap;
 		}
 		if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -861,7 +861,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 
-	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+	if (ret == SWAP_MLOCK) {
 		ret = SWAP_AGAIN;
 		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 			if (vma->vm_flags & VM_LOCKED) {
@@ -938,11 +938,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		return ret;
 
 	/*
-	 * MLOCK_PAGES => feature is configured.
-	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
 	 * keep the sem while scanning the cluster for mlocking pages.
 	 */
-	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 		locked_vma = (vma->vm_flags & VM_LOCKED);
 		if (!locked_vma)
 			up_read(&vma->vm_mm->mmap_sem);	/* don't need it */
@@ -1075,9 +1074,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-					(vma->vm_flags & VM_LOCKED))
-				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			if (cursor > max_nl_cursor)
 				max_nl_cursor = cursor;
@@ -1110,9 +1106,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
-					(vma->vm_flags & VM_LOCKED))
-				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
 				cursor < vma->vm_end - vma->vm_start) {
...