Commit 68377659 authored by KOSAKI Motohiro, committed by Linus Torvalds

mm: remove CONFIG_UNEVICTABLE_LRU config option

Currently, nobody wants to turn UNEVICTABLE_LRU off.  Thus this
configurability is unnecessary.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andi Kleen <andi@firstfloor.org>
Acked-by: Minchan Kim <minchan.kim@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bce7394a
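
The change is mechanical throughout: every `#ifdef CONFIG_UNEVICTABLE_LRU` guard is dropped, the real implementation is kept unconditionally, and the `#else` stubs are deleted. A minimal before/after sketch of the pattern, using the mapping_set_unevictable() helper from the pagemap.h hunks below:

    /* Before: two copies of every helper to maintain. */
    #ifdef CONFIG_UNEVICTABLE_LRU
    static inline void mapping_set_unevictable(struct address_space *mapping)
    {
        set_bit(AS_UNEVICTABLE, &mapping->flags);
    }
    #else
    static inline void mapping_set_unevictable(struct address_space *mapping) { }
    #endif

    /* After: the real helper is always built. */
    static inline void mapping_set_unevictable(struct address_space *mapping)
    {
        set_bit(AS_UNEVICTABLE, &mapping->flags);
    }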
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 		"Node %d Inactive(anon): %8lu kB\n"
 		"Node %d Active(file): %8lu kB\n"
 		"Node %d Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
 		"Node %d Unevictable: %8lu kB\n"
 		"Node %d Mlocked: %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
 		"Node %d HighTotal: %8lu kB\n"
 		"Node %d HighFree: %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 		nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
 		nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
 		nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		nid, K(node_page_state(nid, NR_UNEVICTABLE)),
 		nid, K(node_page_state(nid, NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
 		nid, K(i.totalhigh),
 		nid, K(i.freehigh),
......
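
Both the node_read_meminfo() hunks above and the meminfo_proc_show() hunks below report page counts through a K() helper, conventionally a small local macro in each file that converts pages to kilobytes. A sketch, assuming the usual definition:

    /* Pages to kB: with 4 KiB pages, PAGE_SHIFT is 12, so K(x) == x * 4. */
    #define K(x) ((x) << (PAGE_SHIFT - 10))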
@@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		"Inactive(anon): %8lu kB\n"
 		"Active(file): %8lu kB\n"
 		"Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
 		"Unevictable: %8lu kB\n"
 		"Mlocked: %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
 		"HighTotal: %8lu kB\n"
 		"HighFree: %8lu kB\n"
@@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		K(pages[LRU_INACTIVE_ANON]),
 		K(pages[LRU_ACTIVE_FILE]),
 		K(pages[LRU_INACTIVE_FILE]),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		K(pages[LRU_UNEVICTABLE]),
 		K(global_page_state(NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
 		K(i.totalhigh),
 		K(i.freehigh),
......
@@ -172,10 +172,8 @@ static u64 get_uflags(struct page *page)
 	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
 	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

-#ifdef CONFIG_UNEVICTABLE_LRU
 	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
 	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
-#endif

 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
......
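
After this hunk, KPF_UNEVICTABLE and KPF_MLOCKED are published through /proc/kpageflags on every build instead of only when the option was set. kpf_copy_bit() republishes one kernel page-flag bit as one bit of that user-visible ABI; a plausible minimal form, consistent with how it is called above (an assumption, since the helper itself is not part of this diff):

    /* Copy kernel flag bit kbit of kflags into user-ABI bit position ubit. */
    static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
    {
        return ((kflags >> kbit) & 1) << ubit;
    }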
@@ -83,13 +83,8 @@ enum zone_stat_item {
 	NR_ACTIVE_ANON,		/* " " " " " */
 	NR_INACTIVE_FILE,	/* " " " " " */
 	NR_ACTIVE_FILE,		/* " " " " " */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	NR_UNEVICTABLE,		/* " " " " " */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
-#else
-	NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-	NR_MLOCK = NR_ACTIVE_FILE,
-#endif
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
@@ -132,11 +127,7 @@ enum lru_list {
 	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
 	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
 	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
 	LRU_UNEVICTABLE,
-#else
-	LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
 	NR_LRU_LISTS
 };
@@ -156,11 +147,7 @@ static inline int is_active_lru(enum lru_list l)
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
 	return (l == LRU_UNEVICTABLE);
-#else
-	return 0;
-#endif
 }

 enum zone_watermarks {
......
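
With the #else aliases gone, LRU_UNEVICTABLE is always a distinct list and NR_LRU_LISTS always counts it, so generic loops over the LRU lists visit it on every build and must skip it explicitly via is_unevictable_lru(). A sketch of that idiom; the for_each_lru() macro, NR_LRU_BASE, and zone_page_state() are assumed from the same headers, and the helper itself is hypothetical:

    #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

    /* Hypothetical helper: count only the pages reclaim may scan. */
    static unsigned long evictable_pages(struct zone *zone)
    {
        enum lru_list l;
        unsigned long nr = 0;

        for_each_lru(l) {
            if (is_unevictable_lru(l))
                continue;    /* never fed to the reclaim scanner */
            nr += zone_page_state(zone, NR_LRU_BASE + l);
        }
        return nr;
    }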
@@ -95,9 +95,7 @@ enum pageflags {
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	PG_unevictable,		/* Page is "unevictable" */
-#endif
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 	PG_mlocked,		/* Page is vma mlocked */
 #endif
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
 	SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif

-#ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
 	TESTCLEARFLAG(Unevictable, unevictable)
-#else
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-	SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-	__CLEARPAGEFLAG_NOOP(Unevictable)
-#endif

 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
 #endif /* !PAGEFLAGS_EXTENDED */

-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE	(1 << PG_unevictable)
-#else
-#define __PG_UNEVICTABLE	0
-#endif
-
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
 	 1 << PG_private | 1 << PG_private_2 | \
 	 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
-	 __PG_UNEVICTABLE | __PG_MLOCKED)
+	 1 << PG_unevictable | __PG_MLOCKED)

 /*
  * Flags checked when a page is prepped for return by the page allocator.
......
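
PAGEFLAG(Unevictable, unevictable) and friends are the standard accessor generators for page flags; the deleted PAGEFLAG_FALSE()/NOOP lines used to stub the same names out when the flag did not exist. Roughly, the kept generator expands to test/set/clear helpers along these lines (a sketch of the expansion, not the macro's verbatim output):

    static inline int PageUnevictable(struct page *page)
    {
        return test_bit(PG_unevictable, &page->flags);
    }
    static inline void SetPageUnevictable(struct page *page)
    {
        set_bit(PG_unevictable, &page->flags);
    }
    static inline void ClearPageUnevictable(struct page *page)
    {
        clear_bit(PG_unevictable, &page->flags);
    }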
@@ -22,9 +22,7 @@ enum mapping_flags {
 	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
 	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
 	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
-#ifdef CONFIG_UNEVICTABLE_LRU
 	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
-#endif
 };

 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
 	}
 }

-#ifdef CONFIG_UNEVICTABLE_LRU
 static inline void mapping_set_unevictable(struct address_space *mapping)
 {
 	set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
 		return test_bit(AS_UNEVICTABLE, &mapping->flags);
 	return !!mapping;
 }
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
-	return 0;
-}
-#endif

 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
......
@@ -105,18 +105,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int page_mkclean(struct page *);

-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * called in munlock()/munmap() path to check for other vmas holding
  * the page mlocked.
  */
 int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
-	return 0;	/* a.k.a. SWAP_SUCCESS */
-}
-#endif

 #else	/* !CONFIG_MMU */
......
@@ -235,7 +235,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif

-#ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
 extern void scan_mapping_unevictable_pages(struct address_space *);
@@ -244,24 +243,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int page_evictable(struct page *page,
-					struct vm_area_struct *vma)
-{
-	return 1;
-}
-
-static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
-
-static inline int scan_unevictable_register_node(struct node *node)
-{
-	return 0;
-}
-
-static inline void scan_unevictable_unregister_node(struct node *node) { }
-#endif

 extern int kswapd_run(int nid);
......
@@ -41,7 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
 		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
 		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
@@ -50,7 +49,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
 		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
 		UNEVICTABLE_MLOCKFREED,
-#endif
 		NR_VM_EVENT_ITEMS
 };
......
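
These UNEVICTABLE_* items back the unevictable_pgs_* strings un-guarded in the vmstat_text[] hunks below; call sites account them with count_vm_event(). An illustrative, hypothetical call site:

    /* Hypothetical helper: mark a page unevictable and account the cull. */
    static void cull_page(struct page *page)
    {
        SetPageUnevictable(page);
        count_vm_event(UNEVICTABLE_PGCULLED);
    }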
@@ -1325,7 +1325,6 @@ static struct ctl_table vm_table[] = {
 		.extra2		= &one,
 	},
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "scan_unevictable_pages",
@@ -1334,7 +1333,6 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &scan_unevictable_handler,
 	},
-#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
......
@@ -203,25 +203,13 @@ config VIRT_TO_BUS
 	def_bool y
 	depends on !ARCH_NO_VIRT_TO_BUS

-config UNEVICTABLE_LRU
-	bool "Add LRU list to track non-evictable pages"
-	default y
-	help
-	  Keeps unevictable pages off of the active and inactive pageout
-	  lists, so kswapd will not waste CPU time or have its balancing
-	  algorithms thrown off by scanning these pages.  Selecting this
-	  will use one page flag and increase the code size a little,
-	  say Y unless you know what you are doing.
-
-	  See Documentation/vm/unevictable-lru.txt for more information.
-
 config HAVE_MLOCK
 	bool
 	default y if MMU=y

 config HAVE_MLOCKED_PAGE_BIT
 	bool
-	default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+	default y if HAVE_MLOCK=y

 config MMU_NOTIFIER
 	bool
......
@@ -73,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 #endif

-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -85,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 	if (TestClearPageUnevictable(old))
 		SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif

 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
......
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);

-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
 	return retval;
 }

-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED. No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end,
-			int mlock)
-{
-	if (mlock && (vma->vm_flags & VM_LOCKED))
-		return make_pages_present(start, end);
-	return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-	return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specfied address range
......
@@ -2077,19 +2077,14 @@ void show_free_areas(void)
 	printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
 		" inactive_file:%lu"
-//TODO: check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
 		" unevictable:%lu"
-#endif
 		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
 		global_page_state(NR_UNEVICTABLE),
-#endif
 		global_page_state(NR_FILE_DIRTY),
 		global_page_state(NR_WRITEBACK),
 		global_page_state(NR_UNSTABLE_NFS),
@@ -2113,9 +2108,7 @@ void show_free_areas(void)
 			" inactive_anon:%lukB"
 			" active_file:%lukB"
 			" inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
 			" unevictable:%lukB"
-#endif
 			" present:%lukB"
 			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
@@ -2129,9 +2122,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_INACTIVE_ANON)),
 			K(zone_page_state(zone, NR_ACTIVE_FILE)),
 			K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
 			K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
 			K(zone->present_pages),
 			zone->pages_scanned,
 			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
......
@@ -1202,7 +1202,6 @@ int try_to_unmap(struct page *page, int migration)
 	return ret;
 }

-#ifdef CONFIG_UNEVICTABLE_LRU
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1226,4 +1225,4 @@ int try_to_munlock(struct page *page)
 	else
 		return try_to_unmap_file(page, 1, 0);
 }
-#endif
@@ -514,7 +514,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
 	int lru;
@@ -568,20 +567,6 @@ void putback_lru_page(struct page *page)
 	put_page(page);		/* drop ref from isolate */
 }

-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-	int lru;
-	VM_BUG_ON(PageLRU(page));
-
-	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-	lru_cache_add_lru(page, lru);
-	put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -2470,7 +2455,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 }
 #endif

-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2717,4 +2701,3 @@ void scan_unevictable_unregister_node(struct node *node)
 	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
-#endif
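
page_evictable() is now the real test on every build; the stub deleted from swap.h above simply declared every page evictable. A condensed sketch of what the real function checks, not its verbatim body (is_mlocked_vma() is assumed to be a static helper in this file):

    int page_evictable(struct page *page, struct vm_area_struct *vma)
    {
        if (mapping_unevictable(page_mapping(page)))
            return 0;    /* e.g. ramdisk, SHM_LOCK'd shmem */
        if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
            return 0;    /* mlocked page or VM_LOCKED vma */
        return 1;
    }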
@@ -629,10 +629,8 @@ static const char * const vmstat_text[] = {
 	"nr_active_anon",
 	"nr_inactive_file",
 	"nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
 	"nr_unevictable",
 	"nr_mlock",
-#endif
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
@@ -687,7 +685,6 @@ static const char * const vmstat_text[] = {
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",
@@ -697,7 +694,6 @@ static const char * const vmstat_text[] = {
 	"unevictable_pgs_stranded",
 	"unevictable_pgs_mlockfreed",
 #endif
-#endif
 };

 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
......