Commit ce677ce2 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] move nr_active and nr_inactive into per-CPU page

It might reduce pagemap_lru_lock hold times a little, and is more
consistent.  I think all global page accounting is now inside
page_states[].
parent 9a0bd0e3
...@@ -149,8 +149,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -149,8 +149,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
"MemShared: %8lu kB\n" "MemShared: %8lu kB\n"
"Cached: %8lu kB\n" "Cached: %8lu kB\n"
"SwapCached: %8lu kB\n" "SwapCached: %8lu kB\n"
"Active: %8u kB\n" "Active: %8lu kB\n"
"Inactive: %8u kB\n" "Inactive: %8lu kB\n"
"HighTotal: %8lu kB\n" "HighTotal: %8lu kB\n"
"HighFree: %8lu kB\n" "HighFree: %8lu kB\n"
"LowTotal: %8lu kB\n" "LowTotal: %8lu kB\n"
...@@ -164,8 +164,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -164,8 +164,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
K(i.sharedram), K(i.sharedram),
K(ps.nr_pagecache-swapper_space.nrpages), K(ps.nr_pagecache-swapper_space.nrpages),
K(swapper_space.nrpages), K(swapper_space.nrpages),
K(nr_active_pages), K(ps.nr_active),
K(nr_inactive_pages), K(ps.nr_inactive),
K(i.totalhigh), K(i.totalhigh),
K(i.freehigh), K(i.freehigh),
K(i.totalram-i.totalhigh), K(i.totalram-i.totalhigh),
......
...@@ -73,6 +73,8 @@ extern struct page_state { ...@@ -73,6 +73,8 @@ extern struct page_state {
unsigned long nr_dirty; unsigned long nr_dirty;
unsigned long nr_writeback; unsigned long nr_writeback;
unsigned long nr_pagecache; unsigned long nr_pagecache;
unsigned long nr_active; /* on active_list LRU */
unsigned long nr_inactive; /* on inactive_list LRU */
} ____cacheline_aligned_in_smp page_states[NR_CPUS]; } ____cacheline_aligned_in_smp page_states[NR_CPUS];
extern void get_page_state(struct page_state *ret); extern void get_page_state(struct page_state *ret);
......
...@@ -102,8 +102,6 @@ extern unsigned long totalhigh_pages; ...@@ -102,8 +102,6 @@ extern unsigned long totalhigh_pages;
extern unsigned int nr_free_pages(void); extern unsigned int nr_free_pages(void);
extern unsigned int nr_free_buffer_pages(void); extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void); extern unsigned int nr_free_pagecache_pages(void);
extern int nr_active_pages;
extern int nr_inactive_pages;
extern void __remove_inode_page(struct page *); extern void __remove_inode_page(struct page *);
/* Incomplete types for prototype declarations: */ /* Incomplete types for prototype declarations: */
...@@ -191,27 +189,27 @@ do { \ ...@@ -191,27 +189,27 @@ do { \
DEBUG_LRU_PAGE(page); \ DEBUG_LRU_PAGE(page); \
SetPageActive(page); \ SetPageActive(page); \
list_add(&(page)->lru, &active_list); \ list_add(&(page)->lru, &active_list); \
nr_active_pages++; \ inc_page_state(nr_active); \
} while (0) } while (0)
#define add_page_to_inactive_list(page) \ #define add_page_to_inactive_list(page) \
do { \ do { \
DEBUG_LRU_PAGE(page); \ DEBUG_LRU_PAGE(page); \
list_add(&(page)->lru, &inactive_list); \ list_add(&(page)->lru, &inactive_list); \
nr_inactive_pages++; \ inc_page_state(nr_inactive); \
} while (0) } while (0)
#define del_page_from_active_list(page) \ #define del_page_from_active_list(page) \
do { \ do { \
list_del(&(page)->lru); \ list_del(&(page)->lru); \
ClearPageActive(page); \ ClearPageActive(page); \
nr_active_pages--; \ dec_page_state(nr_active); \
} while (0) } while (0)
#define del_page_from_inactive_list(page) \ #define del_page_from_inactive_list(page) \
do { \ do { \
list_del(&(page)->lru); \ list_del(&(page)->lru); \
nr_inactive_pages--; \ dec_page_state(nr_inactive); \
} while (0) } while (0)
extern spinlock_t swaplock; extern spinlock_t swaplock;
......
...@@ -1415,16 +1415,19 @@ asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t ...@@ -1415,16 +1415,19 @@ asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t
return ret; return ret;
} }
static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr) static ssize_t
do_readahead(struct file *file, unsigned long index, unsigned long nr)
{ {
struct address_space *mapping = file->f_dentry->d_inode->i_mapping; struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
unsigned long max; unsigned long max;
struct page_state ps;
if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage) if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
return -EINVAL; return -EINVAL;
/* Limit it to a sane percentage of the inactive list.. */ /* Limit it to a sane percentage of the inactive list.. */
max = nr_inactive_pages / 2; get_page_state(&ps);
max = ps.nr_inactive / 2;
if (nr > max) if (nr > max)
nr = max; nr = max;
......
...@@ -27,8 +27,6 @@ ...@@ -27,8 +27,6 @@
unsigned long totalram_pages; unsigned long totalram_pages;
unsigned long totalhigh_pages; unsigned long totalhigh_pages;
int nr_swap_pages; int nr_swap_pages;
int nr_active_pages;
int nr_inactive_pages;
struct list_head inactive_list; struct list_head inactive_list;
struct list_head active_list; struct list_head active_list;
pg_data_t *pgdat_list; pg_data_t *pgdat_list;
...@@ -528,7 +526,7 @@ void free_pages(unsigned long addr, unsigned int order) ...@@ -528,7 +526,7 @@ void free_pages(unsigned long addr, unsigned int order)
/* /*
* Total amount of free (allocatable) RAM: * Total amount of free (allocatable) RAM:
*/ */
unsigned int nr_free_pages (void) unsigned int nr_free_pages(void)
{ {
unsigned int sum; unsigned int sum;
zone_t *zone; zone_t *zone;
...@@ -608,10 +606,7 @@ void get_page_state(struct page_state *ret) ...@@ -608,10 +606,7 @@ void get_page_state(struct page_state *ret)
{ {
int pcpu; int pcpu;
ret->nr_dirty = 0; memset(ret, 0, sizeof(*ret));
ret->nr_writeback = 0;
ret->nr_pagecache = 0;
for (pcpu = 0; pcpu < smp_num_cpus; pcpu++) { for (pcpu = 0; pcpu < smp_num_cpus; pcpu++) {
struct page_state *ps; struct page_state *ps;
...@@ -619,6 +614,8 @@ void get_page_state(struct page_state *ret) ...@@ -619,6 +614,8 @@ void get_page_state(struct page_state *ret)
ret->nr_dirty += ps->nr_dirty; ret->nr_dirty += ps->nr_dirty;
ret->nr_writeback += ps->nr_writeback; ret->nr_writeback += ps->nr_writeback;
ret->nr_pagecache += ps->nr_pagecache; ret->nr_pagecache += ps->nr_pagecache;
ret->nr_active += ps->nr_active;
ret->nr_inactive += ps->nr_inactive;
} }
} }
...@@ -658,6 +655,9 @@ void show_free_areas_core(pg_data_t *pgdat) ...@@ -658,6 +655,9 @@ void show_free_areas_core(pg_data_t *pgdat)
unsigned int order; unsigned int order;
unsigned type; unsigned type;
pg_data_t *tmpdat = pgdat; pg_data_t *tmpdat = pgdat;
struct page_state ps;
get_page_state(&ps);
printk("Free pages: %6dkB (%6dkB HighMem)\n", printk("Free pages: %6dkB (%6dkB HighMem)\n",
K(nr_free_pages()), K(nr_free_pages()),
...@@ -678,9 +678,11 @@ void show_free_areas_core(pg_data_t *pgdat) ...@@ -678,9 +678,11 @@ void show_free_areas_core(pg_data_t *pgdat)
tmpdat = tmpdat->node_next; tmpdat = tmpdat->node_next;
} }
printk("( Active: %d, inactive: %d, free: %d )\n", printk("( Active:%lu inactive:%lu dirty:%lu writeback:%lu free:%u )\n",
nr_active_pages, ps.nr_active,
nr_inactive_pages, ps.nr_inactive,
ps.nr_dirty,
ps.nr_writeback,
nr_free_pages()); nr_free_pages());
for (type = 0; type < MAX_NR_ZONES; type++) { for (type = 0; type < MAX_NR_ZONES; type++) {
......
...@@ -380,16 +380,17 @@ static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * class ...@@ -380,16 +380,17 @@ static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * class
return 0; return 0;
} }
static int FASTCALL(shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)); static int
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority) shrink_cache(int nr_pages, zone_t *classzone,
unsigned int gfp_mask, int priority, int max_scan)
{ {
struct list_head * entry; struct list_head * entry;
struct address_space *mapping; struct address_space *mapping;
int max_scan = nr_inactive_pages / priority;
int max_mapped = nr_pages << (9 - priority); int max_mapped = nr_pages << (9 - priority);
spin_lock(&pagemap_lru_lock); spin_lock(&pagemap_lru_lock);
while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) { while (--max_scan >= 0 &&
(entry = inactive_list.prev) != &inactive_list) {
struct page * page; struct page * page;
if (need_resched()) { if (need_resched()) {
...@@ -619,17 +620,25 @@ static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask ...@@ -619,17 +620,25 @@ static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask
{ {
int chunk_size = nr_pages; int chunk_size = nr_pages;
unsigned long ratio; unsigned long ratio;
struct page_state ps;
int max_scan;
nr_pages -= kmem_cache_reap(gfp_mask); nr_pages -= kmem_cache_reap(gfp_mask);
if (nr_pages <= 0) if (nr_pages <= 0)
return 0; return 0;
nr_pages = chunk_size; nr_pages = chunk_size;
/* try to keep the active list 2/3 of the size of the cache */
ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
refill_inactive(ratio);
nr_pages = shrink_cache(nr_pages, classzone, gfp_mask, priority); /*
* Try to keep the active list 2/3 of the size of the cache
*/
get_page_state(&ps);
ratio = (unsigned long)nr_pages * ps.nr_active /
((ps.nr_inactive | 1) * 2);
refill_inactive(ratio);
max_scan = ps.nr_inactive / priority;
nr_pages = shrink_cache(nr_pages, classzone,
gfp_mask, priority, max_scan);
if (nr_pages <= 0) if (nr_pages <= 0)
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment