Commit 89e004ea authored by Lee Schermerhorn, committed by Linus Torvalds

SHM_LOCKED pages are unevictable

Shmem segments locked into memory via shmctl(SHM_LOCK) should not be
kept on the normal LRU, since scanning them is a waste of time and might
throw off kswapd's balancing algorithms.  Place them on the unevictable
LRU list instead.
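A minimal userspace sequence that exercises this path might look like the
sketch below (illustrative only; it assumes sufficient RLIMIT_MEMLOCK or
CAP_IPC_LOCK, and most error handling is elided):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* create a private 1 MiB SysV shm segment */
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return 1;
	}

	char *p = shmat(id, NULL, 0);	/* map the segment */
	memset(p, 0, 1 << 20);		/* fault all pages in */

	shmctl(id, SHM_LOCK, NULL);	/* pages become unevictable */
	shmctl(id, SHM_UNLOCK, NULL);	/* pages are rescued to the normal LRUs */

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);	/* mark segment for destruction */
	return 0;
}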

Use the AS_UNEVICTABLE flag to mark the address_space of SHM_LOCKed
shared memory regions as unevictable.  These pages will then be culled
off the normal LRU lists during vmscan.
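For reference, a simplified sketch of how that culling test looks from
vmscan's side; page_mapping_evictable() is a hypothetical name for this
illustration, not the exact kernel function (the real page_evictable()
in the hunks below also takes a vma argument):

/* sketch only: a page whose mapping is flagged AS_UNEVICTABLE
 * is never a reclaim candidate */
static int page_mapping_evictable(struct page *page)
{
	if (mapping_unevictable(page_mapping(page)))
		return 0;	/* cull to the unevictable LRU */
	return 1;
}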

Add a new wrapper function to clear the mapping's unevictable state
when/if the shared memory segment is unlocked via SHM_UNLOCK.

Add scan_mapping_unevictable_pages() to mm/vmscan.c to scan all pages in
the shmem segment's mapping [struct address_space] for evictability now
that they're no longer locked, and to move any evictable pages to the
appropriate zone lru list.

Changes depend on [CONFIG_]UNEVICTABLE_LRU.

[kosaki.motohiro@jp.fujitsu.com: revert shm change]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ba9ddf49
include/linux/mm.h
@@ -700,7 +700,7 @@ static inline int page_mapped(struct page *page)
 extern void show_free_areas(void);
 
 #ifdef CONFIG_SHMEM
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 static inline int shmem_lock(struct file *file, int lock,
 			     struct user_struct *user)
include/linux/pagemap.h
@@ -40,14 +40,20 @@ static inline void mapping_set_unevictable(struct address_space *mapping)
 	set_bit(AS_UNEVICTABLE, &mapping->flags);
 }
 
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+	clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
 static inline int mapping_unevictable(struct address_space *mapping)
 {
-	if (mapping && (mapping->flags & AS_UNEVICTABLE))
-		return 1;
-	return 0;
+	if (likely(mapping))
+		return test_bit(AS_UNEVICTABLE, &mapping->flags);
+	return !!mapping;
 }
 #else
 static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
 static inline int mapping_unevictable(struct address_space *mapping)
 {
 	return 0;
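The new helper is the exact mirror of mapping_set_unevictable(). Both use
atomic bitops because mapping->flags also carries the mapping's gfp mask
and the other AS_* bits; the rewrite of mapping_unevictable() to test_bit()
likewise treats AS_UNEVICTABLE as a bit number rather than a mask, and the
final return !!mapping simply yields 0 for a NULL mapping.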
include/linux/swap.h
@@ -232,12 +232,16 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 
 #ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
 #else
 static inline int page_evictable(struct page *page,
 					struct vm_area_struct *vma)
 {
 	return 1;
 }
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
 #endif
 
 extern int kswapd_run(int nid);
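The empty stub for !CONFIG_UNEVICTABLE_LRU lets callers such as
shmem_lock() invoke scan_mapping_unevictable_pages() unconditionally;
the call compiles away when the unevictable LRU is not configured.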
ipc/shm.c
@@ -737,6 +737,10 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
+		struct file *uninitialized_var(shm_file);
+
+		lru_add_drain_all();  /* drain pagevecs to lru lists */
+
 		shp = shm_lock_check(ns, shmid);
 		if (IS_ERR(shp)) {
 			err = PTR_ERR(shp);
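lru_add_drain_all() runs before the lock/unlock processing because newly
faulted pages can still sit in the per-CPU pagevecs, where they are on no
LRU list at all; draining first ensures the SHM_UNLOCK rescan actually
finds them. The shm_file declaration appears to be a leftover of the
reverted shm-level change noted in the changelog.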
mm/shmem.c
@@ -1477,12 +1477,16 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		if (!user_shm_lock(inode->i_size, user))
 			goto out_nomem;
 		info->flags |= VM_LOCKED;
+		mapping_set_unevictable(file->f_mapping);
 	}
 	if (!lock && (info->flags & VM_LOCKED) && user) {
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
+		mapping_clear_unevictable(file->f_mapping);
+		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
+
 out_nomem:
 	spin_unlock(&info->lock);
 	return retval;
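Note the ordering on the unlock side: mapping_clear_unevictable() runs
before the rescan, so that scan_mapping_unevictable_pages() finds
page_evictable() true and can move the pages back to the normal LRU lists.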
mm/vmscan.c
@@ -2346,4 +2346,93 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 
 	return 1;
 }
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+	VM_BUG_ON(PageActive(page));
+
+retry:
+	ClearPageUnevictable(page);
+	if (page_evictable(page, NULL)) {
+		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+
+		__dec_zone_state(zone, NR_UNEVICTABLE);
+		list_move(&page->lru, &zone->lru[l].list);
+		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
+		__count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		/*
+		 * rotate unevictable list
+		 */
+		SetPageUnevictable(page);
+		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		if (page_evictable(page, NULL))
+			goto retry;
+	}
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+	pgoff_t next = 0;
+	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+			 PAGE_CACHE_SHIFT;
+	struct zone *zone;
+	struct pagevec pvec;
+
+	if (mapping->nrpages == 0)
+		return;
+
+	pagevec_init(&pvec, 0);
+	while (next < end &&
+		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		int i;
+		int pg_scanned = 0;
+
+		zone = NULL;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			pgoff_t page_index = page->index;
+			struct zone *pagezone = page_zone(page);
+
+			pg_scanned++;
+			if (page_index > next)
+				next = page_index;
+			next++;
+
+			if (pagezone != zone) {
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock_irq(&zone->lru_lock);
+			}
+
+			if (PageLRU(page) && PageUnevictable(page))
+				check_move_unevictable_page(page, zone);
+		}
+		if (zone)
+			spin_unlock_irq(&zone->lru_lock);
+		pagevec_release(&pvec);
+
+		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+	}
+}
 #endif
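The retry label in check_move_unevictable_page() handles a race: once the
page has been rotated back onto the unevictable list, the condition that
made it unevictable may already have been cleared (e.g. by a concurrent
SHM_UNLOCK), so page_evictable() is re-tested and the move redone rather
than stranding an evictable page. In the scan loop, caching the zone
pointer (unlocking and relocking only when the page's zone changes) keeps
lru_lock traffic down, and the UNEVICTABLE_PGRESCUED and
UNEVICTABLE_PGSCANNED events feed the vmstat counters introduced elsewhere
in this series.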