Commit aaa46865 authored by Hugh Dickins, committed by Linus Torvalds

swap_info: note SWAP_MAP_SHMEM

While we're fiddling with the swap_map values, let's assign a particular
value to shmem/tmpfs swap pages: their swap counts are never incremented,
and it helps swapoff's try_to_unuse() a little if it can immediately
distinguish those pages from process pages.

Since we've no use for SWAP_MAP_BAD | COUNT_CONTINUED,
we might as well use that 0xbf value for SWAP_MAP_SHMEM.
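
For reference, the arithmetic behind that choice, as a sketch that is not part of the patch: it assumes the SWAP_MAP_BAD value of 0x3f from the parent commit 570a335b, while COUNT_CONTINUED (0x80) and SWAP_MAP_SHMEM (0xbf) are the values visible in the diff below.

/* Sketch only: SWAP_MAP_BAD's 0x3f value is assumed from the parent commit. */
#define SWAP_MAP_BAD	0x3f	/* bad pageblock marker, in first swap_map */
#define COUNT_CONTINUED	0x80	/* see swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* owned by shmem/tmpfs, in first swap_map */

/* 0x3f | 0x80 == 0xbf: the otherwise unused combination is reused here. */
_Static_assert((SWAP_MAP_BAD | COUNT_CONTINUED) == SWAP_MAP_SHMEM,
	       "SWAP_MAP_SHMEM reuses SWAP_MAP_BAD | COUNT_CONTINUED");
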
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 570a335b
include/linux/swap.h

@@ -157,6 +157,7 @@ enum {
 #define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
 #define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
 #define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
 
 /*
  * The in-memory structure used to track swap areas.

@@ -316,6 +317,7 @@ extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);
 extern int swapcache_prepare(swp_entry_t);
 extern void swap_free(swp_entry_t);

@@ -394,6 +396,10 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
 	return 0;
 }
 
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
 static inline int swap_duplicate(swp_entry_t swp)
 {
 	return 0;
mm/shmem.c

@@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 		goto out;
 	}
 	mutex_unlock(&shmem_swaplist_mutex);
-out:	return found;	/* 0 or 1 or -ENOMEM */
+	/*
+	 * Can some race bring us here? We've been holding page lock,
+	 * so I think not; but would rather try again later than BUG()
+	 */
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return (found < 0) ? found : 0;
 }
 
 /*

@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	else
 		inode = NULL;
 	spin_unlock(&info->lock);
-	swap_duplicate(swap);
+	swap_shmem_alloc(swap);
 	BUG_ON(page_mapped(page));
 	page_cache_release(page);	/* pagecache ref */
 	swap_writepage(page, wbc);
mm/swapfile.c

@@ -548,6 +548,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
 	if (usage == SWAP_HAS_CACHE) {
 		VM_BUG_ON(!has_cache);
 		has_cache = 0;
+	} else if (count == SWAP_MAP_SHMEM) {
+		/*
+		 * Or we could insist on shmem.c using a special
+		 * swap_shmem_free() and free_shmem_swap_and_cache()...
+		 */
+		count = 0;
 	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 		if (count == COUNT_CONTINUED) {
 			if (swap_count_continued(p, offset, count))

@@ -1031,7 +1037,6 @@ static int try_to_unuse(unsigned int type)
 	swp_entry_t entry;
 	unsigned int i = 0;
 	int retval = 0;
-	int shmem;
 
 	/*
 	 * When searching mms for an entry, a good strategy is to

@@ -1107,17 +1112,18 @@ static int try_to_unuse(unsigned int type)
 
 		/*
 		 * Remove all references to entry.
-		 * Whenever we reach init_mm, there's no address space
-		 * to search, but use it as a reminder to search shmem.
 		 */
-		shmem = 0;
 		swcount = *swap_map;
-		if (swap_count(swcount)) {
-			if (start_mm == &init_mm)
-				shmem = shmem_unuse(entry, page);
-			else
-				retval = unuse_mm(start_mm, entry, page);
+		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+			retval = shmem_unuse(entry, page);
+			/* page has already been unlocked and released */
+			if (retval < 0)
+				break;
+			continue;
 		}
+		if (swap_count(swcount) && start_mm != &init_mm)
+			retval = unuse_mm(start_mm, entry, page);
+
 		if (swap_count(*swap_map)) {
 			int set_start_mm = (*swap_map >= swcount);
 			struct list_head *p = &start_mm->mmlist;
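
A note on the new comparison in that hunk: swap_count() is assumed here to keep its shape from the parent commit 570a335b, stripping only the SWAP_HAS_CACHE bit from the swap_map byte, so a shmem-owned entry still compares equal to SWAP_MAP_SHMEM even while its page sits in swap cache. A minimal sketch of that assumption:

/* Sketch only: the helper is assumed to match its form in the parent commit. */
#define SWAP_HAS_CACHE	0x40
static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* 0xbf has no 0x40 bit, so SWAP_MAP_SHMEM survives */
}
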
@@ -1128,7 +1134,7 @@ static int try_to_unuse(unsigned int type)
 			atomic_inc(&new_start_mm->mm_users);
 			atomic_inc(&prev_mm->mm_users);
 			spin_lock(&mmlist_lock);
-			while (swap_count(*swap_map) && !retval && !shmem &&
+			while (swap_count(*swap_map) && !retval &&
 					(p = p->next) != &start_mm->mmlist) {
 				mm = list_entry(p, struct mm_struct, mmlist);
 				if (!atomic_inc_not_zero(&mm->mm_users))

@@ -1142,10 +1148,9 @@ static int try_to_unuse(unsigned int type)
 				swcount = *swap_map;
 				if (!swap_count(swcount)) /* any usage ? */
 					;
-				else if (mm == &init_mm) {
+				else if (mm == &init_mm)
 					set_start_mm = 1;
-					shmem = shmem_unuse(entry, page);
-				} else
+				else
 					retval = unuse_mm(mm, entry, page);
 
 				if (set_start_mm && *swap_map < swcount) {

@@ -1161,13 +1166,6 @@ static int try_to_unuse(unsigned int type)
 			mmput(start_mm);
 			start_mm = new_start_mm;
 		}
-		if (shmem) {
-			/* page has already been unlocked and released */
-			if (shmem > 0)
-				continue;
-			retval = shmem;
-			break;
-		}
 		if (retval) {
 			unlock_page(page);
 			page_cache_release(page);
@@ -2126,6 +2124,15 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
 		goto out;
 	}
 
+/*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+	__swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
 /*
  * increase reference count of swap entry by 1.
  */
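
As a rough illustration of what the commit message means by "their swap counts are never incremented": the shmem owner stamps the sentinel once, and a single free releases the entry, in contrast to the counted process-page path. The following is only a user-space simulation of the swap_map byte, not kernel code; the values mirror the defines in include/linux/swap.h above.

#include <assert.h>

#define SWAP_HAS_CACHE	0x40	/* values as in include/linux/swap.h above */
#define SWAP_MAP_SHMEM	0xbf

int main(void)
{
	unsigned char map = 0;			/* unused swap entry */

	/* shmem_writepage(): swap_shmem_alloc() stamps the owner, and the
	 * page also sits in swap cache during writeout. */
	map = SWAP_MAP_SHMEM | SWAP_HAS_CACHE;

	/* swapoff/try_to_unuse(): ignoring the cache bit, the entry is
	 * immediately recognisable as shmem's; there is no count to walk down. */
	assert((unsigned char)(map & ~SWAP_HAS_CACHE) == SWAP_MAP_SHMEM);

	/* swap_entry_free(): count == SWAP_MAP_SHMEM goes straight to 0. */
	map &= ~SWAP_HAS_CACHE;		/* swap cache reference dropped */
	map = 0;			/* one free and the entry is available again */

	return map;			/* 0 on the happy path */
}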