Commit a2495207 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] tmpfs swapoff deadlock

tmpfs 1/5 swapoff deadlock: my igrab/iput around the yield in
shmem_unuse_inode was rubbish; it seems my testing never really hit the
case until last week, when truncation of course deadlocked on the page
held locked across the iput (at least I had the foresight to say "ugh!"
there).  Don't yield here, switch over to the simple backoff I'd been
using for months in the loopable tmpfs patch (yes, it could loop
indefinitely for memory, that's already an issue to be dealt with
later).  The return convention from shmem_unuse to try_to_unuse is
inelegant (commented at both ends), but effective.
parent c21c3ad0
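
For orientation, the sketch below condenses that return convention into a
standalone C toy (my own paraphrase, not kernel code: the stub struct page,
struct swap_slot and the helper try_to_unuse_step() are invented for
illustration). shmem_unuse() now reports whether a tmpfs inode claimed the
entry; if the page is still in the swap cache because the move to a file page
failed, try_to_unuse() reincrements the swap count instead of deleting the
page from the swap cache, so the entry is simply retried on a later pass:

/* Standalone illustration only: stub types stand in for the real
 * struct page, swp_entry_t and the swap_map count. */
#include <stdio.h>

struct page { int in_swap_cache; };
struct swap_slot { unsigned long count; };      /* one swap_map entry */

static int move_ok;     /* simulate move_from_swap_cache() success/failure */

/* tmpfs side: returns 1 once the entry is found in a tmpfs inode,
 * whether or not the page could be moved into the page cache. */
static int shmem_unuse(struct swap_slot *slot, struct page *page)
{
        if (move_ok)
                page->in_swap_cache = 0;        /* moved to file mapping */
        slot->count--;                          /* swap_free(): drop this reference */
        return 1;
}

/* swapoff side, condensed from the try_to_unuse() hunks below */
static void try_to_unuse_step(struct swap_slot *slot, struct page *page)
{
        int shmem = shmem_unuse(slot, page);

        if (page->in_swap_cache) {
                if (shmem)
                        slot->count++;          /* swap_duplicate(): retry later */
                else
                        page->in_swap_cache = 0;        /* delete_from_swap_cache() */
        }
}

int main(void)
{
        struct swap_slot slot = { .count = 2 };
        struct page page = { .in_swap_cache = 1 };

        move_ok = 0;                            /* the backoff case */
        try_to_unuse_step(&slot, &page);
        printf("count=%lu, still in swap cache=%d\n", slot.count, page.in_swap_cache);
        return 0;
}
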
@@ -218,7 +218,7 @@ extern spinlock_t swaplock;
 #define swap_device_lock(p) spin_lock(&p->sdev_lock)
 #define swap_device_unlock(p) spin_unlock(&p->sdev_lock)
 
-extern void shmem_unuse(swp_entry_t entry, struct page *page);
+extern int shmem_unuse(swp_entry_t entry, struct page *page);
 
 #endif /* __KERNEL__*/
@@ -430,7 +430,6 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
         swp_entry_t *ptr;
         unsigned long idx;
         int offset;
-        struct inode *inode;
 
         idx = 0;
         ptr = info->i_direct;
@@ -457,54 +456,43 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
         spin_unlock (&info->lock);
         return 0;
 found:
-        idx += offset;
-        inode = igrab(&info->vfs_inode);
-        /* move head to start search for next from here */
-        list_move_tail(&shmem_inodes, &info->list);
-        spin_unlock(&shmem_ilock);
-        swap_free(entry);
+        if (move_from_swap_cache(page, idx + offset,
+                        info->vfs_inode.i_mapping) == 0) {
                 ptr[offset] = (swp_entry_t) {0};
-        while (inode && move_from_swap_cache(page, idx, inode->i_mapping)) {
-                /*
-                 * Yield for kswapd, and try again - but we're still
-                 * holding the page lock - ugh! fix this up later on.
-                 * Beware of inode being unlinked or truncated: just
-                 * leave try_to_unuse to delete_from_swap_cache if so.
-                 */
-                spin_unlock(&info->lock);
-                yield();
-                spin_lock(&info->lock);
-                ptr = shmem_swp_entry(info, idx, 0);
-                if (IS_ERR(ptr))
-                        break;
-        }
                 info->swapped--;
+                SetPageUptodate(page);
+        }
         spin_unlock(&info->lock);
-        if (inode)
-                iput(inode);
-        SetPageUptodate(page);
+        /*
+         * Decrement swap count even when the entry is left behind:
+         * try_to_unuse will skip over mms, then reincrement count.
+         */
+        swap_free(entry);
         return 1;
 }
 
 /*
  * shmem_unuse() search for an eventually swapped out shmem page.
- * Note shmem_unuse_inode drops shmem_ilock itself if successful.
  */
-void shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t entry, struct page *page)
 {
         struct list_head *p;
         struct shmem_inode_info * info;
+        int found = 0;
 
         spin_lock (&shmem_ilock);
         list_for_each(p, &shmem_inodes) {
                 info = list_entry(p, struct shmem_inode_info, list);
-                if (info->swapped && shmem_unuse_inode(info, entry, page))
-                        return;
+                if (info->swapped && shmem_unuse_inode(info, entry, page)) {
+                        /* move head to start search for next from here */
+                        list_move_tail(&shmem_inodes, &info->list);
+                        found = 1;
+                        break;
+                }
         }
         spin_unlock (&shmem_ilock);
+        return found;
 }
 
 /*
@@ -531,6 +531,7 @@ static int try_to_unuse(unsigned int type)
         int i = 0;
         int retval = 0;
         int reset_overflow = 0;
+        int shmem;
 
         /*
          * When searching mms for an entry, a good strategy is to
@@ -611,11 +612,12 @@ static int try_to_unuse(unsigned int type)
                 * Whenever we reach init_mm, there's no address space
                 * to search, but use it as a reminder to search shmem.
                 */
+               shmem = 0;
                swcount = *swap_map;
                if (swcount > 1) {
                        flush_page_to_ram(page);
                        if (start_mm == &init_mm)
-                               shmem_unuse(entry, page);
+                               shmem = shmem_unuse(entry, page);
                        else
                                unuse_process(start_mm, entry, page);
                }
@@ -632,7 +634,9 @@ static int try_to_unuse(unsigned int type)
                        swcount = *swap_map;
                        if (mm == &init_mm) {
                                set_start_mm = 1;
-                               shmem_unuse(entry, page);
+                               spin_unlock(&mmlist_lock);
+                               shmem = shmem_unuse(entry, page);
+                               spin_lock(&mmlist_lock);
                        } else
                                unuse_process(mm, entry, page);
                        if (set_start_mm && *swap_map < swcount) {
@@ -681,15 +685,24 @@ static int try_to_unuse(unsigned int type)
                 * read from disk into another page. Splitting into two
                 * pages would be incorrect if swap supported "shared
                 * private" pages, but they are handled by tmpfs files.
-                * Note shmem_unuse already deleted its from swap cache.
+                *
+                * Note shmem_unuse already deleted a swappage from
+                * the swap cache, unless the move to filepage failed:
+                * in which case it left swappage in cache, lowered its
+                * swap count to pass quickly through the loops above,
+                * and now we must reincrement count to try again later.
                 */
                if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
                        swap_writepage(page);
                        lock_page(page);
                        wait_on_page_writeback(page);
                }
-               if (PageSwapCache(page))
+               if (PageSwapCache(page)) {
+                       if (shmem)
+                               swap_duplicate(entry);
+                       else
                                delete_from_swap_cache(page);
+               }
 
                /*
                 * So we could skip searching mms once swap count went