Commit 48f170fb authored by Hugh Dickins, committed by Linus Torvalds

tmpfs: simplify unuse and writepage

shmem_unuse_inode() and shmem_writepage() contain a little code to cope
with pages inserted independently into the filecache, probably by a
filesystem stacked on top of tmpfs, then fed to its ->readpage() or
->writepage().
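
For illustration only (the filesystem and the stackfs_get_lower_page() helper
below are invented, not taken from Unionfs or any real code), the kind of
stacked call path meant here looks roughly like this: the upper ->writepage
hands a lower (tmpfs) page straight to the lower mapping's ->writepage.

	static int stackfs_writepage(struct page *page,
				     struct writeback_control *wbc)
	{
		/* hypothetical helper: returns the locked lower page,
		 * already carrying the upper page's data */
		struct page *lower_page = stackfs_get_lower_page(page);
		struct address_space *lower_mapping = lower_page->mapping;
		int err;

		/*
		 * Calling the lower ->writepage directly is what arrives in
		 * shmem_writepage() without wbc->for_reclaim set: the path
		 * this patch now warns about and redirties.
		 */
		err = lower_mapping->a_ops->writepage(lower_page, wbc);
		unlock_page(page);
		return err;
	}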

Unionfs was indeed experimenting with working in that way three years ago,
but I find no current examples: nowadays the stacking filesystems use vfs
interfaces to the lower filesystem.
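
As a contrast, a minimal sketch of the vfs-interface style referred to here
(again hypothetical, not lifted from any particular stacking filesystem): the
upper layer keeps a struct file open on the lower file and goes through
vfs_read(), so the lower filesystem fills its own page cache and never sees
pages inserted from outside.

	static ssize_t stackfs_read_lower(struct file *lower_file,
					  char __user *buf, size_t count,
					  loff_t *pos)
	{
		/* read via the VFS rather than poking the lower page cache */
		return vfs_read(lower_file, buf, count, pos);
	}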

It's now illegal: remove most of that code, adding some WARN_ON_ONCEs.
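
Condensed from the hunks below, the start of shmem_writepage() after this
patch: only reclaim may push a tmpfs page out to swap; any other caller is
flagged once and the page is simply redirtied.

	if (!wbc->for_reclaim) {
		/* writeback threads and sync should never get here now */
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();		/* reclaim: try to grab a swap slot */
	if (!swap.val)
		goto redirty;		/* no swap free: keep the page dirty */
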
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Erez Zadok <ezk@fsl.cs.sunysb.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 27ab7006
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -972,20 +972,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
 	/* which does mem_cgroup_uncharge_cache_page on error */
-	if (error == -EEXIST) {
-		struct page *filepage = find_get_page(mapping, idx);
-		error = 1;
-		if (filepage) {
-			/*
-			 * There might be a more uptodate page coming down
-			 * from a stacked writepage: forget our swappage if so.
-			 */
-			if (PageUptodate(filepage))
-				error = 0;
-			page_cache_release(filepage);
-		}
-	}
-	if (!error) {
+	if (error != -ENOMEM) {
 		delete_from_swap_cache(page);
 		set_page_dirty(page);
 		info->flags |= SHMEM_PAGEIN;
@@ -1072,16 +1059,17 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	/*
 	 * shmem_backing_dev_info's capabilities prevent regular writeback or
 	 * sync from ever calling shmem_writepage; but a stacking filesystem
-	 * may use the ->writepage of its underlying filesystem, in which case
+	 * might use ->writepage of its underlying filesystem, in which case
 	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for the writeback threads or sync.  However, in those cases,
-	 * we do still want to check if there's a redundant swappage to be
-	 * discarded.
+	 * and not for the writeback threads or sync.
 	 */
-	if (wbc->for_reclaim)
-		swap = get_swap_page();
-	else
-		swap.val = 0;
+	if (!wbc->for_reclaim) {
+		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
+		goto redirty;
+	}
+	swap = get_swap_page();
+	if (!swap.val)
+		goto redirty;
 
 	/*
 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
@@ -1092,15 +1080,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * we've taken the spinlock, because shmem_unuse_inode() will
 	 * prune a !swapped inode from the swaplist under both locks.
 	 */
-	if (swap.val) {
-		mutex_lock(&shmem_swaplist_mutex);
-		if (list_empty(&info->swaplist))
-			list_add_tail(&info->swaplist, &shmem_swaplist);
-	}
+	mutex_lock(&shmem_swaplist_mutex);
+	if (list_empty(&info->swaplist))
+		list_add_tail(&info->swaplist, &shmem_swaplist);
 
 	spin_lock(&info->lock);
-	if (swap.val)
-		mutex_unlock(&shmem_swaplist_mutex);
+	mutex_unlock(&shmem_swaplist_mutex);
 
 	if (index >= info->next_index) {
 		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
@@ -1108,16 +1093,13 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	}
 	entry = shmem_swp_entry(info, index, NULL);
 	if (entry->val) {
-		/*
-		 * The more uptodate page coming down from a stacked
-		 * writepage should replace our old swappage.
-		 */
+		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
 		free_swap_and_cache(*entry);
 		shmem_swp_set(info, entry, 0);
 	}
 	shmem_recalc_inode(inode);
 
-	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
 		delete_from_page_cache(page);
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);