Commit 750b4987 authored by Nick Piggin, committed by Andi Kleen

HWPOISON: Refactor truncate to allow direct truncating of page v2

Extract out truncate_inode_page() out of the truncate path so that
it can be used by memory-failure.c

[AK: description, headers, fix typos]
v2: Some white space changes from Fengguang Wu
Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent 2a7684a2
...@@ -794,6 +794,8 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, ...@@ -794,6 +794,8 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern int vmtruncate(struct inode * inode, loff_t offset); extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags); unsigned long address, unsigned int flags);
......
...@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page); ...@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page);
* its lock, b) when a concurrent invalidate_mapping_pages got there first and * its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space. * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/ */
static void static int
truncate_complete_page(struct address_space *mapping, struct page *page) truncate_complete_page(struct address_space *mapping, struct page *page)
{ {
if (page->mapping != mapping) if (page->mapping != mapping)
return; return -EIO;
if (page_has_private(page)) if (page_has_private(page))
do_invalidatepage(page, 0); do_invalidatepage(page, 0);
...@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) ...@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
remove_from_page_cache(page); remove_from_page_cache(page);
ClearPageMappedToDisk(page); ClearPageMappedToDisk(page);
page_cache_release(page); /* pagecache ref */ page_cache_release(page); /* pagecache ref */
return 0;
} }
/* /*
...@@ -135,6 +136,16 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) ...@@ -135,6 +136,16 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
return ret; return ret;
} }
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
if (page_mapped(page)) {
unmap_mapping_range(mapping,
(loff_t)page->index << PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE, 0);
}
return truncate_complete_page(mapping, page);
}
/** /**
* truncate_inode_pages - truncate range of pages specified by start & end byte offsets * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate * @mapping: mapping to truncate
...@@ -196,12 +207,7 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -196,12 +207,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page); unlock_page(page);
continue; continue;
} }
if (page_mapped(page)) { truncate_inode_page(mapping, page);
unmap_mapping_range(mapping,
(loff_t)page_index<<PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE, 0);
}
truncate_complete_page(mapping, page);
unlock_page(page); unlock_page(page);
} }
pagevec_release(&pvec); pagevec_release(&pvec);
...@@ -238,15 +244,10 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -238,15 +244,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
break; break;
lock_page(page); lock_page(page);
wait_on_page_writeback(page); wait_on_page_writeback(page);
if (page_mapped(page)) { truncate_inode_page(mapping, page);
unmap_mapping_range(mapping,
(loff_t)page->index<<PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE, 0);
}
if (page->index > next) if (page->index > next)
next = page->index; next = page->index;
next++; next++;
truncate_complete_page(mapping, page);
unlock_page(page); unlock_page(page);
} }
pagevec_release(&pvec); pagevec_release(&pvec);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment