Commit 5a178119 authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds

mm: add support for direct_IO to highmem pages

The patch "mm: add support for a filesystem to activate swap files and use
direct_IO for writing swap pages" added support for using direct_IO to
write swap pages but it is insufficient for highmem pages.

To support highmem pages, this patch kmaps() the page before calling the
direct_IO() handler.  As direct_IO deals with virtual addresses an
additional helper is necessary for get_kernel_pages() to lookup the struct
page for a kmap virtual address.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Paris <eparis@redhat.com>
Cc: James Morris <jmorris@namei.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Xiaotian Feng <dfeng@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a509bc1a
...@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages; ...@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages;
void kmap_flush_unused(void); void kmap_flush_unused(void);
struct page *kmap_to_page(void *addr);
#else /* CONFIG_HIGHMEM */ #else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; } static inline unsigned int nr_free_highpages(void) { return 0; }
/*
 * !CONFIG_HIGHMEM stub: without highmem there are no pkmap slots, so any
 * address handed back by kmap() is a direct-mapped lowmem address and
 * virt_to_page() can resolve it directly.
 */
static inline struct page *kmap_to_page(void *addr)
{
return virt_to_page(addr);
}
#define totalhigh_pages 0UL #define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP #ifndef ARCH_HAS_KMAP
......
...@@ -94,6 +94,18 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); ...@@ -94,6 +94,18 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
do { spin_unlock(&kmap_lock); (void)(flags); } while (0) do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif #endif
/*
 * kmap_to_page - look up the struct page behind a kmap()'d virtual address
 * @vaddr: kernel virtual address; either a pkmap slot or a lowmem
 *         direct-mapped address
 *
 * Needed by get_kernel_pages() so that callers which only hold a kmap
 * virtual address (e.g. swap-out via ->direct_IO) can recover the page.
 *
 * Returns the struct page backing @vaddr.
 */
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	/*
	 * Valid pkmap addresses run from PKMAP_ADDR(0) up to but NOT
	 * including PKMAP_ADDR(LAST_PKMAP): PKMAP_ADDR(LAST_PKMAP) is one
	 * past the final slot, so the upper bound must be exclusive.  An
	 * inclusive check would index pkmap_page_table[LAST_PKMAP], one
	 * entry past the end of the table.
	 */
	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
		return pte_page(pkmap_page_table[i]);
	}

	/* Not a pkmap address: must be lowmem; virt_to_page sanity checks the PFN. */
	return virt_to_page(addr);
}
static void flush_all_zero_pkmaps(void) static void flush_all_zero_pkmaps(void)
{ {
int i; int i;
......
...@@ -205,7 +205,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) ...@@ -205,7 +205,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
struct file *swap_file = sis->swap_file; struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping; struct address_space *mapping = swap_file->f_mapping;
struct iovec iov = { struct iovec iov = {
.iov_base = page_address(page), .iov_base = kmap(page),
.iov_len = PAGE_SIZE, .iov_len = PAGE_SIZE,
}; };
...@@ -218,6 +218,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) ...@@ -218,6 +218,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
ret = mapping->a_ops->direct_IO(KERNEL_WRITE, ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
&kiocb, &iov, &kiocb, &iov,
kiocb.ki_pos, 1); kiocb.ki_pos, 1);
kunmap(page);
if (ret == PAGE_SIZE) { if (ret == PAGE_SIZE) {
count_vm_event(PSWPOUT); count_vm_event(PSWPOUT);
ret = 0; ret = 0;
......
...@@ -258,8 +258,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, ...@@ -258,8 +258,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE)) if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
return seg; return seg;
/* virt_to_page sanity checks the PFN */ pages[seg] = kmap_to_page(kiov[seg].iov_base);
pages[seg] = virt_to_page(kiov[seg].iov_base);
page_cache_get(pages[seg]); page_cache_get(pages[seg]);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment