Commit f24407d2 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/xfs-vipt

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/xfs-vipt:
  xfs: fix xfs to work with Virtually Indexed architectures
  sh: add mm API for DMA to vmalloc/vmap areas
  arm: add mm API for DMA to vmalloc/vmap areas
  parisc: add mm API for DMA to vmalloc/vmap areas
  mm: add coherence API for DMA to vmalloc/vmap areas
parents 65445174 73c77e2c
...@@ -377,3 +377,27 @@ maps this page at its virtual address.
All the functionality of flush_icache_page can be implemented in
flush_dcache_page and update_mmu_cache. In 2.7 the hope is to
remove this interface completely.
The final category of APIs is for I/O to deliberately aliased address
ranges inside the kernel. Such aliases are set up by use of the
vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
subsystem assumes that the user mapping and kernel offset mapping are
the only aliases. This isn't true for vmap aliases, so anything in
the kernel trying to do I/O to vmap areas must manually manage
coherency. It must do this by flushing the vmap range before doing
I/O and invalidating it after the I/O returns.
  void flush_kernel_vmap_range(void *vaddr, int size)
       flushes the kernel cache for a given virtual address range in
       the vmap area.  This is to make sure that any data the kernel
       modified in the vmap range is made visible to the physical
       page.  The design is to make this area safe to perform I/O on.
       Note that this API does *not* also flush the offset map alias
       of the area.

  void invalidate_kernel_vmap_range(void *vaddr, int size) invalidates
       the cache for a given virtual address range in the vmap area;
       this removes any stale cache lines the processor may have
       created by speculatively reading data while the I/O to the
       physical pages was in progress.  This is only necessary for
       data reads into the vmap area.
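As an illustration only (not part of this patch), here is a minimal
sketch of the pattern described above for a hypothetical driver doing
DMA into a vmalloc()ed buffer.  submit_device_read() and
example_read_into_vmap() are made-up placeholder names standing in for
whatever actually performs and wraps the I/O:

	#include <linux/errno.h>
	#include <linux/highmem.h>
	#include <linux/vmalloc.h>

	/* Hypothetical helper: the device DMAs into the physical pages. */
	extern int submit_device_read(void *buf, int len);

	static int example_read_into_vmap(int len)
	{
		void *buf = vmalloc(len);
		int err;

		if (!buf)
			return -ENOMEM;

		/* Write back any dirty lines in the vmap alias before the
		 * device touches the physical pages. */
		flush_kernel_vmap_range(buf, len);

		err = submit_device_read(buf, len);

		/* Drop lines the CPU may have speculatively read through
		 * the vmap alias while the device was writing; only needed
		 * for reads into the vmap area. */
		invalidate_kernel_vmap_range(buf, len);

		vfree(buf);
		return err;
	}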
...@@ -447,6 +447,16 @@ static inline void __flush_icache_all(void)
	    : "r" (0));
#endif
}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	/* Only aliasing caches (VIVT or aliasing VIPT) need explicit
	 * maintenance for the vmap alias; non-aliasing caches see the
	 * same lines through the linear mapping. */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
...
...@@ -38,6 +38,18 @@ void flush_cache_mm(struct mm_struct *mm);
#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so invalidate is left empty */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
...
...@@ -63,6 +63,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	__flush_wback_region(addr, size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	__flush_invalidate_region(addr, size);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
...
...@@ -76,6 +76,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));
static inline int
xfs_buf_is_vmapped(
	struct xfs_buf *bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf *bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}
/*
 * Page Region interfaces.
 *
...@@ -314,7 +335,7 @@ xfs_buf_free(
	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
		uint i;

-		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+		if (xfs_buf_is_vmapped(bp))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++) {
...@@ -1107,6 +1128,9 @@ xfs_buf_bio_end_io(
	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	do {
		struct page *page = bvec->bv_page;
...@@ -1216,6 +1240,10 @@ _xfs_buf_ioapply(
submit_io:
	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
...
...@@ -17,6 +17,12 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>
...