Commit 7d1c153a authored by Linus Torvalds

Merge branch 'parisc-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc fix from Helge Deller:
 "This patch fixes the kmap/kunmap implementation on parisc and finally
  makes AIO work on parisc"

* 'parisc-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Ensure full cache coherency for kmap/kunmap
parents f8829150 f8dae006
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
void mark_rodata_ro(void); void mark_rodata_ro(void);
#endif #endif
#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 needs this */
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#define ARCH_HAS_KMAP #define ARCH_HAS_KMAP
void kunmap_parisc(void *addr);
/*
 * kmap - return the kernel virtual address of @page.
 *
 * parisc has no highmem: every page is permanently mapped, so this is
 * just page_address().  No dcache flush is done here; coherency for
 * non-equivalent aliases is handled at kunmap() time via
 * flush_kernel_dcache_page_addr().
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
/*
 * kunmap - release a mapping obtained with kmap().
 *
 * Flush the kernel dcache for the page's kernel alias so that data
 * written through it becomes visible to other (e.g. user-space)
 * mappings of the same page.
 */
static inline void kunmap(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
/*
 * kmap_atomic - atomic-context variant of kmap().
 *
 * Disables pagefaults (caller must not sleep until __kunmap_atomic())
 * and returns the permanent kernel mapping of @page.  As with kmap(),
 * the coherency flush happens on unmap, not here.
 */
static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
/*
 * __kunmap_atomic - tear down a kmap_atomic() mapping.
 *
 * Flush the kernel dcache for @addr so writes made through the kernel
 * alias are pushed out, then re-enable pagefaults.  The flush must
 * happen before pagefault_enable() so it covers everything written
 * while the atomic mapping was live.
 */
static inline void __kunmap_atomic(void *addr)
{
	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
}
/* Convenience wrappers: parisc ignores the protection/pfn variants and
 * routes everything through kmap_atomic(). */
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)		kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif
#endif /* _PARISC_CACHEFLUSH_H */ #endif /* _PARISC_CACHEFLUSH_H */
...@@ -28,9 +28,8 @@ struct page; ...@@ -28,9 +28,8 @@ struct page;
void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);

/* User-page clear/copy go straight to the asm helpers; coherency with
 * user mappings is now guaranteed by the kmap/kunmap flushes, so no
 * extra per-call flushing is needed here. */
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
/* #define CONFIG_PARISC_TMPALIAS */ /* #define CONFIG_PARISC_TMPALIAS */
......
...@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr) ...@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
} }
EXPORT_SYMBOL(flush_kernel_dcache_page_addr); EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
/*
 * clear_user_page - zero a page that will be mapped into user space.
 * @vto:   kernel virtual address of the page
 * @vaddr: user virtual address the page will appear at (unused here)
 * @page:  the struct page being cleared (unused here)
 *
 * Zeroes the page through the kernel mapping, then flushes the kernel
 * dcache on machines without coherent caches so the user-space alias
 * observes the zeroed data.
 */
void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
clear_page_asm(vto);
/* Coherent machines keep aliases in sync in hardware; skip the flush. */
if (!parisc_requires_coherency())
flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);
/*
 * copy_user_page - copy a user-visible page through the kernel mapping.
 * @vto:   kernel virtual address of the destination page
 * @vfrom: kernel virtual address of the source page
 * @vaddr: user virtual address of the source mapping (used for the
 *         pre-copy flush of the `from' alias)
 * @pg:    destination struct page (unused here)
 */
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
{
/* Copy using kernel mapping. No coherency is needed
(all in kmap/kunmap) on machines that don't support
non-equivalent aliasing. However, the `from' page
needs to be flushed before it can be accessed through
the kernel mapping. */
/* preempt disabled so the flush and the physical address stay paired
with the CPU doing the copy */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
preempt_enable();
copy_page_asm(vto, vfrom);
/* Push the copied data out of the kernel alias on non-coherent CPUs. */
if (!parisc_requires_coherency())
flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);
#ifdef CONFIG_PA8X00
/*
 * kunmap_parisc - kunmap hook for PA8x00 CPUs.
 * @addr: kernel virtual address being unmapped
 *
 * Flushes the kernel dcache for @addr, but only on machines that
 * require software coherency (parisc_requires_coherency()); on others
 * the unmap is a no-op.
 */
void kunmap_parisc(void *addr)
{
if (parisc_requires_coherency())
flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{ {
unsigned long flags; unsigned long flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment