Commit 4ad35c1f authored by Guo Ren

csky: Fixup 610 vipt cache flush mechanism

The 610 core has a VIPT cache aliasing issue, so we need to implement the
cache flush APIs described in cachetlb.rst to avoid data corruption.

Here is the list of APIs modified by this patch; illustrative usage
sketches for several of them follow below:
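For readers unfamiliar with the problem, the short userspace sketch below is
an illustration only (not part of the patch), using an assumed cache geometry
rather than the real 610 parameters. It shows how a virtually indexed cache
can place two mappings of the same physical page into different sets, which
is why aliasing flushes are needed at all:

#include <stdio.h>

/*
 * Assumed geometry for illustration: a 32 KiB, 4-way VIPT cache with
 * 32-byte lines and 4 KiB pages.  Each way is 8 KiB, so the set index
 * uses virtual address bit 12, which lies above the page offset.
 */
#define LINE_SHIFT	5
#define NR_SETS		(32768 / 4 / 32)	/* 256 sets */

static unsigned int set_index(unsigned long vaddr)
{
	/* The index is taken from the VIRTUAL address, hence "VI" in VIPT. */
	return (vaddr >> LINE_SHIFT) % NR_SETS;
}

int main(void)
{
	/* Two hypothetical virtual aliases of the same physical page. */
	unsigned long user_alias   = 0x00010000UL;	/* bit 12 clear */
	unsigned long kernel_alias = 0x00011000UL;	/* bit 12 set   */

	printf("user alias   -> set %u\n", set_index(user_alias));	/* 0   */
	printf("kernel alias -> set %u\n", set_index(kernel_alias));	/* 128 */
	return 0;
}

On a PIPT cache the index would come from the physical address and both
aliases would hit the same lines; only VIPT parts need this heavyweight
whole-cache flushing.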

 - flush_kernel_dcache_page      (newly added)
 - flush_dcache_mmap_lock        (newly added)
 - flush_dcache_mmap_unlock      (newly added)
 - flush_kernel_vmap_range       (newly added)
 - invalidate_kernel_vmap_range  (newly added)
 - flush_anon_page               (newly added)
 - flush_cache_range             (newly added)
 - flush_cache_vmap              (now flushes the whole cache)
 - flush_cache_vunmap            (now flushes the whole cache)
 - flush_cache_mm                (now flushes only the dcache)
 - flush_icache_page             (now a nop)
 - copy_from_user_page           (unneeded flushes removed)
 - copy_to_user_page             (unneeded pre-copy flush removed)

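As a concrete example of why flush_kernel_vmap_range() and
invalidate_kernel_vmap_range() are needed, cachetlb.rst has drivers bracket
DMA to a vmap()'ed buffer with them. A minimal, hypothetical sketch follows;
dma_to_device()/dma_from_device() stand in for a real DMA engine and are not
kernel APIs:

#include <linux/vmalloc.h>
#include <linux/highmem.h>	/* pulls in the arch cacheflush declarations */

/* Hypothetical DMA helpers standing in for a real controller driver. */
extern void dma_to_device(void *buf, int size);
extern void dma_from_device(void *buf, int size);

static void vmap_dma_example(void *vmap_buf, int size)
{
	/* Write back the vmap alias so the device reads current data. */
	flush_kernel_vmap_range(vmap_buf, size);
	dma_to_device(vmap_buf, size);

	dma_from_device(vmap_buf, size);
	/* Drop stale lines so the CPU sees what the device just wrote. */
	invalidate_kernel_vmap_range(vmap_buf, size);
}

On the 610 both hooks fall back to dcache_wbinv_all(), trading precision for
correctness until ranged maintenance is implemented.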
Changes in V2:
 - Fix a compile error with xa_lock*(&mapping->i_pages)

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@infradead.org>
parent c7e6f0e9
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -54,3 +54,23 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		icache_inv_all();
 	}
 }
+
+void flush_kernel_dcache_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	mapping = page_mapping_file(page);
+
+	if (!mapping || mapping_mapped(mapping))
+		dcache_wbinv_all();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+	unsigned long end)
+{
+	dcache_wbinv_all();
+
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_all();
+}
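For context, cachetlb.rst describes flush_kernel_dcache_page() as the hook a
driver calls after modifying a user page through a kernel mapping. A small,
hypothetical sketch of that usage (fill_user_page() is not from the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy data into a user-visible page via kmap. */
static void fill_user_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	/* Write back the kernel alias before userspace reads the page;
	 * on the 610 this now falls through to dcache_wbinv_all(). */
	flush_kernel_dcache_page(page);
	kunmap_atomic(kaddr);
}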
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -4,26 +4,49 @@
 #ifndef __ABI_CSKY_CACHEFLUSH_H
 #define __ABI_CSKY_CACHEFLUSH_H
 
-#include <linux/compiler.h>
+#include <linux/mm.h>
 #include <asm/string.h>
 #include <asm/cache.h>
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-#define flush_cache_mm(mm)			cache_wbinv_all()
+#define flush_cache_mm(mm)			dcache_wbinv_all()
 #define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
 #define flush_cache_dup_mm(mm)			cache_wbinv_all()
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+extern void flush_kernel_dcache_page(struct page *);
+
+#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
+
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	dcache_wbinv_all();
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	dcache_wbinv_all();
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+			 struct page *page, unsigned long vmaddr)
+{
+	if (PageAnon(page))
+		cache_wbinv_all();
+}
+
 /*
  * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
  * Use cache_wbinv_all() here and need to be improved in future.
  */
-#define flush_cache_range(vma, start, end)	cache_wbinv_all()
-#define flush_cache_vmap(start, end)		cache_wbinv_range(start, end)
-#define flush_cache_vunmap(start, end)		cache_wbinv_range(start, end)
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#define flush_cache_vmap(start, end)		cache_wbinv_all()
+#define flush_cache_vunmap(start, end)		cache_wbinv_all()
 
-#define flush_icache_page(vma, page)		cache_wbinv_all()
+#define flush_icache_page(vma, page)		do {} while (0);
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
 #define flush_icache_user_range(vma,page,addr,len) \
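flush_anon_page() is the anonymous-page counterpart of flush_dcache_page():
cachetlb.rst expects it to be called when the kernel accesses an anonymous
user page, e.g. one pinned with get_user_pages(). A rough, hypothetical
sketch of such a caller (peek_user_byte() is not a kernel function; error
handling is trimmed) might look like:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

/* Hypothetical helper: read one byte of the current task's memory
 * through the kernel mapping of the underlying (possibly anonymous) page. */
static int peek_user_byte(unsigned long uaddr, unsigned char *val)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned char *kaddr;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(uaddr & PAGE_MASK, 1, 0, &page, &vma);
	if (ret != 1) {
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	/* Push the user's dcache alias out before reading the kernel alias. */
	flush_anon_page(vma, page, uaddr);
	flush_dcache_page(page);

	kaddr = kmap_atomic(page);
	*val = kaddr[uaddr & ~PAGE_MASK];
	kunmap_atomic(kaddr);

	put_page(page);
	up_read(&current->mm->mmap_sem);
	return 0;
}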
@@ -31,19 +54,13 @@ extern void flush_dcache_page(struct page *);
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	cache_wbinv_all(); \
 	memcpy(dst, src, len); \
-	cache_wbinv_all(); \
 } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-	cache_wbinv_all(); \
 	memcpy(dst, src, len); \
 	cache_wbinv_all(); \
 } while (0)
 
-#define flush_dcache_mmap_lock(mapping)		do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
-
 #endif /* __ABI_CSKY_CACHEFLUSH_H */
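The asymmetry above is deliberate: copy_from_user_page() only copies data into
a kernel buffer, while copy_to_user_page() is what access_process_vm()/ptrace
use to write into another task's page (e.g. to plant a breakpoint), so it must
still write back and invalidate after the copy. A simplified, hypothetical
caller modelled on that path (poke_text() is illustrative only):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical helper: patch `len` bytes in a task's text page. */
static void poke_text(struct vm_area_struct *vma, struct page *page,
		      unsigned long addr, const void *insn, int len)
{
	char *maddr = kmap(page);

	/* memcpy() into the kernel alias, then a full write-back/invalidate
	 * on the 610 so the target task's user mapping sees the change. */
	copy_to_user_page(vma, page, addr, maddr + (addr & ~PAGE_MASK), insn, len);
	set_page_dirty_lock(page);
	kunmap(page);
}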