Commit dc140045 authored by Guo Ren

csky: Fixup defer cache flush for 610

We use a deferred cache flush mechanism to improve the performance of
the 610, but the implementation was wrong. Fix it up now and update
the mechanism:

 - The zero page need not be flushed.
 - If a page is file-mapped and has not been touched from user space, defer the flush.
 - If a page is anonymous or a dirty file mapping, flush it immediately.
 - In update_mmu_cache(), finish the flush deferred by flush_dcache_page() (a sketch of this handshake follows the list).
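
The handshake between the two functions can be modelled with the following
self-contained C sketch. This is an illustrative model with mock types and
hypothetical model_* names, not kernel code; the real implementation in the
diff below keeps this state in the PG_dcache_clean bit of page->flags:

#include <stdbool.h>
#include <stdio.h>

/* Mock page state: the kernel keeps one flag bit (PG_dcache_clean). */
struct page_model {
        bool dcache_clean;   /* set: no deferred flush pending */
        bool file_mapped;    /* page belongs to a file mapping */
        bool user_mapped;    /* page already mapped into user space */
};

static void dcache_wbinv_all(void)
{
        puts("dcache: write back + invalidate all");
}

/* flush_dcache_page(): defer for untouched file pages, flush otherwise. */
static void model_flush_dcache_page(struct page_model *p)
{
        if (p->file_mapped && !p->user_mapped)
                p->dcache_clean = false;    /* defer: flush on first fault */
        else {
                dcache_wbinv_all();         /* anon or dirty file mapping */
                p->dcache_clean = true;
        }
}

/* update_mmu_cache(): finish any deferred flush when the page faults in. */
static void model_update_mmu_cache(struct page_model *p)
{
        if (!p->dcache_clean) {
                dcache_wbinv_all();
                p->dcache_clean = true;
        }
}

int main(void)
{
        struct page_model p = { .file_mapped = true };

        model_flush_dcache_page(&p);   /* deferred: no flush yet */
        p.user_mapped = true;
        model_update_mmu_cache(&p);    /* deferred flush happens here */
        return 0;
}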

For the 610 we must take care of the dcache aliasing issue:
 - VIPT cache with 8KB per way and 4KB page granularity, so each physical page can land at one of two cache colours (worked example below).
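
To make the aliasing condition concrete, here is a standalone sketch
modelled on the kernel's pages_do_alias() check. The DEMO_* constants are
assumptions derived from the geometry above (8KB per way, 4KB pages), under
which bit 12 of the virtual address selects one of the two cache colours:

#include <stdio.h>

#define DEMO_PAGE_SIZE 0x1000UL /* 4KB page */
#define DEMO_WAYSIZE   0x2000UL /* 8KB per dcache way on the 610 */

/*
 * Two virtual mappings of one physical page alias in a VIPT cache when
 * they differ in the index bits above the page offset; with an 8KB way
 * and 4KB pages that is exactly bit 12.
 */
static int pages_do_alias(unsigned long addr1, unsigned long addr2)
{
        return ((addr1 ^ addr2) & (DEMO_WAYSIZE - 1) & ~(DEMO_PAGE_SIZE - 1)) != 0;
}

int main(void)
{
        printf("%d\n", pages_do_alias(0x10000, 0x11000)); /* 1: different colour */
        printf("%d\n", pages_do_alias(0x10000, 0x12000)); /* 0: same colour */
        return 0;
}
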
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
parent be819aa6
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -11,42 +11,46 @@
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
+#define PG_dcache_clean        PG_arch_1
+
 void flush_dcache_page(struct page *page)
 {
-        struct address_space *mapping = page_mapping(page);
-        unsigned long addr;
+        struct address_space *mapping;
 
-        if (mapping && !mapping_mapped(mapping)) {
-                set_bit(PG_arch_1, &(page)->flags);
+        if (page == ZERO_PAGE(0))
                 return;
-        }
 
-        /*
-         * We could delay the flush for the !page_mapping case too.  But that
-         * case is for exec env/arg pages and those are %99 certainly going to
-         * get faulted into the tlb (and thus flushed) anyways.
-         */
-        addr = (unsigned long) page_address(page);
-        dcache_wb_range(addr, addr + PAGE_SIZE);
+        mapping = page_mapping_file(page);
+
+        if (mapping && !page_mapcount(page))
+                clear_bit(PG_dcache_clean, &page->flags);
+        else {
+                dcache_wbinv_all();
+                if (mapping)
+                        icache_inv_all();
+                set_bit(PG_dcache_clean, &page->flags);
+        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-        pte_t *pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+        pte_t *ptep)
 {
-        unsigned long addr;
+        unsigned long pfn = pte_pfn(*ptep);
         struct page *page;
-        unsigned long pfn;
 
-        pfn = pte_pfn(*pte);
-        if (unlikely(!pfn_valid(pfn)))
+        if (!pfn_valid(pfn))
                 return;
 
         page = pfn_to_page(pfn);
-        addr = (unsigned long) page_address(page);
+        if (page == ZERO_PAGE(0))
+                return;
 
-        if (vma->vm_flags & VM_EXEC ||
-            pages_do_alias(addr, address & PAGE_MASK))
-                cache_wbinv_all();
+        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+                dcache_wbinv_all();
 
-        clear_bit(PG_arch_1, &(page)->flags);
+        if (page_mapping_file(page)) {
+                if (vma->vm_flags & VM_EXEC)
+                        icache_inv_all();
+        }
 }
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -26,8 +26,8 @@ extern void flush_dcache_page(struct page *);
 #define flush_icache_page(vma, page) cache_wbinv_all()
 #define flush_icache_range(start, end) cache_wbinv_range(start, end)
-#define flush_icache_user_range(vma, pg, adr, len) \
-        cache_wbinv_range(adr, adr + len)
+#define flush_icache_user_range(vma,page,addr,len) \
+        flush_dcache_page(page)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \