Commit 8830f04a authored by Russell King

[PATCH] ARM: Fix delayed dcache flush for ARMv6 non-aliasing caches

flush_dcache_page() did nothing for non-aliasing VIPT caches, but since
ARMv6 CPUs suffer from I/D cache coherency issues, we still need to
ensure that data is written back to RAM.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent d411b845
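
For context: flush_dcache_page() and update_mmu_cache() implement a deferred-flush handshake. When the kernel dirties a page cache page that has no userspace mappings yet, flush_dcache_page() only sets PG_dcache_dirty; update_mmu_cache() then performs the writeback when the first user PTE is installed. Before this patch, flush_dcache_page() returned early on VIPT non-aliasing caches and never set the bit, so the ARMv6 I-cache could fetch stale data from RAM. What follows is a minimal userspace model of that handshake, not kernel code; the names mirror the kernel's, but the cache and the page flag are simulated:

#include <stdbool.h>
#include <stdio.h>

struct page {
	bool dcache_dirty;	/* models PG_dcache_dirty */
	bool has_mapping;	/* models page_mapping(page) != NULL */
	bool user_mapped;	/* models mapping_mapped(mapping) */
};

static void writeback_kernel_alias(struct page *p)
{
	/* stands in for __cpuc_flush_dcache_page(page_address(page)) */
	printf("D-cache written back to RAM\n");
	p->dcache_dirty = false;
}

/* The kernel has written to a page cache page. */
static void flush_dcache_page(struct page *p)
{
	if (p->has_mapping && !p->user_mapped)
		p->dcache_dirty = true;		/* defer: no user mappings yet */
	else
		writeback_kernel_alias(p);	/* flush immediately */
}

/* A user PTE mapping the page is being installed. */
static void update_mmu_cache(struct page *p)
{
	if (p->has_mapping && p->dcache_dirty)
		writeback_kernel_alias(p);	/* complete the deferred flush */
	p->user_mapped = true;
}

int main(void)
{
	struct page p = { .has_mapping = true };

	flush_dcache_page(&p);	/* deferred: only sets the dirty bit */
	update_mmu_cache(&p);	/* first user mapping: writeback happens here */
	return 0;
}

The patch below makes both halves of the handshake run regardless of cache type, keeping only the alias handling cache-specific.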
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -77,9 +77,8 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 }
 
 static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
-	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
 	struct prio_tree_iter iter;
@@ -87,9 +86,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	pgoff_t pgoff;
 	int aliases = 0;
 
-	if (!mapping)
-		return;
-
 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
 	/*
@@ -115,9 +111,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 	if (aliases)
 		adjust_pte(vma, addr);
 	else
-		flush_cache_page(vma, addr, page_to_pfn(page));
+		flush_cache_page(vma, addr, pfn);
 }
 
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 /*
  * Take care of architecture specific things when placing a new PTE into
  * a page table, or changing an existing PTE.  Basically, there are two
@@ -134,29 +132,22 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
 	struct page *page;
 
 	if (!pfn_valid(pfn))
 		return;
+
 	page = pfn_to_page(pfn);
-	if (page_mapping(page)) {
+	mapping = page_mapping(page);
+	if (mapping) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 
-		if (dirty) {
-			/*
-			 * This is our first userspace mapping of this page.
-			 * Ensure that the physical page is coherent with
-			 * the kernel mapping.
-			 *
-			 * FIXME: only need to do this on VIVT and aliasing
-			 * VIPT cache architectures.  We can do that
-			 * by choosing whether to set this bit...
-			 */
-			__cpuc_flush_dcache_page(page_address(page));
-		}
+		if (dirty)
+			__flush_dcache_page(mapping, page);
 
 		if (cache_is_vivt())
-			make_coherent(vma, addr, page, dirty);
+			make_coherent(mapping, vma, addr, pfn);
 	}
 }
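Put back together, the post-patch update_mmu_cache() reads as follows (condensed from the hunks above; the comments here are editorial, not part of the patch):

void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		/* First user mapping of a page the kernel wrote to:
		 * complete the writeback flush_dcache_page() deferred. */
		if (dirty)
			__flush_dcache_page(mapping, page);

		/* Only VIVT caches can hold userspace aliases that
		 * disagree with the new mapping. */
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
	}
}

Note that the mapping is now resolved once by the caller and handed to make_coherent() together with the pfn, so the !mapping check and the page_to_pfn() lookup inside make_coherent() become unnecessary.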
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -37,13 +37,8 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 #define flush_pfn_alias(pfn,vaddr)	do { } while (0)
 #endif
 
-static void __flush_dcache_page(struct address_space *mapping, struct page *page)
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *mpnt;
-	struct prio_tree_iter iter;
-	pgoff_t pgoff;
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -52,24 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
 	__cpuc_flush_dcache_page(page_address(page));
 
 	/*
-	 * If there's no mapping pointer here, then this page isn't
-	 * visible to userspace yet, so there are no cache lines
-	 * associated with any other aliases.
-	 */
-	if (!mapping)
-		return;
-
-	/*
-	 * This is a page cache page.  If we have a VIPT cache, we
-	 * only need to do one flush - which would be at the relevant
+	 * If this is a page cache page, and we have an aliasing VIPT cache,
+	 * we only need to do one flush - which would be at the relevant
 	 * userspace colour, which is congruent with page->index.
 	 */
-	if (cache_is_vipt()) {
-		if (cache_is_vipt_aliasing())
-			flush_pfn_alias(page_to_pfn(page),
-					page->index << PAGE_CACHE_SHIFT);
-		return;
-	}
+	if (mapping && cache_is_vipt_aliasing())
+		flush_pfn_alias(page_to_pfn(page),
+				page->index << PAGE_CACHE_SHIFT);
+}
+
+static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *mpnt;
+	struct prio_tree_iter iter;
+	pgoff_t pgoff;
 
 	/*
 	 * There are possible user space mappings of this page:
@@ -116,12 +108,12 @@ void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
-	if (cache_is_vipt_nonaliasing())
-		return;
-
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
-	else
+	else {
 		__flush_dcache_page(mapping, page);
+		if (mapping && cache_is_vivt())
+			__flush_dcache_aliases(mapping, page);
+	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
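
For reference, the post-patch flush_dcache_page() reads as follows (condensed from the hunk above; comments editorial). The unconditional early return for VIPT non-aliasing caches is gone, so those caches now reach __flush_dcache_page() and get their data written back to RAM:

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/* Page cache page with no user mappings yet: defer the flush
	 * until update_mmu_cache() installs the first user PTE. */
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		/* Writeback the kernel alias (plus, on aliasing VIPT,
		 * the single relevant userspace colour)... */
		__flush_dcache_page(mapping, page);
		/* ...and on VIVT, walk and flush all user aliases. */
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
	}
}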