Commit 6e4154d4 authored by Paul Mundt

sh: Use more aggressive dcache purging in kmap teardown.

This fixes up a number of outstanding issues observed with old mappings
on the same cache colour hanging around. More optimal handling is still
needed, but this is a safe fallback until all of the corner cases have
been handled.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 0906a3ad
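
A note on the two cache operations being swapped: in the sh cache interface, __flush_wback_region() writes dirty lines back to memory but leaves them valid in the dcache, while __flush_purge_region() writes them back and then invalidates them, so a later access through a different mapping at the same dcache colour cannot hit stale lines. The pages_do_alias() checks seen in the hunks below decide whether the kernel and user virtual addresses land on different colours in the first place. A minimal, self-contained sketch of that kind of colour test, using made-up cache geometry (EX_PAGE_SIZE, EX_WAY_SIZE, EX_ALIAS_MASK and ex_pages_do_alias() are illustrative names, not the kernel's), might look like this:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only -- not kernel code.  On a virtually indexed
 * dcache, two virtual mappings of the same physical page alias when they
 * select different cache sets, i.e. when the index bits above the page
 * offset differ.  The geometry below is an assumption: 4 KiB pages and a
 * 16 KiB cache way, giving four page colours.
 */
#define EX_PAGE_SIZE    4096UL
#define EX_WAY_SIZE     (16 * 1024UL)
#define EX_ALIAS_MASK   ((EX_WAY_SIZE - 1UL) & ~(EX_PAGE_SIZE - 1UL))

/* Roughly what a pages_do_alias()-style test computes. */
static bool ex_pages_do_alias(uintptr_t kvaddr, uintptr_t uvaddr)
{
        /* Addresses alias only when the colour-selecting bits differ. */
        return ((kvaddr ^ uvaddr) & EX_ALIAS_MASK) != 0;
}

int main(void)
{
        /* 0x1000 and 0x3000 select different colours with this geometry. */
        return ex_pages_do_alias(0x1000, 0x3000) ? 0 : 1;
}

When the colours match, the kernel-side mapping and the user mapping hit the same cache sets and no extra flush is needed; the purge only has to happen in the aliasing case, which is exactly how the hunks below gate it.
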
@@ -97,7 +97,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	}
 
 	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_wback_region(vto, PAGE_SIZE);
+		__flush_purge_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
 	/* Make sure this page is cleared on other CPU's too before using it */
@@ -112,7 +112,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 	clear_page(kaddr);
 
 	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-		__flush_wback_region(kaddr, PAGE_SIZE);
+		__flush_purge_region(kaddr, PAGE_SIZE);
 
 	kunmap_atomic(kaddr, KM_USER0);
 }
@@ -134,7 +134,7 @@ void __update_cache(struct vm_area_struct *vma,
 		unsigned long addr = (unsigned long)page_address(page);
 
 		if (pages_do_alias(addr, address & PAGE_MASK))
-			__flush_wback_region((void *)addr, PAGE_SIZE);
+			__flush_purge_region((void *)addr, PAGE_SIZE);
 		}
 	}
 }
@@ -149,10 +149,11 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 			void *kaddr;
 
 			kaddr = kmap_coherent(page, vmaddr);
-			__flush_wback_region((void *)kaddr, PAGE_SIZE);
+			/* XXX.. For now kunmap_coherent() does a purge */
+			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
 			kunmap_coherent(kaddr);
 		} else
-			__flush_wback_region((void *)addr, PAGE_SIZE);
+			__flush_purge_region((void *)addr, PAGE_SIZE);
 	}
 }
 
...
@@ -54,6 +54,9 @@ void kunmap_coherent(void *kvaddr)
 		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
 		enum fixed_addresses idx = __virt_to_fix(vaddr);
 
+		/* XXX.. Kill this later, here for sanity at the moment.. */
+		__flush_purge_region((void *)vaddr, PAGE_SIZE);
+
 		pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
 		local_flush_tlb_one(get_asid(), vaddr);
 	}
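
Taken together, the hunks move the heavy lifting into the teardown path: kunmap_coherent() now purges the fixmap window itself before clearing the PTE and flushing the TLB entry, which is why the explicit flush in __flush_anon_page()'s kmap path could be commented out above. A sketch of how a hypothetical caller would now use the pair, based only on the calls visible in the hunks (flush_one_user_page() is a made-up name and the comments are illustrative, not taken from the sources):

/* Hypothetical caller, for illustration only. */
static void flush_one_user_page(struct page *page, unsigned long vmaddr)
{
	void *kaddr;

	kaddr = kmap_coherent(page, vmaddr);	/* map at the user's colour */
	/* ... operate on the page through kaddr ... */
	kunmap_coherent(kaddr);			/* after this patch: purge the
						 * window, clear the fixmap PTE,
						 * flush its TLB entry */
}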