Commit 50861f5a authored by John David Anglin, committed by Helge Deller

parisc: Fix cache routines to ignore vma's with an invalid pfn

The parisc architecture does not have a pte special bit. As a result,
special mappings are handled with the VM_PFNMAP and VM_MIXEDMAP flags.
VM_MIXEDMAP mappings may or may not have a "struct page" backing. When
pfn_valid() is false, there is no "struct page" backing. Otherwise, they
are treated as normal pages.

The FireGL driver uses the VM_MIXEDMAP without a backing "struct page".
This treatment caused a panic due to a TLB data miss in
update_mmu_cache. This appeared to be in the code generated for
page_address(). We were in fact using a very circular bit of code to
determine the physical address of the PFN in various cache routines.
This wasn't valid when there was no "struct page" backing.  The needed
address can in fact be determined simply from the PFN itself without
using the "struct page".

The attached patch updates update_mmu_cache(), flush_cache_mm(),
flush_cache_range() and flush_cache_page() to check pfn_valid() and to
directly compute the PFN physical and virtual addresses.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: <stable@vger.kernel.org> # 3.10
Signed-off-by: Helge Deller <deller@gmx.de>
parent 06693f30
...@@ -71,18 +71,27 @@ flush_cache_all_local(void) ...@@ -71,18 +71,27 @@ flush_cache_all_local(void)
} }
EXPORT_SYMBOL(flush_cache_all_local); EXPORT_SYMBOL(flush_cache_all_local);
/*
 * Kernel virtual address of the page with page frame number @pfn.
 * Computed directly from the pfn so it works even when the mapping has
 * no "struct page" backing (VM_MIXEDMAP without pte special, e.g. the
 * FireGL driver) — see the commit description above.
 */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		/* Flush via the pfn's virtual address, not page_address(),
		   so no "struct page" data is needed for the flush itself. */
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void void
...@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr) ...@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm) void flush_cache_mm(struct mm_struct *mm)
{ {
struct vm_area_struct *vma;
pgd_t *pgd;
/* Flushing the whole cache on each cpu takes forever on /* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */ rp3440, etc. So, avoid it if the mm isn't too big. */
if (mm_total_size(mm) < parisc_cache_flush_threshold) { if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
struct vm_area_struct *vma; flush_cache_all();
return;
if (mm->context == mfsp(3)) { }
for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, if (mm->context == mfsp(3)) {
vma->vm_end); for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->vm_flags & VM_EXEC) flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
flush_user_icache_range_asm( if ((vma->vm_flags & VM_EXEC) == 0)
vma->vm_start, vma->vm_end); continue;
} flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
} else {
pgd_t *pgd = mm->pgd;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long addr;
for (addr = vma->vm_start; addr < vma->vm_end;
addr += PAGE_SIZE) {
pte_t *ptep = get_ptep(pgd, addr);
if (ptep != NULL) {
pte_t pte = *ptep;
__flush_cache_page(vma, addr,
page_to_phys(pte_page(pte)));
}
}
}
} }
return; return;
} }
#ifdef CONFIG_SMP pgd = mm->pgd;
flush_cache_all(); for (vma = mm->mmap; vma; vma = vma->vm_next) {
#else unsigned long addr;
flush_cache_all_local();
#endif for (addr = vma->vm_start; addr < vma->vm_end;
addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
} }
void void
...@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end) ...@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma, void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
unsigned long addr;
pgd_t *pgd;
BUG_ON(!vma->vm_mm->context); BUG_ON(!vma->vm_mm->context);
if ((end - start) < parisc_cache_flush_threshold) { if ((end - start) >= parisc_cache_flush_threshold) {
if (vma->vm_mm->context == mfsp(3)) {
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
} else {
unsigned long addr;
pgd_t *pgd = vma->vm_mm->pgd;
for (addr = start & PAGE_MASK; addr < end;
addr += PAGE_SIZE) {
pte_t *ptep = get_ptep(pgd, addr);
if (ptep != NULL) {
pte_t pte = *ptep;
flush_cache_page(vma,
addr, pte_pfn(pte));
}
}
}
} else {
#ifdef CONFIG_SMP
flush_cache_all(); flush_cache_all();
#else return;
flush_cache_all_local(); }
#endif
if (vma->vm_mm->context == mfsp(3)) {
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
return;
}
pgd = vma->vm_mm->pgd;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
} }
} }
...@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long ...@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{ {
BUG_ON(!vma->vm_mm->context); BUG_ON(!vma->vm_mm->context);
flush_tlb_page(vma, vmaddr); if (pfn_valid(pfn)) {
__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
} }
#ifdef CONFIG_PARISC_TMPALIAS #ifdef CONFIG_PARISC_TMPALIAS
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment