Commit 1a70db49 authored by Ley Foon Tan

nios2: rework cache

- flush dcache before flush instruction cache
- rework update_mmu_cache and flush_dcache_page
- add shmparam.h
Signed-off-by: Ley Foon Tan <lftan@altera.com>
parent 2009337e
...@@ -46,7 +46,6 @@ generic-y += segment.h ...@@ -46,7 +46,6 @@ generic-y += segment.h
generic-y += sembuf.h generic-y += sembuf.h
generic-y += serial.h generic-y += serial.h
generic-y += shmbuf.h generic-y += shmbuf.h
generic-y += shmparam.h
generic-y += siginfo.h generic-y += siginfo.h
generic-y += signal.h generic-y += signal.h
generic-y += socket.h generic-y += socket.h
......
/*
* Copyright Altera Corporation (C) <2015>. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_NIOS2_SHMPARAM_H
#define _ASM_NIOS2_SHMPARAM_H
/*
 * SHMLBA: required alignment for attaching shared memory segments.
 * Set to the data-cache size — presumably so that shared mappings of
 * the same physical page never create virtual-aliasing conflicts in
 * the (virtually indexed) dcache; TODO confirm against the nios2
 * cache configuration.
 */
#define SHMLBA CONFIG_NIOS2_DCACHE_SIZE
#endif /* _ASM_NIOS2_SHMPARAM_H */
...@@ -128,12 +128,14 @@ void flush_cache_dup_mm(struct mm_struct *mm) ...@@ -128,12 +128,14 @@ void flush_cache_dup_mm(struct mm_struct *mm)
void flush_icache_range(unsigned long start, unsigned long end) void flush_icache_range(unsigned long start, unsigned long end)
{ {
__flush_dcache(start, end);
__flush_icache(start, end); __flush_icache(start, end);
} }
void flush_dcache_range(unsigned long start, unsigned long end) void flush_dcache_range(unsigned long start, unsigned long end)
{ {
__flush_dcache(start, end); __flush_dcache(start, end);
__flush_icache(start, end);
} }
EXPORT_SYMBOL(flush_dcache_range); EXPORT_SYMBOL(flush_dcache_range);
...@@ -156,6 +158,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page) ...@@ -156,6 +158,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
unsigned long start = (unsigned long) page_address(page); unsigned long start = (unsigned long) page_address(page);
unsigned long end = start + PAGE_SIZE; unsigned long end = start + PAGE_SIZE;
__flush_dcache(start, end);
__flush_icache(start, end); __flush_icache(start, end);
} }
...@@ -170,6 +173,18 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, ...@@ -170,6 +173,18 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
__flush_icache(start, end); __flush_icache(start, end);
} }
/*
 * Write back the kernel-mapping alias of @page so that the data in the
 * physical page stays coherent with the kernel's view of it.  The
 * @mapping argument is accepted for interface symmetry with the other
 * flush helpers; it is not consulted here.
 */
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	__flush_dcache_all(kaddr, kaddr + PAGE_SIZE);
}
void flush_dcache_page(struct page *page) void flush_dcache_page(struct page *page)
{ {
struct address_space *mapping; struct address_space *mapping;
...@@ -187,11 +202,12 @@ void flush_dcache_page(struct page *page) ...@@ -187,11 +202,12 @@ void flush_dcache_page(struct page *page)
if (mapping && !mapping_mapped(mapping)) { if (mapping && !mapping_mapped(mapping)) {
clear_bit(PG_dcache_clean, &page->flags); clear_bit(PG_dcache_clean, &page->flags);
} else { } else {
unsigned long start = (unsigned long)page_address(page); __flush_dcache_page(mapping, page);
if (mapping) {
__flush_dcache_all(start, start + PAGE_SIZE); unsigned long start = (unsigned long)page_address(page);
if (mapping)
flush_aliases(mapping, page); flush_aliases(mapping, page);
flush_icache_range(start, start + PAGE_SIZE);
}
set_bit(PG_dcache_clean, &page->flags); set_bit(PG_dcache_clean, &page->flags);
} }
} }
...@@ -202,6 +218,7 @@ void update_mmu_cache(struct vm_area_struct *vma, ...@@ -202,6 +218,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
{ {
unsigned long pfn = pte_pfn(*pte); unsigned long pfn = pte_pfn(*pte);
struct page *page; struct page *page;
struct address_space *mapping;
if (!pfn_valid(pfn)) if (!pfn_valid(pfn))
return; return;
...@@ -214,16 +231,15 @@ void update_mmu_cache(struct vm_area_struct *vma, ...@@ -214,16 +231,15 @@ void update_mmu_cache(struct vm_area_struct *vma,
if (page == ZERO_PAGE(0)) if (page == ZERO_PAGE(0))
return; return;
if (!PageReserved(page) && mapping = page_mapping(page);
!test_and_set_bit(PG_dcache_clean, &page->flags)) { if (!test_and_set_bit(PG_dcache_clean, &page->flags))
unsigned long start = page_to_virt(page); __flush_dcache_page(mapping, page);
struct address_space *mapping;
if(mapping)
__flush_dcache(start, start + PAGE_SIZE); {
flush_aliases(mapping, page);
mapping = page_mapping(page); if (vma->vm_flags & VM_EXEC)
if (mapping) flush_icache_page(vma, page);
flush_aliases(mapping, page);
} }
} }
...@@ -231,15 +247,19 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, ...@@ -231,15 +247,19 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to) struct page *to)
{ {
__flush_dcache(vaddr, vaddr + PAGE_SIZE); __flush_dcache(vaddr, vaddr + PAGE_SIZE);
__flush_icache(vaddr, vaddr + PAGE_SIZE);
copy_page(vto, vfrom); copy_page(vto, vfrom);
__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE); __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
} }
void clear_user_page(void *addr, unsigned long vaddr, struct page *page) void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{ {
__flush_dcache(vaddr, vaddr + PAGE_SIZE); __flush_dcache(vaddr, vaddr + PAGE_SIZE);
__flush_icache(vaddr, vaddr + PAGE_SIZE);
clear_page(addr); clear_page(addr);
__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE); __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
} }
void copy_from_user_page(struct vm_area_struct *vma, struct page *page, void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
...@@ -248,7 +268,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -248,7 +268,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
{ {
flush_cache_page(vma, user_vaddr, page_to_pfn(page)); flush_cache_page(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len); memcpy(dst, src, len);
__flush_dcache((unsigned long)src, (unsigned long)src + len); __flush_dcache_all((unsigned long)src, (unsigned long)src + len);
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
__flush_icache((unsigned long)src, (unsigned long)src + len); __flush_icache((unsigned long)src, (unsigned long)src + len);
} }
...@@ -259,7 +279,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -259,7 +279,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
{ {
flush_cache_page(vma, user_vaddr, page_to_pfn(page)); flush_cache_page(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len); memcpy(dst, src, len);
__flush_dcache((unsigned long)dst, (unsigned long)dst + len); __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len);
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
__flush_icache((unsigned long)dst, (unsigned long)dst + len); __flush_icache((unsigned long)dst, (unsigned long)dst + len);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment