Commit 7a591cfe authored by David S. Miller

[SPARC64]: Avoid dcache-dirty page state management on sun4v.

It is totally wasted work, since we have no D-cache aliasing
issues on sun4v.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2a3a5f5d
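
The patch applies one guard in several places: every piece of D-cache dirty-page bookkeeping is skipped when the kernel is running on sun4v (tlb_type == hypervisor), where no D-cache aliasing can occur. The stand-alone C sketch below illustrates that guard pattern only; it is not the kernel code, and the enum values, the flush helper, and maybe_flush_dcache_page() are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's CPU/MMU type variable; on sun4v
 * machines it would be set to "hypervisor" during early boot. */
enum tlb_type { spitfire, cheetah, cheetah_plus, hypervisor };
static enum tlb_type tlb_type = hypervisor;

/* Placeholder for the expensive per-page D-cache flush work. */
static void flush_dcache_lines(unsigned long pfn)
{
        printf("flushing D-cache lines for pfn 0x%lx\n", pfn);
}

/* The guard pattern used by the patch: on sun4v the D-cache cannot
 * alias, so dirty-page tracking and flushing are pure overhead and the
 * function returns immediately. */
static void maybe_flush_dcache_page(unsigned long pfn, bool pte_dirty)
{
        if (tlb_type == hypervisor)
                return;
        if (pte_dirty)
                flush_dcache_lines(pfn);
}

int main(void)
{
        maybe_flush_dcache_page(0x1234, true);  /* no-op under sun4v */
        tlb_type = cheetah_plus;                /* pretend we are on an older chip */
        maybe_flush_dcache_page(0x1234, true);  /* now the flush runs */
        return 0;
}

In the diff itself the same check shows up as a BUG_ON() in flush_dcache_page_impl(), an early return in flush_dcache_page(), and tlb_type != hypervisor conditions in update_mmu_cache() and tlb_batch_add().
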
@@ -188,8 +188,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
 #endif
 #endif
 
-__inline__ void flush_dcache_page_impl(struct page *page)
+inline void flush_dcache_page_impl(struct page *page)
 {
+        BUG_ON(tlb_type == hypervisor);
 #ifdef CONFIG_DEBUG_DCFLUSH
         atomic_inc(&dcpage_flushes);
 #endif
@@ -279,11 +280,12 @@ unsigned long _PAGE_SZBITS __read_mostly;
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
         struct mm_struct *mm;
-        struct page *page;
-        unsigned long pfn;
+
+        if (tlb_type != hypervisor) {
+                unsigned long pfn = pte_pfn(pte);
         unsigned long pg_flags;
+                struct page *page;
 
-        pfn = pte_pfn(pte);
         if (pfn_valid(pfn) &&
             (page = pfn_to_page(pfn), page_mapping(page)) &&
             ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
@@ -303,6 +305,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 
                 put_cpu();
         }
+        }
 
         mm = vma->vm_mm;
         if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
@@ -321,6 +324,9 @@ void flush_dcache_page(struct page *page)
         struct address_space *mapping;
         int this_cpu;
 
+        if (tlb_type == hypervisor)
+                return;
+
         /* Do not bother with the expensive D-cache flush if it
          * is merely the zero page. The 'bigcore' testcase in GDB
          * causes this case to run millions of times.
...
@@ -49,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 	if (pte_exec(orig))
                 vaddr |= 0x1UL;
 
-        if (pte_dirty(orig)) {
+        if (tlb_type != hypervisor &&
+            pte_dirty(orig)) {
                 unsigned long paddr, pfn = pte_pfn(orig);
                 struct address_space *mapping;
                 struct page *page;
...