Commit e403d5b9 authored by David S. Miller

TLB gather: Distinguish between full-mm and partial-mm flushes.

parent 3da9cf28
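The change in brief: when an entire address space is being torn down (exit or exec), there is no need to flush the cache and TLB one VMA at a time; a single full-context flush at the end suffices. The new full_mm_flush argument to tlb_gather_mmu() records which case the caller is in, and architecture code (sparc64 below) consults it to skip the per-VMA work.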
@@ -22,7 +22,7 @@
  */
 #ifdef CONFIG_SMP
 #define FREE_PTE_NR	507
-#define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
+#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
 #else
 #define FREE_PTE_NR	1
 #define tlb_fast_mode(tlb) 1
@@ -35,7 +35,8 @@
  */
 typedef struct free_pte_ctx {
 	struct mm_struct *mm;
-	unsigned long nr;	/* set to ~0UL means fast mode */
+	unsigned int nr;	/* set to ~0U means fast mode */
+	unsigned int fullmm;	/* non-zero means full mm flush */
 	unsigned long freed;
 	struct page * pages[FREE_PTE_NR];
 } mmu_gather_t;
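For context, nr is what drives the batching logic elsewhere in this header. A paraphrased sketch of its consumer, tlb_remove_page(), in the 2.5-era generic code (not part of this diff; the exact body may differ):

	static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
	{
		/* Fast mode (single CPU online): no other CPU can hold a
		 * stale translation, so the page can be freed immediately. */
		if (tlb_fast_mode(tlb)) {
			free_page_and_swap_cache(page);
			return;
		}

		/* Otherwise queue the page; flush and free once the array fills. */
		tlb->pages[tlb->nr++] = page;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_finish_mmu(tlb, 0, 0);
	}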
@@ -46,15 +47,18 @@ extern mmu_gather_t mmu_gathers[NR_CPUS];
 /* tlb_gather_mmu
  *	Return a pointer to an initialized mmu_gather_t.
  */
-static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
+static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
 	mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
 
 	tlb->mm = mm;
-	tlb->freed = 0;
 
 	/* Use fast mode if only one CPU is online */
 	tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
+	tlb->fullmm = full_mm_flush;
+	tlb->freed = 0;
 
 	return tlb;
 }
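The flag is simply latched into tlb->fullmm; the convention for callers, visible in the mm/ hunks below, is:

	tlb = tlb_gather_mmu(mm, 0);	/* partial: only a range of this mm goes away */
	tlb = tlb_gather_mmu(mm, 1);	/* full: the whole address space is being torn down */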
......
 #ifndef _SPARC64_TLB_H
 #define _SPARC64_TLB_H
 
-#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)
+#define tlb_flush(tlb)			\
+do {	if ((tlb)->fullmm)		\
+		flush_tlb_mm((tlb)->mm);\
+} while (0)
 
 #define tlb_start_vma(tlb, vma) \
-	flush_cache_range(vma, vma->vm_start, vma->vm_end)
+do {	if (!(tlb)->fullmm) \
+		flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
 
 #define tlb_end_vma(tlb, vma) \
-	flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+do {	if (!(tlb)->fullmm) \
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
 
-#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) \
+	do { } while (0)
 
 #include <asm-generic/tlb.h>
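Effect on sparc64: for a partial unmap (fullmm == 0) the per-VMA cache and TLB range flushes still run and tlb_flush() is a no-op; for a full-mm teardown the per-VMA hooks collapse to nothing and one flush_tlb_mm() covers the whole context. A hypothetical per-VMA loop (variable names assumed for illustration; the exact point where the generic code invokes tlb_flush() is paraphrased) shows where each hook fires:

	tlb = tlb_gather_mmu(mm, fullmm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		tlb_start_vma(tlb, vma);	/* cache range flush; skipped when fullmm */
		unmap_page_range(tlb, vma, vma->vm_start, vma->vm_end);
		tlb_end_vma(tlb, vma);		/* TLB range flush; skipped when fullmm */
	}
	tlb_finish_mmu(tlb, start, end);	/* tlb_flush(): flush_tlb_mm() only when fullmm */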
......
@@ -427,7 +427,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
 	spin_lock(&mm->page_table_lock);
 	flush_cache_range(vma, address, end);
-	tlb = tlb_gather_mmu(mm);
+	tlb = tlb_gather_mmu(mm, 0);
 	unmap_page_range(tlb, vma, address, end);
 	tlb_finish_mmu(tlb, start, end);
 	spin_unlock(&mm->page_table_lock);
......
@@ -848,7 +848,7 @@ static void unmap_region(struct mm_struct *mm,
 {
 	mmu_gather_t *tlb;
 
-	tlb = tlb_gather_mmu(mm);
+	tlb = tlb_gather_mmu(mm, 0);
 
 	do {
 		unsigned long from, to;
@@ -1105,7 +1105,7 @@ void exit_mmap(struct mm_struct * mm)
 	release_segments(mm);
 	spin_lock(&mm->page_table_lock);
-	tlb = tlb_gather_mmu(mm);
+	tlb = tlb_gather_mmu(mm, 1);
 	flush_cache_mm(mm);
 	mpnt = mm->mmap;
......
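Putting it together, the full-mm case in exit_mmap() now flushes the cache once up front (flush_cache_mm() above) and the TLB once at the end, instead of a flush_cache_range()/flush_tlb_range() pair per VMA. A simplified sketch of the pattern, assembled from the hunks above (the unmap loop is approximated, not verbatim kernel code):

	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 1);		/* full-mm: defer to one TLB flush */
	flush_cache_mm(mm);
	for (mpnt = mm->mmap; mpnt; mpnt = mpnt->vm_next)
		unmap_page_range(tlb, mpnt, mpnt->vm_start, mpnt->vm_end);
	tlb_finish_mmu(tlb, 0, TASK_SIZE);	/* single full-context TLB flush here */
	spin_unlock(&mm->page_table_lock);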