Commit af0d5292 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/mm-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 030dabd5 e403d5b9
@@ -22,7 +22,7 @@
  */
 #ifdef CONFIG_SMP
 #define FREE_PTE_NR	507
-#define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
+#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
 #else
 #define FREE_PTE_NR	1
 #define tlb_fast_mode(tlb) 1
@@ -35,7 +35,8 @@
  */
 typedef struct free_pte_ctx {
 	struct mm_struct	*mm;
-	unsigned long		nr;	/* set to ~0UL means fast mode */
+	unsigned int		nr;	/* set to ~0U means fast mode */
+	unsigned int		fullmm;	/* non-zero means full mm flush */
 	unsigned long		freed;
 	struct page *		pages[FREE_PTE_NR];
 } mmu_gather_t;
@@ -46,15 +47,18 @@ extern mmu_gather_t mmu_gathers[NR_CPUS];
 /* tlb_gather_mmu
  *	Return a pointer to an initialized mmu_gather_t.
  */
-static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
+static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
 	mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
 
 	tlb->mm = mm;
-	tlb->freed = 0;
 
 	/* Use fast mode if only one CPU is online */
 	tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
 
+	tlb->fullmm = full_mm_flush;
+
+	tlb->freed = 0;
+
 	return tlb;
 }
...
 #ifndef _SPARC64_TLB_H
 #define _SPARC64_TLB_H
 
-#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)
+#define tlb_flush(tlb)	\
+do {	if ((tlb)->fullmm)	\
+		flush_tlb_mm((tlb)->mm);\
+} while (0)
+
 #define tlb_start_vma(tlb, vma) \
-	flush_cache_range(vma, vma->vm_start, vma->vm_end)
+do {	if (!(tlb)->fullmm)	\
+		flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
 #define tlb_end_vma(tlb, vma) \
-	flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+do {	if (!(tlb)->fullmm)	\
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
-#define tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) \
+	do { } while (0)
 
 #include <asm-generic/tlb.h>
...
@@ -427,7 +427,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
 	spin_lock(&mm->page_table_lock);
 	flush_cache_range(vma, address, end);
-	tlb = tlb_gather_mmu(mm);
+	tlb = tlb_gather_mmu(mm, 0);
 	unmap_page_range(tlb, vma, address, end);
 	tlb_finish_mmu(tlb, start, end);
 	spin_unlock(&mm->page_table_lock);
...
@@ -848,7 +848,7 @@ static void unmap_region(struct mm_struct *mm,
 {
 	mmu_gather_t *tlb;
 
-	tlb = tlb_gather_mmu(mm);
+	tlb = tlb_gather_mmu(mm, 0);
 
 	do {
 		unsigned long from, to;
@@ -1105,7 +1105,7 @@ void exit_mmap(struct mm_struct * mm)
 	release_segments(mm);
 	spin_lock(&mm->page_table_lock);
-	tlb = tlb_gather_mmu(mm);
+	tlb = tlb_gather_mmu(mm, 1);
 	flush_cache_mm(mm);
 	mpnt = mm->mmap;
...
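
Note on the change above: tlb_gather_mmu() now takes a full_mm_flush argument, recorded as tlb->fullmm, so an architecture such as sparc64 can skip the per-VMA cache/TLB flushes and issue a single flush_tlb_mm() when an entire address space is being torn down; exit_mmap() passes 1, while zap_page_range() and unmap_region() pass 0. Below is a minimal, self-contained user-space sketch of that one decision only, not kernel code: struct mmu_gather, teardown_vmas() and the *_stub() flush functions are illustrative stand-ins for the interfaces shown in the diff.

#include <stdio.h>

/* Simplified stand-in for the kernel's mmu_gather_t from the diff above. */
struct mmu_gather {
	unsigned int fullmm;	/* non-zero: the whole address space is going away */
};

/* Stand-ins for the architecture flush primitives. */
static void flush_tlb_mm_stub(void)    { puts("flush whole mm once"); }
static void flush_tlb_range_stub(void) { puts("flush one VMA range"); }

/* Mirrors the new sparc64 tlb_end_vma()/tlb_flush() split:
 * per-VMA range flushes only when not tearing down the full mm,
 * one mm-wide flush at the end when we are. */
static void teardown_vmas(struct mmu_gather *tlb, int nr_vmas)
{
	int i;

	for (i = 0; i < nr_vmas; i++)
		if (!tlb->fullmm)
			flush_tlb_range_stub();	/* tlb_end_vma() path */
	if (tlb->fullmm)
		flush_tlb_mm_stub();		/* tlb_flush() path   */
}

int main(void)
{
	struct mmu_gather partial = { 0 };	/* e.g. zap_page_range(): fullmm == 0 */
	struct mmu_gather full    = { 1 };	/* e.g. exit_mmap():      fullmm == 1 */

	teardown_vmas(&partial, 3);	/* three per-range flushes */
	teardown_vmas(&full, 3);	/* a single mm-wide flush  */
	return 0;
}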