Commit 68f03921 authored by Peter Zijlstra, committed by Linus Torvalds

s390: mmu_gather rework

Adapt the stand-alone s390 mmu_gather implementation to the new
preemptible mmu_gather interface.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 90f08e39
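
For orientation before the diff: the old code kept its gather state in a per-CPU variable (DECLARE_PER_CPU plus get_cpu_var()/put_cpu_var()), which is incompatible with a preemptible gather. The patch moves the state into a caller-provided struct. Below is a minimal, illustrative caller-side sketch only — example_zap_page() and its page/range arguments are hypothetical and not part of this patch; real callers live in the generic mm code — showing how the interface defined by the new tlb_gather_mmu()/tlb_remove_page()/tlb_finish_mmu() is meant to be used with an on-stack mmu_gather:

/*
 * Illustrative sketch, not from the patch: the mmu_gather is now a
 * stack variable passed into tlb_gather_mmu()/tlb_finish_mmu(),
 * so no per-CPU mmu_gathers object is needed and the section can
 * be preempted.
 */
static void example_zap_page(struct mm_struct *mm, struct page *page,
                             unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, 0);      /* 0: not a full-mm teardown */
        /* ... clear the PTE(s) mapping the page in [start, end) ... */
        tlb_remove_page(&tlb, page);      /* drop the page cache reference */
        tlb_finish_mmu(&tlb, start, end); /* flush and free gathered tables */
}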
@@ -29,65 +29,77 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 
-#ifndef CONFIG_SMP
-#define TLB_NR_PTRS	1
-#else
-#define TLB_NR_PTRS	508
-#endif
-
 struct mmu_gather {
 	struct mm_struct *mm;
 	unsigned int fullmm;
 	unsigned int nr_ptes;
 	unsigned int nr_pxds;
-	void *array[TLB_NR_PTRS];
+	unsigned int max;
+	void **array;
+	void *local[8];
 };
 
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
-						unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 
+	if (addr) {
+		tlb->array = (void *) addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+static inline void tlb_gather_mmu(struct mmu_gather *tlb,
+				  struct mm_struct *mm,
+				  unsigned int full_mm_flush)
+{
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->array = tlb->local;
 	tlb->fullmm = full_mm_flush;
-	tlb->nr_ptes = 0;
-	tlb->nr_pxds = TLB_NR_PTRS;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
-	return tlb;
+	else
+		__tlb_alloc_page(tlb);
+	tlb->nr_ptes = 0;
+	tlb->nr_pxds = tlb->max;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb,
-				 unsigned long start, unsigned long end)
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
+	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
 		__tlb_flush_mm(tlb->mm);
 	while (tlb->nr_ptes > 0)
 		page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pxds < TLB_NR_PTRS)
+	while (tlb->nr_pxds < tlb->max)
 		crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 	rcu_table_freelist_finish();
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->array != tlb->local)
+		free_pages((unsigned long) tlb->array, 0);
 }
 
 /*
  * Release the page cache reference for a pte removed by
- * tlb_ptep_clear_flush. In both flush modes the tlb fo a page cache page
+ * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	free_page_and_swap_cache(page);
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	free_page_and_swap_cache(page);
 }
@@ -103,7 +115,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	if (!tlb->fullmm) {
 		tlb->array[tlb->nr_ptes++] = pte;
 		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb, 0, 0);
+			tlb_flush_mmu(tlb);
 	} else
 		page_table_free(tlb->mm, (unsigned long *) pte);
 }
@@ -124,7 +136,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 	if (!tlb->fullmm) {
 		tlb->array[--tlb->nr_pxds] = pmd;
 		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb, 0, 0);
+			tlb_flush_mmu(tlb);
 	} else
 		crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
@@ -146,7 +158,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 	if (!tlb->fullmm) {
 		tlb->array[--tlb->nr_pxds] = pud;
 		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb, 0, 0);
+			tlb_flush_mmu(tlb);
 	} else
 		crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
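
A note on the last three hunks, since the behaviour is unchanged but easier to miss in the new max-based form: the single array is filled from both ends, pte fragments from index 0 upward through nr_ptes and crst (pmd/pud) tables from index max downward through nr_pxds, and the gather is drained as soon as the two indices meet. A rough sketch of the invariant, using the field names from the struct above (illustrative, not part of the patch):

/*
 * index:   0 ................................ tlb->max - 1
 * array: [ pte | pte | ... free slots ... | pud/pmd | pud/pmd ]
 *          nr_ptes grows ->          <- nr_pxds shrinks
 *
 * pte_free_tlb() stores at array[nr_ptes++]; pmd_free_tlb() and
 * pud_free_tlb() store at array[--nr_pxds]; once nr_ptes >= nr_pxds
 * the array is full and tlb_flush_mmu() flushes the mm and frees
 * everything queued so far.
 */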