Commit ff075d60 authored by Peter Zijlstra, committed by Linus Torvalds

um: mmu_gather rework

Fix up the um mmu_gather code to conform to the new API.
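
The API change in one glance (an illustrative sketch, not part of the patch; mm, start, and end stand for whatever the caller is tearing down): the gather state moves from the shared per-CPU slot into storage supplied by the caller, so tlb_gather_mmu() now initializes rather than allocates.

	/* Before: gather state came from a per-CPU variable. */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 1);	/* took get_cpu_var() */
	/* ... unmap pages ... */
	tlb_finish_mmu(tlb, start, end);		/* dropped it via put_cpu_var() */

	/* After: the caller provides the mmu_gather, typically on its stack. */
	struct mmu_gather tlb;
	tlb_gather_mmu(&tlb, mm, 1);
	/* ... unmap pages ... */
	tlb_finish_mmu(&tlb, start, end);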
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7a95a2c8
arch/um/include/asm/tlb.h
@@ -22,9 +22,6 @@ struct mmu_gather {
 	unsigned int		fullmm; /* non-zero means full mm flush */
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
@@ -83,12 +73,10 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 /* tlb_remove_page
@@ -96,11 +84,16 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  *	while handling the additional races in SMP caused by other CPUs
  *	caching valid mappings in their TLBs.
  */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return;
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
 }
 
 /**
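
For context on the split above: a zero return from __tlb_remove_page() is the generic code's signal that the gather is full and must be drained before the page can be queued again. A hedged sketch of that contract (the retry shape is assumed from the generic mmu_gather rework, not shown in this diff):

	if (!__tlb_remove_page(tlb, page)) {
		/* 0 would mean "gather full": flush, then retry the page. */
		tlb_flush_mmu(tlb);
		__tlb_remove_page(tlb, page);
	}

On UML, free_page_and_swap_cache() frees the page immediately, so __tlb_remove_page() always returns 1, the branch above is never taken, and the tlb_remove_page() wrapper can simply discard the return value.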