Commit 1808d65b authored by Peter Zijlstra, committed by Ingo Molnar

asm-generic/tlb: Remove arch_tlb*_mmu()

Now that all architectures are converted to the generic code, remove
the arch hooks.

No change in behavior intended.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9de7d833
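For orientation before the diff: callers never invoked the arch_tlb*_mmu() hooks directly; they drive the generic tlb_gather_mmu()/tlb_finish_mmu() pair that this patch folds the hooks into. A minimal sketch of the usual calling sequence, modeled on callers such as unmap_region() in kernels of this era (the unmap_vmas() step stands in for whatever actually tears down the mappings):

	/*
	 * Sketch only, not part of this commit: the typical mmu_gather
	 * calling sequence around an unmap operation.
	 */
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* init the gather, mark a TLB flush pending */
	unmap_vmas(&tlb, vma, start, end);	/* batch up pages and ranges to flush/free */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, free batches, drop the pending mark */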
@@ -93,33 +93,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-			unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-
-	/* Is it from 0 to ~0? */
-	tlb->fullmm = !(start | (end+1));
-
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-	tlb->need_flush_all = 0;
-	tlb->local.next = NULL;
-	tlb->local.nr   = 0;
-	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-	tlb->active     = &tlb->local;
-	tlb->batch_count = 0;
-#endif
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
-#endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	tlb->page_size = 0;
-#endif
-
-	__tlb_reset_range(tlb);
-}
-
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -136,27 +109,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	if (force) {
-		__tlb_reset_range(tlb);
-		__tlb_adjust_range(tlb, start, end - start);
-	}
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
-	tlb_batch_list_free(tlb);
-#endif
-}
-
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -258,10 +210,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
-	arch_tlb_gather_mmu(tlb, mm, start, end);
+	tlb->mm = mm;
+
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb->need_flush_all = 0;
+	tlb->local.next = NULL;
+	tlb->local.nr   = 0;
+	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+	tlb->active     = &tlb->local;
+	tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	tlb->page_size = 0;
+#endif
+
+	__tlb_reset_range(tlb);
 	inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called at the end of the shootdown operation to free up any resources that
+ * were required.
+ */
 void tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end)
 {
@@ -272,8 +254,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
 	 * forcefully if we detect parallel PTE batching threads.
 	 */
-	bool force = mm_tlb_flush_nested(tlb->mm);
+	if (mm_tlb_flush_nested(tlb->mm)) {
+		__tlb_reset_range(tlb);
+		__tlb_adjust_range(tlb, start, end - start);
+	}
 
-	arch_tlb_finish_mmu(tlb, start, end, force);
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
+#endif
 	dec_tlb_flush_pending(tlb->mm);
 }
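The force-flush decision in tlb_finish_mmu() hinges on the flush-pending counter that tlb_gather_mmu() bumps and tlb_finish_mmu() drops. A simplified sketch of those helpers, roughly as they appear in include/linux/mm_types.h of this vintage (the real versions carry memory-ordering comments and barriers omitted here):

	/* Simplified sketch; see include/linux/mm_types.h for the real helpers. */
	static inline void inc_tlb_flush_pending(struct mm_struct *mm)
	{
		atomic_inc(&mm->tlb_flush_pending);
	}

	static inline void dec_tlb_flush_pending(struct mm_struct *mm)
	{
		atomic_dec(&mm->tlb_flush_pending);
	}

	/*
	 * True when more than one mmu_gather is in flight on this mm, i.e. a
	 * parallel PTE-batching thread exists and the flush must be forced
	 * over the whole range rather than the batched sub-ranges.
	 */
	static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
	{
		return atomic_read(&mm->tlb_flush_pending) > 1;
	}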