Commit 935f5839 authored by Peter Zijlstra, committed by Ingo Molnar

x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation

Instead of punting and doing flush_tlb_all(), do the same as
flush_tlb_kernel_range() does and use single-page invalidations.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.430001980@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5fe26b7a
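
For context, the optimization adopts the heuristic flush_tlb_kernel_range() already uses: invalidate the changed pages one at a time while their count stays at or below tlb_single_page_flush_ceiling, and fall back to a full TLB flush otherwise. Below is a minimal sketch of the resulting flow, condensed from the pageattr.c hunk in the diff; cpa_check_flush_all(), __cpa_addr() and struct cpa_data are the pre-existing CPA helpers and types, and the trailing per-page cache-flush loop is elided, so this is not a drop-in replacement.

/* Sketch only -- condensed from the pageattr.c hunk in the diff below. */

/* Runs on every CPU via on_each_cpu(): one INVLPG per changed page. */
void __cpa_flush_array(void *data)
{
        struct cpa_data *cpa = data;
        unsigned int i;

        for (i = 0; i < cpa->numpages; i++)
                __flush_tlb_one_kernel(__cpa_addr(cpa, i));
}

static void cpa_flush_array(struct cpa_data *cpa, int cache)
{
        if (cpa_check_flush_all(cache))         /* full-flush case handled by the helper */
                return;

        if (cpa->numpages <= tlb_single_page_flush_ceiling)
                on_each_cpu(__cpa_flush_array, cpa, 1); /* cheap: a handful of INVLPGs */
        else
                flush_tlb_all();                        /* too many pages: global flush wins */

        /* ... followed by per-page cache flushes when 'cache' is set (see the diff) ... */
}

The ceiling (33 pages by default, made visible to pageattr.c via mm_internal.h in this patch) is the existing break-even point tlb.c uses to choose between per-page INVLPG and a full flush.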
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -19,4 +19,6 @@ extern int after_bootmem;
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
 
+extern unsigned long tlb_single_page_flush_ceiling;
+
 #endif /* __X86_MM_INTERNAL_H */
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,6 +26,8 @@
 #include <asm/pat.h>
 #include <asm/set_memory.h>
 
+#include "mm_internal.h"
+
 /*
  * The current flushing context - we pass it instead of 5 arguments:
  */
@@ -346,16 +348,26 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
         }
 }
 
-static void cpa_flush_array(unsigned long baddr, unsigned long *start,
-                            int numpages, int cache,
-                            int in_flags, struct page **pages)
+void __cpa_flush_array(void *data)
 {
-        unsigned int i, level;
+        struct cpa_data *cpa = data;
+        unsigned int i;
 
-        if (__inv_flush_all(cache))
+        for (i = 0; i < cpa->numpages; i++)
+                __flush_tlb_one_kernel(__cpa_addr(cpa, i));
+}
+
+static void cpa_flush_array(struct cpa_data *cpa, int cache)
+{
+        unsigned int i;
+
+        if (cpa_check_flush_all(cache))
                 return;
 
-        flush_tlb_all();
+        if (cpa->numpages <= tlb_single_page_flush_ceiling)
+                on_each_cpu(__cpa_flush_array, cpa, 1);
+        else
+                flush_tlb_all();
 
         if (!cache)
                 return;
@@ -366,15 +378,11 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
          * will cause all other CPUs to flush the same
          * cachelines:
          */
-        for (i = 0; i < numpages; i++) {
-                unsigned long addr;
+        for (i = 0; i < cpa->numpages; i++) {
+                unsigned long addr = __cpa_addr(cpa, i);
+                unsigned int level;
                 pte_t *pte;
 
-                if (in_flags & CPA_PAGES_ARRAY)
-                        addr = (unsigned long)page_address(pages[i]);
-                else
-                        addr = start[i];
-
                 pte = lookup_address(addr, &level);
 
                 /*
@@ -1771,12 +1779,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                 goto out;
         }
 
-        if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
-                cpa_flush_array(baddr, addr, numpages, cache,
-                                cpa.flags, pages);
-        } else {
+        if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+                cpa_flush_array(&cpa, cache);
+        else
                 cpa_flush_range(baddr, numpages, cache);
-        }
 
 out:
         return ret;
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,6 +15,8 @@
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
 
+#include "mm_internal.h"
+
 /*
  * TLB flushing, formerly SMP-only
  *              c/o Linus Torvalds.
@@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  *
  * This is in units of pages.
  */
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                         unsigned long end, unsigned int stride_shift,