Commit f154f290 authored by Joerg Roedel, committed by Borislav Petkov

x86/mm/64: Flush global TLB on boot and AP bringup

The AP bringup code uses the trampoline_pgd page-table which
establishes global mappings in the user range of the address space.
Flush the global TLB entries after the identity mappings are removed so
no stale entries remain in the TLB.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20211202153226.22946-3-joro@8bytes.org
parent 9de49990
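
For background: a MOV to CR3 invalidates only non-global TLB entries, so the global mappings set up via trampoline_pgd survive the page-table switch and must be flushed explicitly, either with INVPCID (the all-including-global variant, used in native_flush_tlb_global() when available) or by toggling CR4.PGE. A minimal C sketch of the CR4.PGE mechanism, assuming a native (non-paravirt) x86-64 kernel context; the function name flush_global_tlb_sketch() is hypothetical and only mirrors the __native_tlb_flush_global() helper added in the diff below:

/*
 * Sketch only: mirrors the __native_tlb_flush_global() helper this
 * commit introduces. A CR4 write that changes PGE flushes the entire
 * TLB, including entries marked global, which a plain CR3 write
 * would leave in place.
 */
#include <asm/special_insns.h>          /* assumed home of native_read_cr4()/native_write_cr4() */
#include <asm/processor-flags.h>        /* assumed home of X86_CR4_PGE */

static inline void flush_global_tlb_sketch(void)
{
        unsigned long cr4 = native_read_cr4();

        native_write_cr4(cr4 ^ X86_CR4_PGE);    /* clear PGE: flush all TLB entries */
        native_write_cr4(cr4);                  /* restore PGE (flushes again) */
}
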
@@ -261,4 +261,9 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 #endif /* !MODULE */
 
+static inline void __native_tlb_flush_global(unsigned long cr4)
+{
+        native_write_cr4(cr4 ^ X86_CR4_PGE);
+        native_write_cr4(cr4);
+}
 #endif /* _ASM_X86_TLBFLUSH_H */
@@ -483,6 +483,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
         /* Kill off the identity-map trampoline */
         reset_early_page_tables();
 
+        __native_tlb_flush_global(native_read_cr4());
+
         clear_bss();
 
         clear_page(init_top_pgt);
@@ -166,9 +166,26 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
         call    sev_verify_cbit
         popq    %rsi
 
-        /* Switch to new page-table */
+        /*
+         * Switch to new page-table
+         *
+         * For the boot CPU this switches to early_top_pgt which still has the
+         * identity mappings present. The secondary CPUs will switch to the
+         * init_top_pgt here, away from the trampoline_pgd and unmap the
+         * identity mapped ranges.
+         */
         movq    %rax, %cr3
 
+        /*
+         * Do a global TLB flush after the CR3 switch to make sure the TLB
+         * entries from the identity mapping are flushed.
+         */
+        movq    %cr4, %rcx
+        movq    %rcx, %rax
+        xorq    $X86_CR4_PGE, %rcx
+        movq    %rcx, %cr4
+        movq    %rax, %cr4
+
         /* Ensure I am executing from virtual addresses */
         movq    $1f, %rax
         ANNOTATE_RETPOLINE_SAFE
@@ -1148,7 +1148,7 @@ void flush_tlb_one_user(unsigned long addr)
  */
 STATIC_NOPV void native_flush_tlb_global(void)
 {
-        unsigned long cr4, flags;
+        unsigned long flags;
 
         if (static_cpu_has(X86_FEATURE_INVPCID)) {
                 /*
@@ -1168,11 +1168,7 @@ STATIC_NOPV void native_flush_tlb_global(void)
          */
         raw_local_irq_save(flags);
 
-        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-        /* toggle PGE */
-        native_write_cr4(cr4 ^ X86_CR4_PGE);
-        /* write old PGE again and flush TLBs */
-        native_write_cr4(cr4);
+        __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
 
         raw_local_irq_restore(flags);
 }