Commit 7623f37e authored by Thomas Gleixner, committed by Borislav Petkov

x86/cpu_entry_area: Provide exception stack accessor

Store a pointer to the per cpu entry area exception stack mappings to allow
fast retrieval.

Required for converting various places from using the shadow IST array to
directly doing address calculations on the actual mapping address.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160144.680960459@linutronix.de
parent a4af767a
arch/x86/include/asm/cpu_entry_area.h
@@ -99,6 +99,7 @@ struct cpu_entry_area {
 #define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
 
 extern void setup_cpu_entry_areas(void);
 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
@@ -118,4 +119,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
 	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
 }
 
+#define __this_cpu_ist_top_va(name)				\
+	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+
 #endif

arch/x86/mm/cpu_entry_area.c
@@ -14,6 +14,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -92,6 +93,9 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 	unsigned int npages;
 
 	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+
+	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
+
 	/*
 	 * The exceptions stack mappings in the per cpu area are protected
 	 * by guard pages so each stack must be mapped separately.
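For context, a minimal usage sketch (not part of this commit): how a follow-up conversion can compute an IST stack top directly from the per-CPU mapping via the new accessor. The stack name NMI and the CEA_ESTACK_TOP() helper are assumed to come from earlier patches in this series, not from this commit.

	/* Sketch only: top of the current CPU's NMI exception stack, read via
	 * the new accessor instead of the shadow IST array. */
	unsigned long nmi_stack_top = __this_cpu_ist_top_va(NMI);

	/* The macro expands to an address calculation on the per-CPU
	 * cea_exception_stacks mapping stored by this commit: */
	unsigned long same_top =
		CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), NMI);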