Commit 541ac971 authored by Borislav Petkov

x86/sev: Make the #VC exception stacks part of the default stacks storage

The size of the exception stacks was increased by the commit in Fixes,
resulting in stacks larger than a page. The #VC exception handling was
only mapping the first (bottom) page, so an SEV-ES guest failed to
boot.

Make the #VC exception stacks part of the default exception stacks
storage and allocate them with a CONFIG_AMD_MEM_ENCRYPT=y .config. Map
them only when a SEV-ES guest has been detected.

Rip out the custom VC stacks mapping and storage code.

 [ bp: Steal and adapt Tom's commit message. ]

Fixes: 7fae4c24 ("x86: Increase exception stack sizes")
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Brijesh Singh <brijesh.singh@amd.com>
Link: https://lkml.kernel.org/r/YVt1IMjIs7pIZTRR@zn.tnic
parent c7419a6e
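
For context, the commit named in Fixes bumped the exception stack order, so each IST stack is now larger than a single 4K page. A sketch of the relevant definitions, reproduced from memory from arch/x86/include/asm/page_64_types.h (details may differ):

/* KASAN-instrumented code needs deeper stacks, so it adds an order on top */
#ifdef CONFIG_KASAN
#define KASAN_STACK_ORDER 1
#else
#define KASAN_STACK_ORDER 0
#endif

/* Was (0 + KASAN_STACK_ORDER) before "x86: Increase exception stack sizes" */
#define EXCEPTION_STACK_ORDER (1 + KASAN_STACK_ORDER)
#define EXCEPTION_STKSZ       (PAGE_SIZE << EXCEPTION_STACK_ORDER) /* 8K without KASAN */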
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -10,6 +10,12 @@
 
 #ifdef CONFIG_X86_64
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
+#else
+#define VC_EXCEPTION_STKSZ	0
+#endif
+
 /* Macro to enforce the same ordering and stack sizes */
 #define ESTACKS_MEMBERS(guardsize, optional_stack_size)		\
 	char	DF_stack_guard[guardsize];				\
@@ -28,7 +34,7 @@
 
 /* The exception stacks' physical storage. No guard pages required */
 struct exception_stacks {
-	ESTACKS_MEMBERS(0, 0)
+	ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
 };
 
 /* The effective cpu entry area mapping with guard pages. */
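
The optional_stack_size parameter is what makes the #VC stacks free when the feature is compiled out: with VC_EXCEPTION_STKSZ defined to 0, the VC and VC2 members become zero-sized arrays and contribute no storage to struct exception_stacks. A minimal stand-alone sketch of the idea (guard pages omitted and the member list abbreviated; zero-length arrays are a GNU C extension the kernel relies on, so build with gcc):

#include <stdio.h>

#define PAGE_SIZE		4096
#define EXCEPTION_STKSZ		(2 * PAGE_SIZE)	/* size after the commit in Fixes */

#define AMD_MEM_ENCRYPT		1	/* toggle to mimic CONFIG_AMD_MEM_ENCRYPT */

#if AMD_MEM_ENCRYPT
#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
#else
#define VC_EXCEPTION_STKSZ	0
#endif

struct exception_stacks {
	char DF_stack[EXCEPTION_STKSZ];
	char NMI_stack[EXCEPTION_STKSZ];
	char DB_stack[EXCEPTION_STKSZ];
	char MCE_stack[EXCEPTION_STKSZ];
	char VC_stack[VC_EXCEPTION_STKSZ];	/* zero-sized when the feature is off */
	char VC2_stack[VC_EXCEPTION_STKSZ];
};

int main(void)
{
	/* Prints 49152 (48K) with the toggle set, 32768 (32K) without */
	printf("sizeof(struct exception_stacks) = %zu\n",
	       sizeof(struct exception_stacks));
	return 0;
}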
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -46,16 +46,6 @@ static struct ghcb __initdata *boot_ghcb;
 struct sev_es_runtime_data {
 	struct ghcb ghcb_page;
 
-	/* Physical storage for the per-CPU IST stack of the #VC handler */
-	char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
-
-	/*
-	 * Physical storage for the per-CPU fall-back stack of the #VC handler.
-	 * The fall-back stack is used when it is not safe to switch back to the
-	 * interrupted stack in the #VC entry code.
-	 */
-	char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
-
 	/*
 	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
 	 * It is needed when an NMI happens while the #VC handler uses the real
@@ -99,27 +89,6 @@ DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
 
-static void __init setup_vc_stacks(int cpu)
-{
-	struct sev_es_runtime_data *data;
-	struct cpu_entry_area *cea;
-	unsigned long vaddr;
-	phys_addr_t pa;
-
-	data = per_cpu(runtime_data, cpu);
-	cea  = get_cpu_entry_area(cpu);
-
-	/* Map #VC IST stack */
-	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
-	pa    = __pa(data->ist_stack);
-	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
-
-	/* Map VC fall-back stack */
-	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
-	pa    = __pa(data->fallback_stack);
-	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
-}
-
 static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
 	unsigned long sp = regs->sp;
@@ -787,7 +756,6 @@ void __init sev_es_init_vc_handling(void)
 	for_each_possible_cpu(cpu) {
 		alloc_runtime_data(cpu);
 		init_ghcb(cpu);
-		setup_vc_stacks(cpu);
 	}
 
 	sev_es_setup_play_dead();
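
The removed setup_vc_stacks() above also shows the actual bug: cea_set_pte() installs a single PTE, i.e. maps exactly one 4K page, which was sufficient while EXCEPTION_STKSZ was one page but leaves the top of an 8K stack unmapped. A toy calculation making the mismatch explicit (the base address is hypothetical, purely for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define EXCEPTION_STKSZ	(2 * PAGE_SIZE)	/* stack size after the commit in Fixes */

int main(void)
{
	/* Hypothetical CEA_ESTACK_BOT(VC) value, for illustration only */
	unsigned long bot    = 0xfffffe0000010000UL;
	unsigned long top    = bot + EXCEPTION_STKSZ;	/* stack grows down from here */
	unsigned long mapped = bot + PAGE_SIZE;		/* one cea_set_pte() covers one page */

	printf("stack   : %#lx-%#lx (%lu bytes)\n", bot, top, top - bot);
	printf("mapped  : %#lx-%#lx (%lu bytes)\n", bot, mapped, mapped - bot);
	printf("unmapped: %lu bytes at the top of the stack\n", top - mapped);
	return 0;
}

Since the stack grows down and is therefore used from the top first, the very first #VC exception touches the unmapped page, which is why SEV-ES guests failed to boot.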
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -110,6 +110,13 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 	cea_map_stack(NMI);
 	cea_map_stack(DB);
 	cea_map_stack(MCE);
+
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
+			cea_map_stack(VC);
+			cea_map_stack(VC2);
+		}
+	}
 }
 #else
 static inline void percpu_setup_exception_stacks(unsigned int cpu)
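
cea_map_stack() is the same helper that maps the other IST stacks; unlike the removed cea_set_pte() calls, it derives the number of pages from the size of the struct member, so it covers the whole stack no matter how large EXCEPTION_STKSZ grows. Roughly, as the macro appears in arch/x86/mm/cpu_entry_area.c (reproduced from memory, details may differ):

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			     estacks->name## _stack, npages,		\
			     PAGE_KERNEL);				\
	} while (0)

Note that with CONFIG_AMD_MEM_ENCRYPT=n the VC members have size zero, so even an unconditional cea_map_stack(VC) would be a no-op; the cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT) check additionally keeps the stacks unmapped on bare metal and on guests that are not SEV-ES.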