Commit 6c321179 authored by Tom Lendacky, committed by Borislav Petkov (AMD)

x86/sev: Add SNP-specific unaccepted memory support

Add SNP-specific hooks to the unaccepted memory support in the boot
path (__accept_memory()) and the core kernel (accept_memory()) in order
to support booting SNP guests when unaccepted memory is present. Without
this support, SNP guests will fail to boot and/or panic() when unaccepted
memory is present in the EFI memory map.

The process of accepting memory under SNP involves invoking the hypervisor
to perform a page state change for the page to private memory and then
issuing a PVALIDATE instruction to accept the page.
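
For orientation, the two steps for a single 4K page look roughly like the
sketch below. This is illustrative only and not part of the patch:
page_state_change_private() is a hypothetical stand-in for the GHCB page
state change sequence, while pvalidate() and sev_es_terminate() are the
real helpers used by the code added here (which also batches entries into
a struct snp_psc_desc and handles 2M pages):

    /* Illustrative sketch only: accept one 4K page under SNP. */
    static void accept_one_page(unsigned long vaddr, u64 gfn)
    {
            /* Step 1: have the hypervisor mark the page private in the RMP. */
            if (page_state_change_private(gfn))     /* hypothetical helper */
                    sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

            /* Step 2: PVALIDATE the now-private page so the guest can use it. */
            if (pvalidate(vaddr, RMP_PG_SIZE_4K, true))
                    sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
    }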

Since the boot path and the core kernel paths perform similar operations,
move the pvalidate_pages() and vmgexit_psc() functions into sev-shared.c
to avoid code duplication.

Create the new header file arch/x86/boot/compressed/sev.h because adding
the function declaration to any of the existing SEV related header files
pulls in too many other header files, causing the build to fail.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/a52fa69f460fd1876d70074b20ad68210dfc31dd.1686063086.git.thomas.lendacky@amd.com
parent 15d90887
arch/x86/Kconfig
@@ -1543,11 +1543,13 @@ config X86_MEM_ENCRYPT
 config AMD_MEM_ENCRYPT
 	bool "AMD Secure Memory Encryption (SME) support"
 	depends on X86_64 && CPU_SUP_AMD
+	depends on EFI_STUB
 	select DMA_COHERENT_POOL
 	select ARCH_USE_MEMREMAP_PROT
 	select INSTRUCTION_DECODER
 	select ARCH_HAS_CC_PLATFORM
 	select X86_MEM_ENCRYPT
+	select UNACCEPTED_MEMORY
 	help
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
arch/x86/boot/compressed/mem.c
@@ -3,6 +3,7 @@
 #include "error.h"
 #include "misc.h"
 #include "tdx.h"
+#include "sev.h"
 #include <asm/shared/tdx.h>

 /*
@@ -37,6 +38,8 @@ void arch_accept_memory(phys_addr_t start, phys_addr_t end)
 	if (early_is_tdx_guest()) {
 		if (!tdx_accept_memory(start, end))
 			panic("TDX: Failed to accept memory\n");
+	} else if (sev_snp_enabled()) {
+		snp_accept_memory(start, end);
 	} else {
 		error("Cannot accept memory: unknown platform\n");
 	}
arch/x86/boot/compressed/sev.c
@@ -115,7 +115,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 /* Include code for early handlers */
 #include "../../kernel/sev-shared.c"

-static inline bool sev_snp_enabled(void)
+bool sev_snp_enabled(void)
 {
 	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
 }
@@ -181,6 +181,58 @@ static bool early_setup_ghcb(void)
 	return true;
 }

+static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
+				       phys_addr_t pa, phys_addr_t pa_end)
+{
+	struct psc_hdr *hdr;
+	struct psc_entry *e;
+	unsigned int i;
+
+	hdr = &desc->hdr;
+	memset(hdr, 0, sizeof(*hdr));
+
+	e = desc->entries;
+
+	i = 0;
+	while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
+		hdr->end_entry = i;
+
+		e->gfn = pa >> PAGE_SHIFT;
+		e->operation = SNP_PAGE_STATE_PRIVATE;
+		if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
+			e->pagesize = RMP_PG_SIZE_2M;
+			pa += PMD_SIZE;
+		} else {
+			e->pagesize = RMP_PG_SIZE_4K;
+			pa += PAGE_SIZE;
+		}
+
+		e++;
+		i++;
+	}
+
+	if (vmgexit_psc(boot_ghcb, desc))
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+
+	pvalidate_pages(desc);
+
+	return pa;
+}
+
+void snp_accept_memory(phys_addr_t start, phys_addr_t end)
+{
+	struct snp_psc_desc desc = {};
+	unsigned int i;
+	phys_addr_t pa;
+
+	if (!boot_ghcb && !early_setup_ghcb())
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+
+	pa = start;
+	while (pa < end)
+		pa = __snp_accept_memory(&desc, pa, end);
+}
+
 void sev_es_shutdown_ghcb(void)
 {
 	if (!boot_ghcb)
arch/x86/boot/compressed/sev.h (new file)
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD SEV header for early boot related functions.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef BOOT_COMPRESSED_SEV_H
+#define BOOT_COMPRESSED_SEV_H
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+bool sev_snp_enabled(void);
+void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+
+#else
+
+static inline bool sev_snp_enabled(void) { return false; }
+static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+
+#endif
+
+#endif
arch/x86/include/asm/sev.h
@@ -206,6 +206,7 @@ void snp_set_wakeup_secondary_cpu(void);
 bool snp_init(struct boot_params *bp);
 void __init __noreturn snp_abort(void);
 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
@@ -229,6 +230,8 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
 {
 	return -ENOTTY;
 }
+
+static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 #endif

 #endif
arch/x86/include/asm/unaccepted_memory.h
@@ -3,6 +3,7 @@
 #include <linux/efi.h>
 #include <asm/tdx.h>
+#include <asm/sev.h>

 static inline void arch_accept_memory(phys_addr_t start, phys_addr_t end)
 {
@@ -10,6 +11,8 @@ static inline void arch_accept_memory(phys_addr_t start, phys_addr_t end)
 	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
 		if (!tdx_accept_memory(start, end))
 			panic("TDX: Failed to accept memory\n");
+	} else if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
+		snp_accept_memory(start, end);
 	} else {
 		panic("Cannot accept memory: unknown platform\n");
 	}
arch/x86/kernel/sev-shared.c
@@ -12,6 +12,9 @@
 #ifndef __BOOT_COMPRESSED
 #define error(v)	pr_err(v)
 #define has_cpuflag(f)	boot_cpu_has(f)
+#else
+#undef WARN
+#define WARN(condition, format...) (!!(condition))
 #endif

 /* I/O parameters for CPUID-related helpers */
@@ -991,3 +994,103 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 		cpuid_ext_range_max = fn->eax;
 	}
 }
+
+static void pvalidate_pages(struct snp_psc_desc *desc)
+{
+	struct psc_entry *e;
+	unsigned long vaddr;
+	unsigned int size;
+	unsigned int i;
+	bool validate;
+	int rc;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
+		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
+
+		rc = pvalidate(vaddr, size, validate);
+		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
+			unsigned long vaddr_end = vaddr + PMD_SIZE;
+
+			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
+				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+				if (rc)
+					break;
+			}
+		}
+
+		if (rc) {
+			WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc);
+			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+		}
+	}
+}
+
+static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
+{
+	int cur_entry, end_entry, ret = 0;
+	struct snp_psc_desc *data;
+	struct es_em_ctxt ctxt;
+
+	vc_ghcb_invalidate(ghcb);
+
+	/* Copy the input desc into GHCB shared buffer */
+	data = (struct snp_psc_desc *)ghcb->shared_buffer;
+	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
+
+	/*
+	 * As per the GHCB specification, the hypervisor can resume the guest
+	 * before processing all the entries. Check whether all the entries
+	 * are processed. If not, then keep retrying. Note, the hypervisor
+	 * will update the data memory directly to indicate the status, so
+	 * reference the data->hdr everywhere.
+	 *
+	 * The strategy here is to wait for the hypervisor to change the page
+	 * state in the RMP table before guest accesses the memory pages. If the
+	 * page state change was not successful, then later memory access will
+	 * result in a crash.
+	 */
+	cur_entry = data->hdr.cur_entry;
+	end_entry = data->hdr.end_entry;
+
+	while (data->hdr.cur_entry <= data->hdr.end_entry) {
+		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
+
+		/* This will advance the shared buffer data points to. */
+		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
+
+		/*
+		 * Page State Change VMGEXIT can pass error code through
+		 * exit_info_2.
+		 */
+		if (WARN(ret || ghcb->save.sw_exit_info_2,
+			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
+			 ret, ghcb->save.sw_exit_info_2)) {
+			ret = 1;
+			goto out;
+		}
+
+		/* Verify that reserved bit is not set */
+		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
+			ret = 1;
+			goto out;
+		}
+
+		/*
+		 * Sanity check that entry processing is not going backwards.
+		 * This will happen only if hypervisor is tricking us.
+		 */
+		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
+			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
+			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
+			ret = 1;
+			goto out;
+		}
+	}
+
+out:
+	return ret;
+}
arch/x86/kernel/sev.c
@@ -657,38 +657,6 @@ static u64 __init get_jump_table_addr(void)
 	return ret;
 }

-static void pvalidate_pages(struct snp_psc_desc *desc)
-{
-	struct psc_entry *e;
-	unsigned long vaddr;
-	unsigned int size;
-	unsigned int i;
-	bool validate;
-	int rc;
-
-	for (i = 0; i <= desc->hdr.end_entry; i++) {
-		e = &desc->entries[i];
-
-		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
-		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
-		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
-
-		rc = pvalidate(vaddr, size, validate);
-		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
-			unsigned long vaddr_end = vaddr + PMD_SIZE;
-
-			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
-				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
-				if (rc)
-					break;
-			}
-		}
-
-		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
-			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-	}
-}
-
 static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 				  unsigned long npages, enum psc_op op)
 {
@@ -796,72 +764,6 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
 		WARN(1, "invalid memory op %d\n", op);
 }

-static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
-{
-	int cur_entry, end_entry, ret = 0;
-	struct snp_psc_desc *data;
-	struct es_em_ctxt ctxt;
-
-	vc_ghcb_invalidate(ghcb);
-
-	/* Copy the input desc into GHCB shared buffer */
-	data = (struct snp_psc_desc *)ghcb->shared_buffer;
-	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
-
-	/*
-	 * As per the GHCB specification, the hypervisor can resume the guest
-	 * before processing all the entries. Check whether all the entries
-	 * are processed. If not, then keep retrying. Note, the hypervisor
-	 * will update the data memory directly to indicate the status, so
-	 * reference the data->hdr everywhere.
-	 *
-	 * The strategy here is to wait for the hypervisor to change the page
-	 * state in the RMP table before guest accesses the memory pages. If the
-	 * page state change was not successful, then later memory access will
-	 * result in a crash.
-	 */
-	cur_entry = data->hdr.cur_entry;
-	end_entry = data->hdr.end_entry;
-
-	while (data->hdr.cur_entry <= data->hdr.end_entry) {
-		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
-
-		/* This will advance the shared buffer data points to. */
-		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
-
-		/*
-		 * Page State Change VMGEXIT can pass error code through
-		 * exit_info_2.
-		 */
-		if (WARN(ret || ghcb->save.sw_exit_info_2,
-			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
-			 ret, ghcb->save.sw_exit_info_2)) {
-			ret = 1;
-			goto out;
-		}
-
-		/* Verify that reserved bit is not set */
-		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
-			ret = 1;
-			goto out;
-		}
-
-		/*
-		 * Sanity check that entry processing is not going backwards.
-		 * This will happen only if hypervisor is tricking us.
-		 */
-		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
-			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
-			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
-			ret = 1;
-			goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
 static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
 				       unsigned long vaddr_end, int op)
 {
@@ -966,6 +868,20 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 }

+void snp_accept_memory(phys_addr_t start, phys_addr_t end)
+{
+	unsigned long vaddr;
+	unsigned int npages;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	vaddr = (unsigned long)__va(start);
+	npages = (end - start) >> PAGE_SHIFT;
+
+	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
+}
+
 static int snp_set_vmsa(void *va, bool vmsa)
 {
 	u64 attrs;