Commit 75d090fd authored by Kirill A. Shutemov, committed by Borislav Petkov (AMD)

x86/tdx: Add unaccepted memory support

Hook up TDX-specific code to accept memory.

Accepting the memory is done with the ACCEPT_PAGE module call on every page
in the range. The MAP_GPA hypercall is not required as the unaccepted memory
is already considered private.

Extract the part of tdx_enc_status_changed() that does memory acceptance
into a new helper. Move the helper to tdx-shared.c. It is going to be used
by both the main kernel and the decompressor.

  [ bp: Fix the INTEL_TDX_GUEST=y, KVM_GUEST=n build. ]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230606142637.5171-10-kirill.shutemov@linux.intel.com
parent c2b353ae
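For reference, a condensed sketch of the acceptance flow described in the message above. The function name accept_range_4k_sketch() is hypothetical; it assumes only the __tdx_module_call() wrapper and the TDX_ACCEPT_PAGE leaf that appear in the diff below, and it skips the large-page logic of the real helpers (try_accept_one()/tdx_accept_memory()):

/* Illustrative only: accept a physical range with 4K ACCEPT_PAGE calls */
static bool accept_range_4k_sketch(phys_addr_t start, phys_addr_t end)
{
	while (start < end) {
		/* Bits 2:0 of RCX encode the page size; 0 means 4K */
		u64 tdcall_rcx = start | 0;

		/* No MAP_GPA needed: unaccepted memory is already private */
		if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
			return false;

		start += PAGE_SIZE;
	}
	return true;
}

The real helper tries 1G and 2M accepts first so the TDX module can keep large Secure EPT entries and fewer calls are needed, falling back to 4K only when alignment or remaining length requires it.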
@@ -884,9 +884,11 @@ config INTEL_TDX_GUEST
bool "Intel TDX (Trust Domain Extensions) - Guest Support"
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE
select UNACCEPTED_MEMORY
help
Support running as a guest under Intel TDX. Without this support,
the guest kernel can not boot or run under TDX.
......
@@ -106,7 +106,7 @@ ifdef CONFIG_X86_64
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o
vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o $(obj)/tdx-shared.o
vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
......
@@ -22,3 +22,22 @@ void error(char *m)
while (1)
asm("hlt");
}
/* EFI libstub provides vsnprintf() */
#ifdef CONFIG_EFI_STUB
void panic(const char *fmt, ...)
{
static char buf[1024];
va_list args;
int len;
va_start(args, fmt);
len = vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (len && buf[len - 1] == '\n')
buf[len - 1] = '\0';
error(buf);
}
#endif
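A hypothetical usage example of the new decompressor panic(); the call site, format string, and the start/end variables are illustrative, not part of the patch. The CONFIG_EFI_STUB guard above is needed because only the EFI libstub provides vsnprintf():

	/* Illustrative only: report a failed acceptance and halt the decompressor */
	if (!tdx_accept_memory(start, end))
		panic("Failed to accept memory %llx-%llx\n",
		      (unsigned long long)start, (unsigned long long)end);

Note that panic() trims a trailing newline before handing the buffer to error(), which does not return.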
@@ -6,5 +6,6 @@
void warn(char *m);
void error(char *m) __noreturn;
void panic(const char *fmt, ...) __noreturn __cold;
#endif /* BOOT_COMPRESSED_ERROR_H */
@@ -2,11 +2,44 @@
#include "error.h"
#include "misc.h"
#include "tdx.h"
#include <asm/shared/tdx.h>
/*
 * accept_memory() and process_unaccepted_memory() are called from the EFI
 * stub, which runs before the decompressor and its early_tdx_detect().
*
* Enumerate TDX directly from the early users.
*/
static bool early_is_tdx_guest(void)
{
static bool once;
static bool is_tdx;
if (!IS_ENABLED(CONFIG_INTEL_TDX_GUEST))
return false;
if (!once) {
u32 eax, sig[3];
cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax,
&sig[0], &sig[2], &sig[1]);
is_tdx = !memcmp(TDX_IDENT, sig, sizeof(sig));
once = true;
}
return is_tdx;
}
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
/* Platform-specific memory-acceptance call goes here */
error("Cannot accept memory");
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
} else {
error("Cannot accept memory: unknown platform\n");
}
}
bool init_unaccepted_memory(void)
......
#include "error.h"
#include "../../coco/tdx/tdx-shared.c"
# SPDX-License-Identifier: GPL-2.0
obj-y += tdx.o tdcall.o
obj-y += tdx.o tdx-shared.o tdcall.o
#include <asm/tdx.h>
#include <asm/pgtable.h>
static unsigned long try_accept_one(phys_addr_t start, unsigned long len,
enum pg_level pg_level)
{
unsigned long accept_size = page_level_size(pg_level);
u64 tdcall_rcx;
u8 page_size;
if (!IS_ALIGNED(start, accept_size))
return 0;
if (len < accept_size)
return 0;
/*
* Pass the page physical address to the TDX module to accept the
* pending, private page.
*
* Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G.
*/
switch (pg_level) {
case PG_LEVEL_4K:
page_size = 0;
break;
case PG_LEVEL_2M:
page_size = 1;
break;
case PG_LEVEL_1G:
page_size = 2;
break;
default:
return 0;
}
tdcall_rcx = start | page_size;
if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
return 0;
return accept_size;
}
bool tdx_accept_memory(phys_addr_t start, phys_addr_t end)
{
/*
* For shared->private conversion, accept the page using
* TDX_ACCEPT_PAGE TDX module call.
*/
while (start < end) {
unsigned long len = end - start;
unsigned long accept_size;
/*
 * Try larger accepts first. It gives the VMM a chance to keep 1G/2M
 * Secure EPT entries where possible and speeds up the process by
 * cutting the number of TDX module calls (if successful).
*/
accept_size = try_accept_one(start, len, PG_LEVEL_1G);
if (!accept_size)
accept_size = try_accept_one(start, len, PG_LEVEL_2M);
if (!accept_size)
accept_size = try_accept_one(start, len, PG_LEVEL_4K);
if (!accept_size)
return false;
start += accept_size;
}
return true;
}
@@ -713,46 +713,6 @@ static bool tdx_cache_flush_required(void)
return true;
}
static unsigned long try_accept_one(phys_addr_t start, unsigned long len,
enum pg_level pg_level)
{
unsigned long accept_size = page_level_size(pg_level);
u64 tdcall_rcx;
u8 page_size;
if (!IS_ALIGNED(start, accept_size))
return 0;
if (len < accept_size)
return 0;
/*
* Pass the page physical address to the TDX module to accept the
* pending, private page.
*
* Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G.
*/
switch (pg_level) {
case PG_LEVEL_4K:
page_size = 0;
break;
case PG_LEVEL_2M:
page_size = 1;
break;
case PG_LEVEL_1G:
page_size = 2;
break;
default:
return 0;
}
tdcall_rcx = start | page_size;
if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
return 0;
return accept_size;
}
/*
* Inform the VMM of the guest's intent for this physical page: shared with
* the VMM or private to the guest. The VMM is expected to change its mapping
@@ -777,33 +737,9 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
return false;
/* private->shared conversion requires only MapGPA call */
if (!enc)
return true;
/*
* For shared->private conversion, accept the page using
* TDX_ACCEPT_PAGE TDX module call.
*/
while (start < end) {
unsigned long len = end - start;
unsigned long accept_size;
/*
* Try larger accepts first. It gives chance to VMM to keep
* 1G/2M Secure EPT entries where possible and speeds up
* process by cutting number of hypercalls (if successful).
*/
accept_size = try_accept_one(start, len, PG_LEVEL_1G);
if (!accept_size)
accept_size = try_accept_one(start, len, PG_LEVEL_2M);
if (!accept_size)
accept_size = try_accept_one(start, len, PG_LEVEL_4K);
if (!accept_size)
return false;
start += accept_size;
}
/* shared->private conversion requires memory to be accepted before use */
if (enc)
return tdx_accept_memory(start, end);
return true;
}
......
@@ -91,5 +91,7 @@ struct tdx_module_output {
u64 __tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
struct tdx_module_output *out);
bool tdx_accept_memory(phys_addr_t start, phys_addr_t end);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_SHARED_TDX_H */
@@ -5,6 +5,8 @@
#include <linux/init.h>
#include <linux/bits.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/shared/tdx.h>
......
#ifndef _ASM_X86_UNACCEPTED_MEMORY_H
#define _ASM_X86_UNACCEPTED_MEMORY_H
#include <linux/efi.h>
#include <asm/tdx.h>
static inline void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
/* Platform-specific memory-acceptance call goes here */
if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
} else {
panic("Cannot accept memory: unknown platform\n");
}
}
static inline struct efi_unaccepted_memory *efi_get_unaccepted_table(void)
{
if (efi.unaccepted == EFI_INVALID_TABLE_ADDR)
return NULL;
return __va(efi.unaccepted);
}
#endif
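To illustrate how the pieces fit together, a minimal sketch of a caller. The function name, the address range, and the direct include of <asm/unaccepted_memory.h> are assumptions for illustration; the real callers are the generic EFI unaccepted-memory code and the decompressor, which track which ranges are still unaccepted:

#include <linux/sizes.h>
#include <asm/unaccepted_memory.h>

/* Illustrative only: accept one 2M chunk that is known to be unaccepted */
static void accept_example_chunk(void)
{
	phys_addr_t start = 0x100000000ULL;	/* hypothetical: 4GB boundary */
	phys_addr_t end = start + SZ_2M;	/* hypothetical: one 2M unit */

	/* On TDX guests this dispatches to tdx_accept_memory(); panics on failure */
	arch_accept_memory(start, end);
}

In the decompressor the same dispatch happens at run time via early_is_tdx_guest(), since cpu_feature_enabled() is not available there.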