Commit cd8eb291 authored by Peter Gonda, committed by Sean Christopherson

KVM: selftests: Add support for allocating/managing protected guest memory

Add support for differentiating between protected (a.k.a. private, a.k.a.
encrypted) memory and normal (a.k.a. shared) memory for VMs that support
protected guest memory, e.g. x86's SEV.  Provide and manage a common
bitmap for tracking whether a given physical page resides in protected
memory, as support for protected memory isn't x86-specific, i.e. adding
an arch hook would be a net negative now and in the future.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Vishal Annapurve <vannapurve@google.com>
Cc: Ackerley Tng <ackerleytng@google.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Reviewed-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
Tested-by: Carlos Bilbao <carlos.bilbao@amd.com>
Originally-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Peter Gonda <pgonda@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20240223004258.3104051-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 57e19f05
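
One way to see the scheme at a glance: each userspace_mem_region gains a
second sparsebit alongside unused_phy_pages, and a page marked in
protected_phy_pages is by construction no longer marked free.  A
hypothetical sanity-check helper (illustration only, not part of the
patch; it assumes the selftests' existing TEST_ASSERT and sparsebit API)
would be:

static void assert_page_state_sane(struct userspace_mem_region *region,
				   sparsebit_idx_t pg)
{
	/*
	 * A page is marked protected only at allocation time, i.e. it
	 * can never be both free and protected at the same time.
	 */
	TEST_ASSERT(!(sparsebit_is_set(region->unused_phy_pages, pg) &&
		      region->protected_phy_pages &&
		      sparsebit_is_set(region->protected_phy_pages, pg)),
		    "Page 0x%lx is both free and protected", pg);
}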
@@ -46,6 +46,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 struct userspace_mem_region {
 	struct kvm_userspace_memory_region2 region;
 	struct sparsebit *unused_phy_pages;
+	struct sparsebit *protected_phy_pages;
 	int fd;
 	off_t offset;
 	enum vm_mem_backing_src_type backing_src_type;
@@ -569,6 +570,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
 		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
 
+#ifndef vm_arch_has_protected_memory
+static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
+{
+	return false;
+}
+#endif
+
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
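
The #ifndef guard gives the generic header a "return false" default while
letting an architecture supply its own version by defining the macro
first.  A sketch of what an arch-side override could look like (the
kvm_vm_arch fields used here, c_bit and s_bit, are assumptions for
illustration, in the spirit of the x86 SEV enabling this series builds
toward; they are not added by this patch):

static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
{
	/*
	 * Assumed arch state: a VM with an encryption bit configured
	 * (e.g. SEV's C-bit) holds protected memory.
	 */
	return arch->c_bit || arch->s_bit;
}

#define vm_arch_has_protected_memory(vm) \
	__vm_arch_has_protected_memory(&(vm)->arch)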
@@ -832,10 +840,23 @@ const char *exit_reason_str(unsigned int exit_reason);
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
+static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+					    vm_paddr_t paddr_min, uint32_t memslot)
+{
+	/*
+	 * By default, allocate memory as protected for VMs that support
+	 * protected memory, as the majority of memory for such VMs is
+	 * protected, i.e. using shared memory is effectively opt-in.
+	 */
+	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+				    vm_arch_has_protected_memory(vm));
+}
+
 /*
  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
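
Usage-wise, existing callers of vm_phy_pages_alloc() are unchanged but now
transparently allocate protected memory on VMs that support it; a test
that explicitly wants shared pages calls the double-underscore variant
with protected=false.  A minimal sketch (example_alloc() is a
hypothetical test helper; KVM_UTIL_MIN_PFN is the selftests' existing
minimum-PFN constant):

static void example_alloc(struct kvm_vm *vm)
{
	/* Protected iff vm_arch_has_protected_memory(vm) returns true. */
	vm_paddr_t gpa = vm_phy_pages_alloc(vm, 1,
					    KVM_UTIL_MIN_PFN * vm->page_size, 0);

	/* Explicitly shared, regardless of VM type. */
	vm_paddr_t shared = __vm_phy_pages_alloc(vm, 1,
						 KVM_UTIL_MIN_PFN * vm->page_size,
						 0, false);

	TEST_ASSERT(gpa != shared, "Allocations must not overlap");
}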
@@ -666,6 +666,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
@@ -1047,6 +1048,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	}
 
 	region->unused_phy_pages = sparsebit_alloc();
+	if (vm_arch_has_protected_memory(vm))
+		region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
@@ -1873,6 +1876,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (region->protected_phy_pages) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1974,6 +1981,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
+ *   protected - True if the pages will be used as protected/private memory
  *
  * Output Args: None
  *
@@ -1985,8 +1993,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
@@ -1999,8 +2008,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		  paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
+	TEST_ASSERT(!protected || region->protected_phy_pages,
+		    "Region doesn't support protected memory");
 	base = pg = paddr_min >> vm->page_shift;
 
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2019,8 +2030,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
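
Since only the allocation path sets bits in protected_phy_pages, the
bitmap is the single source of truth for a page's current state.  Two
hypothetical helpers (not part of this patch) showing how it would be
consumed: one queries a page's state, the other is what an opt-out
conversion back to shared would look like; memslot2region() is the
existing lookup already used above.

static bool is_gpa_protected(struct kvm_vm *vm, uint32_t memslot, vm_paddr_t gpa)
{
	struct userspace_mem_region *region = memslot2region(vm, memslot);

	/* Regions of non-protected VMs never allocate the bitmap. */
	return region->protected_phy_pages &&
	       sparsebit_is_set(region->protected_phy_pages, gpa >> vm->page_shift);
}

static void mark_gpa_shared(struct kvm_vm *vm, uint32_t memslot, vm_paddr_t gpa)
{
	struct userspace_mem_region *region = memslot2region(vm, memslot);

	if (region->protected_phy_pages)
		sparsebit_clear(region->protected_phy_pages, gpa >> vm->page_shift);
}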