Commit 1446e331 authored by Ricardo Koller, committed by Marc Zyngier

KVM: selftests: Use the right memslot for code, page-tables, and data allocations

Now that kvm_vm allows specifying different memslots for code, page tables,
and data, use the appropriate memslot when making allocations in
common/library code. Change them accordingly:

- code (allocated by lib/elf) uses the CODE memslot
- stacks, exception tables, and other core data pages (like the TSS in x86)
  use the DATA memslot
- page tables and the PGD use the PT memslot
- test data (anything allocated with vm_vaddr_alloc()) uses the TEST_DATA
  memslot

No functional change intended. All allocators keep using memslot #0.
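
To make the mapping concrete, here is a minimal sketch (not part of the
patch) of how an allocation site selects its backing memslot after this
change. The allocator names, region names, and vm->memslots[] all appear
in the diff below; the helper functions here are hypothetical.

	/*
	 * Sketch only: each allocation names a region type, and the type
	 * is resolved to a memslot through vm->memslots[]. Every entry in
	 * vm->memslots[] still defaults to 0, so behavior is unchanged
	 * until a test points a region at a dedicated slot.
	 */
	#include "kvm_util.h"

	static vm_vaddr_t alloc_scratch_page(struct kvm_vm *vm)
	{
		/* Test data: equivalent to plain vm_vaddr_alloc_page(vm). */
		return __vm_vaddr_alloc_page(vm, MEM_REGION_TEST_DATA);
	}

	static vm_paddr_t alloc_pte_page(struct kvm_vm *vm)
	{
		/* Page tables: backed by whatever slot MEM_REGION_PT maps to. */
		return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					 vm->memslots[MEM_REGION_PT]);
	}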

Cc: Sean Christopherson <seanjc@google.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221017195834.2295901-10-ricarkol@google.com
parent 5485e822
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -407,7 +407,11 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+			    enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
+vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
+				 enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
 
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
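The new declarations take an enum kvm_mem_region_type, introduced earlier
in this series. Its assumed shape, inferred from the region names used in
this commit (NR_MEM_REGIONS is a guess at the sentinel name):

	/* Assumed definition from the parent commit in this series. */
	enum kvm_mem_region_type {
		MEM_REGION_CODE,
		MEM_REGION_DATA,
		MEM_REGION_PT,
		MEM_REGION_TEST_DATA,
		NR_MEM_REGIONS,
	};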
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -82,7 +82,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 		return;
 
 	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
-				     KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+				     vm->memslots[MEM_REGION_PT]);
 	vm->pgd_created = true;
 }
 
@@ -332,8 +333,9 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
 					     vm->page_size;
-	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-				     DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
+	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+				       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+				       MEM_REGION_DATA);
 
 	aarch64_vcpu_setup(vcpu, init);
 
@@ -438,8 +440,8 @@ void route_exception(struct ex_regs *regs, int vector)
 
 void vm_init_descriptor_tables(struct kvm_vm *vm)
 {
-	vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
-				      vm->page_size);
+	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
+					vm->page_size, MEM_REGION_DATA);
 
 	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
 }
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -161,7 +161,8 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
 		seg_vend |= vm->page_size - 1;
 		size_t seg_size = seg_vend - seg_vstart + 1;
 
-		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
+		vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
+						    MEM_REGION_CODE);
 		TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
 			    "virtual memory for segment at requested min addr,\n"
 			    "  segment idx: %u\n"
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1226,32 +1226,15 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
 	return pgidx_start * vm->page_size;
 }
 
-/*
- * VM Virtual Address Allocate
- *
- * Input Args:
- *   vm - Virtual Machine
- *   sz - Size in bytes
- *   vaddr_min - Minimum starting virtual address
- *
- * Output Args: None
- *
- * Return:
- *   Starting guest virtual address
- *
- * Allocates at least sz bytes within the virtual address space of the vm
- * given by vm.  The allocated bytes are mapped to a virtual address >=
- * the address given by vaddr_min.  Note that each allocation uses a
- * a unique set of pages, with the minimum real allocation being at least
- * a page.
- */
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+			    enum kvm_mem_region_type type)
 {
 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
 	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
-					      KVM_UTIL_MIN_PFN * vm->page_size, 0);
+					      KVM_UTIL_MIN_PFN * vm->page_size,
+					      vm->memslots[type]);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
@@ -1272,6 +1255,30 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 	return vaddr_start;
 }
 
+/*
+ * VM Virtual Address Allocate
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   sz - Size in bytes
+ *   vaddr_min - Minimum starting virtual address
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting guest virtual address
+ *
+ * Allocates at least sz bytes within the virtual address space of the vm
+ * given by vm.  The allocated bytes are mapped to a virtual address >=
+ * the address given by vaddr_min.  Note that each allocation uses a
+ * a unique set of pages, with the minimum real allocation being at least
+ * a page. The allocated physical space comes from the TEST_DATA memory region.
+ */
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+{
+	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+}
+
 /*
  * VM Virtual Address Allocate Pages
  *
@@ -1291,6 +1298,11 @@ vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
 	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
 }
 
+vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
+{
+	return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
+}
+
 /*
  * VM Virtual Address Allocate Page
  *
@@ -1856,7 +1868,8 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
 {
-	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+				 vm->memslots[MEM_REGION_PT]);
 }
 
 /*
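The refactor above follows the library's double-underscore convention: the
existing entry points keep their signatures and delegate to a new __-variant
that takes the region type. A usage sketch (hypothetical test snippet; the
vm variable is assumed to be an already-created struct kvm_vm *):

	/* Plain allocations now land in TEST_DATA by default... */
	vm_vaddr_t buf = vm_vaddr_alloc_page(vm);	/* MEM_REGION_TEST_DATA */

	/* ...while callers that care choose a region explicitly. */
	vm_vaddr_t stack = __vm_vaddr_alloc(vm, getpagesize(),
					    KVM_UTIL_MIN_VADDR,
					    MEM_REGION_DATA);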
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -61,7 +61,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 		return;
 
 	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
-				     KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+				     vm->memslots[MEM_REGION_PT]);
 	vm->pgd_created = true;
 }
 
@@ -288,8 +289,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
 					     vm->page_size;
-	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-				     DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
+	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+				       DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
+				       MEM_REGION_DATA);
 
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	riscv_vcpu_mmu_setup(vcpu);
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -21,7 +21,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 		return;
 
 	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
-				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+				   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+				   vm->memslots[MEM_REGION_PT]);
 	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
 
 	vm->pgd = paddr;
 
@@ -167,8 +168,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
 		    vm->page_size);
 
-	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-				     DEFAULT_GUEST_STACK_VADDR_MIN);
+	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+				       DEFAULT_GUEST_STACK_VADDR_MIN,
+				       MEM_REGION_DATA);
 
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -552,7 +552,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
 {
 	if (!vm->gdt)
-		vm->gdt = vm_vaddr_alloc_page(vm);
+		vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
 
 	dt->base = vm->gdt;
 	dt->limit = getpagesize();
@@ -562,7 +562,7 @@ static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
 			  int selector)
 {
 	if (!vm->tss)
-		vm->tss = vm_vaddr_alloc_page(vm);
+		vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
 
 	memset(segp, 0, sizeof(*segp));
 	segp->base = vm->tss;
@@ -647,8 +647,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	vm_vaddr_t stack_vaddr;
 	struct kvm_vcpu *vcpu;
 
-	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
-				     DEFAULT_GUEST_STACK_VADDR_MIN);
+	stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+				       DEFAULT_GUEST_STACK_VADDR_MIN,
+				       MEM_REGION_DATA);
 
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
@@ -1145,8 +1146,8 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
 	extern void *idt_handlers;
 	int i;
 
-	vm->idt = vm_vaddr_alloc_page(vm);
-	vm->handlers = vm_vaddr_alloc_page(vm);
+	vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
+	vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
 	/* Handlers have the same address in both address spaces.*/
 	for (i = 0; i < NUM_INTERRUPTS; i++)
 		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
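Because every vm->memslots[] entry defaults to 0, the patch is a no-op on
its own; a test that wants isolation registers a slot and updates the table.
A hedged sketch of that follow-up step (the slot number, guest physical
address, and page count are hypothetical; the registration call is the
library's existing vm_userspace_mem_region_add()):

	/* Register a dedicated memslot (values are illustrative only). */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    1ULL << 30 /* guest_paddr */, 1 /* slot */,
				    512 /* npages */, 0 /* flags */);

	/* All subsequent page-table allocations now come from slot 1. */
	vm->memslots[MEM_REGION_PT] = 1;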