Commit c5a0ccec authored by David Matlack, committed by Paolo Bonzini

KVM: selftests: Add option to create 2M and 1G EPT mappings

The EPT mapping code in the selftests currently supports mapping only
4K pages. This commit extends it with an option to map at 2M or 1G
instead. A future commit will use this to create large page mappings
when testing eager page splitting.

No functional change intended.
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220520233249.3776001-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4ee602e7
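
As context for the diff below, the new target_level argument lets a test
request a single large mapping where it previously had to create many 4K
mappings. The sketch below is hypothetical and not part of this commit:
the wrapper names map_nested_2m() and map_nested_4k() are invented for
illustration, while __nested_pg_map(), nested_pg_map(), PG_LEVEL_2M, and
PG_LEVEL_4K come from the selftests code changed here.

/*
 * Hypothetical usage sketch (not part of this commit). Both addresses
 * must be aligned to the target page size; __nested_pg_map() asserts
 * this.
 */
static void map_nested_2m(struct vmx_pages *vmx, struct kvm_vm *vm,
			  uint64_t nested_paddr, uint64_t paddr)
{
	/* One 2M EPT mapping instead of 512 individual 4K mappings. */
	__nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_2M);
}

/* Existing callers keep creating 4K mappings through the unchanged wrapper. */
static void map_nested_4k(struct vmx_pages *vmx, struct kvm_vm *vm,
			  uint64_t nested_paddr, uint64_t paddr)
{
	nested_pg_map(vmx, vm, nested_paddr, paddr);
}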
@@ -392,80 +392,90 @@ void nested_vmx_check_supported(void)
 	}
 }
 
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		   uint64_t nested_paddr, uint64_t paddr)
+static void nested_create_pte(struct kvm_vm *vm,
+			      struct eptPageTableEntry *pte,
+			      uint64_t nested_paddr,
+			      uint64_t paddr,
+			      int current_level,
+			      int target_level)
+{
+	if (!pte->readable) {
+		pte->writable = true;
+		pte->readable = true;
+		pte->executable = true;
+		pte->page_size = (current_level == target_level);
+		if (pte->page_size)
+			pte->address = paddr >> vm->page_shift;
+		else
+			pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
+	} else {
+		/*
+		 * Entry already present. Assert that the caller doesn't want
+		 * a hugepage at this level, and that there isn't a hugepage at
+		 * this level.
+		 */
+		TEST_ASSERT(current_level != target_level,
+			    "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
+			    current_level, nested_paddr);
+		TEST_ASSERT(!pte->page_size,
+			    "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
+			    current_level, nested_paddr);
+	}
+}
+
+void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+		     uint64_t nested_paddr, uint64_t paddr, int target_level)
 {
-	uint16_t index[4];
-	struct eptPageTableEntry *pml4e;
+	const uint64_t page_size = PG_LEVEL_SIZE(target_level);
+	struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
+	uint16_t index;
 
 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
 		    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
 
-	TEST_ASSERT((nested_paddr % vm->page_size) == 0,
+	TEST_ASSERT((nested_paddr % page_size) == 0,
 		    "Nested physical address not on page boundary,\n"
-		    "  nested_paddr: 0x%lx vm->page_size: 0x%x",
-		    nested_paddr, vm->page_size);
+		    "  nested_paddr: 0x%lx page_size: 0x%lx",
+		    nested_paddr, page_size);
 	TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond beyond maximum supported,\n"
 		    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
 		    paddr, vm->max_gfn, vm->page_size);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
+	TEST_ASSERT((paddr % page_size) == 0,
 		    "Physical address not on page boundary,\n"
-		    "  paddr: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->page_size);
+		    "  paddr: 0x%lx page_size: 0x%lx",
+		    paddr, page_size);
 	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond beyond maximum supported,\n"
 		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
 		    paddr, vm->max_gfn, vm->page_size);
 
-	index[0] = (nested_paddr >> 12) & 0x1ffu;
-	index[1] = (nested_paddr >> 21) & 0x1ffu;
-	index[2] = (nested_paddr >> 30) & 0x1ffu;
-	index[3] = (nested_paddr >> 39) & 0x1ffu;
-
-	/* Allocate page directory pointer table if not present. */
-	pml4e = vmx->eptp_hva;
-	if (!pml4e[index[3]].readable) {
-		pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
-		pml4e[index[3]].writable = true;
-		pml4e[index[3]].readable = true;
-		pml4e[index[3]].executable = true;
-	}
-
-	/* Allocate page directory table if not present. */
-	struct eptPageTableEntry *pdpe;
-	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
-	if (!pdpe[index[2]].readable) {
-		pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
-		pdpe[index[2]].writable = true;
-		pdpe[index[2]].readable = true;
-		pdpe[index[2]].executable = true;
-	}
-
-	/* Allocate page table if not present. */
-	struct eptPageTableEntry *pde;
-	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
-	if (!pde[index[1]].readable) {
-		pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
-		pde[index[1]].writable = true;
-		pde[index[1]].readable = true;
-		pde[index[1]].executable = true;
-	}
+	for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
+		index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+		pte = &pt[index];
 
-	/* Fill in page table entry. */
-	struct eptPageTableEntry *pte;
-	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
-	pte[index[0]].address = paddr >> vm->page_shift;
-	pte[index[0]].writable = true;
-	pte[index[0]].readable = true;
-	pte[index[0]].executable = true;
+		nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
+
+		if (pte->page_size)
+			break;
+
+		pt = addr_gpa2hva(vm, pte->address * vm->page_size);
+	}
 
 	/*
 	 * For now mark these as accessed and dirty because the only
 	 * testcase we have needs that. Can be reconsidered later.
 	 */
-	pte[index[0]].accessed = true;
-	pte[index[0]].dirty = true;
+	pte->accessed = true;
+	pte->dirty = true;
+}
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+		   uint64_t nested_paddr, uint64_t paddr)
+{
+	__nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
 }
 
 /*
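
The page-table walk in __nested_pg_map() consumes 9 bits of the nested
guest physical address per level, stopping early once it installs a
hugepage at target_level. The standalone program below demonstrates the
index arithmetic; the PG_LEVEL_* definitions are an assumption mirroring
the selftests' processor.h and may differ in detail.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the selftests' PG_LEVEL_* helpers in processor.h. */
#define PG_LEVEL_4K	1
#define PG_LEVEL_2M	2
#define PG_LEVEL_1G	3
#define PG_LEVEL_512G	4
#define PG_LEVEL_SHIFT(_level)	((((_level) - 1) * 9) + 12)
#define PG_LEVEL_SIZE(_level)	(1ull << PG_LEVEL_SHIFT(_level))

int main(void)
{
	uint64_t nested_paddr = 0x40200000;	/* example 2M-aligned nested GPA */

	/* Walk from the PML4 (level 4) down to the 4K page table (level 1). */
	for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--)
		printf("level %d: shift %2d index 0x%03llx page_size 0x%llx\n",
		       level, PG_LEVEL_SHIFT(level),
		       (unsigned long long)((nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu),
		       (unsigned long long)PG_LEVEL_SIZE(level));
	return 0;
}

For a 2M mapping of this example address, the loop stops at level 2 (the
page directory), where the index is (0x40200000 >> 21) & 0x1ff = 1.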