Commit e0f3f46e authored by David Matlack's avatar David Matlack Committed by Paolo Bonzini

KVM: selftests: Restrict test region to 48-bit physical addresses when using nested

The selftests nested code only supports 4-level paging at the moment.
This means it cannot map nested guest physical addresses with more than
48 bits. Allow perf_test_util nested mode to work on hosts with more
than 48 physical addresses by restricting the guest test region to
48-bits.

While here, opportunistically fix an off-by-one error when dealing with
vm_get_max_gfn(). perf_test_util.c was treating this as the maximum
number of GFNs, rather than the maximum allowed GFN. This didn't result
in any correctness issues, but it did end up shifting the test region
down slightly when using huge pages.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220520233249.3776001-12-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 71d48966
@@ -110,6 +110,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 	struct kvm_vm *vm;
 	uint64_t guest_num_pages, slot0_pages = DEFAULT_GUEST_PHY_PAGES;
 	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
+	uint64_t region_end_gfn;
 	int i;
 
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -151,18 +152,29 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 
 	pta->vm = vm;
 
+	/* Put the test region at the top guest physical memory. */
+	region_end_gfn = vm_get_max_gfn(vm) + 1;
+
+#ifdef __x86_64__
+	/*
+	 * When running vCPUs in L2, restrict the test region to 48 bits to
+	 * avoid needing 5-level page tables to identity map L2.
+	 */
+	if (pta->nested)
+		region_end_gfn = min(region_end_gfn, (1UL << 48) / pta->guest_page_size);
+#endif
 	/*
 	 * If there should be more memory in the guest test region than there
 	 * can be pages in the guest, it will definitely cause problems.
 	 */
-	TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+	TEST_ASSERT(guest_num_pages < region_end_gfn,
 		    "Requested more guest memory than address space allows.\n"
 		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
 		    " vcpus: %d wss: %" PRIx64 "]\n",
-		    guest_num_pages, vm_get_max_gfn(vm), vcpus,
+		    guest_num_pages, region_end_gfn - 1, vcpus,
 		    vcpu_memory_bytes);
 
-	pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
+	pta->gpa = (region_end_gfn - guest_num_pages) * pta->guest_page_size;
 	pta->gpa = align_down(pta->gpa, backing_src_pagesz);
 #ifdef __s390x__
 	/* Align to 1M (segment size) */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment