Commit b046f4ee authored by Sean Christopherson

KVM: selftests: Remove the obsolete/dead MMU role test

Remove the MMU role test, which was made obsolete by KVM commit
feb627e8 ("KVM: x86: Forbid KVM_SET_CPUID{,2} after KVM_RUN").  The
ongoing costs of keeping the test updated far outweigh any benefits,
e.g. the test _might_ be useful as an example or for documentation
purposes, but otherwise the test is dead weight.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20220614200707.3315957-14-seanjc@google.com
parent 045520e4
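For context, the referenced KVM commit makes KVM reject KVM_SET_CPUID{,2} once a vCPU has entered the guest, which is exactly the sequence the removed test relied on. Below is a minimal, hedged sketch (not part of this commit) of that behavior, reusing the selftests helpers visible in the deleted file; the __vcpu_ioctl() helper and the exact errno value are assumptions, not taken from this patch.

/*
 * Hedged sketch, not part of this commit: illustrates why the MMU role test
 * is obsolete.  Since KVM commit feb627e8 ("KVM: x86: Forbid KVM_SET_CPUID{,2}
 * after KVM_RUN"), changing CPUID after the vCPU has run is expected to fail.
 */
#include <errno.h>
#include "kvm_util.h"
#include "processor.h"

static void guest_code(void)
{
	GUEST_DONE();
}

int main(void)
{
	struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int r;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Setting CPUID before the first KVM_RUN is fine. */
	vcpu_set_cpuid(vcpu, cpuid);

	vcpu_run(vcpu);

	/*
	 * After KVM_RUN, the ioctl is expected to be rejected (assumption:
	 * __vcpu_ioctl() returns the raw ioctl result and sets errno, and
	 * KVM fails the call with EINVAL).
	 */
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
	TEST_ASSERT(r && errno == EINVAL,
		    "KVM_SET_CPUID2 after KVM_RUN should fail with EINVAL");

	kvm_vm_free(vm);
	return 0;
}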
@@ -27,7 +27,6 @@
/x86_64/hyperv_svm_test
/x86_64/max_vcpuid_cap_test
/x86_64/mmio_warning_test
/x86_64/mmu_role_test
/x86_64/monitor_mwait_test
/x86_64/nx_huge_pages_test
/x86_64/platform_info_test
@@ -87,7 +87,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/mmu_role_test
TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
@@ -142,9 +142,6 @@ struct kvm_x86_cpu_feature {
#define CPUID_XSAVE (1ul << 26)
#define CPUID_OSXSAVE (1ul << 27)
/* CPUID.0x8000_0001.EDX */
#define CPUID_GBPAGES (1ul << 26)
/* CPUID.0x8000_000A.EDX */
#define CPUID_NRIPS BIT(3)
// SPDX-License-Identifier: GPL-2.0
#include "kvm_util.h"
#include "processor.h"

#define MMIO_GPA	0x100000000ull

static void guest_code(void)
{
	(void)READ_ONCE(*((uint64_t *)MMIO_GPA));
	(void)READ_ONCE(*((uint64_t *)MMIO_GPA));
	GUEST_ASSERT(0);
}

static void guest_pf_handler(struct ex_regs *regs)
{
	/* PFEC == RSVD | PRESENT (read, kernel). */
	GUEST_ASSERT(regs->error_code == 0x9);
	GUEST_DONE();
}

static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
{
	u32 good_cpuid_val = *cpuid_reg;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	uint64_t cmd;

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

	/* Map 1gb page without a backing memlot. */
	__virt_pg_map(vm, MMIO_GPA, MMIO_GPA, PG_LEVEL_1G);

	vcpu_run(vcpu);

	/* Guest access to the 1gb page should trigger MMIO. */
	TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
		    "Unexpected exit reason: %u (%s), expected MMIO exit (1gb page w/o memslot)\n",
		    run->exit_reason, exit_reason_str(run->exit_reason));

	TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);

	TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,
		    "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);

	/*
	 * Effect the CPUID change for the guest and re-enter the guest. Its
	 * access should now #PF due to the PAGE_SIZE bit being reserved or
	 * the resulting GPA being invalid. Note, kvm_get_supported_cpuid()
	 * returns the struct that contains the entry being modified. Eww.
	 */
	*cpuid_reg = evil_cpuid_val;
	vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());

	/*
	 * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
	 * KVM does not "officially" support mucking with CPUID after KVM_RUN,
	 * and will incorrectly reuse MMIO SPTEs. Don't delete the memslot!
	 * KVM x86 zaps all shadow pages on memslot deletion.
	 */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    MMIO_GPA << 1, 10, 1, 0);

	/* Set up a #PF handler to eat the RSVD #PF and signal all done! */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);

	vcpu_run(vcpu);

	cmd = get_ucall(vcpu, NULL);
	TEST_ASSERT(cmd == UCALL_DONE,
		    "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
		    exit_reason_str(run->exit_reason), cmd);

	/*
	 * Restore the happy CPUID value for the next test. Yes, changes are
	 * indeed persistent across VM destruction.
	 */
	*cpuid_reg = good_cpuid_val;
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	struct kvm_cpuid_entry2 *entry;
	int opt;

	/*
	 * All tests are opt-in because TDP doesn't play nice with reserved #PF
	 * in the GVA->GPA translation. The hardware page walker doesn't let
	 * software change GBPAGES or MAXPHYADDR, and KVM doesn't manually walk
	 * the GVA on fault for performance reasons.
	 */
	bool do_gbpages = false;
	bool do_maxphyaddr = false;

	setbuf(stdout, NULL);

	while ((opt = getopt(argc, argv, "gm")) != -1) {
		switch (opt) {
		case 'g':
			do_gbpages = true;
			break;
		case 'm':
			do_maxphyaddr = true;
			break;
		case 'h':
		default:
			printf("usage: %s [-g (GBPAGES)] [-m (MAXPHYADDR)]\n", argv[0]);
			break;
		}
	}

	__TEST_REQUIRE(do_gbpages || do_maxphyaddr, "No sub-tests selected");

	entry = kvm_get_supported_cpuid_entry(0x80000001);
	TEST_REQUIRE(entry->edx & CPUID_GBPAGES);

	if (do_gbpages) {
		pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
		mmu_role_test(&entry->edx, entry->edx & ~CPUID_GBPAGES);
	}

	if (do_maxphyaddr) {
		pr_info("Test MMIO after changing CPUID.MAXPHYADDR\n\n");
		entry = kvm_get_supported_cpuid_entry(0x80000008);
		mmu_role_test(&entry->eax, (entry->eax & ~0xff) | 0x20);
	}

	return 0;
}