Commit c834e5e4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Use '0' as the one and only value for an invalid PAE root

Use '0' to denote an invalid pae_root instead of '0' or INVALID_PAGE.
Unlike root_hpa, the pae_roots hold permission bits and thus are
guaranteed to be non-zero.  Having to deal with both values leads to
bugs, e.g. failing to set back to INVALID_PAGE, warning on the wrong
value, etc...

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210309224207.1218275-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 978c834a
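
Editor's note, for illustration only (not part of the commit): the reasoning in the message can be sanity-checked outside the kernel. The sketch below mirrors KVM's INVALID_PAGE (~(hpa_t)0) and PT_PRESENT_MASK (bit 0) definitions to show that INVALID_PAGE always looks PRESENT to the CPU's PDPTR checks, whereas '0' is both !PRESENT and trivially testable. The typedef and the main() harness are stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;	/* stand-in for the kernel's hpa_t */

/* Mirror arch/x86/kvm/mmu.h definitions for illustration. */
#define INVALID_PAGE		(~(hpa_t)0)
#define PT_PRESENT_MASK		(1ULL << 0)

/* The two definitions this commit adds to mmu_internal.h. */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

int main(void)
{
	/*
	 * All-ones INVALID_PAGE has the PRESENT bit set: the CPU would
	 * read it as a present PDPTR with reserved bits set.
	 */
	printf("INVALID_PAGE & PT_PRESENT_MASK = %llu\n",
	       (unsigned long long)(INVALID_PAGE & PT_PRESENT_MASK));

	/* '0' is !PRESENT to hardware and invalid to KVM, in one test. */
	printf("IS_VALID_PAE_ROOT(INVALID_PAE_ROOT) = %d\n",
	       IS_VALID_PAE_ROOT((hpa_t)INVALID_PAE_ROOT));
	return 0;
}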
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3197,11 +3197,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
 		mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
 	} else if (mmu->pae_root) {
-		for (i = 0; i < 4; ++i)
-			if (mmu->pae_root[i] != 0)
-				mmu_free_root_page(kvm,
-						   &mmu->pae_root[i],
-						   &invalid_list);
+		for (i = 0; i < 4; ++i) {
+			if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
+				continue;
+
+			mmu_free_root_page(kvm, &mmu->pae_root[i],
+					   &invalid_list);
+			mmu->pae_root[i] = INVALID_PAE_ROOT;
+		}
 	}
 	mmu->root_hpa = INVALID_PAGE;
 	mmu->root_pgd = 0;
@@ -3253,8 +3256,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 			return -EIO;
 
 		for (i = 0; i < 4; ++i) {
-			WARN_ON_ONCE(mmu->pae_root[i] &&
-				     VALID_PAGE(mmu->pae_root[i]));
+			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
 
 			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
 					      i << 30, PT32_ROOT_LEVEL, true);
@@ -3328,11 +3330,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	}
 
 	for (i = 0; i < 4; ++i) {
-		WARN_ON_ONCE(mmu->pae_root[i] && VALID_PAGE(mmu->pae_root[i]));
+		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
 
 		if (mmu->root_level == PT32E_ROOT_LEVEL) {
 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
-				mmu->pae_root[i] = 0;
+				mmu->pae_root[i] = INVALID_PAE_ROOT;
 				continue;
 			}
 			root_gfn = pdptrs[i] >> PAGE_SHIFT;
@@ -3450,7 +3452,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu->pae_root[i];
 
-		if (root && VALID_PAGE(root)) {
+		if (IS_VALID_PAE_ROOT(root)) {
 			root &= PT64_BASE_ADDR_MASK;
 			sp = to_shadow_page(root);
 			mmu_sync_children(vcpu, sp);
@@ -5307,7 +5309,7 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 
 	mmu->pae_root = page_address(page);
 	for (i = 0; i < 4; ++i)
-		mmu->pae_root[i] = INVALID_PAGE;
+		mmu->pae_root[i] = INVALID_PAE_ROOT;
 
 	return 0;
 }
--- a/arch/x86/kvm/mmu/mmu_audit.c
+++ b/arch/x86/kvm/mmu/mmu_audit.c
@@ -70,7 +70,7 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 		for (i = 0; i < 4; ++i) {
 			hpa_t root = vcpu->arch.mmu->pae_root[i];
 
-			if (root && VALID_PAGE(root)) {
+			if (IS_VALID_PAE_ROOT(root)) {
 				root &= PT64_BASE_ADDR_MASK;
 				sp = to_shadow_page(root);
 				__mmu_spte_walk(vcpu, sp, fn, 2);
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -20,6 +20,16 @@ extern bool dbg;
 #define MMU_WARN_ON(x) do { } while (0)
 #endif
 
+/*
+ * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
+ * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
+ * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
+ * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
+ * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
+ */
+#define INVALID_PAE_ROOT	0
+#define IS_VALID_PAE_ROOT(x)	(!!(x))
+
 struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
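
Editor's note, a second illustrative sketch: the usage pattern the patch converges on in kvm_mmu_free_roots() is "test with IS_VALID_PAE_ROOT(), free, then reset to INVALID_PAE_ROOT", so a slot never holds a stale or ambiguous value. free_root() below is a hypothetical stand-in for mmu_free_root_page(), and the ORed-in low bits only mimic why a valid PAE root is guaranteed non-zero.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;	/* stand-in for the kernel's hpa_t */

#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

/* Hypothetical stand-in for mmu_free_root_page(). */
static void free_root(hpa_t *root)
{
	printf("freeing root %#llx\n", (unsigned long long)*root);
}

int main(void)
{
	/* Valid PAE roots carry PRESENT/RW/USER bits, hence non-zero. */
	hpa_t pae_root[4] = { 0x1000 | 0x7, 0, 0x3000 | 0x7, 0 };
	int i;

	for (i = 0; i < 4; ++i) {
		if (!IS_VALID_PAE_ROOT(pae_root[i]))
			continue;

		free_root(&pae_root[i]);
		pae_root[i] = INVALID_PAE_ROOT;	/* one invalid value, always */
	}
	return 0;
}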