Commit f95eec9b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Don't put invalid SPs back on the list of active pages

Delete a shadow page from the invalidation list instead of throwing it
back on the list of active pages when it's a root shadow page with
active users.  Invalid active root pages will be explicitly freed by
mmu_free_root_page() when the root_count hits zero, i.e. they don't need
to be put on the active list to avoid leakage.
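For context, the free path this paragraph refers to looks roughly like the sketch
below. This is an approximation of the x86 MMU code (mmu.c) of that era, not a
verbatim copy; helper names such as page_header()/to_shadow_page() and the exact
details vary between kernel versions.

	static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
				       struct list_head *invalid_list)
	{
		struct kvm_mmu_page *sp;

		if (!VALID_PAGE(*root_hpa))
			return;

		sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);

		/* Drop this root's reference on the shadow page. */
		--sp->root_count;

		/*
		 * An invalid root was already taken off active_mmu_pages when
		 * it was zapped; actually free it only once the last
		 * reference is gone.
		 */
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

		*root_hpa = INVALID_PAGE;
	}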

Use sp->role.invalid to detect that a shadow page has already been
zapped, i.e. is not on a list.

WARN if an invalid page is encountered when zapping pages, as it should
now be impossible.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200623193542.7554-2-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fb58a9c3
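As the message notes, sp->role.invalid marks a page that has already been zapped.
The flag is set near the end of __kvm_mmu_prepare_zap_page(); the snippet below is
an approximate, heavily trimmed sketch (not the verbatim function) showing that a
page is flagged invalid whether it was queued on invalid_list or, for an in-use
root, merely unlinked from active_mmu_pages. That is the invariant the new
WARN_ON()s assert: anything still on the active list must not be invalid.

	/*
	 * Approximate, trimmed tail of __kvm_mmu_prepare_zap_page(): by this
	 * point the page has either been queued on invalid_list (root_count
	 * is zero) or, with this patch, list_del()'d from active_mmu_pages
	 * (an in-use root).  Either way it is marked invalid.
	 */
	sp->role.invalid = 1;
	return list_unstable;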
@@ -2748,10 +2748,23 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	if (!sp->root_count) {
 		/* Count self */
 		(*nr_zapped)++;
-		list_move(&sp->link, invalid_list);
+
+		/*
+		 * Already invalid pages (previously active roots) are not on
+		 * the active page list.  See list_del() in the "else" case of
+		 * !sp->root_count.
+		 */
+		if (sp->role.invalid)
+			list_add(&sp->link, invalid_list);
+		else
+			list_move(&sp->link, invalid_list);
 		kvm_mod_used_mmu_pages(kvm, -1);
 	} else {
-		list_move(&sp->link, &kvm->arch.active_mmu_pages);
+		/*
+		 * Remove the active root from the active page list, the root
+		 * will be explicitly freed when the root_count hits zero.
+		 */
+		list_del(&sp->link);

 		/*
 		 * Obsolete pages cannot be used on any vCPUs, see the comment
@@ -5718,12 +5731,11 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 			break;

 		/*
-		 * Skip invalid pages with a non-zero root count, zapping pages
-		 * with a non-zero root count will never succeed, i.e. the page
-		 * will get thrown back on active_mmu_pages and we'll get stuck
-		 * in an infinite loop.
+		 * Invalid pages should never land back on the list of active
+		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
+		 * infinite loop if the page gets put back on the list (again).
 		 */
-		if (sp->role.invalid && sp->root_count)
+		if (WARN_ON(sp->role.invalid))
 			continue;

 		/*
@@ -6001,7 +6013,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (sp->role.invalid && sp->root_count)
+		if (WARN_ON(sp->role.invalid))
 			continue;
 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
 			goto restart;