Commit e7d11c7a authored by Xiao Guangrong's avatar Xiao Guangrong Committed by Gleb Natapov

KVM: MMU: zap pages in batch

Zap at least 10 pages before releasing mmu-lock to reduce the overhead
caused by repeatedly acquiring the lock

After the patch, kvm_zap_obsolete_pages can make forward progress anyway,
so update the comments

[ It improves the case 0.6% ~ 1% that do kernel building meanwhile read
  PCI ROM. ]

Note: I am not sure that "10" is the best speculative value; I just
guessed that '10' keeps a vcpu from spending too long in
kvm_zap_obsolete_pages while not starving other mmu-lock waiters.
Signed-off-by: default avatarXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: default avatarMarcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: default avatarGleb Natapov <gleb@redhat.com>
parent 7f52af74
...@@ -4197,14 +4197,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) ...@@ -4197,14 +4197,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
} }
#define BATCH_ZAP_PAGES 10
static void kvm_zap_obsolete_pages(struct kvm *kvm) static void kvm_zap_obsolete_pages(struct kvm *kvm)
{ {
struct kvm_mmu_page *sp, *node; struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list); LIST_HEAD(invalid_list);
int batch = 0;
restart: restart:
list_for_each_entry_safe_reverse(sp, node, list_for_each_entry_safe_reverse(sp, node,
&kvm->arch.active_mmu_pages, link) { &kvm->arch.active_mmu_pages, link) {
int ret;
/* /*
* No obsolete page exists before new created page since * No obsolete page exists before new created page since
* active_mmu_pages is the FIFO list. * active_mmu_pages is the FIFO list.
...@@ -4213,28 +4217,6 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm) ...@@ -4213,28 +4217,6 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
break; break;
/* /*
* Do not repeatedly zap a root page to avoid unnecessary
* KVM_REQ_MMU_RELOAD, otherwise we may not be able to
* progress:
* vcpu 0 vcpu 1
* call vcpu_enter_guest():
* 1): handle KVM_REQ_MMU_RELOAD
* and require mmu-lock to
* load mmu
* repeat:
* 1): zap root page and
* send KVM_REQ_MMU_RELOAD
*
* 2): if (cond_resched_lock(mmu-lock))
*
* 2): hold mmu-lock and load mmu
*
* 3): see KVM_REQ_MMU_RELOAD bit
* on vcpu->requests is set
* then return 1 to call
* vcpu_enter_guest() again.
* goto repeat;
*
* Since we are reversely walking the list and the invalid * Since we are reversely walking the list and the invalid
* list will be moved to the head, skip the invalid page * list will be moved to the head, skip the invalid page
* can help us to avoid the infinity list walking. * can help us to avoid the infinity list walking.
...@@ -4242,13 +4224,18 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm) ...@@ -4242,13 +4224,18 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
if (sp->role.invalid) if (sp->role.invalid)
continue; continue;
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { if (batch >= BATCH_ZAP_PAGES &&
(need_resched() || spin_needbreak(&kvm->mmu_lock))) {
batch = 0;
kvm_mmu_commit_zap_page(kvm, &invalid_list); kvm_mmu_commit_zap_page(kvm, &invalid_list);
cond_resched_lock(&kvm->mmu_lock); cond_resched_lock(&kvm->mmu_lock);
goto restart; goto restart;
} }
if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list)) ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
batch += ret;
if (ret)
goto restart; goto restart;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment