Commit 20754c24 authored by Hollis Blanchard, committed by Avi Kivity

KVM: ppc: Stop saving host TLB state

We're saving the host TLB state to memory on every exit, but never using it.
Originally I had thought that we'd want to restore host TLB for heavyweight
exits, but that could actually hurt when context switching to an unrelated host
process (i.e. not qemu).

Since this decreases the performance penalty of all exits, this patch improves
guest boot time by about 15%.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 6a0ab738
...@@ -81,8 +81,6 @@ struct kvm_vcpu_arch { ...@@ -81,8 +81,6 @@ struct kvm_vcpu_arch {
struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
/* Pages which are referenced in the shadow TLB. */ /* Pages which are referenced in the shadow TLB. */
struct page *shadow_pages[PPC44x_TLB_SIZE]; struct page *shadow_pages[PPC44x_TLB_SIZE];
/* Copy of the host's TLB. */
struct tlbe host_tlb[PPC44x_TLB_SIZE];
u32 host_stack; u32 host_stack;
u32 host_pid; u32 host_pid;
......
...@@ -356,7 +356,6 @@ int main(void) ...@@ -356,7 +356,6 @@ int main(void)
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb)); DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
......
...@@ -342,26 +342,15 @@ lightweight_exit: ...@@ -342,26 +342,15 @@ lightweight_exit:
andc r6, r5, r6 andc r6, r5, r6
mtmsr r6 mtmsr r6
/* Save the host's non-pinned TLB mappings, and load the guest mappings /* Load the guest mappings, leaving the host's "pinned" kernel mappings
* over them. Leave the host's "pinned" kernel mappings in place. */ * in place. */
/* XXX optimization: use generation count to avoid swapping unmodified /* XXX optimization: load only modified guest entries. */
* entries. */
mfspr r10, SPRN_MMUCR /* Save host MMUCR. */ mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
lis r8, tlb_44x_hwater@ha lis r8, tlb_44x_hwater@ha
lwz r8, tlb_44x_hwater@l(r8) lwz r8, tlb_44x_hwater@l(r8)
addi r3, r4, VCPU_HOST_TLB - 4
addi r9, r4, VCPU_SHADOW_TLB - 4 addi r9, r4, VCPU_SHADOW_TLB - 4
li r6, 0 li r6, 0
1: 1:
/* Save host entry. */
tlbre r7, r6, PPC44x_TLB_PAGEID
mfspr r5, SPRN_MMUCR
stwu r5, 4(r3)
stwu r7, 4(r3)
tlbre r7, r6, PPC44x_TLB_XLAT
stwu r7, 4(r3)
tlbre r7, r6, PPC44x_TLB_ATTRIB
stwu r7, 4(r3)
/* Load guest entry. */ /* Load guest entry. */
lwzu r7, 4(r9) lwzu r7, 4(r9)
mtspr SPRN_MMUCR, r7 mtspr SPRN_MMUCR, r7
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment