Commit 5ce4786f authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Make sync_page() flush remote TLBs once only

sync_page() calls set_spte() in a loop across a page table, and each of
those calls can trigger a remote TLB flush. It works better if set_spte()
leaves the TLB flushing to its callers, so that sync_page() can aggregate
all the required flushes into a single call.
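
As a minimal standalone C sketch of the pattern (not the kernel code
itself: update_entry(), remote_tlb_flush() and sync_all() are hypothetical
stand-ins for set_spte(), kvm_flush_remote_tlbs() and FNAME(sync_page)):

#include <stdio.h>

/* Flag bits reported by the per-entry update, mirroring set_spte() */
#define NEED_REMOTE_TLB_FLUSH	(1 << 1)

/* Hypothetical stand-in for set_spte(): pretend odd entries changed. */
static int update_entry(int i)
{
	return (i & 1) ? NEED_REMOTE_TLB_FLUSH : 0;
}

/* Hypothetical stand-in for kvm_flush_remote_tlbs(): expensive (IPIs). */
static void remote_tlb_flush(void)
{
	puts("remote TLB flush");
}

/* The sync_page()-style loop: accumulate flags, flush once at the end. */
static void sync_all(int nr_entries)
{
	int ret = 0;
	int i;

	for (i = 0; i < nr_entries; i++)
		ret |= update_entry(i);

	if (ret & NEED_REMOTE_TLB_FLUSH)
		remote_tlb_flush();	/* one flush for the whole loop */
}

int main(void)
{
	sync_all(512);	/* 512 PTEs per x86-64 page table page */
	return 0;
}

Returning bit flags instead of flushing inline lets the caller see the
union of all per-entry requirements and satisfy them with one flush at the
end of the loop rather than one per modified entry.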
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 42522d08
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2724,6 +2724,10 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 	return true;
 }
 
+/* Bits which may be returned by set_spte() */
+#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
+#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int level,
 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
@@ -2800,7 +2804,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
 			pgprintk("%s: found shadow page for %llx, marking ro\n",
 				 __func__, gfn);
-			ret = 1;
+			ret |= SET_SPTE_WRITE_PROTECTED_PT;
 			pte_access &= ~ACC_WRITE_MASK;
 			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
 		}
@@ -2816,7 +2820,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 set_pte:
 	if (mmu_spte_update(sptep, spte))
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
 done:
 	return ret;
 }
@@ -2827,6 +2831,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 {
 	int was_rmapped = 0;
 	int rmap_count;
+	int set_spte_ret;
 	int ret = RET_PF_RETRY;
 
 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
@@ -2854,12 +2859,15 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 		was_rmapped = 1;
 	}
 
-	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
-	      true, host_writable)) {
+	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
+				speculative, true, host_writable);
+	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
 		if (write_fault)
 			ret = RET_PF_EMULATE;
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	}
+	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
+		kvm_flush_remote_tlbs(vcpu->kvm);
 
 	if (unlikely(is_mmio_spte(*sptep)))
 		ret = RET_PF_EMULATE;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -968,6 +968,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
+	int set_spte_ret = 0;
 
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
@@ -1024,12 +1025,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
 
-		set_spte(vcpu, &sp->spt[i], pte_access,
-			 PT_PAGE_TABLE_LEVEL, gfn,
-			 spte_to_pfn(sp->spt[i]), true, false,
-			 host_writable);
+		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
+					 pte_access, PT_PAGE_TABLE_LEVEL,
+					 gfn, spte_to_pfn(sp->spt[i]),
+					 true, false, host_writable);
 	}
 
+	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
+		kvm_flush_remote_tlbs(vcpu->kvm);
+
 	return nr_present;
 }