Commit 8cbc7069 authored by Avi Kivity

KVM: MMU: Update accessed and dirty bits after guest pagetable walk

While unspecified, the behaviour of Intel processors is to first
perform the page table walk, then, if the walk was successful, to
atomically update the accessed and dirty bits of walked paging elements.

While we are not required to follow this exactly, doing so will allow us
to perform the access permissions check after the walk is complete, rather
than after each walk step.

(The tricky case is SMEP: a zero in any pte's U bit makes the referenced
page a supervisor page, so we cannot decide to fault upon seeing a one
in a single pte's U bit during the walk itself; only the completed walk
tells us whether the page is a user page.)
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 3d34adec
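The ordering the message describes is easiest to see as two explicit passes.
Below is a minimal, self-contained sketch, not the kernel code; every name in
it (walk_t, MAX_LEVELS, the PT_* masks) is invented for illustration. It shows
why the permission check, and therefore the A/D write-back, must wait for the
full walk: under SMEP the user/supervisor status of the final page is the AND
of the U bits at every level.

/*
 * Hedged sketch of "walk first, update A/D bits only on success".
 * All names here are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_LEVELS	4
#define PT_USER_MASK	(1ull << 2)
#define PT_ACCESSED	(1ull << 5)
#define PT_DIRTY	(1ull << 6)

typedef struct {
	uint64_t pte[MAX_LEVELS];	/* entries seen during the walk */
	int levels;			/* number of levels walked */
} walk_t;

/* Pass 1: check permissions over the *whole* walk.  The page is a user
 * page only if U=1 at every level, so a supervisor fetch under SMEP
 * cannot be declared faulting (or safe) at any single step. */
static bool access_allowed(const walk_t *w, bool smep_supervisor_fetch)
{
	uint64_t combined = ~0ull;

	for (int i = 0; i < w->levels; i++)
		combined &= w->pte[i];	/* U bit ANDs across levels */

	if (smep_supervisor_fetch && (combined & PT_USER_MASK))
		return false;		/* fetching from a user page */
	return true;
}

/* Pass 2: only after a successful walk are accessed/dirty bits set,
 * mirroring the atomic update Intel processors perform. */
static void update_ad_bits(walk_t *w, bool write)
{
	for (int i = 0; i < w->levels; i++)
		w->pte[i] |= PT_ACCESSED;
	if (write)
		w->pte[w->levels - 1] |= PT_DIRTY;
}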
arch/x86/kvm/paging_tmpl.h
@@ -63,10 +63,12 @@
  */
 struct guest_walker {
 	int level;
+	unsigned max_level;
 	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
 	pt_element_t ptes[PT_MAX_FULL_LEVELS];
 	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
 	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
+	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
 	unsigned pt_access;
 	unsigned pte_access;
 	gfn_t gfn;
@@ -119,6 +121,43 @@ static bool FNAME(is_last_gpte)(struct guest_walker *walker,
 	return false;
 }
 
+static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+					     struct kvm_mmu *mmu,
+					     struct guest_walker *walker,
+					     int write_fault)
+{
+	unsigned level, index;
+	pt_element_t pte, orig_pte;
+	pt_element_t __user *ptep_user;
+	gfn_t table_gfn;
+	int ret;
+
+	for (level = walker->max_level; level >= walker->level; --level) {
+		pte = orig_pte = walker->ptes[level - 1];
+		table_gfn = walker->table_gfn[level - 1];
+		ptep_user = walker->ptep_user[level - 1];
+		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
+		if (!(pte & PT_ACCESSED_MASK)) {
+			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
+			pte |= PT_ACCESSED_MASK;
+		}
+		if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
+			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+			pte |= PT_DIRTY_MASK;
+		}
+		if (pte == orig_pte)
+			continue;
+
+		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
+		if (ret)
+			return ret;
+
+		mark_page_dirty(vcpu->kvm, table_gfn);
+		walker->ptes[level - 1] = pte;
+	}
+	return 0;
+}
+
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -126,6 +165,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				    gva_t addr, u32 access)
 {
+	int ret;
 	pt_element_t pte;
 	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
@@ -153,6 +193,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		--walker->level;
 	}
 #endif
+	walker->max_level = walker->level;
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
 	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
@@ -183,6 +224,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
 		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
 			goto error;
+		walker->ptep_user[walker->level - 1] = ptep_user;
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
@@ -214,21 +256,6 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 			eperm = true;
 		}
 
-		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
-			int ret;
-
-			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
-						       sizeof(pte));
-			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
-						  pte, pte|PT_ACCESSED_MASK);
-			if (unlikely(ret < 0))
-				goto error;
-			else if (ret)
-				goto retry_walk;
-
-			mark_page_dirty(vcpu->kvm, table_gfn);
-			pte |= PT_ACCESSED_MASK;
-		}
-
 		walker->ptes[walker->level - 1] = pte;
 
 		if (last_gpte) {
@@ -268,21 +295,12 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	if (!write_fault)
 		protect_clean_gpte(&pte_access, pte);
-	else if (unlikely(!is_dirty_gpte(pte))) {
-		int ret;
-
-		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
-					  pte, pte|PT_DIRTY_MASK);
-		if (unlikely(ret < 0))
-			goto error;
-		else if (ret)
-			goto retry_walk;
-
-		mark_page_dirty(vcpu->kvm, table_gfn);
-		pte |= PT_DIRTY_MASK;
-		walker->ptes[walker->level - 1] = pte;
-	}
+
+	ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+	if (unlikely(ret < 0))
+		goto error;
+	else if (ret)
+		goto retry_walk;
 
 	walker->pt_access = pt_access;
 	walker->pte_access = pte_access;
......
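A design point worth noting: the new helper keeps cmpxchg_gpte()'s three-way
result, visible in the diff above. Negative means the gpte could not be
accessed (walk error); positive means another vcpu changed the entry under us,
so the walk restarts via retry_walk; zero means the A/D bits were published
atomically and the walk result stands. A hedged, stand-alone sketch of that
compare-and-exchange pattern follows, using C11 atomics rather than the
kernel's user-memory cmpxchg helpers; the function name is invented.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-in for FNAME(cmpxchg_gpte): publish 'new' only if
 * the entry still holds 'orig'.  A failed exchange means another vcpu
 * raced with us, so the caller must redo the walk rather than trust
 * its now-stale ptes. */
static int try_set_ad_bits(_Atomic uint64_t *gpte, uint64_t orig, uint64_t new)
{
	uint64_t expected = orig;

	if (atomic_compare_exchange_strong(gpte, &expected, new))
		return 0;	/* updated atomically; walk result stands */
	return 1;		/* lost the race; caller retries the walk */
}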