Commit 41074d07 authored by Avi Kivity

KVM: MMU: Fix inherited permissions for emulated guest pte updates

When we emulate a guest pte write, we fail to apply the correct inherited
permissions from the parent ptes.  Now that we store inherited permissions
in the shadow page, we can use that to update the pte permissions correctly.
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent bedbe4ee
@@ -55,7 +55,7 @@ struct kvm_pte_chain {
  * bits 4:7 - page table level for this shadow (1-4)
  * bits 8:9 - page table quadrant for 2-level guests
  * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
- * bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
+ * bits 17:19 - common access permissions for all ptes in this shadow page
  */
 union kvm_mmu_page_role {
 	unsigned word;
@@ -65,7 +65,7 @@ union kvm_mmu_page_role {
 		unsigned quadrant : 2;
 		unsigned pad_for_nice_hex_output : 6;
 		unsigned metaphysical : 1;
-		unsigned hugepage_access : 3;
+		unsigned access : 3;
 	};
 };
@@ -680,7 +680,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gva_t gaddr,
 					     unsigned level,
 					     int metaphysical,
-					     unsigned hugepage_access,
+					     unsigned access,
 					     u64 *parent_pte)
 {
 	union kvm_mmu_page_role role;
@@ -694,7 +694,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	role.glevels = vcpu->mmu.root_level;
 	role.level = level;
 	role.metaphysical = metaphysical;
-	role.hugepage_access = hugepage_access;
+	role.access = access;
 	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
@@ -327,6 +327,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 				  int offset_in_pte)
 {
 	pt_element_t gpte;
+	unsigned pte_access;
 
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
@@ -337,7 +338,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	if (bytes < sizeof(pt_element_t))
 		return;
 	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
-	FNAME(set_pte)(vcpu, gpte, spte, ACC_ALL, ACC_ALL,
+	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
+	FNAME(set_pte)(vcpu, gpte, spte, page->role.access, pte_access,
 		       0, 0, NULL, NULL, gpte_to_gfn(gpte));
 }
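To make the intent of the update_pte() change concrete, here is a minimal userspace sketch of the permission calculation. The mask values, the gpte bit layout, and the gpte_access() helper below are simplified stand-ins, not the kernel's definitions; the point is only that the emulated pte update now takes the permissions inherited from the parent ptes (cached as page->role.access) and ANDs them with the access bits of the guest pte itself, instead of passing ACC_ALL for both.

/*
 * Sketch only: simplified stand-ins for KVM's ACC_* masks and pte bits.
 */
#include <stdint.h>
#include <stdio.h>

#define ACC_EXEC_MASK  1u   /* execute allowed          */
#define ACC_WRITE_MASK 2u   /* write allowed            */
#define ACC_USER_MASK  4u   /* user-mode access allowed */
#define ACC_ALL        (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* x86-style pte bits used by this sketch */
#define PT_WRITABLE_MASK (1ull << 1)
#define PT_USER_MASK     (1ull << 2)
#define PT_NX_MASK       (1ull << 63)

/* Derive the access bits granted by a guest pte on its own. */
static unsigned gpte_access(uint64_t gpte)
{
	unsigned access = ACC_EXEC_MASK;   /* executable unless NX is set */

	if (gpte & PT_NX_MASK)
		access &= ~ACC_EXEC_MASK;
	if (gpte & PT_WRITABLE_MASK)
		access |= ACC_WRITE_MASK;
	if (gpte & PT_USER_MASK)
		access |= ACC_USER_MASK;
	return access;
}

int main(void)
{
	/* Permissions inherited from the parent ptes, as cached in the
	 * shadow page's role: here the parents allow user access but
	 * forbid writes. */
	unsigned inherited = ACC_EXEC_MASK | ACC_USER_MASK;

	/* Guest pte being written: present, writable, user. */
	uint64_t gpte = PT_WRITABLE_MASK | PT_USER_MASK | 1 /* present */;

	/* Before the fix: the emulated update behaved as if both the
	 * parent and the pte granted ACC_ALL, so the shadow pte could
	 * become writable even though a parent pte forbids writes. */
	unsigned before = ACC_ALL & ACC_ALL;

	/* After the fix: the inherited bits constrain the gpte's own
	 * bits, mirroring what update_pte() now does with
	 * page->role.access. */
	unsigned after = inherited & gpte_access(gpte);

	printf("before fix: %#x, after fix: %#x\n", before, after);
	return 0;
}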