Commit ef51b2ed authored by Xiao Guangrong, committed by Stefan Bader

KVM: MMU: rename has_wrprotected_page to mmu_gfn_lpage_is_disallowed

kvm_lpage_info->write_count is used to detect whether the large page mapping
for the gfn at the specified level is allowed. Rename it to disallow_lpage to
reflect its purpose, and rename has_wrprotected_page() to
mmu_gfn_lpage_is_disallowed() to make the code clearer.

Later we will extend this mechanism for page tracking: if the gfn is tracked,
then a large mapping for that gfn at any level is not allowed. The new name is
more straightforward for that case.
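As a hedged illustration of the counter's intended semantics (a minimal
standalone sketch, not the kernel code; only the struct and field names
below come from this patch):

	#include <stdbool.h>

	/* Sketch: one kvm_lpage_info per gfn per large-page level. */
	struct kvm_lpage_info {
		int disallow_lpage;	/* > 0: no large page at this level */
	};

	/* Each write-protected page (and, later, each tracked gfn) bumps
	 * the count for every large-page frame that covers it... */
	static void disallow(struct kvm_lpage_info *linfo)
	{
		linfo->disallow_lpage += 1;
	}

	/* ...and the check collapses to the boolean predicate that the
	 * mmu_gfn_lpage_is_disallowed() name conveys. */
	static bool lpage_is_disallowed(struct kvm_lpage_info *linfo)
	{
		return !!linfo->disallow_lpage;
	}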
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

CVE-2018-12207

(cherry picked from commit 92f94f1e)
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent d55d4a9c
Documentation/virtual/kvm/mmu.txt
@@ -392,11 +392,11 @@ To instantiate a large spte, four constraints must be satisfied:
    write-protected pages
  - the guest page must be wholly contained by a single memory slot
 
-To check the last two conditions, the mmu maintains a ->write_count set of
+To check the last two conditions, the mmu maintains a ->disallow_lpage set of
 arrays for each memory slot and large page size. Every write protected page
-causes its write_count to be incremented, thus preventing instantiation of
+causes its disallow_lpage to be incremented, thus preventing instantiation of
 a large spte. The frames at the end of an unaligned memory slot have
-artificially inflated ->write_counts so they can never be instantiated.
+artificially inflated ->disallow_lpages so they can never be instantiated.
 
 Zapping all pages (page generation count)
 =========================================
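(A concrete example of that inflation, with illustrative numbers not taken
from the patch: with 4KB base pages and 2MB large pages there are 512 frames
per large page, so a slot whose base_gfn is 0x1003 is not 2MB-aligned,
0x1003 & 511 == 3; its first large-frame entry gets disallow_lpage = 1 at
slot creation, so a 2MB spte can never cover the partial head, and likewise
for a partial tail.)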
arch/x86/include/asm/kvm_host.h
@@ -624,7 +624,7 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_lpage_info {
-	int write_count;
+	int disallow_lpage;
 };
 
 struct kvm_arch_memory_slot {
arch/x86/kvm/mmu.c
@@ -794,7 +794,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	slot = __gfn_to_memslot(slots, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->write_count += 1;
+		linfo->disallow_lpage += 1;
 	}
 	kvm->arch.indirect_shadow_pages++;
 }
@@ -812,31 +812,32 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	slot = __gfn_to_memslot(slots, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
-		linfo->write_count -= 1;
-		WARN_ON(linfo->write_count < 0);
+		linfo->disallow_lpage -= 1;
+		WARN_ON(linfo->disallow_lpage < 0);
 	}
 	kvm->arch.indirect_shadow_pages--;
 }
 
-static int __has_wrprotected_page(gfn_t gfn, int level,
-				  struct kvm_memory_slot *slot)
+static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
+					  struct kvm_memory_slot *slot)
 {
 	struct kvm_lpage_info *linfo;
 
 	if (slot) {
 		linfo = lpage_info_slot(gfn, slot, level);
-		return linfo->write_count;
+		return !!linfo->disallow_lpage;
 	}
 
-	return 1;
+	return true;
 }
 
-static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
+					int level)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	return __has_wrprotected_page(gfn, level, slot);
+	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
 }
 
 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
@@ -902,7 +903,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
 	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-		if (__has_wrprotected_page(large_gfn, level, slot))
+		if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
 			break;
 
 	return level - 1;
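(Note on the hunk above: the loop walks upward from PT_DIRECTORY_LEVEL and
stops at the first level whose large mapping is disallowed, so level - 1 is
the largest allowed level; if even the 2MB level is disallowed, the result
is PT_PAGE_TABLE_LEVEL, i.e. a 4KB mapping.)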
@@ -2532,7 +2533,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		 * be fixed if guest refault.
 		 */
 		if (level > PT_PAGE_TABLE_LEVEL &&
-		    has_wrprotected_page(vcpu, gfn, level))
+		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
 			goto done;
 
 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2797,7 +2798,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
-	    !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
+	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
 		 * mmu_notifier_retry was successful and we hold the
arch/x86/kvm/x86.c
@@ -8132,6 +8132,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 	int i;
 
 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+		struct kvm_lpage_info *linfo;
 		unsigned long ugfn;
 		int lpages;
 		int level = i + 1;
@@ -8146,15 +8147,16 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 		if (i == 0)
 			continue;
 
-		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
-					sizeof(*slot->arch.lpage_info[i - 1]));
-		if (!slot->arch.lpage_info[i - 1])
+		linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
+		if (!linfo)
 			goto out_free;
 
+		slot->arch.lpage_info[i - 1] = linfo;
+
 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i - 1][0].write_count = 1;
+			linfo[0].disallow_lpage = 1;
 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
+			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
@@ -8166,7 +8168,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			unsigned long j;
 
 			for (j = 0; j < lpages; ++j)
-				slot->arch.lpage_info[i - 1][j].write_count = 1;
+				linfo[j].disallow_lpage = 1;
 		}
 	}
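To see the head/tail inflation arithmetic from kvm_arch_create_memslot in
isolation, here is a hedged standalone sketch (the constant name and all
values are invented for illustration; only the disallow_lpage semantics
come from this patch):

	#include <stdio.h>

	#define PAGES_PER_HPAGE 512UL	/* 4KB base pages per 2MB large page */

	int main(void)
	{
		unsigned long base_gfn = 0x1003;	/* invented, unaligned base */
		unsigned long npages = 0x800;		/* invented slot length */
		/* Number of large frames the slot touches. */
		unsigned long lpages =
			(base_gfn + npages + PAGES_PER_HPAGE - 1) / PAGES_PER_HPAGE -
			base_gfn / PAGES_PER_HPAGE;

		/* Slot does not start on a large-page boundary: partial head. */
		if (base_gfn & (PAGES_PER_HPAGE - 1))
			printf("linfo[0].disallow_lpage = 1\n");
		/* Slot does not end on a large-page boundary: partial tail. */
		if ((base_gfn + npages) & (PAGES_PER_HPAGE - 1))
			printf("linfo[%lu].disallow_lpage = 1\n", lpages - 1);
		return 0;
	}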