Commit 54dee993 authored by Marcelo Tosatti, committed by Avi Kivity

KVM: VMX: conditionally disable 2M pages

Disable usage of 2M pages if VMX_EPT_2MB_PAGE_BIT (bit 16) is clear
in MSR_IA32_VMX_EPT_VPID_CAP and EPT is enabled.

[avi: s/largepages_disabled/largepages_enabled/ to avoid negative logic]
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 68f89400
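
For context, the capability test named in the message boils down to reading MSR_IA32_VMX_EPT_VPID_CAP and testing bit 16. A minimal sketch of how cpu_has_vmx_ept_2m_page() can sit on top of cached capability words, assuming the vmx.c-style vmx_capability struct; probe_ept_caps() here is a hypothetical stand-in for the read actually done during VMCS configuration:

```c
/* Sketch (kernel context assumed): probing EPT 2M-page support.
 * Not the verbatim upstream code; names follow vmx.c conventions. */
#define MSR_IA32_VMX_EPT_VPID_CAP	0x48c
#define VMX_EPT_2MB_PAGE_BIT		(1ull << 16)	/* bit 16, per the message */

static struct {
	u32 ept;
	u32 vpid;
} vmx_capability;

static void probe_ept_caps(void)
{
	/* rdmsr() splits the 64-bit MSR into low (ept) and high (vpid) words */
	rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
	      vmx_capability.ept, vmx_capability.vpid);
}

static inline int cpu_has_vmx_ept_2m_page(void)
{
	return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
}
```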
arch/x86/kvm/vmx.c
@@ -1381,6 +1381,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_tpr_shadow())
 		kvm_x86_ops->update_cr8_intercept = NULL;
 
+	if (enable_ept && !cpu_has_vmx_ept_2m_page())
+		kvm_disable_largepages();
+
 	return alloc_kvm_area();
 }
include/linux/kvm_host.h
@@ -224,6 +224,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
virt/kvm/kvm_main.c
@@ -85,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 
 static bool kvm_rebooting;
 
+static bool largepages_enabled = true;
+
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
@@ -1174,9 +1176,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
-		 * other, disable large page support for this slot
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
 		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1))
+		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
+		    !largepages_enabled)
 			for (i = 0; i < largepages; ++i)
 				new.lpage_info[i].write_count = 1;
 	}
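
The XOR test in this hunk works because a 2M mapping can only cover the slot if the guest frame number and the host userspace frame number agree modulo the hugepage size; if any of the low bits differ, guest and host 2M frames can never line up. A standalone illustration in user-space C, with a hypothetical 512-pages-per-hugepage constant standing in for KVM_PAGES_PER_HPAGE:

```c
#include <stdio.h>
#include <stdint.h>

/* One 2M page = 512 * 4K pages on x86; stands in for KVM_PAGES_PER_HPAGE */
#define PAGES_PER_HPAGE 512

/* Nonzero when the guest frame number and host userspace frame number
 * have different offsets within a hugepage, i.e. no 2M mapping fits. */
static int hpage_misaligned(uint64_t base_gfn, uint64_t ugfn)
{
	return (base_gfn ^ ugfn) & (PAGES_PER_HPAGE - 1);
}

int main(void)
{
	/* Both frame numbers hugepage-aligned: large pages usable. */
	printf("%d\n", hpage_misaligned(0x200, 0x400));  /* prints 0 */
	/* Offsets differ by one 4K page: must fall back to 4K mappings. */
	printf("%d\n", hpage_misaligned(0x200, 0x401));  /* prints 1 */
	return 0;
}
```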
@@ -1291,6 +1295,12 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	return r;
 }
 
+void kvm_disable_largepages(void)
+{
+	largepages_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_largepages);
+
 int is_error_page(struct page *page)
 {
 	return page == bad_page;
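
Design note: rather than threading a new flag through the MMU fast paths, the patch reuses existing per-slot bookkeeping. The shadow MMU already declines to map a range with a large page while its lpage_info write_count is nonzero, so a global disable only has to mark every large-page entry up front when a slot is registered. The rename noted in the [avi: ...] remark keeps the flag in positive logic (largepages_enabled) so the condition at the call site reads naturally.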