Commit 7529e767 authored by Paolo Bonzini

Merge branch 'kvm-master' into HEAD

Merge AMD fixes before doing more development work.
parents 4c7ccc3b e7581cac
@@ -51,7 +51,7 @@ static inline u64 rsvd_bits(int s, int e)
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
...
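Note (not part of the patch): rsvd_bits(s, e), visible in the context above, just builds a mask with bits s..e inclusive set. A minimal standalone sketch of the same formula, with u64 swapped for stdint types and an arbitrary illustrative bit range:

#include <stdint.h>
#include <stdio.h>

/* Same formula as rsvd_bits() above: set bits s..e inclusive. */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	/* bits 51:45 -> 0x000fe00000000000 */
	printf("0x%016llx\n", (unsigned long long)rsvd_bits(45, 51));
	return 0;
}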
@@ -247,7 +247,6 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_mmio_access_mask;
 static u64 __read_mostly shadow_present_mask;
@@ -334,19 +333,19 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
-	BUG_ON((mmio_mask & mmio_value) != mmio_value);
+	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_value;
+	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
 }
 
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
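Note: a rough standalone sketch of the tagging scheme the new is_mmio_spte() relies on. The SPTE_SPECIAL_MASK and SPTE_MMIO_MASK values below are assumptions (both taken to be 3ULL << 52, mirroring this tree's mmu.c), not something the hunk itself defines:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SPTE_SPECIAL_MASK	(3ULL << 52)	/* assumption: the two "special" SPTE bits */
#define SPTE_MMIO_MASK		(3ULL << 52)	/* assumption: both set == MMIO SPTE */

static uint64_t shadow_mmio_value;

/* After the patch, callers pass only the value; the mask is implied. */
static void set_mmio_spte_mask(uint64_t mmio_value)
{
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
}

static bool is_mmio_spte(uint64_t spte)
{
	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
}

int main(void)
{
	set_mmio_spte_mask(0);				/* e.g. a caller passing value 0 */
	assert(is_mmio_spte(shadow_mmio_value));	/* tagged SPTE is recognized */
	assert(!is_mmio_spte(1ULL << 52));		/* other special encodings are not */
	return 0;
}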
@@ -569,7 +568,6 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	shadow_dirty_mask = 0;
 	shadow_nx_mask = 0;
 	shadow_x_mask = 0;
-	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
 
@@ -586,16 +584,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * the most significant bits of legal physical address space.
 	 */
 	shadow_nonpresent_or_rsvd_mask = 0;
-	low_phys_bits = boot_cpu_data.x86_cache_bits;
-	if (boot_cpu_data.x86_cache_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len) {
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
+	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
+	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
+		low_phys_bits = boot_cpu_data.x86_cache_bits
+			- shadow_nonpresent_or_rsvd_mask_len;
 		shadow_nonpresent_or_rsvd_mask =
-			rsvd_bits(boot_cpu_data.x86_cache_bits -
-				  shadow_nonpresent_or_rsvd_mask_len,
-				  boot_cpu_data.x86_cache_bits - 1);
-		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
-	} else
-		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
+	}
 
 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
 		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
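Note: a worked example of the new mask computation, with purely illustrative numbers (an L1TF-affected CPU reporting 44 cache bits, and shadow_nonpresent_or_rsvd_mask_len assumed to be 5 for this sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
/* Bits l..h inclusive, mirroring include/linux/bits.h. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static inline uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int cache_bits = 44, mask_len = 5;		/* illustrative values only */
	int low_phys_bits = cache_bits - mask_len;	/* 39 */

	/* bits 43:39 -> 0x00000f8000000000 */
	printf("nonpresent_or_rsvd = 0x%016llx\n",
	       (unsigned long long)rsvd_bits(low_phys_bits, cache_bits - 1));
	/* bits 38:12 -> 0x0000007ffffff000 */
	printf("lower_gfn_mask     = 0x%016llx\n",
	       (unsigned long long)GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT));
	return 0;
}

On a CPU without X86_BUG_L1TF the branch is now skipped entirely, so shadow_nonpresent_or_rsvd_mask stays 0 and low_phys_bits keeps boot_cpu_data.x86_phys_bits.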
@@ -6134,27 +6131,18 @@ static void kvm_set_mmio_spte_mask(void)
 	u64 mask;
 
 	/*
-	 * Set the reserved bits and the present bit of an paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
+	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
+	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+	 * 52-bit physical addresses then there are no reserved PA bits in the
+	 * PTEs and so the reserved PA approach must be disabled.
 	 */
-
-	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
-	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (shadow_phys_bits == 52)
-		mask &= ~1ull;
+	if (shadow_phys_bits < 52)
+		mask = BIT_ULL(51) | PT_PRESENT_MASK;
+	else
+		mask = 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
 static bool get_nx_auto_mode(void)
...
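Note: the net effect of the rewritten kvm_set_mmio_spte_mask() is easiest to see with concrete values. A small sketch, assuming PT_PRESENT_MASK is bit 0 and BIT_ULL(n) is 1ULL << n as in the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define PT_PRESENT_MASK		BIT_ULL(0)	/* assumption: PTE present bit */

int main(void)
{
	for (int shadow_phys_bits = 48; shadow_phys_bits <= 52; shadow_phys_bits += 4) {
		uint64_t mask = (shadow_phys_bits < 52) ?
				BIT_ULL(51) | PT_PRESENT_MASK : 0;
		/* 48 PA bits -> 0x0008000000000001 (reserved-bit #PF usable),
		 * 52 PA bits -> 0 (the reserved-PA trick is disabled). */
		printf("phys_bits=%d mask=0x%016llx\n",
		       shadow_phys_bits, (unsigned long long)mask);
	}
	return 0;
}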
@@ -778,7 +778,7 @@ static __init void svm_adjust_mmio_mask(void)
 	 */
 	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
 static void svm_hardware_teardown(void)
...
@@ -4233,8 +4233,7 @@ static void ept_set_mmio_spte_mask(void)
 	 * EPT Misconfigurations can be generated if the value of bits 2:0
 	 * of an EPT paging-structure entry is 110b (write/execute).
 	 */
-	kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
-				   VMX_EPT_MISCONFIG_WX_VALUE, 0);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0
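Note: a tiny sanity sketch of the "110b" encoding the comment above refers to. The EPT R/W/X bit values are assumptions (R = bit 0, W = bit 1, X = bit 2, as in arch/x86/include/asm/vmx.h), not taken from the hunk itself:

#include <assert.h>

#define VMX_EPT_READABLE_MASK		0x1ULL	/* assumption */
#define VMX_EPT_WRITABLE_MASK		0x2ULL	/* assumption */
#define VMX_EPT_EXECUTABLE_MASK		0x4ULL	/* assumption */
#define VMX_EPT_RWX_MASK		(VMX_EPT_READABLE_MASK | \
					 VMX_EPT_WRITABLE_MASK | \
					 VMX_EPT_EXECUTABLE_MASK)
#define VMX_EPT_MISCONFIG_WX_VALUE	(VMX_EPT_WRITABLE_MASK | \
					 VMX_EPT_EXECUTABLE_MASK)

int main(void)
{
	/* Write+execute without read, i.e. bits 2:0 == 110b, is an EPT
	 * misconfiguration, which KVM uses to trap MMIO accesses. */
	assert((VMX_EPT_MISCONFIG_WX_VALUE & VMX_EPT_RWX_MASK) == 0x6);
	assert(!(VMX_EPT_MISCONFIG_WX_VALUE & VMX_EPT_READABLE_MASK));
	return 0;
}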
@@ -7300,6 +7299,9 @@ static __init void vmx_set_cpu_caps(void)
 	/* CPUID 0x80000001 */
 	if (!cpu_has_vmx_rdtscp())
 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+
+	if (vmx_waitpkg_supported())
+		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 }
 
 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
...
@@ -3799,7 +3799,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
 	unsigned bank_num = mcg_cap & 0xff, bank;
 
 	r = -EINVAL;
-	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+	if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
 		goto out;
 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
 		goto out;
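Note: the one-character change above fixes an off-by-one that rejected the maximum legal bank count. A quick sketch, assuming KVM_MAX_MCE_BANKS is 32 as in arch/x86/include/asm/kvm_host.h:

#include <stdbool.h>
#include <stdio.h>

#define KVM_MAX_MCE_BANKS 32	/* assumption */

/* Validity of the bank count requested via KVM_X86_SETUP_MCE, before/after the fix. */
static bool valid_old(unsigned bank_num)
{
	return bank_num && bank_num < KVM_MAX_MCE_BANKS;
}

static bool valid_new(unsigned bank_num)
{
	return bank_num && bank_num <= KVM_MAX_MCE_BANKS;
}

int main(void)
{
	printf("32 banks: old=%d new=%d\n", valid_old(32), valid_new(32));	/* 0 vs 1 */
	printf("33 banks: old=%d new=%d\n", valid_old(33), valid_new(33));	/* 0 vs 0 */
	return 0;
}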
@@ -5282,6 +5282,10 @@ static void kvm_init_msr_list(void)
 			if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
 				continue;
 			break;
+		case MSR_IA32_UMWAIT_CONTROL:
+			if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
+				continue;
+			break;
 		case MSR_IA32_RTIT_CTL:
 		case MSR_IA32_RTIT_STATUS:
 			if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
...