Commit 89786147 authored by Mohammed Gamal, committed by Paolo Bonzini

KVM: x86: Add helper functions for illegal GPA checking and page fault injection

This patch adds two helper functions that will be used to support virtualizing
MAXPHYADDR in both kvm-intel.ko and kvm.ko.

kvm_fixup_and_inject_pf_error() injects a page fault for a user-specified GVA,
while kvm_mmu_is_illegal_gpa() checks whether a GPA exceeds vCPU address limits.
Signed-off-by: Mohammed Gamal <mgamal@redhat.com>
Message-Id: <20200710154811.418214-2-mgamal@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fe9304d3
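
To see how the two helpers are meant to compose, here is a minimal caller sketch. It is illustrative only and not part of the commit: the function handle_illegal_gpa() and its call site are hypothetical, and the error_code a real intercept handler passes is chosen by that handler.

static int handle_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa,
			      gva_t gva, u16 error_code)
{
	/* Does the GPA exceed the guest's reported MAXPHYADDR? */
	if (kvm_mmu_is_illegal_gpa(vcpu, gpa)) {
		/* Synthesize a #PF on the faulting GVA for the guest. */
		kvm_fixup_and_inject_pf_error(vcpu, gva, error_code);
		return 1;	/* fault queued; resume the guest */
	}
	return 0;		/* GPA is legal; continue normal handling */
}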
arch/x86/kvm/mmu.h
@@ -4,6 +4,7 @@
 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"
+#include "cpuid.h"
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -150,6 +151,11 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
 
+static inline bool kvm_mmu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
+}
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
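
A quick worked example of the predicate: BIT_ULL(cpuid_maxphyaddr(vcpu)) is the first physical address that does not fit in the guest's reported address width, so any GPA at or above it is illegal. With an assumed MAXPHYADDR of 36 bits (illustrative values only):

u64 limit = BIT_ULL(36);		/* 0x1000000000: first illegal GPA */
gpa_t ok  = 0x0fffffffffULL;		/* bit 36 clear -> legal */
gpa_t bad = 0x1000000000ULL;		/* bit 36 set   -> kvm_mmu_is_illegal_gpa() returns true */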
arch/x86/kvm/x86.c
@@ -10738,6 +10738,27 @@ int kvm_spec_ctrl_test_value(u64 value)
 }
 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
 
+void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
+{
+	struct x86_exception fault;
+
+	if (!(error_code & PFERR_PRESENT_MASK) ||
+	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+		/*
+		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
+		 * tables probably do not match the TLB.  Just proceed
+		 * with the error code that the processor gave.
+		 */
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = error_code;
+		fault.nested_page_fault = false;
+		fault.address = gva;
+	}
+	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
+}
+EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
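
The control flow above is easy to misread: gva_to_gpa() fills in the fault structure itself when the software walk fails, so the explicit field assignments run only when the hardware-reported error code should be kept. Summarized as a decision table (a restatement for clarity, not part of the commit):

/*
 * error_code PRESENT bit | software walk result | fault injected with
 * -----------------------+----------------------+---------------------------------
 * clear (not-present)    | walk skipped (||)    | hardware error_code, GVA
 * set                    | GVA maps fine        | hardware error_code, GVA
 *                        |                      | (page tables vs. TLB mismatch)
 * set                    | UNMAPPED_GVA         | exception filled by gva_to_gpa()
 */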
arch/x86/kvm/x86.h
@@ -272,6 +272,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
 bool kvm_vector_hashing_enabled(void);
+void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    int emulation_type, void *insn, int insn_len);
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);