Commit 3dcf6c1b authored by Linus Torvalds

Merge branch 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (74 commits)
  KVM: PPC: Whitespace fix for kvm.h
  KVM: Fix whitespace in kvm_para.h
  KVM: PPC: annotate kvm_rma_init as __init
  KVM: x86 emulator: implement RDPMC (0F 33)
  KVM: x86 emulator: fix RDPMC privilege check
  KVM: Expose the architectural performance monitoring CPUID leaf
  KVM: VMX: Intercept RDPMC
  KVM: SVM: Intercept RDPMC
  KVM: Add generic RDPMC support
  KVM: Expose a version 2 architectural PMU to a guests
  KVM: Expose kvm_lapic_local_deliver()
  KVM: x86 emulator: Use opcode::execute for Group 9 instruction
  KVM: x86 emulator: Use opcode::execute for Group 4/5 instructions
  KVM: x86 emulator: Use opcode::execute for Group 1A instruction
  KVM: ensure that debugfs entries have been created
  KVM: drop bsp_vcpu pointer from kvm struct
  KVM: x86: Consolidate PIT legacy test
  KVM: x86: Do not rely on implicit inclusions
  KVM: Make KVM_INTEL depend on CPU_SUP_INTEL
  KVM: Use memdup_user instead of kmalloc/copy_from_user
  ...
parents e4e11180 da69dee0
@@ -350,15 +350,6 @@ Who: anybody or Florian Mickler <florian@mickler.org>
----------------------------
What: KVM paravirt mmu host support
When: January 2011
Why: The paravirt mmu host support is slower than non-paravirt mmu, both
on newer and older hardware. It is already not exposed to the guest,
and kept only for live migration purposes.
Who: Avi Kivity <avi@redhat.com>
----------------------------
What: iwlwifi 50XX module parameters
When: 3.0
Why: The "..50" modules parameters were used to configure 5000 series and
......
@@ -1178,9 +1178,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
Default is 1 (enabled)
kvm.mmu_audit= [KVM] This is a R/W parameter which allows audit
KVM MMU at runtime.
Default is 0 (off)
......
@@ -1466,6 +1466,31 @@ is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.
4.64 KVM_NMI
Capability: KVM_CAP_USER_NMI
Architectures: x86
Type: vcpu ioctl
Parameters: none
Returns: 0 on success, -1 on error
Queues an NMI on the thread's vcpu. Note this is well defined only
when KVM_CREATE_IRQCHIP has not been called, since this is an interface
between the virtual cpu core and virtual local APIC. After KVM_CREATE_IRQCHIP
has been called, this interface is completely emulated within the kernel.
To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
following algorithm:
- pause the vcpu
- read the local APIC's state (KVM_GET_LAPIC)
- check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
- if so, issue KVM_NMI
- resume the vcpu
Some guests configure the LINT1 NMI input to cause a panic, aiding in
debugging.
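
A minimal userspace sketch of the algorithm above (illustration only, not part
of this patch): it assumes vcpu_fd is the file descriptor of a vcpu whose
thread has already been paused (i.e. it is not currently inside KVM_RUN), and
it uses the architectural local APIC register layout (LVT LINT1 at offset
0x360).

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

#define APIC_LVT_LINT1   0x360     /* LVT LINT1 register offset */
#define APIC_MODE_MASK   0x700     /* delivery mode field */
#define APIC_DM_NMI      0x400     /* NMI delivery mode */
#define APIC_LVT_MASKED  0x10000   /* LVT mask bit */

/* Returns 1 if an NMI was queued, 0 if LINT1 would not deliver one, -1 on error. */
static int inject_lint1_nmi(int vcpu_fd)
{
	struct kvm_lapic_state lapic;
	uint32_t lvt_lint1;

	/* read the local APIC state of the (paused) vcpu */
	if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
		return -1;
	memcpy(&lvt_lint1, &lapic.regs[APIC_LVT_LINT1], sizeof(lvt_lint1));

	/* check whether changing LINT1 would queue an NMI */
	if ((lvt_lint1 & APIC_MODE_MASK) != APIC_DM_NMI ||
	    (lvt_lint1 & APIC_LVT_MASKED))
		return 0;

	/* if so, issue KVM_NMI; the caller then resumes the vcpu */
	if (ioctl(vcpu_fd, KVM_NMI) < 0)
		return -1;
	return 1;
}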
5. The kvm_run structure
Application code obtains a pointer to the kvm_run structure by
......
@@ -774,13 +774,13 @@ struct kvm *kvm_arch_alloc_vm(void)
return kvm;
}
struct kvm_io_range {
struct kvm_ia64_io_range {
unsigned long start;
unsigned long size;
unsigned long type;
};
static const struct kvm_io_range io_ranges[] = {
static const struct kvm_ia64_io_range io_ranges[] = {
{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i, j;
int j;
unsigned long base_gfn;
slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
kvm_for_each_memslot(memslot, slots) {
memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
for (j = 0; j < memslot->npages; j++) {
if (memslot->rmap[j])
put_page((struct page *)memslot->rmap[j]);
@@ -1820,7 +1818,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
memslot = &kvm->memslots->memslots[log->slot];
memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
......
@@ -170,8 +170,8 @@ struct kvm_sregs {
} ppc64;
struct {
__u32 sr[16];
__u64 ibat[8];
__u64 dbat[8];
} ppc32;
} s;
struct {
......
@@ -498,7 +498,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
memslot = &kvm->memslots->memslots[log->slot];
memslot = id_to_memslot(kvm->memslots, log->slot);
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
......
@@ -86,7 +86,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
* to allocate contiguous physical memory for the real memory
* areas for guests.
*/
void kvm_rma_init(void)
void __init kvm_rma_init(void)
{
unsigned long i;
unsigned long j, npages;
......
@@ -197,7 +197,10 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
......
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
void (*halt)(struct x86_emulate_ctxt *ctxt);
void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
#endif
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
......
@@ -16,10 +16,12 @@
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
@@ -31,6 +33,8 @@
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#define KVM_MMIO_SIZE 16
#define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
* One bit set per slot which has memory
* in this shadow page.
*/
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
int clear_spte_count;
#endif
struct rcu_head rcu;
int write_flooding_count;
};
struct kvm_pv_mmu_op_buffer {
struct rcu_head rcu;
void *ptr;
unsigned len;
unsigned processed;
char buf[512] __aligned(sizeof(long));
};
struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
u64 pdptrs[4]; /* pae */
};
enum pmc_type {
KVM_PMC_GP = 0,
KVM_PMC_FIXED,
};
struct kvm_pmc {
enum pmc_type type;
u8 idx;
u64 counter;
u64 eventsel;
struct perf_event *perf_event;
struct kvm_vcpu *vcpu;
};
struct kvm_pmu {
unsigned nr_arch_gp_counters;
unsigned nr_arch_fixed_counters;
unsigned available_event_types;
u64 fixed_ctr_ctrl;
u64 global_ctrl;
u64 global_status;
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
u8 version;
struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
*/
struct kvm_mmu *walk_mmu;
/* only needed in kvm_pv_mmu_op() path, but it's hot so
* put it here to avoid allocation */
struct kvm_pv_mmu_op_buffer mmu_op_buffer;
struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
struct kvm_mmu_memory_cache mmu_page_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
gfn_t last_pt_write_gfn;
int last_pt_write_count;
u64 *last_pte_updated;
gfn_t last_pte_gfn;
struct fpu guest_fpu;
u64 xcr0;
@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
unsigned access;
gfn_t mmio_gfn;
struct kvm_pmu pmu;
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
@@ -444,6 +467,9 @@ struct kvm_vcpu_arch {
cpumask_var_t wbinvd_dirty_mask;
unsigned long last_retry_eip;
unsigned long last_retry_addr;
struct {
bool halted;
gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
unsigned int indirect_shadow_pages;
atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
struct kvm_memory_slot *slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
#define EMULTYPE_RETRY (1 << 3)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int emulation_type, void *insn, int insn_len);
@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
const u8 *new, int bytes);
bool guest_initiated);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
return gpa;
}
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
int kvm_is_in_guest(void);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
#endif /* _ASM_X86_KVM_HOST_H */
@@ -39,8 +39,6 @@
#include <asm/desc.h>
#include <asm/tlbflush.h>
#define MMU_QUEUE_SIZE 1024
static int kvmapf = 1;
static int parse_no_kvmapf(char *arg)
@@ -60,21 +58,10 @@ static int parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
struct kvm_para_state {
u8 mmu_queue[MMU_QUEUE_SIZE];
int mmu_queue_len;
};
static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
static struct kvm_para_state *kvm_para_state(void)
{
return &per_cpu(para_state, raw_smp_processor_id());
}
/*
* No need for any "IO delay" on KVM
*/
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
}
}
static void kvm_mmu_op(void *buffer, unsigned len)
{
int r;
unsigned long a1, a2;
do {
a1 = __pa(buffer);
a2 = 0; /* on i386 __pa() always returns <4G */
r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
buffer += r;
len -= r;
} while (len);
}
static void mmu_queue_flush(struct kvm_para_state *state)
{
if (state->mmu_queue_len) {
kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
state->mmu_queue_len = 0;
}
}
static void kvm_deferred_mmu_op(void *buffer, int len)
{
struct kvm_para_state *state = kvm_para_state();
if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
kvm_mmu_op(buffer, len);
return;
}
if (state->mmu_queue_len + len > sizeof state->mmu_queue)
mmu_queue_flush(state);
memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
state->mmu_queue_len += len;
}
static void kvm_mmu_write(void *dest, u64 val)
{
__u64 pte_phys;
struct kvm_mmu_op_write_pte wpte;
#ifdef CONFIG_HIGHPTE
struct page *page;
unsigned long dst = (unsigned long) dest;
page = kmap_atomic_to_page(dest);
pte_phys = page_to_pfn(page);
pte_phys <<= PAGE_SHIFT;
pte_phys += (dst & ~(PAGE_MASK));
#else
pte_phys = (unsigned long)__pa(dest);
#endif
wpte.header.op = KVM_MMU_OP_WRITE_PTE;
wpte.pte_val = val;
wpte.pte_phys = pte_phys;
kvm_deferred_mmu_op(&wpte, sizeof wpte);
}
/*
* We only need to hook operations that are MMU writes. We hook these so that
* we can use lazy MMU mode to batch these operations. We could probably
* improve the performance of the host code if we used some of the information
* here to simplify processing of batched writes.
*/
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
kvm_mmu_write(pmdp, pmd_val(pmd));
}
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
kvm_mmu_write(ptep, pte_val(pte));
}
static void kvm_pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
kvm_mmu_write(ptep, 0);
}
static void kvm_pmd_clear(pmd_t *pmdp)
{
kvm_mmu_write(pmdp, 0);
}
#endif
static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
kvm_mmu_write(pudp, pud_val(pud));
}
#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */
static void kvm_flush_tlb(void)
{
struct kvm_mmu_op_flush_tlb ftlb = {
.header.op = KVM_MMU_OP_FLUSH_TLB,
};
kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}
static void kvm_release_pt(unsigned long pfn)
{
struct kvm_mmu_op_release_pt rpt = {
.header.op = KVM_MMU_OP_RELEASE_PT,
.pt_phys = (u64)pfn << PAGE_SHIFT,
};
kvm_mmu_op(&rpt, sizeof rpt);
}
static void kvm_enter_lazy_mmu(void)
{
paravirt_enter_lazy_mmu();
}
static void kvm_leave_lazy_mmu(void)
{
struct kvm_para_state *state = kvm_para_state();
mmu_queue_flush(state);
paravirt_leave_lazy_mmu();
}
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
pv_mmu_ops.set_pte = kvm_set_pte;
pv_mmu_ops.set_pte_at = kvm_set_pte_at;
pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
pv_mmu_ops.pte_clear = kvm_pte_clear;
pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
pv_mmu_ops.release_pte = kvm_release_pt;
pv_mmu_ops.release_pmd = kvm_release_pt;
pv_mmu_ops.release_pud = kvm_release_pt;
pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
}
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
......
@@ -35,6 +35,7 @@ config KVM
select KVM_MMIO
select TASKSTATS
select TASK_DELAY_ACCT
select PERF_EVENTS
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -52,6 +53,8 @@ config KVM
config KVM_INTEL
tristate "KVM for Intel processors support"
depends on KVM
# for perf_guest_get_msrs():
depends on CPU_SUP_INTEL
---help---
Provides support for KVM on Intel processors equipped with the VT
extensions.
......
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o)
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o timer.o
i8254.o timer.o cpuid.o pmu.o
kvm-intel-y += vmx.o
kvm-amd-y += svm.o
......
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H
#include "x86.h"
void kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_SMEP));
}
static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}
#endif
@@ -344,7 +344,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
struct kvm_timer *pt = &ps->pit_timer;
s64 interval;
if (!irqchip_in_kernel(kvm))
if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
return;
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
@@ -397,15 +397,11 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
case 1:
/* FIXME: enhance mode 4 precision */
case 4:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
create_pit_timer(kvm, val, 0);
create_pit_timer(kvm, val, 0);
}
break;
case 2:
case 3:
if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
create_pit_timer(kvm, val, 1);
create_pit_timer(kvm, val, 1);
}
break;
default:
destroy_pit_timer(kvm->arch.vpit);
......
@@ -262,9 +262,10 @@ int kvm_pic_read_irq(struct kvm *kvm)
void kvm_pic_reset(struct kvm_kpic_state *s)
{
int irq;
int irq, i;
struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
struct kvm_vcpu *vcpu;
u8 irr = s->irr, isr = s->imr;
bool found = false;
s->last_irr = 0;
s->irr = 0;
@@ -281,12 +282,19 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
s->special_fully_nested_mode = 0;
s->init4 = 0;
for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
if (kvm_apic_accept_pic_intr(vcpu)) {
if (irr & (1 << irq) || isr & (1 << irq)) {
found = true;
pic_clear_isr(s, irq);
break;
}
}
if (!found)
return;
for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
if (irr & (1 << irq) || isr & (1 << irq))
pic_clear_isr(s, irq);
}
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
......
@@ -38,6 +38,7 @@
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -1120,7 +1121,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
return 0;
}
static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
......
@@ -34,6 +34,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
......
@@ -19,6 +19,15 @@
#include <linux/ratelimit.h>
char const *audit_point_name[] = {
"pre page fault",
"post page fault",
"pre pte write",
"post pte write",
"pre sync",
"post sync"
};
#define audit_printk(kvm, fmt, args...) \
printk(KERN_ERR "audit: (%s) error: " \
fmt, audit_point_name[kvm->arch.audit_point], ##args)
@@ -224,7 +233,10 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
mmu_spte_walk(vcpu, audit_spte);
}
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
static bool mmu_audit;
static struct jump_label_key mmu_audit_key;
static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
@@ -236,18 +248,18 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
audit_vcpu_spte(vcpu);
}
static bool mmu_audit;
static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
if (static_branch((&mmu_audit_key)))
__kvm_mmu_audit(vcpu, point);
}
static void mmu_audit_enable(void)
{
int ret;
if (mmu_audit)
return;
ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
jump_label_inc(&mmu_audit_key);
WARN_ON(ret);
mmu_audit = true;
}
@@ -256,8 +268,7 @@ static void mmu_audit_disable(void)
if (!mmu_audit)
return;
unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
jump_label_dec(&mmu_audit_key);
tracepoint_synchronize_unregister();
mmu_audit = false;
}
......
@@ -243,25 +243,6 @@ TRACE_EVENT(
TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
__entry->access)
);
TRACE_EVENT(
kvm_mmu_audit,
TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
TP_ARGS(vcpu, audit_point),
TP_STRUCT__entry(
__field(struct kvm_vcpu *, vcpu)
__field(int, audit_point)
),
TP_fast_assign(
__entry->vcpu = vcpu;
__entry->audit_point = audit_point;
),
TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
audit_point_name[__entry->audit_point])
);
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
......
@@ -497,6 +497,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_next(&it)) {
gfn_t table_gfn;
clear_sp_write_flooding_count(it.sptep);
drop_large_spte(vcpu, it.sptep);
sp = NULL;
@@ -522,6 +523,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_next(&it)) {
gfn_t direct_gfn;
clear_sp_write_flooding_count(it.sptep);
validate_direct_spte(vcpu, it.sptep, direct_access);
drop_large_spte(vcpu, it.sptep);
@@ -536,6 +538,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(it.sptep, sp);
}
clear_sp_write_flooding_count(it.sptep);
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
user_fault, write_fault, emulate, it.level,
gw->gfn, pfn, prefault, map_writable);
@@ -599,11 +602,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
*/
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
if (!prefault) {
if (!prefault)
inject_page_fault(vcpu, &walker.fault);
/* reset fork detector */
vcpu->arch.last_pt_write_count = 0;
}
return 0;
}
@@ -631,7 +632,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
@@ -641,11 +642,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
sptep, *sptep, emulate);
if (!emulate)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++vcpu->stat.pf_fixed;
trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
return emulate;
@@ -656,65 +654,66 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
return 0;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
int offset = 0;
WARN_ON(sp->role.level != 1);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
gpa_t pte_gpa = -1;
int level;
u64 *sptep;
int need_flush = 0;
vcpu_clear_mmio_info(vcpu, gva);
spin_lock(&vcpu->kvm->mmu_lock);
/*
* No need to check return value here, rmap_can_add() can
* help us to skip pte prefetch later.
*/
mmu_topup_memory_caches(vcpu);
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;
sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
int offset, shift;
pt_element_t gpte;
gpa_t pte_gpa;
if (!sp->unsync)
break;
shift = PAGE_SHIFT -
pte_gpa = FNAME(get_level1_sp_gpa)(sp);
(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
offset = sp->role.quadrant << shift;
pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) {
if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
if (is_large_pte(*sptep))
kvm_flush_remote_tlbs(vcpu->kvm);
--vcpu->kvm->stat.lpages;
drop_spte(vcpu->kvm, sptep);
need_flush = 1;
} else if (is_mmio_spte(*sptep))
mmu_spte_clear_no_track(sptep);
break;
if (!rmap_can_add(vcpu))
break;
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
break;
FNAME(update_pte)(vcpu, sp, sptep, &gpte);
}
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
atomic_inc(&vcpu->kvm->arch.invlpg_counter);
spin_unlock(&vcpu->kvm->mmu_lock);
if (pte_gpa == -1)
return;
if (mmu_topup_memory_caches(vcpu))
return;
kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -769,19 +768,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
int i, offset, nr_present;
int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
offset = nr_present = 0;
/* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct);
if (PTTYPE == 32)
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
offset = sp->role.quadrant << PT64_LEVEL_BITS;
first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
......
@@ -1014,6 +1014,7 @@ static void init_vmcb(struct vcpu_svm *svm)
set_intercept(svm, INTERCEPT_NMI);
set_intercept(svm, INTERCEPT_SMI);
set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
set_intercept(svm, INTERCEPT_RDPMC);
set_intercept(svm, INTERCEPT_CPUID);
set_intercept(svm, INTERCEPT_INVD);
set_intercept(svm, INTERCEPT_HLT);
@@ -2770,6 +2771,19 @@ static int emulate_on_interception(struct vcpu_svm *svm)
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
static int rdpmc_interception(struct vcpu_svm *svm)
{
int err;
if (!static_cpu_has(X86_FEATURE_NRIPS))
return emulate_on_interception(svm);
err = kvm_rdpmc(&svm->vcpu);
kvm_complete_insn_gp(&svm->vcpu, err);
return 1;
}
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
@@ -3190,6 +3204,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
[SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
......
@@ -18,9 +18,10 @@
#include <linux/atomic.h>
#include "kvm_timer.h"
static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
int restart_timer = 0;
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_vcpu *vcpu = ktimer->vcpu;
wait_queue_head_t *q = &vcpu->wq;
/*
@@ -40,26 +41,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
if (ktimer->t_ops->is_periodic(ktimer)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
restart_timer = 1;
}
return restart_timer;
}
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
int restart_timer;
struct kvm_vcpu *vcpu;
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
vcpu = ktimer->vcpu;
if (!vcpu)
return HRTIMER_NORESTART;
restart_timer = __kvm_timer_fn(vcpu, ktimer);
if (restart_timer)
return HRTIMER_RESTART;
else
} else
return HRTIMER_NORESTART;
}
@@ -18,6 +18,7 @@
#include "irq.h"
#include "mmu.h"
#include "cpuid.h"
#include <linux/kvm_host.h>
#include <linux/module.h>
@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
int save_nmsrs, index;
unsigned long *msr_bitmap;
vmx_load_host_state(vmx);
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
CPU_BASED_RDPMC_EXITING |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 1;
/* Otherwise falls through */
default:
vmx_load_host_state(to_vmx(vcpu));
if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
vmx_load_host_state(to_vmx(vcpu));
data = msr->data;
break;
}
@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
switch (msr_index) {
case MSR_EFER:
vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data);
break;
#ifdef CONFIG_X86_64
@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
vmx_load_host_state(vmx);
msr->data = data;
break;
}
@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING;
CPU_BASED_INVLPG_EXITING |
CPU_BASED_RDPMC_EXITING;
if (yield_on_hlt)
min |= CPU_BASED_HLT_EXITING;
@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
gfn_t base_gfn;
slots = kvm_memslots(kvm);
base_gfn = slots->memslots[0].base_gfn +
slot = id_to_memslot(slots, 0);
kvm->memslots->memslots[0].npages - 3;
base_gfn = slot->base_gfn + slot->npages - 3;
return base_gfn << PAGE_SHIFT;
}
return kvm->arch.tss_addr;
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
/* We can get here when nested_run_pending caused
* vmx_interrupt_allowed() to return false. In this case, do
* nothing - the interrupt will be injected later.
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
/*
* We get here if vmx_interrupt_allowed() said we can't
* inject to L1 now because L2 must run. Ask L2 to exit
* right after entry, so we can inject to L1 more promptly.
*/
kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
return;
}
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
struct vmcs12 *vmcs12;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (to_vmx(vcpu)->nested.nested_run_pending)
if (to_vmx(vcpu)->nested.nested_run_pending ||
(vmcs12->idt_vectoring_info_field &
VECTORING_INFO_VALID_MASK))
return 0;
nested_vmx_vmexit(vcpu);
vmcs12 = get_vmcs12(vcpu);
vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
vmcs12->vm_exit_intr_info = 0;
/* fall through to normal code, but now in L1, not L2 */
@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
return 1;
}
static int handle_rdpmc(struct kvm_vcpu *vcpu)
{
int err;
err = kvm_rdpmc(vcpu);
kvm_complete_insn_gp(vcpu, err);
return 1;
}
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_RDPMC] = handle_rdpmc,
[EXIT_REASON_VMCALL] = handle_vmcall,
[EXIT_REASON_VMCLEAR] = handle_vmclear,
[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
......
@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);
extern u64 host_xcr0;
#endif
@@ -14,6 +14,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
@@ -50,6 +51,9 @@
#define KVM_REQ_APF_HALT 12
#define KVM_REQ_STEAL_UPDATE 13
#define KVM_REQ_NMI 14
#define KVM_REQ_IMMEDIATE_EXIT 15
#define KVM_REQ_PMU 16
#define KVM_REQ_PMI 17
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
@@ -179,6 +183,7 @@ struct kvm_memory_slot {
unsigned long *rmap;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_head;
unsigned long nr_dirty_pages;
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr;
int user_alloc;
@@ -224,11 +229,20 @@ struct kvm_irq_routing_table {};
#endif
#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
/*
* Note:
* memslots are not sorted by id anymore, please use id_to_memslot()
* to get the memslot by its id.
*/
struct kvm_memslots {
int nmemslots;
u64 generation;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
KVM_PRIVATE_MEM_SLOTS];
/* The mapping table from slot id to the index in memslots[]. */
int id_to_index[KVM_MEM_SLOTS_NUM];
};
struct kvm {
@@ -239,7 +253,6 @@ struct kvm {
struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
@@ -302,6 +315,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
memslot++)
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -314,6 +332,7 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
@@ -322,6 +341,18 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
|| lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
slot = &slots->memslots[index];
WARN_ON(slot->id != id);
return slot;
}
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
......
@@ -35,4 +35,3 @@ static inline int kvm_para_has_feature(unsigned int feature)
}
#endif /* __KERNEL__ */
#endif /* __LINUX_KVM_PARA_H */
@@ -71,6 +71,7 @@ void jump_label_inc(struct jump_label_key *key)
atomic_inc(&key->enabled);
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(jump_label_inc);
static void __jump_label_dec(struct jump_label_key *key,
unsigned long rate_limit, struct delayed_work *work)
@@ -86,6 +87,7 @@ static void __jump_label_dec(struct jump_label_key *key,
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(jump_label_dec);
static void jump_label_update_timeout(struct work_struct *work)
{
......
@@ -28,9 +28,15 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
* (addr,len) is fully included in
* (zone->addr, zone->size)
*/
if (len < 0)
return (dev->zone.addr <= addr &&
return 0;
addr + len <= dev->zone.addr + dev->zone.size);
if (addr + len < addr)
return 0;
if (addr < dev->zone.addr)
return 0;
if (addr + len > dev->zone.addr + dev->zone.size)
return 0;
return 1;
}
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
......
@@ -185,7 +185,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.dest_mode = 0; /* Physical mode. */
/* need to read apic_id from apic regiest since
* it can be rewritten */
irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
}
#endif
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
@@ -332,9 +332,18 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
(void*)addr, len, val);
ASSERT(!(addr & 0xf)); /* check alignment */
if (len == 4 || len == 8)
switch (len) {
case 8:
case 4:
data = *(u32 *) val;
else {
break;
case 2:
data = *(u16 *) val;
break;
case 1:
data = *(u8 *) val;
break;
default:
printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
return 0;
}
@@ -343,7 +352,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
spin_lock(&ioapic->lock);
switch (addr) {
case IOAPIC_REG_SELECT:
ioapic->ioregsel = data;
ioapic->ioregsel = data & 0xFF; /* 8-bit register */
break;
case IOAPIC_REG_WINDOW:
......
@@ -134,14 +134,15 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int i, idx, r = 0;
int idx, r = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
kvm_for_each_memslot(memslot, slots) {
r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
r = kvm_iommu_map_pages(kvm, memslot);
if (r)
break;
}
@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i, idx;
int idx;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
kvm_for_each_memslot(memslot, slots)
kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
slots->memslots[i].npages);
}
srcu_read_unlock(&kvm->srcu, idx);
return 0;
......