Commit f076ab8d authored by Linus Torvalds

Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (70 commits)
  KVM: Adjust smp_call_function_mask() callers to new requirements
  KVM: MMU: Fix potential race setting upper shadow ptes on nonpae hosts
  KVM: x86 emulator: emulate clflush
  KVM: MMU: improve invalid shadow root page handling
  KVM: MMU: nuke shadowed pgtable pages and ptes on memslot destruction
  KVM: Prefix some x86 low level function with kvm_, to avoid namespace issues
  KVM: check injected pic irq within valid pic irqs
  KVM: x86 emulator: Fix HLT instruction
  KVM: Apply the kernel sigmask to vcpus blocked due to being uninitialized
  KVM: VMX: Add ept_sync_context in flush_tlb
  KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held
  x86: KVM guest: make kvm_smp_prepare_boot_cpu() static
  KVM: SVM: fix suspend/resume support
  KVM: s390: rename private structures
  KVM: s390: Set guest storage limit and offset to sane values
  KVM: Fix memory leak on guest exit
  KVM: s390: dont allocate dirty bitmap
  KVM: move slots_lock acquision down to vapic_exit
  KVM: VMX: Fake emulate Intel perfctr MSRs
  KVM: VMX: Fix a wrong usage of vmcs_config
  ...
parents db6d8c7a 597a5f55
@@ -43,7 +43,8 @@ $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+		coalesced_mmio.o)
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
...
@@ -187,6 +187,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = 1;
 		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
 	default:
 		r = 0;
 	}
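
Unlike most capability checks, which report a plain boolean, KVM_CAP_COALESCED_MMIO reports the page offset of the coalesced-MMIO ring inside the vcpu mapping. As a point of reference, a minimal userspace probe might look like the sketch below; it assumes a 2.6.27-era <linux/kvm.h> and trims error handling:

	/* Probe for coalesced MMIO: KVM_CHECK_EXTENSION returns the ring's
	 * page offset within the vcpu mapping, or 0 when unsupported. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int off = kvm < 0 ? -1 :
			ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
		printf("coalesced MMIO page offset: %d\n", off);
		return off <= 0;
	}
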
@@ -195,11 +198,11 @@ int kvm_dev_ioctl_check_extension(long ext)
 }
 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
-					gpa_t addr)
+					gpa_t addr, int len, int is_write)
 {
 	struct kvm_io_device *dev;
-	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
 	return dev;
 }
@@ -231,7 +234,7 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_run->exit_reason = KVM_EXIT_MMIO;
 		return 0;
 mmio:
-	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
+	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
 	if (mmio_dev) {
 		if (!p->dir)
 			kvm_iodevice_write(mmio_dev, p->addr, p->size,
@@ -1035,14 +1038,6 @@ static void kvm_free_vmm_area(void)
 	}
 }
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it. Leave it as blank for IA64.
- */
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 }
@@ -1460,6 +1455,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
...
@@ -4,7 +4,7 @@
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
 obj-$(CONFIG_KVM) += kvm.o
...
@@ -145,6 +145,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_USER_MEMORY:
 		r = 1;
 		break;
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
 	default:
 		r = 0;
 		break;
@@ -167,6 +170,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
@@ -240,10 +247,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 }
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 				    struct kvm_debug_guest *dbg)
 {
...
@@ -31,7 +31,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 }
 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
-				      struct interrupt_info *inti)
+				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -91,7 +91,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 }
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
-				      struct interrupt_info *inti)
+				      struct kvm_s390_interrupt_info *inti)
 {
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -111,7 +111,7 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 }
 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
-				   struct interrupt_info *inti)
+				   struct kvm_s390_interrupt_info *inti)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
 	int rc, exception = 0;
@@ -290,9 +290,9 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *inti;
 	int rc = 0;
 	if (atomic_read(&li->active)) {
@@ -408,9 +408,9 @@ void kvm_s390_idle_wakeup(unsigned long data)
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct float_interrupt *fi = vcpu->arch.local_int.float_int;
-	struct interrupt_info *n, *inti = NULL;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+	struct kvm_s390_interrupt_info *n, *inti = NULL;
 	int deliver;
 	__reset_intercept_indicators(vcpu);
@@ -465,8 +465,8 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 {
-	struct local_interrupt *li = &vcpu->arch.local_int;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_interrupt_info *inti;
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
@@ -487,9 +487,9 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
-	struct local_interrupt *li;
-	struct float_interrupt *fi;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_interrupt_info *inti;
 	int sigcpu;
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -544,8 +544,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 			 struct kvm_s390_interrupt *s390int)
 {
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
...
@@ -79,10 +79,6 @@ void kvm_arch_hardware_disable(void *garbage)
 {
 }
-void decache_vcpus_on_cpu(int cpu)
-{
-}
-
 int kvm_arch_hardware_setup(void)
 {
 	return 0;
@@ -198,6 +194,7 @@ struct kvm *kvm_arch_create_vm(void)
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	debug_unregister(kvm->arch.dbf);
+	kvm_free_physmem(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	kfree(kvm);
 	module_put(THIS_MODULE);
@@ -250,11 +247,16 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gbea = 1;
 }
+/* The current code can have up to 256 pages for virtio */
+#define VIRTIODESCSPACE (256ul * 4096ul)
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
-	vcpu->arch.sie_block->gmsor = 0x000000000000;
+	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
+				      vcpu->kvm->arch.guest_origin +
+				      VIRTIODESCSPACE - 1ul;
+	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
 	vcpu->arch.sie_block->ecb = 2;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
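
The magic guest storage limit is gone: the SIE origin and limit now follow the VM's memory configuration. A worked example under assumed placeholder values (origin 0, 128 MiB of guest memory) shows where the limit lands, with VIRTIODESCSPACE leaving room for the virtio descriptor pages above guest memory:

	#include <stdio.h>

	int main(void)
	{
		unsigned long origin  = 0;              /* assumed guest_origin */
		unsigned long memsize = 128ul << 20;    /* assumed guest_memsize */
		unsigned long virtio  = 256ul * 4096ul; /* VIRTIODESCSPACE */

		/* gmslm is the last addressable byte, hence the -1 */
		printf("gmsor=%#lx gmslm=%#lx\n",
		       origin, origin + memsize + virtio - 1ul);
		return 0;
	}
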
@@ -273,7 +275,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	if (!vcpu)
 		goto out_nomem;
-	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
+	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
+					get_zeroed_page(GFP_KERNEL);
 	if (!vcpu->arch.sie_block)
 		goto out_free_cpu;
@@ -672,6 +675,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn;
...
@@ -199,7 +199,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int cpus = 0;
 	int n;
...
@@ -45,7 +45,7 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	int rc;
 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -71,9 +71,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
 static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -108,9 +108,9 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	if (cpu_addr >= KVM_MAX_VCPUS)
@@ -169,9 +169,9 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 			     u64 *reg)
 {
-	struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
-	struct local_interrupt *li;
-	struct interrupt_info *inti;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_interrupt_info *inti;
 	int rc;
 	u8 tmp;
...
@@ -113,7 +113,7 @@ static void kvm_setup_secondary_clock(void)
 #endif
 #ifdef CONFIG_SMP
-void __init kvm_smp_prepare_boot_cpu(void)
+static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	WARN_ON(kvm_register_clock("primary cpu clock"));
 	native_smp_prepare_boot_cpu();
...
@@ -2,7 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module
 #
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
+		coalesced_mmio.o)
 ifeq ($(CONFIG_KVM_TRACE),y)
 common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
 endif
...
@@ -91,7 +91,7 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
 		c->gate = val;
 }
-int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm *kvm, int channel)
 {
 	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
@@ -193,19 +193,16 @@ static void pit_latch_status(struct kvm *kvm, int channel)
 	}
 }
-int __pit_timer_fn(struct kvm_kpit_state *ps)
+static int __pit_timer_fn(struct kvm_kpit_state *ps)
 {
 	struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
 	struct kvm_kpit_timer *pt = &ps->pit_timer;
-	atomic_inc(&pt->pending);
-	smp_mb__after_atomic_inc();
-	if (vcpu0) {
-		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-		if (waitqueue_active(&vcpu0->wq)) {
-			vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			wake_up_interruptible(&vcpu0->wq);
-		}
+	if (!atomic_inc_and_test(&pt->pending))
+		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
+	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
+		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+		wake_up_interruptible(&vcpu0->wq);
 	}
 	pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
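
The rewritten callback leans on atomic_inc_and_test(), which increments and reports whether the result became zero. A stand-alone sketch of those semantics (not the kernel's implementation) makes the negated test easier to read: the pending count starts at zero and only grows here, so !atomic_inc_and_test(&pt->pending) holds after every tick, and the request bit is set whenever at least one tick is outstanding:

	/* Sketch only: single-threaded stand-in for the atomic helper. */
	static int atomic_inc_and_test_sketch(long *v)
	{
		return ++*v == 0;	/* true only when the new value is 0 */
	}
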
@@ -308,6 +305,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 		create_pit_timer(&ps->pit_timer, val, 0);
 		break;
 	case 2:
+	case 3:
 		create_pit_timer(&ps->pit_timer, val, 1);
 		break;
 	default:
@@ -459,7 +457,8 @@ static void pit_ioport_read(struct kvm_io_device *this,
 	mutex_unlock(&pit_state->lock);
 }
-static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
+static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
+			int len, int is_write)
 {
 	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
 		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
@@ -500,7 +499,8 @@ static void speaker_ioport_read(struct kvm_io_device *this,
 	mutex_unlock(&pit_state->lock);
 }
-static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
+static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
+			    int len, int is_write)
 {
 	return (addr == KVM_SPEAKER_BASE_ADDRESS);
 }
@@ -575,7 +575,7 @@ void kvm_free_pit(struct kvm *kvm)
 	}
 }
-void __inject_pit_timer_intr(struct kvm *kvm)
+static void __inject_pit_timer_intr(struct kvm *kvm)
 {
 	mutex_lock(&kvm->lock);
 	kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
...
@@ -130,8 +130,10 @@ void kvm_pic_set_irq(void *opaque, int irq, int level)
 {
 	struct kvm_pic *s = opaque;
-	pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
-	pic_update_irq(s);
+	if (irq >= 0 && irq < PIC_NUM_PINS) {
+		pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+		pic_update_irq(s);
+	}
 }
 /*
@@ -346,7 +348,8 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1)
 	return s->elcr;
 }
-static int picdev_in_range(struct kvm_io_device *this, gpa_t addr)
+static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
+			   int len, int is_write)
 {
 	switch (addr) {
 	case 0x20:
...
@@ -30,6 +30,8 @@
 #include "ioapic.h"
 #include "lapic.h"
+#define PIC_NUM_PINS 16
 struct kvm;
 struct kvm_vcpu;
...
@@ -356,8 +356,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_SMI:
 		printk(KERN_DEBUG "Ignoring guest SMI\n");
 		break;
 	case APIC_DM_NMI:
-		printk(KERN_DEBUG "Ignoring guest NMI\n");
+		kvm_inject_nmi(vcpu);
 		break;
 	case APIC_DM_INIT:
@@ -572,6 +573,8 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 {
 	u32 val = 0;
+	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
 	if (offset >= LAPIC_MMIO_LENGTH)
 		return 0;
@@ -695,6 +698,8 @@ static void apic_mmio_write(struct kvm_io_device *this,
 	offset &= 0xff0;
+	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
 	switch (offset) {
 	case APIC_ID:	/* Local APIC ID */
 		apic_set_reg(apic, APIC_ID, val);
@@ -780,7 +785,8 @@ static void apic_mmio_write(struct kvm_io_device *this,
 }
-static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
+static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr,
+			   int len, int size)
 {
 	struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
 	int ret = 0;
@@ -939,8 +945,8 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 	int result = 0;
 	wait_queue_head_t *q = &apic->vcpu->wq;
-	atomic_inc(&apic->timer.pending);
-	set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
+	if(!atomic_inc_and_test(&apic->timer.pending))
+		set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
 	if (waitqueue_active(q)) {
 		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(q);
...
@@ -31,6 +31,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
+u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
...
@@ -66,7 +66,8 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 #endif
 #if defined(MMU_DEBUG) || defined(AUDIT)
-static int dbg = 1;
+static int dbg = 0;
+module_param(dbg, bool, 0644);
 #endif
 #ifndef MMU_DEBUG
@@ -776,6 +777,15 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 		BUG();
 }
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp)
+{
+	int i;
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+		sp->spt[i] = shadow_trap_nonpresent_pte;
+}
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
@@ -841,7 +851,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	vcpu->arch.mmu.prefetch_page(vcpu, sp);
+	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
+		vcpu->arch.mmu.prefetch_page(vcpu, sp);
+	else
+		nonpaging_prefetch_page(vcpu, sp);
 	return sp;
 }
@@ -917,14 +930,17 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	}
 	kvm_mmu_page_unlink_children(kvm, sp);
 	if (!sp->root_count) {
-		if (!sp->role.metaphysical)
+		if (!sp->role.metaphysical && !sp->role.invalid)
 			unaccount_shadowed(kvm, sp->gfn);
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else {
+		int invalid = sp->role.invalid;
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 		sp->role.invalid = 1;
 		kvm_reload_remote_mmus(kvm);
+		if (!sp->role.metaphysical && !invalid)
+			unaccount_shadowed(kvm, sp->gfn);
 	}
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
@@ -1103,7 +1119,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		mark_page_dirty(vcpu->kvm, gfn);
 	pgprintk("%s: setting spte %llx\n", __func__, spte);
-	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
 	set_shadow_pte(shadow_pte, spte);
@@ -1122,8 +1138,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		else
 			kvm_release_pfn_clean(pfn);
 	}
-	if (!ptwrite || !*ptwrite)
+	if (speculative) {
 		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_gfn = gfn;
+	}
 }
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1171,9 +1189,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			return -ENOMEM;
 		}
-		table[index] = __pa(new_table->spt)
-			| PT_PRESENT_MASK | PT_WRITABLE_MASK
-			| shadow_user_mask | shadow_x_mask;
+		set_shadow_pte(&table[index],
+			       __pa(new_table->spt)
+			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+			       | shadow_user_mask | shadow_x_mask);
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1211,15 +1230,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 }
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *sp)
-{
-	int i;
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-		sp->spt[i] = shadow_trap_nonpresent_pte;
-}
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -1671,6 +1681,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	vcpu->arch.update_pte.pfn = pfn;
 }
+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u64 *spte = vcpu->arch.last_pte_updated;
+	if (spte
+	    && vcpu->arch.last_pte_gfn == gfn
+	    && shadow_accessed_mask
+	    && !(*spte & shadow_accessed_mask)
+	    && is_shadow_present_pte(*spte))
+		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1694,6 +1716,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_access_page(vcpu, gfn);
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
@@ -1948,7 +1971,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	kvm_flush_remote_tlbs(kvm);
 }
-void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
 {
 	struct kvm_mmu_page *page;
@@ -1968,6 +1991,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int npages;
+		if (!down_read_trylock(&kvm->slots_lock))
+			continue;
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
 			 kvm->arch.n_free_mmu_pages;
@@ -1980,6 +2005,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 			nr_to_scan--;
 		spin_unlock(&kvm->mmu_lock);
+		up_read(&kvm->slots_lock);
 	}
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
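
The shrinker now brackets its scan with slots_lock, using a trylock so it cannot block or deadlock against a writer that is reconfiguring memslots; contended VMs are simply skipped. Reduced to its shape with POSIX locks, the discipline looks roughly like this (a sketch under assumed types, not kernel code):

	#include <pthread.h>

	struct vm {
		pthread_rwlock_t slots_lock;	/* outer: memslot layout */
		pthread_mutex_t  mmu_lock;	/* inner: shadow page lists */
		int n_alloc, n_free;
	};

	static int shrink_one(struct vm *vm)
	{
		int npages;

		if (pthread_rwlock_tryrdlock(&vm->slots_lock) != 0)
			return 0;		/* contended: skip this VM */
		pthread_mutex_lock(&vm->mmu_lock);
		npages = vm->n_alloc - vm->n_free;
		pthread_mutex_unlock(&vm->mmu_lock);
		pthread_rwlock_unlock(&vm->slots_lock);
		return npages;
	}
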
...
@@ -15,7 +15,8 @@
 #define PT_USER_MASK (1ULL << 2)
 #define PT_PWT_MASK (1ULL << 3)
 #define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_ACCESSED_SHIFT 5
+#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_MASK (1ULL << 6)
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)
...
@@ -460,8 +460,9 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
-	int i, offset = 0, r = 0;
-	pt_element_t pt;
+	int i, j, offset, r;
+	pt_element_t pt[256 / sizeof(pt_element_t)];
+	gpa_t pte_gpa;
 	if (sp->role.metaphysical
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
@@ -469,19 +470,20 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		return;
 	}
-	if (PTTYPE == 32)
+	pte_gpa = gfn_to_gpa(sp->gfn);
+	if (PTTYPE == 32) {
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+		pte_gpa += offset * sizeof(pt_element_t);
+	}
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
-		pte_gpa += (i+offset) * sizeof(pt_element_t);
-		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
-					  sizeof(pt_element_t));
-		if (r || is_present_pte(pt))
-			sp->spt[i] = shadow_trap_nonpresent_pte;
-		else
-			sp->spt[i] = shadow_notrap_nonpresent_pte;
+	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
+		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
+		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
+		for (j = 0; j < ARRAY_SIZE(pt); ++j)
+			if (r || is_present_pte(pt[j]))
+				sp->spt[i+j] = shadow_trap_nonpresent_pte;
+			else
+				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
 	}
 }
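
The batching arithmetic behind this rewrite, assuming 8-byte 64-bit guest PTEs: the 256-byte on-stack buffer holds 32 entries, so filling all 512 shadow slots now costs 512 / 32 = 16 calls to kvm_read_guest_atomic() instead of 512 single-entry reads. The constants reduce to:

	/* Assumed sizes for the 64-bit case; a 4-byte PTTYPE == 32 entry
	 * doubles BATCH and halves READS. */
	enum {
		PTE_SIZE     = 8,
		BUF_BYTES    = 256,
		ENT_PER_PAGE = 512,
		BATCH        = BUF_BYTES / PTE_SIZE,	/* 32 entries/read */
		READS        = ENT_PER_PAGE / BATCH	/* 16 guest reads */
	};
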
...
@@ -27,6 +27,8 @@
 #include <asm/desc.h>
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
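
The new __ex() wrapper routes the SVM instructions below through KVM's fault-on-reboot fixup. A paraphrased sketch of the pattern (an assumed shape, not the kernel's exact macro): the wrapped instruction gets an exception-table entry, so if it faults because the virtualization extensions were already switched off by an emergency reboot, control lands in a fixup handler instead of an oops. The kvm_fault_fixup label here is hypothetical:

	/* Sketch: tag instruction 666 with an __ex_table entry whose fixup
	 * (667) runs if the instruction faults; .quad assumes 64-bit. */
	#define __ex_sketch(insn)				\
		"666: " insn "\n\t"				\
		".pushsection .fixup, \"ax\"\n"			\
		"667: jmp kvm_fault_fixup\n\t"			\
		".popsection\n\t"				\
		".pushsection __ex_table, \"a\"\n\t"		\
		" .quad 666b, 667b\n\t"				\
		".popsection"
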
@@ -129,17 +131,17 @@ static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
 static inline void clgi(void)
 {
-	asm volatile (SVM_CLGI);
+	asm volatile (__ex(SVM_CLGI));
 }
 static inline void stgi(void)
 {
-	asm volatile (SVM_STGI);
+	asm volatile (__ex(SVM_STGI));
 }
 static inline void invlpga(unsigned long addr, u32 asid)
 {
-	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
+	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
 }
 static inline unsigned long kvm_read_cr2(void)
@@ -270,19 +272,11 @@ static int has_svm(void)
 static void svm_hardware_disable(void *garbage)
 {
-	struct svm_cpu_data *svm_data
-		= per_cpu(svm_data, raw_smp_processor_id());
-	if (svm_data) {
-		uint64_t efer;
-		wrmsrl(MSR_VM_HSAVE_PA, 0);
-		rdmsrl(MSR_EFER, efer);
-		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
-		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-		__free_page(svm_data->save_area);
-		kfree(svm_data);
-	}
+	uint64_t efer;
+	wrmsrl(MSR_VM_HSAVE_PA, 0);
+	rdmsrl(MSR_EFER, efer);
+	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
 }
 static void svm_hardware_enable(void *garbage)
@@ -321,6 +315,19 @@ static void svm_hardware_enable(void *garbage)
 			       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
 }
+static void svm_cpu_uninit(int cpu)
+{
+	struct svm_cpu_data *svm_data
+		= per_cpu(svm_data, raw_smp_processor_id());
+	if (!svm_data)
+		return;
+	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+	__free_page(svm_data->save_area);
+	kfree(svm_data);
+}
 static int svm_cpu_init(int cpu)
 {
 	struct svm_cpu_data *svm_data;
@@ -458,6 +465,11 @@ static __init int svm_hardware_setup(void)
 static __exit void svm_hardware_unsetup(void)
 {
+	int cpu;
+	for_each_online_cpu(cpu)
+		svm_cpu_uninit(cpu);
 	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
 	iopm_base = 0;
 }
@@ -707,10 +719,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	rdtscll(vcpu->arch.host_tsc);
 }
-static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
-{
-}
-
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -949,7 +957,9 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	return to_svm(vcpu)->db_regs[dr];
+	unsigned long val = to_svm(vcpu)->db_regs[dr];
+	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
+	return val;
 }
 static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
@@ -1004,6 +1014,16 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
+	if (!npt_enabled)
+		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
+			    (u32)fault_address, (u32)(fault_address >> 32),
+			    handler);
+	else
+		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
+			    (u32)fault_address, (u32)(fault_address >> 32),
+			    handler);
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
@@ -1081,6 +1101,19 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
 }
+static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	KVMTRACE_0D(NMI, &svm->vcpu, handler);
+	return 1;
+}
+static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	++svm->vcpu.stat.irq_exits;
+	KVMTRACE_0D(INTR, &svm->vcpu, handler);
+	return 1;
+}
 static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	return 1;
@@ -1219,6 +1252,9 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else {
+		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
+			    (u32)(data >> 32), handler);
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
@@ -1284,16 +1320,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	case MSR_K7_EVNTSEL1:
 	case MSR_K7_EVNTSEL2:
 	case MSR_K7_EVNTSEL3:
+	case MSR_K7_PERFCTR0:
+	case MSR_K7_PERFCTR1:
+	case MSR_K7_PERFCTR2:
+	case MSR_K7_PERFCTR3:
 		/*
-		 * only support writing 0 to the performance counters for now
-		 * to make Windows happy. Should be replaced by a real
-		 * performance counter emulation later.
+		 * Just discard all writes to the performance counters; this
+		 * should keep both older linux and windows 64-bit guests
+		 * happy
 		 */
-		if (data != 0)
-			goto unhandled;
+		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	default:
-	unhandled:
 		return kvm_set_msr_common(vcpu, ecx, data);
 	}
 	return 0;
@@ -1304,6 +1343,10 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
+		    handler);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
@@ -1323,6 +1366,8 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int interrupt_window_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
+	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
 	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -1364,8 +1409,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
 	[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
-	[SVM_EXIT_INTR] = nop_on_interception,
-	[SVM_EXIT_NMI] = nop_on_interception,
+	[SVM_EXIT_INTR] = intr_interception,
+	[SVM_EXIT_NMI] = nmi_interception,
 	[SVM_EXIT_SMI] = nop_on_interception,
 	[SVM_EXIT_INIT] = nop_on_interception,
 	[SVM_EXIT_VINTR] = interrupt_window_interception,
@@ -1397,6 +1442,9 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;
+	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
+		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
 	if (npt_enabled) {
 		int mmu_reload = 0;
 		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1470,6 +1518,8 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
 	struct vmcb_control_area *control;
+	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
 	control = &svm->vmcb->control;
 	control->int_vector = irq;
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -1660,9 +1710,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sync_lapic_to_cr8(vcpu);
 	save_host_msrs(vcpu);
-	fs_selector = read_fs();
-	gs_selector = read_gs();
-	ldt_selector = read_ldt();
+	fs_selector = kvm_read_fs();
+	gs_selector = kvm_read_gs();
+	ldt_selector = kvm_read_ldt();
 	svm->host_cr2 = kvm_read_cr2();
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
@@ -1716,17 +1766,17 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		/* Enter guest mode */
 		"push %%rax \n\t"
 		"mov %c[vmcb](%[svm]), %%rax \n\t"
-		SVM_VMLOAD "\n\t"
-		SVM_VMRUN "\n\t"
-		SVM_VMSAVE "\n\t"
+		__ex(SVM_VMLOAD) "\n\t"
+		__ex(SVM_VMRUN) "\n\t"
+		__ex(SVM_VMSAVE) "\n\t"
 		"pop %%rax \n\t"
 #else
 		/* Enter guest mode */
 		"push %%eax \n\t"
 		"mov %c[vmcb](%[svm]), %%eax \n\t"
-		SVM_VMLOAD "\n\t"
-		SVM_VMRUN "\n\t"
-		SVM_VMSAVE "\n\t"
+		__ex(SVM_VMLOAD) "\n\t"
+		__ex(SVM_VMRUN) "\n\t"
+		__ex(SVM_VMSAVE) "\n\t"
 		"pop %%eax \n\t"
 #endif
@@ -1795,9 +1845,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);
-	load_fs(fs_selector);
-	load_gs(gs_selector);
-	load_ldt(ldt_selector);
+	kvm_load_fs(fs_selector);
+	kvm_load_gs(gs_selector);
+	kvm_load_ldt(ldt_selector);
 	load_host_msrs(vcpu);
 	reload_tss(vcpu);
@@ -1889,7 +1939,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.prepare_guest_switch = svm_prepare_guest_switch,
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
-	.vcpu_decache = svm_vcpu_decache,
 	.set_guest_debug = svm_guest_debug,
 	.get_msr = svm_get_msr,
...
This diff is collapsed.
@@ -40,6 +40,7 @@
 #define CPU_BASED_CR8_LOAD_EXITING 0x00080000
 #define CPU_BASED_CR8_STORE_EXITING 0x00100000
 #define CPU_BASED_TPR_SHADOW 0x00200000
+#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
 #define CPU_BASED_MOV_DR_EXITING 0x00800000
 #define CPU_BASED_UNCOND_IO_EXITING 0x01000000
 #define CPU_BASED_USE_IO_BITMAPS 0x02000000
@@ -216,7 +217,7 @@ enum vmcs_field {
 #define EXIT_REASON_TRIPLE_FAULT 2
 #define EXIT_REASON_PENDING_INTERRUPT 7
+#define EXIT_REASON_NMI_WINDOW 8
 #define EXIT_REASON_TASK_SWITCH 9
 #define EXIT_REASON_CPUID 10
 #define EXIT_REASON_HLT 12
@@ -251,7 +252,9 @@ enum vmcs_field {
 #define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
 #define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
 #define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
+#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */
 #define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
+#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000
 #define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
 #define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
@@ -259,9 +262,16 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
 #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
+#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
 #define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
+/* GUEST_INTERRUPTIBILITY_INFO flags. */
+#define GUEST_INTR_STATE_STI 0x00000001
+#define GUEST_INTR_STATE_MOV_SS 0x00000002
+#define GUEST_INTR_STATE_SMI 0x00000004
+#define GUEST_INTR_STATE_NMI 0x00000008
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
...
This diff is collapsed.
This diff is collapsed.
@@ -38,6 +38,7 @@
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 /* define exit reasons from vmm to kvm*/
 #define EXIT_REASON_VM_PANIC 0
@@ -521,4 +522,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
+static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
 #endif
@@ -31,6 +31,8 @@
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 /* We don't currently support large pages. */
 #define KVM_PAGES_PER_HPAGE (1<<31)
...
@@ -62,7 +62,7 @@ struct sca_block {
 #define CPUSTAT_J 0x00000002
 #define CPUSTAT_P 0x00000001
-struct sie_block {
+struct kvm_s390_sie_block {
 	atomic_t cpuflags;		/* 0x0000 */
 	__u32 prefix;			/* 0x0004 */
 	__u8 reserved8[32];		/* 0x0008 */
@@ -140,14 +140,14 @@ struct kvm_vcpu_stat {
 	u32 diagnose_44;
 };
-struct io_info {
+struct kvm_s390_io_info {
 	__u16 subchannel_id;		/* 0x0b8 */
 	__u16 subchannel_nr;		/* 0x0ba */
 	__u32 io_int_parm;		/* 0x0bc */
 	__u32 io_int_word;		/* 0x0c0 */
 };
-struct ext_info {
+struct kvm_s390_ext_info {
 	__u32 ext_params;
 	__u64 ext_params2;
 };
@@ -160,22 +160,22 @@ struct ext_info {
 #define PGM_SPECIFICATION 0x06
 #define PGM_DATA 0x07
-struct pgm_info {
+struct kvm_s390_pgm_info {
 	__u16 code;
 };
-struct prefix_info {
+struct kvm_s390_prefix_info {
 	__u32 address;
 };
-struct interrupt_info {
+struct kvm_s390_interrupt_info {
 	struct list_head list;
 	u64 type;
 	union {
-		struct io_info io;
-		struct ext_info ext;
-		struct pgm_info pgm;
-		struct prefix_info prefix;
+		struct kvm_s390_io_info io;
+		struct kvm_s390_ext_info ext;
+		struct kvm_s390_pgm_info pgm;
+		struct kvm_s390_prefix_info prefix;
 	};
 };
@@ -183,35 +183,35 @@ struct interrupt_info {
 #define ACTION_STORE_ON_STOP 1
 #define ACTION_STOP_ON_STOP 2
-struct local_interrupt {
+struct kvm_s390_local_interrupt {
 	spinlock_t lock;
 	struct list_head list;
 	atomic_t active;
-	struct float_interrupt *float_int;
+	struct kvm_s390_float_interrupt *float_int;
 	int timer_due; /* event indicator for waitqueue below */
 	wait_queue_head_t wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
 };
-struct float_interrupt {
+struct kvm_s390_float_interrupt {
 	spinlock_t lock;
 	struct list_head list;
 	atomic_t active;
 	int next_rr_cpu;
 	unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
-	struct local_interrupt *local_int[64];
+	struct kvm_s390_local_interrupt *local_int[64];
 };
 struct kvm_vcpu_arch {
-	struct sie_block *sie_block;
+	struct kvm_s390_sie_block *sie_block;
 	unsigned long guest_gprs[16];
 	s390_fp_regs host_fpregs;
 	unsigned int host_acrs[NUM_ACRS];
 	s390_fp_regs guest_fpregs;
 	unsigned int guest_acrs[NUM_ACRS];
-	struct local_interrupt local_int;
+	struct kvm_s390_local_interrupt local_int;
 	struct timer_list ckc_timer;
 	union {
 		cpuid_t cpu_id;
@@ -228,8 +228,8 @@ struct kvm_arch{
 	unsigned long guest_memsize;
 	struct sca_block *sca;
 	debug_info_t *dbf;
-	struct float_interrupt float_int;
+	struct kvm_s390_float_interrupt float_int;
 };
-extern int sie64a(struct sie_block *, __u64 *);
+extern int sie64a(struct kvm_s390_sie_block *, __u64 *);
 #endif
@@ -228,5 +228,6 @@ struct kvm_pit_state {
 #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
 #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
 #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
 #endif
@@ -27,6 +27,7 @@
 #define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
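
With KVM_PIO_PAGE_OFFSET 1 and the new KVM_COALESCED_MMIO_PAGE_OFFSET 2, the vcpu mapping is laid out as page 0 = struct kvm_run, page 1 = PIO data, page 2 = the coalesced-MMIO ring. A hypothetical userspace helper reaching the ring in one mapping (a sketch; real code would size the mapping via KVM_GET_VCPU_MMAP_SIZE):

	#include <stdint.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Sketch: map kvm_run + PIO + coalesced ring, return the ring. */
	static void *map_coalesced_ring(int vcpu_fd)
	{
		long psz = sysconf(_SC_PAGESIZE);
		uint8_t *run = mmap(0, 3 * psz, PROT_READ | PROT_WRITE,
				    MAP_SHARED, vcpu_fd, 0);

		if (run == MAP_FAILED)
			return 0;
		return run + 2 * psz;	/* KVM_COALESCED_MMIO_PAGE_OFFSET */
	}
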
...@@ -79,6 +80,7 @@ ...@@ -79,6 +80,7 @@
#define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25 #define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40 #define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_VAR_MTRR 8
extern spinlock_t kvm_lock; extern spinlock_t kvm_lock;
extern struct list_head vm_list; extern struct list_head vm_list;
...@@ -109,12 +111,12 @@ enum { ...@@ -109,12 +111,12 @@ enum {
}; };
enum { enum {
VCPU_SREG_ES,
VCPU_SREG_CS, VCPU_SREG_CS,
VCPU_SREG_SS,
VCPU_SREG_DS, VCPU_SREG_DS,
VCPU_SREG_ES,
VCPU_SREG_FS, VCPU_SREG_FS,
VCPU_SREG_GS, VCPU_SREG_GS,
VCPU_SREG_SS,
VCPU_SREG_TR, VCPU_SREG_TR,
VCPU_SREG_LDTR, VCPU_SREG_LDTR,
}; };
...@@ -243,6 +245,7 @@ struct kvm_vcpu_arch { ...@@ -243,6 +245,7 @@ struct kvm_vcpu_arch {
gfn_t last_pt_write_gfn; gfn_t last_pt_write_gfn;
int last_pt_write_count; int last_pt_write_count;
u64 *last_pte_updated; u64 *last_pte_updated;
gfn_t last_pte_gfn;
struct { struct {
gfn_t gfn; /* presumed gfn during guest pte update */ gfn_t gfn; /* presumed gfn during guest pte update */
...@@ -287,6 +290,10 @@ struct kvm_vcpu_arch { ...@@ -287,6 +290,10 @@ struct kvm_vcpu_arch {
unsigned int hv_clock_tsc_khz; unsigned int hv_clock_tsc_khz;
unsigned int time_offset; unsigned int time_offset;
struct page *time_page; struct page *time_page;
bool nmi_pending;
u64 mtrr[0x100];
}; };
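A plausible reading of the two new fields: nmi_pending queues an NMI for injection at the next guest entry, and the 0x100-entry mtrr array shadows the MTRR MSR range 0x200-0x2ff so that guest writes can be read back, without KVM applying them to host memory types. A sketch of the store path under that assumption (the range check is simplified; the real validity test would live elsewhere):

/* Sketch: record, rather than apply, a guest WRMSR to an MTRR MSR. */
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        if (msr < 0x200 || msr >= 0x300)
                return 1;                       /* not in the shadowed range */
        vcpu->arch.mtrr[msr - 0x200] = data;    /* read back by the get path */
        return 0;
}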
struct kvm_mem_alias { struct kvm_mem_alias {
...@@ -344,6 +351,7 @@ struct kvm_vcpu_stat { ...@@ -344,6 +351,7 @@ struct kvm_vcpu_stat {
u32 mmio_exits; u32 mmio_exits;
u32 signal_exits; u32 signal_exits;
u32 irq_window_exits; u32 irq_window_exits;
u32 nmi_window_exits;
u32 halt_exits; u32 halt_exits;
u32 halt_wakeup; u32 halt_wakeup;
u32 request_irq_exits; u32 request_irq_exits;
...@@ -379,7 +387,6 @@ struct kvm_x86_ops { ...@@ -379,7 +387,6 @@ struct kvm_x86_ops {
void (*prepare_guest_switch)(struct kvm_vcpu *vcpu); void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu); void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*vcpu_decache)(struct kvm_vcpu *vcpu);
int (*set_guest_debug)(struct kvm_vcpu *vcpu, int (*set_guest_debug)(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg); struct kvm_debug_guest *dbg);
...@@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, ...@@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value); unsigned long value);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
int type_bits, int seg);
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
...@@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); ...@@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code); u32 error_code);
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void fx_init(struct kvm_vcpu *vcpu); void fx_init(struct kvm_vcpu *vcpu);
int emulator_read_std(unsigned long addr, int emulator_read_std(unsigned long addr,
...@@ -554,55 +567,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) ...@@ -554,55 +567,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
return (struct kvm_mmu_page *)page_private(page); return (struct kvm_mmu_page *)page_private(page);
} }
static inline u16 read_fs(void) static inline u16 kvm_read_fs(void)
{ {
u16 seg; u16 seg;
asm("mov %%fs, %0" : "=g"(seg)); asm("mov %%fs, %0" : "=g"(seg));
return seg; return seg;
} }
static inline u16 read_gs(void) static inline u16 kvm_read_gs(void)
{ {
u16 seg; u16 seg;
asm("mov %%gs, %0" : "=g"(seg)); asm("mov %%gs, %0" : "=g"(seg));
return seg; return seg;
} }
static inline u16 read_ldt(void) static inline u16 kvm_read_ldt(void)
{ {
u16 ldt; u16 ldt;
asm("sldt %0" : "=g"(ldt)); asm("sldt %0" : "=g"(ldt));
return ldt; return ldt;
} }
static inline void load_fs(u16 sel) static inline void kvm_load_fs(u16 sel)
{ {
asm("mov %0, %%fs" : : "rm"(sel)); asm("mov %0, %%fs" : : "rm"(sel));
} }
static inline void load_gs(u16 sel) static inline void kvm_load_gs(u16 sel)
{ {
asm("mov %0, %%gs" : : "rm"(sel)); asm("mov %0, %%gs" : : "rm"(sel));
} }
#ifndef load_ldt static inline void kvm_load_ldt(u16 sel)
static inline void load_ldt(u16 sel)
{ {
asm("lldt %0" : : "rm"(sel)); asm("lldt %0" : : "rm"(sel));
} }
#endif
static inline void get_idt(struct descriptor_table *table) static inline void kvm_get_idt(struct descriptor_table *table)
{ {
asm("sidt %0" : "=m"(*table)); asm("sidt %0" : "=m"(*table));
} }
static inline void get_gdt(struct descriptor_table *table) static inline void kvm_get_gdt(struct descriptor_table *table)
{ {
asm("sgdt %0" : "=m"(*table)); asm("sgdt %0" : "=m"(*table));
} }
static inline unsigned long read_tr_base(void) static inline unsigned long kvm_read_tr_base(void)
{ {
u16 tr; u16 tr;
asm("str %0" : "=g"(tr)); asm("str %0" : "=g"(tr));
...@@ -619,17 +630,17 @@ static inline unsigned long read_msr(unsigned long msr) ...@@ -619,17 +630,17 @@ static inline unsigned long read_msr(unsigned long msr)
} }
#endif #endif
static inline void fx_save(struct i387_fxsave_struct *image) static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{ {
asm("fxsave (%0)":: "r" (image)); asm("fxsave (%0)":: "r" (image));
} }
static inline void fx_restore(struct i387_fxsave_struct *image) static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{ {
asm("fxrstor (%0)":: "r" (image)); asm("fxrstor (%0)":: "r" (image));
} }
static inline void fx_finit(void) static inline void kvm_fx_finit(void)
{ {
asm("finit"); asm("finit");
} }
...@@ -691,4 +702,28 @@ enum { ...@@ -691,4 +702,28 @@ enum {
trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
vcpu, 0, 0, 0, 0, 0, 0) vcpu, 0, 0, 0, 0, 0, 0)
#ifdef CONFIG_64BIT
#define KVM_EX_ENTRY ".quad"
#else
#define KVM_EX_ENTRY ".long"
#endif
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
* Trap the fault and ignore the instruction if that happens.
*/
asmlinkage void kvm_handle_fault_on_reboot(void);
#define __kvm_handle_fault_on_reboot(insn) \
"666: " insn "\n\t" \
".pushsection .text.fixup, \"ax\" \n" \
"667: \n\t" \
"push $666b \n\t" \
"jmp kvm_handle_fault_on_reboot \n\t" \
".popsection \n\t" \
".pushsection __ex_table, \"a\" \n\t" \
KVM_EX_ENTRY " 666b, 667b \n\t" \
".popsection"
#endif #endif
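For context, this is roughly how a call site wraps a virtualization instruction with the macro above; the __ex() shorthand and the vmclear operand form are illustrative, not part of this diff:

#define __ex(insn) __kvm_handle_fault_on_reboot(insn)

/* Sketch: if VMCLEAR faults because a reboot has already disabled VMX,
 * the exception-table fixup lands in kvm_handle_fault_on_reboot()
 * instead of producing an oops. */
static void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);
        u8 error;

        asm volatile(__ex("vmclear %1") "; setna %0"
                     : "=g"(error) : "m"(phys_addr) : "cc", "memory");
        if (error)
                printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
                       vmcs, phys_addr);
}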
...@@ -124,7 +124,8 @@ struct decode_cache { ...@@ -124,7 +124,8 @@ struct decode_cache {
u8 rex_prefix; u8 rex_prefix;
struct operand src; struct operand src;
struct operand dst; struct operand dst;
unsigned long *override_base; bool has_seg_override;
u8 seg_override;
unsigned int d; unsigned int d;
unsigned long regs[NR_VCPU_REGS]; unsigned long regs[NR_VCPU_REGS];
unsigned long eip; unsigned long eip;
...@@ -134,6 +135,7 @@ struct decode_cache { ...@@ -134,6 +135,7 @@ struct decode_cache {
u8 modrm_reg; u8 modrm_reg;
u8 modrm_rm; u8 modrm_rm;
u8 use_modrm_ea; u8 use_modrm_ea;
bool rip_relative;
unsigned long modrm_ea; unsigned long modrm_ea;
void *modrm_ptr; void *modrm_ptr;
unsigned long modrm_val; unsigned long modrm_val;
...@@ -150,12 +152,7 @@ struct x86_emulate_ctxt { ...@@ -150,12 +152,7 @@ struct x86_emulate_ctxt {
/* Emulated execution mode, represented by an X86EMUL_MODE value. */ /* Emulated execution mode, represented by an X86EMUL_MODE value. */
int mode; int mode;
unsigned long cs_base; u32 cs_base;
unsigned long ds_base;
unsigned long es_base;
unsigned long ss_base;
unsigned long gs_base;
unsigned long fs_base;
/* decode cache */ /* decode cache */
......
...@@ -173,6 +173,30 @@ struct kvm_run { ...@@ -173,6 +173,30 @@ struct kvm_run {
}; };
}; };
/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
struct kvm_coalesced_mmio_zone {
__u64 addr;
__u32 size;
__u32 pad;
};
struct kvm_coalesced_mmio {
__u64 phys_addr;
__u32 len;
__u32 pad;
__u8 data[8];
};
struct kvm_coalesced_mmio_ring {
__u32 first, last;
struct kvm_coalesced_mmio coalesced_mmio[0];
};
#define KVM_COALESCED_MMIO_MAX \
((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
sizeof(struct kvm_coalesced_mmio))
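The ring is a single-producer/single-consumer queue: the kernel writes entries and advances last, userspace consumes entries and advances first, and no lock is needed because each side only stores to its own index. A consumer-side sketch; handle_mmio_write() is a stand-in for the VMM's device dispatch, not part of this patch:

/* Sketch: drain the ring after KVM_RUN returns, before handling the
 * exit itself, so the batched writes are replayed in program order. */
extern void handle_mmio_write(__u64 addr, const void *data, __u32 len);

static void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring)
{
        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *m =
                        &ring->coalesced_mmio[ring->first];

                handle_mmio_write(m->phys_addr, m->data, m->len);
                /* release the slot only after the data has been read */
                ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
}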
/* for KVM_TRANSLATE */ /* for KVM_TRANSLATE */
struct kvm_translation { struct kvm_translation {
/* in */ /* in */
...@@ -294,14 +318,14 @@ struct kvm_trace_rec { ...@@ -294,14 +318,14 @@ struct kvm_trace_rec {
__u32 vcpu_id; __u32 vcpu_id;
union { union {
struct { struct {
__u32 cycle_lo, cycle_hi; __u64 cycle_u64;
__u32 extra_u32[KVM_TRC_EXTRA_MAX]; __u32 extra_u32[KVM_TRC_EXTRA_MAX];
} cycle; } cycle;
struct { struct {
__u32 extra_u32[KVM_TRC_EXTRA_MAX]; __u32 extra_u32[KVM_TRC_EXTRA_MAX];
} nocycle; } nocycle;
} u; } u;
}; } __attribute__((packed));
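Merging cycle_lo/cycle_hi into a single __u64 changes the member's natural alignment: the record header is three 32-bit words, so an unpacked 64-bit field would start at offset 16 instead of 12 and silently grow the record that userspace parses. The packed attribute pins the old byte layout. A standalone illustration of the effect, with userspace types:

#include <stdint.h>

/* Sketch: without packed, the uint64_t would be placed at offset 16,
 * adding 4 bytes of padding after the three-word header. */
struct rec {
        uint32_t header, pid, vcpu_id;
        uint64_t cycle_u64;
} __attribute__((packed));

_Static_assert(sizeof(struct rec) == 20, "cycle_u64 starts at offset 12");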
#define KVMIO 0xAE #define KVMIO 0xAE
...@@ -346,6 +370,7 @@ struct kvm_trace_rec { ...@@ -346,6 +370,7 @@ struct kvm_trace_rec {
#define KVM_CAP_NOP_IO_DELAY 12 #define KVM_CAP_NOP_IO_DELAY 12
#define KVM_CAP_PV_MMU 13 #define KVM_CAP_PV_MMU 13
#define KVM_CAP_MP_STATE 14 #define KVM_CAP_MP_STATE 14
#define KVM_CAP_COALESCED_MMIO 15
/* /*
* ioctls for VM fds * ioctls for VM fds
...@@ -371,6 +396,10 @@ struct kvm_trace_rec { ...@@ -371,6 +396,10 @@ struct kvm_trace_rec {
#define KVM_CREATE_PIT _IO(KVMIO, 0x64) #define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
#define KVM_REGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
/* /*
* ioctls for vcpu fds * ioctls for vcpu fds
......
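From userspace, the flow is: probe the capability (KVM_CHECK_EXTENSION returns the ring's page offset, or 0 if absent), then declare zones on the VM fd. A sketch; the guest-physical range is purely illustrative:

/* Sketch: coalesce writes to a hypothetical 64 KiB framebuffer window. */
struct kvm_coalesced_mmio_zone zone = {
        .addr = 0xa0000,        /* illustrative guest-physical address */
        .size = 0x10000,
};

if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO) > 0 &&
    ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
        perror("KVM_REGISTER_COALESCED_MMIO");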
...@@ -52,7 +52,8 @@ struct kvm_io_bus { ...@@ -52,7 +52,8 @@ struct kvm_io_bus {
void kvm_io_bus_init(struct kvm_io_bus *bus); void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus); void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr); struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus, void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
struct kvm_io_device *dev); struct kvm_io_device *dev);
...@@ -116,6 +117,10 @@ struct kvm { ...@@ -116,6 +117,10 @@ struct kvm {
struct kvm_vm_stat stat; struct kvm_vm_stat stat;
struct kvm_arch arch; struct kvm_arch arch;
atomic_t users_count; atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
}; };
/* The guest did something we don't support. */ /* The guest did something we don't support. */
...@@ -135,9 +140,6 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); ...@@ -135,9 +140,6 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu);
void decache_vcpus_on_cpu(int cpu);
int kvm_init(void *opaque, unsigned int vcpu_size, int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module); struct module *module);
void kvm_exit(void); void kvm_exit(void);
...@@ -166,6 +168,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm, ...@@ -166,6 +168,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old, struct kvm_memory_slot old,
int user_alloc); int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
......
/*
* KVM coalesced MMIO
*
* Copyright (c) 2008 Bull S.A.S.
*
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
*/
#include "iodev.h"
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include "coalesced_mmio.h"
static int coalesced_mmio_in_range(struct kvm_io_device *this,
gpa_t addr, int len, int is_write)
{
struct kvm_coalesced_mmio_dev *dev =
(struct kvm_coalesced_mmio_dev *)this->private;
struct kvm_coalesced_mmio_zone *zone;
int next;
int i;
if (!is_write)
return 0;
/* kvm->lock is taken by the caller and must not be released before
* dev.read/write
*/
/* Can this write be batched? */
/* last is the first free entry;
* stop if advancing it would reach first, the oldest used entry.
* One entry always stays unused so a full ring can be told apart
* from an empty one.
*/
next = (dev->kvm->coalesced_mmio_ring->last + 1) %
KVM_COALESCED_MMIO_MAX;
if (next == dev->kvm->coalesced_mmio_ring->first) {
/* full */
return 0;
}
/* Is the access inside a registered zone? */
for (i = 0; i < dev->nb_zones; i++) {
zone = &dev->zone[i];
/* match only if (addr, len) is fully contained in
* (zone->addr, zone->size)
*/
if (zone->addr <= addr &&
addr + len <= zone->addr + zone->size)
return 1;
}
return 0;
}
static void coalesced_mmio_write(struct kvm_io_device *this,
gpa_t addr, int len, const void *val)
{
struct kvm_coalesced_mmio_dev *dev =
(struct kvm_coalesced_mmio_dev *)this->private;
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
/* kvm->lock must be taken by the caller before the call to in_range() */
/* copy data in first free entry of the ring */
ring->coalesced_mmio[ring->last].phys_addr = addr;
ring->coalesced_mmio[ring->last].len = len;
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
kfree(this);
}
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct kvm_coalesced_mmio_dev *dev;
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->dev.write = coalesced_mmio_write;
dev->dev.in_range = coalesced_mmio_in_range;
dev->dev.destructor = coalesced_mmio_destructor;
dev->dev.private = dev;
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;
kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
return 0;
}
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
if (dev == NULL)
return -EINVAL;
mutex_lock(&kvm->lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
mutex_unlock(&kvm->lock);
return -ENOBUFS;
}
dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;
mutex_unlock(&kvm->lock);
return 0;
}
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
int i;
struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
struct kvm_coalesced_mmio_zone *z;
if (dev == NULL)
return -EINVAL;
mutex_lock(&kvm->lock);
i = dev->nb_zones;
while (i) {
z = &dev->zone[i - 1];
/* unregister every zone fully contained in
* (zone->addr, zone->size)
*/
if (zone->addr <= z->addr &&
z->addr + z->size <= zone->addr + zone->size) {
dev->nb_zones--;
*z = dev->zone[dev->nb_zones];
}
i--;
}
mutex_unlock(&kvm->lock);
return 0;
}
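Note the containment semantics above: one call removes every zone that fits inside the given range, compacting the array by moving the last zone into each freed slot; the loop walks backwards so the moved-in zone has already been examined. One consequence worth spelling out is that a wide enough range tears everything down at once, e.g.:

/* Sketch: drop all zones in one call (assuming they sit below 4 GiB,
 * since the zone size field is 32 bits wide). */
struct kvm_coalesced_mmio_zone all = {
        .addr = 0,
        .size = ~0U,
};

ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &all);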
/*
* KVM coalesced MMIO
*
* Copyright (c) 2008 Bull S.A.S.
*
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
*/
#define KVM_COALESCED_MMIO_ZONE_MAX 100
struct kvm_coalesced_mmio_dev {
struct kvm_io_device dev;
struct kvm *kvm;
int nb_zones;
struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
};
int kvm_coalesced_mmio_init(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
...@@ -146,6 +146,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic, ...@@ -146,6 +146,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
return kvm_apic_set_irq(vcpu, vector, trig_mode); return kvm_apic_set_irq(vcpu, vector, trig_mode);
} }
static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
{
kvm_inject_nmi(vcpu);
}
static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
u8 dest_mode) u8 dest_mode)
{ {
...@@ -239,8 +244,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) ...@@ -239,8 +244,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
} }
} }
break; break;
case IOAPIC_NMI:
/* TODO: NMI */ for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
if (!(deliver_bitmask & (1 << vcpu_id)))
continue;
deliver_bitmask &= ~(1 << vcpu_id);
vcpu = ioapic->kvm->vcpus[vcpu_id];
if (vcpu)
ioapic_inj_nmi(vcpu);
else
ioapic_debug("NMI to vcpu %d failed\n",
vcpu_id);
}
break;
default: default:
printk(KERN_WARNING "Unsupported delivery mode %d\n", printk(KERN_WARNING "Unsupported delivery mode %d\n",
delivery_mode); delivery_mode);
...@@ -291,7 +307,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector) ...@@ -291,7 +307,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
__kvm_ioapic_update_eoi(ioapic, i); __kvm_ioapic_update_eoi(ioapic, i);
} }
static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr) static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr,
int len, int is_write)
{ {
struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private; struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
......
...@@ -27,7 +27,8 @@ struct kvm_io_device { ...@@ -27,7 +27,8 @@ struct kvm_io_device {
gpa_t addr, gpa_t addr,
int len, int len,
const void *val); const void *val);
int (*in_range)(struct kvm_io_device *this, gpa_t addr); int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len,
int is_write);
void (*destructor)(struct kvm_io_device *this); void (*destructor)(struct kvm_io_device *this);
void *private; void *private;
...@@ -49,9 +50,10 @@ static inline void kvm_iodevice_write(struct kvm_io_device *dev, ...@@ -49,9 +50,10 @@ static inline void kvm_iodevice_write(struct kvm_io_device *dev,
dev->write(dev, addr, len, val); dev->write(dev, addr, len, val);
} }
static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr) static inline int kvm_iodevice_inrange(struct kvm_io_device *dev,
gpa_t addr, int len, int is_write)
{ {
return dev->in_range(dev, addr); return dev->in_range(dev, addr, len, is_write);
} }
static inline void kvm_iodevice_destructor(struct kvm_io_device *dev) static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
......
...@@ -47,6 +47,10 @@ ...@@ -47,6 +47,10 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif
MODULE_AUTHOR("Qumranet"); MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -65,6 +69,8 @@ struct dentry *kvm_debugfs_dir; ...@@ -65,6 +69,8 @@ struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg); unsigned long arg);
bool kvm_rebooting;
static inline int valid_vcpu(int n) static inline int valid_vcpu(int n)
{ {
return likely(n >= 0 && n < KVM_MAX_VCPUS); return likely(n >= 0 && n < KVM_MAX_VCPUS);
...@@ -99,10 +105,11 @@ static void ack_flush(void *_completed) ...@@ -99,10 +105,11 @@ static void ack_flush(void *_completed)
void kvm_flush_remote_tlbs(struct kvm *kvm) void kvm_flush_remote_tlbs(struct kvm *kvm)
{ {
int i, cpu; int i, cpu, me;
cpumask_t cpus; cpumask_t cpus;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
me = get_cpu();
cpus_clear(cpus); cpus_clear(cpus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) { for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i]; vcpu = kvm->vcpus[i];
...@@ -111,21 +118,24 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) ...@@ -111,21 +118,24 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
continue; continue;
cpu = vcpu->cpu; cpu = vcpu->cpu;
if (cpu != -1 && cpu != raw_smp_processor_id()) if (cpu != -1 && cpu != me)
cpu_set(cpu, cpus); cpu_set(cpu, cpus);
} }
if (cpus_empty(cpus)) if (cpus_empty(cpus))
return; goto out;
++kvm->stat.remote_tlb_flush; ++kvm->stat.remote_tlb_flush;
smp_call_function_mask(cpus, ack_flush, NULL, 1); smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
put_cpu();
} }
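The get_cpu()/put_cpu() pair is the point of this hunk: the updated smp_call_function_mask() must run with preemption disabled, and a bare raw_smp_processor_id() could go stale if the thread migrated after the mask was built. The pattern in isolation:

/* Sketch of the calling convention this change adopts. */
cpumask_t cpus;
int me = get_cpu();     /* disables preemption; "me" stays valid */

cpus_clear(cpus);
/* ... mark every remote CPU that needs the IPI, skipping "me" ... */
smp_call_function_mask(cpus, ack_flush, NULL, 1);  /* wait for acks */
put_cpu();              /* re-enables preemption */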
void kvm_reload_remote_mmus(struct kvm *kvm) void kvm_reload_remote_mmus(struct kvm *kvm)
{ {
int i, cpu; int i, cpu, me;
cpumask_t cpus; cpumask_t cpus;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
me = get_cpu();
cpus_clear(cpus); cpus_clear(cpus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) { for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i]; vcpu = kvm->vcpus[i];
...@@ -134,12 +144,14 @@ void kvm_reload_remote_mmus(struct kvm *kvm) ...@@ -134,12 +144,14 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue; continue;
cpu = vcpu->cpu; cpu = vcpu->cpu;
if (cpu != -1 && cpu != raw_smp_processor_id()) if (cpu != -1 && cpu != me)
cpu_set(cpu, cpus); cpu_set(cpu, cpus);
} }
if (cpus_empty(cpus)) if (cpus_empty(cpus))
return; goto out;
smp_call_function_mask(cpus, ack_flush, NULL, 1); smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
put_cpu();
} }
...@@ -183,10 +195,23 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); ...@@ -183,10 +195,23 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void) static struct kvm *kvm_create_vm(void)
{ {
struct kvm *kvm = kvm_arch_create_vm(); struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
#endif
if (IS_ERR(kvm)) if (IS_ERR(kvm))
goto out; goto out;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
kfree(kvm);
return ERR_PTR(-ENOMEM);
}
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
kvm->mm = current->mm; kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count); atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock); spin_lock_init(&kvm->mmu_lock);
...@@ -198,6 +223,9 @@ static struct kvm *kvm_create_vm(void) ...@@ -198,6 +223,9 @@ static struct kvm *kvm_create_vm(void)
spin_lock(&kvm_lock); spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list); list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock); spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
kvm_coalesced_mmio_init(kvm);
#endif
out: out:
return kvm; return kvm;
} }
...@@ -240,6 +268,10 @@ static void kvm_destroy_vm(struct kvm *kvm) ...@@ -240,6 +268,10 @@ static void kvm_destroy_vm(struct kvm *kvm)
spin_unlock(&kvm_lock); spin_unlock(&kvm_lock);
kvm_io_bus_destroy(&kvm->pio_bus); kvm_io_bus_destroy(&kvm->pio_bus);
kvm_io_bus_destroy(&kvm->mmio_bus); kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
if (kvm->coalesced_mmio_ring != NULL)
free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
kvm_arch_destroy_vm(kvm); kvm_arch_destroy_vm(kvm);
mmdrop(mm); mmdrop(mm);
} }
...@@ -333,6 +365,7 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -333,6 +365,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
r = -ENOMEM; r = -ENOMEM;
/* Allocate if a slot is being created */ /* Allocate if a slot is being created */
#ifndef CONFIG_S390
if (npages && !new.rmap) { if (npages && !new.rmap) {
new.rmap = vmalloc(npages * sizeof(struct page *)); new.rmap = vmalloc(npages * sizeof(struct page *));
...@@ -373,10 +406,14 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -373,10 +406,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free; goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes); memset(new.dirty_bitmap, 0, dirty_bytes);
} }
#endif /* not defined CONFIG_S390 */
if (mem->slot >= kvm->nmemslots) if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1; kvm->nmemslots = mem->slot + 1;
if (!npages)
kvm_arch_flush_shadow(kvm);
*memslot = new; *memslot = new;
r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
...@@ -532,6 +569,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) ...@@ -532,6 +569,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
struct page *page[1]; struct page *page[1];
unsigned long addr; unsigned long addr;
int npages; int npages;
pfn_t pfn;
might_sleep(); might_sleep();
...@@ -544,19 +582,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) ...@@ -544,19 +582,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page, npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
NULL); NULL);
if (npages != 1) { if (unlikely(npages != 1)) {
get_page(bad_page); struct vm_area_struct *vma;
return page_to_pfn(bad_page);
}
return page_to_pfn(page[0]); vma = find_vma(current->mm, addr);
if (vma == NULL || addr < vma->vm_start ||
!(vma->vm_flags & VM_PFNMAP)) {
get_page(bad_page);
return page_to_pfn(bad_page);
}
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
BUG_ON(pfn_valid(pfn));
} else
pfn = page_to_pfn(page[0]);
return pfn;
} }
EXPORT_SYMBOL_GPL(gfn_to_pfn); EXPORT_SYMBOL_GPL(gfn_to_pfn);
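The new branch covers memory slots whose userspace mapping is VM_PFNMAP (no struct page behind it, e.g. a device BAR remapped with remap_pfn_range()), where get_user_pages() always fails; the pfn is instead derived from the VMA, relying on the mapping being linear. Such a slot would be set up roughly like this (device fd, size, and addresses are illustrative):

/* Sketch (userspace): back a guest-physical range with a device
 * mapping that the driver established via remap_pfn_range(). */
void *bar = mmap(NULL, bar_size, PROT_READ | PROT_WRITE,
                 MAP_SHARED, device_fd, 0);

struct kvm_userspace_memory_region region = {
        .slot = 1,
        .guest_phys_addr = 0xf0000000,  /* illustrative */
        .memory_size = bar_size,
        .userspace_addr = (unsigned long)bar,
};

ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);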
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{ {
return pfn_to_page(gfn_to_pfn(kvm, gfn)); pfn_t pfn;
pfn = gfn_to_pfn(kvm, gfn);
if (pfn_valid(pfn))
return pfn_to_page(pfn);
WARN_ON(!pfn_valid(pfn));
get_page(bad_page);
return bad_page;
} }
EXPORT_SYMBOL_GPL(gfn_to_page); EXPORT_SYMBOL_GPL(gfn_to_page);
...@@ -569,7 +626,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean); ...@@ -569,7 +626,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(pfn_t pfn) void kvm_release_pfn_clean(pfn_t pfn)
{ {
put_page(pfn_to_page(pfn)); if (pfn_valid(pfn))
put_page(pfn_to_page(pfn));
} }
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
...@@ -594,21 +652,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty); ...@@ -594,21 +652,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
void kvm_set_pfn_dirty(pfn_t pfn) void kvm_set_pfn_dirty(pfn_t pfn)
{ {
struct page *page = pfn_to_page(pfn); if (pfn_valid(pfn)) {
if (!PageReserved(page)) struct page *page = pfn_to_page(pfn);
SetPageDirty(page); if (!PageReserved(page))
SetPageDirty(page);
}
} }
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(pfn_t pfn) void kvm_set_pfn_accessed(pfn_t pfn)
{ {
mark_page_accessed(pfn_to_page(pfn)); if (pfn_valid(pfn))
mark_page_accessed(pfn_to_page(pfn));
} }
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
void kvm_get_pfn(pfn_t pfn) void kvm_get_pfn(pfn_t pfn)
{ {
get_page(pfn_to_page(pfn)); if (pfn_valid(pfn))
get_page(pfn_to_page(pfn));
} }
EXPORT_SYMBOL_GPL(kvm_get_pfn); EXPORT_SYMBOL_GPL(kvm_get_pfn);
...@@ -798,6 +860,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -798,6 +860,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#ifdef CONFIG_X86 #ifdef CONFIG_X86
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data); page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif #endif
else else
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
...@@ -1121,6 +1187,32 @@ static long kvm_vm_ioctl(struct file *filp, ...@@ -1121,6 +1187,32 @@ static long kvm_vm_ioctl(struct file *filp,
goto out; goto out;
break; break;
} }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = -ENXIO;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
if (r)
goto out;
r = 0;
break;
}
case KVM_UNREGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = -ENXIO;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
if (r)
goto out;
r = 0;
break;
}
#endif
default: default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg); r = kvm_arch_vm_ioctl(filp, ioctl, arg);
} }
...@@ -1179,7 +1271,6 @@ static int kvm_dev_ioctl_create_vm(void) ...@@ -1179,7 +1271,6 @@ static int kvm_dev_ioctl_create_vm(void)
static long kvm_dev_ioctl(struct file *filp, static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg) unsigned int ioctl, unsigned long arg)
{ {
void __user *argp = (void __user *)arg;
long r = -EINVAL; long r = -EINVAL;
switch (ioctl) { switch (ioctl) {
...@@ -1196,7 +1287,7 @@ static long kvm_dev_ioctl(struct file *filp, ...@@ -1196,7 +1287,7 @@ static long kvm_dev_ioctl(struct file *filp,
r = kvm_dev_ioctl_create_vm(); r = kvm_dev_ioctl_create_vm();
break; break;
case KVM_CHECK_EXTENSION: case KVM_CHECK_EXTENSION:
r = kvm_dev_ioctl_check_extension((long)argp); r = kvm_dev_ioctl_check_extension(arg);
break; break;
case KVM_GET_VCPU_MMAP_SIZE: case KVM_GET_VCPU_MMAP_SIZE:
r = -EINVAL; r = -EINVAL;
...@@ -1205,6 +1296,9 @@ static long kvm_dev_ioctl(struct file *filp, ...@@ -1205,6 +1296,9 @@ static long kvm_dev_ioctl(struct file *filp,
r = PAGE_SIZE; /* struct kvm_run */ r = PAGE_SIZE; /* struct kvm_run */
#ifdef CONFIG_X86 #ifdef CONFIG_X86
r += PAGE_SIZE; /* pio data page */ r += PAGE_SIZE; /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif #endif
break; break;
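With this, the per-vcpu mmap on x86 becomes three consecutive pages, which is why userspace must size the mapping via the ioctl instead of assuming one page. A sketch of the resulting layout:

/* Sketch: page 0 = struct kvm_run, page 1 = PIO data, page 2 =
 * coalesced MMIO ring (when the capability is present). */
long psz = sysconf(_SC_PAGESIZE);
int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_SHARED, vcpu_fd, 0);
void *pio_data = (char *)run + KVM_PIO_PAGE_OFFSET * psz;
struct kvm_coalesced_mmio_ring *ring = (void *)
        ((char *)run + KVM_COALESCED_MMIO_PAGE_OFFSET * psz);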
case KVM_TRACE_ENABLE: case KVM_TRACE_ENABLE:
...@@ -1247,7 +1341,6 @@ static void hardware_disable(void *junk) ...@@ -1247,7 +1341,6 @@ static void hardware_disable(void *junk)
if (!cpu_isset(cpu, cpus_hardware_enabled)) if (!cpu_isset(cpu, cpus_hardware_enabled))
return; return;
cpu_clear(cpu, cpus_hardware_enabled); cpu_clear(cpu, cpus_hardware_enabled);
decache_vcpus_on_cpu(cpu);
kvm_arch_hardware_disable(NULL); kvm_arch_hardware_disable(NULL);
} }
...@@ -1277,6 +1370,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, ...@@ -1277,6 +1370,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
return NOTIFY_OK; return NOTIFY_OK;
} }
asmlinkage void kvm_handle_fault_on_reboot(void)
{
if (kvm_rebooting)
/* spin while reset goes on */
while (true)
;
/* Fault while not rebooting. We want the trace. */
BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val, static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
void *v) void *v)
{ {
...@@ -1286,6 +1391,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val, ...@@ -1286,6 +1391,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
* in vmx root mode. * in vmx root mode.
*/ */
printk(KERN_INFO "kvm: exiting hardware virtualization\n"); printk(KERN_INFO "kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
on_each_cpu(hardware_disable, NULL, 1); on_each_cpu(hardware_disable, NULL, 1);
} }
return NOTIFY_OK; return NOTIFY_OK;
...@@ -1312,14 +1418,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus) ...@@ -1312,14 +1418,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
} }
} }
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr) struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
gpa_t addr, int len, int is_write)
{ {
int i; int i;
for (i = 0; i < bus->dev_count; i++) { for (i = 0; i < bus->dev_count; i++) {
struct kvm_io_device *pos = bus->devs[i]; struct kvm_io_device *pos = bus->devs[i];
if (pos->in_range(pos, addr)) if (pos->in_range(pos, addr, len, is_write))
return pos; return pos;
} }
......
...@@ -72,11 +72,7 @@ static void kvm_add_trace(void *probe_private, void *call_data, ...@@ -72,11 +72,7 @@ static void kvm_add_trace(void *probe_private, void *call_data,
rec.cycle_in = p->cycle_in; rec.cycle_in = p->cycle_in;
if (rec.cycle_in) { if (rec.cycle_in) {
u64 cycle = 0; rec.u.cycle.cycle_u64 = get_cycles();
cycle = get_cycles();
rec.u.cycle.cycle_lo = (u32)cycle;
rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
for (i = 0; i < rec.extra_u32; i++) for (i = 0; i < rec.extra_u32; i++)
rec.u.cycle.extra_u32[i] = va_arg(*args, u32); rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
...@@ -114,8 +110,18 @@ static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, ...@@ -114,8 +110,18 @@ static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
{ {
struct kvm_trace *kt; struct kvm_trace *kt;
if (!relay_buf_full(buf)) if (!relay_buf_full(buf)) {
if (!prev_subbuf) {
/*
* executed only once, when the channel is opened:
* save the metadata as the first record
*/
subbuf_start_reserve(buf, sizeof(u32));
*(u32 *)subbuf = 0x12345678;
}
return 1; return 1;
}
kt = buf->chan->private_data; kt = buf->chan->private_data;
atomic_inc(&kt->lost_records); atomic_inc(&kt->lost_records);
......
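The reserved word gives trace readers a fixed marker at the start of every sub-buffer; since 0x12345678 reads back byte-swapped on the other endianness, a consumer can plausibly use it both to validate the stream and to decide whether to swap fields. A reader-side sketch (file handling elided):

/* Sketch: check the per-subbuffer magic written by the callback above. */
uint32_t magic;
int byteswap;

if (fread(&magic, sizeof(magic), 1, fp) != 1)
        return -1;
if (magic == 0x12345678)
        byteswap = 0;           /* producer has our endianness */
else if (magic == 0x78563412)
        byteswap = 1;           /* opposite endianness: swap all fields */
else
        return -1;              /* not a kvmtrace stream */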