Commit 597b0d21 authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.29' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.29' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (140 commits)
  KVM: MMU: handle large host sptes on invlpg/resync
  KVM: Add locking to virtual i8259 interrupt controller
  KVM: MMU: Don't treat a global pte as such if cr4.pge is cleared
  MAINTAINERS: Maintainership changes for kvm/ia64
  KVM: ia64: Fix kvm_arch_vcpu_ioctl_[gs]et_regs()
  KVM: x86: Rework user space NMI injection as KVM_CAP_USER_NMI
  KVM: VMX: Fix pending NMI-vs.-IRQ race for user space irqchip
  KVM: fix handling of ACK from shared guest IRQ
  KVM: MMU: check for present pdptr shadow page in walk_shadow
  KVM: Consolidate userspace memory capability reporting into common code
  KVM: Advertise the bug in memory region destruction as fixed
  KVM: use cpumask_var_t for cpus_hardware_enabled
  KVM: use modern cpumask primitives, no cpumask_t on stack
  KVM: Extract core of kvm_flush_remote_tlbs/kvm_reload_remote_mmus
  KVM: set owner of cpu and vm file operations
  anon_inodes: use fops->owner for module refcount
  x86: KVM guest: kvm_get_tsc_khz: return khz, not lpj
  KVM: MMU: prepopulate the shadow on invlpg
  KVM: MMU: skip global pgtables on sync due to cr3 switch
  KVM: MMU: collapse remote TLB flushes on root sync
  ...
parents 2640c9a9 87917239
@@ -2542,8 +2542,6 @@ W: http://kvm.qumranet.com
S: Supported S: Supported
KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64) KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
P: Anthony Xu
M: anthony.xu@intel.com
P: Xiantao Zhang P: Xiantao Zhang
M: xiantao.zhang@intel.com M: xiantao.zhang@intel.com
L: kvm-ia64@vger.kernel.org L: kvm-ia64@vger.kernel.org
......
@@ -166,8 +166,6 @@ struct saved_vpd {
}; };
struct kvm_regs { struct kvm_regs {
char *saved_guest;
char *saved_stack;
struct saved_vpd vpd; struct saved_vpd vpd;
/*Arch-regs*/ /*Arch-regs*/
int mp_state; int mp_state;
@@ -200,6 +198,10 @@ struct kvm_regs {
unsigned long fp_psr; /*used for lazy float register */ unsigned long fp_psr; /*used for lazy float register */
unsigned long saved_gp; unsigned long saved_gp;
/*for physical emulation */ /*for physical emulation */
union context saved_guest;
unsigned long reserved[64]; /* for future use */
}; };
struct kvm_sregs { struct kvm_sregs {
......
@@ -23,17 +23,6 @@
#ifndef __ASM_KVM_HOST_H #ifndef __ASM_KVM_HOST_H
#define __ASM_KVM_HOST_H #define __ASM_KVM_HOST_H
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <asm/pal.h>
#include <asm/sal.h>
#define KVM_MAX_VCPUS 4
#define KVM_MEMORY_SLOTS 32 #define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */ /* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -50,70 +39,132 @@
#define EXIT_REASON_EXTERNAL_INTERRUPT 6 #define EXIT_REASON_EXTERNAL_INTERRUPT 6
#define EXIT_REASON_IPI 7 #define EXIT_REASON_IPI 7
#define EXIT_REASON_PTC_G 8 #define EXIT_REASON_PTC_G 8
#define EXIT_REASON_DEBUG 20
/*Define vmm address space and vm data space.*/ /*Define vmm address space and vm data space.*/
#define KVM_VMM_SIZE (16UL<<20) #define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
#define KVM_VMM_SHIFT 24 #define KVM_VMM_SHIFT 24
#define KVM_VMM_BASE 0xD000000000000000UL #define KVM_VMM_BASE 0xD000000000000000
#define VMM_SIZE (8UL<<20) #define VMM_SIZE (__IA64_UL_CONST(8)<<20)
/* /*
* Define vm_buffer, used by PAL Services, base address. * Define vm_buffer, used by PAL Services, base address.
* Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
*/ */
#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
#define KVM_VM_BUFFER_SIZE (8UL<<20) #define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
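The move from plain UL suffixes to __IA64_UL_CONST() in the new column matters because this header is now also pulled into assembly code (see the #ifndef __ASSEMBLY__ guard added further down and the #include <asm/kvm_host.h> added to vmm_minstate.h). A minimal sketch of what the wrapper does, assuming the usual ia64 definition in <asm/types.h>:

#ifdef __ASSEMBLY__
# define __IA64_UL_CONST(x)	x	/* "16UL" is not valid assembler */
#else
# define __IA64_UL_CONST(x)	x##UL	/* C still sees an unsigned long constant */
#endif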
/*Define Virtual machine data layout.*/
#define KVM_VM_DATA_SHIFT	24
#define KVM_VM_DATA_SIZE	(1UL << KVM_VM_DATA_SHIFT)
#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VMM_SIZE)
/*
 * kvm guest's data area looks as follow:
 *
 *            +----------------------+	-------	KVM_VM_DATA_SIZE
* | vcpu[n]'s data | | ___________________KVM_STK_OFFSET
* | | | / |
* | .......... | | /vcpu's struct&stack |
* | .......... | | /---------------------|---- 0
* | vcpu[5]'s data | | / vpd |
* | vcpu[4]'s data | |/-----------------------|
* | vcpu[3]'s data | / vtlb |
* | vcpu[2]'s data | /|------------------------|
* | vcpu[1]'s data |/ | vhpt |
* | vcpu[0]'s data |____________________________|
* +----------------------+ |
* | memory dirty log | |
* +----------------------+ |
* | vm's data struct | |
* +----------------------+ |
* | | |
* | | |
* | | |
* | | |
* | | |
* | | |
* | | |
* | vm's p2m table | |
* | | |
* | | |
* | | | |
* vm's data->| | | |
* +----------------------+ ------- 0
 * To support large memory, the size of p2m needs to be increased.
 * To support more vcpus, there must be enough space to hold
 * the vcpus' data.
*/
#define KVM_VM_DATA_SHIFT 26
#define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE)
#define KVM_P2M_BASE KVM_VM_DATA_BASE #define KVM_P2M_BASE KVM_VM_DATA_BASE
#define KVM_P2M_OFS 0 #define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20)
#define KVM_P2M_SIZE (8UL << 20)
#define VHPT_SHIFT 16
#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE) #define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT)
#define KVM_VHPT_OFS KVM_P2M_SIZE #define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
#define VHPT_SHIFT 18 #define VTLB_SHIFT 16
#define VHPT_SIZE (1UL << VHPT_SHIFT) #define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT)
#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5)) #define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5))
#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
#define VTLB_SHIFT 17
#define VTLB_SIZE (1UL<<VTLB_SHIFT)
#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
#define KVM_VPD_BLOCK_SIZE (2UL<<20)
#define VPD_SHIFT 16 #define VPD_SHIFT 16
#define VPD_SIZE (1UL<<VPD_SHIFT) #define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT)
#define VCPU_STRUCT_SHIFT 16
#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
#define KVM_STK_OFFSET VCPU_STRUCT_SIZE
#define KVM_VM_STRUCT_SHIFT 19
#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE) #define KVM_MEM_DIRY_LOG_SHIFT 19
#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE) #define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
#define VCPU_SHIFT 18
#define VCPU_SIZE (1UL<<VCPU_SHIFT)
#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE) #ifndef __ASSEMBLY__
#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
#define KVM_VM_BLOCK_SIZE (1UL<<19)
#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE) /*Define the max vcpus and memory for Guests.*/
#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE) #define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19) KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
/* Get vpd, vhpt, tlb, vcpu, base*/ #define VMM_LOG_LEN 256
#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE) #include <linux/types.h>
#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE) #include <linux/mm.h>
#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE) #include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
struct kvm_vcpu_data {
char vcpu_vhpt[VHPT_SIZE];
char vcpu_vtlb[VTLB_SIZE];
char vcpu_vpd[VPD_SIZE];
char vcpu_struct[VCPU_STRUCT_SIZE];
};
struct kvm_vm_data {
char kvm_p2m[KVM_P2M_SIZE];
char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};
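A quick sanity check of the limits defined earlier in this hunk, worked out from the constants above rather than taken from the patch text (the last line assumes the common 16 KiB ia64 page size):

/*
 * sizeof(struct kvm_vcpu_data) = VHPT_SIZE + VTLB_SIZE + VPD_SIZE + VCPU_STRUCT_SIZE
 *                              = 4 * 64 KiB = 256 KiB
 *
 * KVM_MAX_VCPUS = (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE
 *                  - KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
 *               = (64 MiB - 24 MiB - 512 KiB - 512 KiB) / 256 KiB = 156
 *
 * KVM_MAX_MEM_SIZE = (KVM_P2M_SIZE / 8) * PAGE_SIZE
 *                  = 3M p2m entries * 16 KiB = 48 GiB of guest physical memory
 */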
#define VCPU_BASE(n) KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, vcpu_data[n])
#define VM_BASE KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, kvm_vm_struct)
#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
#define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
#define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \
offsetof(struct kvm_vcpu_data, vcpu_struct))
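Put together, per-vcpu addresses are now derived purely with offsetof() from one statically laid-out struct kvm_vm_data instead of the old KVM_*_OFS arithmetic. A minimal illustration (the helper name is made up; the macros and types are the ones defined above):

static unsigned long example_vcpu_vhpt_addr(int id)
{
	/* each vcpu owns one kvm_vcpu_data slot inside the per-VM block */
	unsigned long vcpu_base = KVM_VM_DATA_BASE +
		offsetof(struct kvm_vm_data, vcpu_data[id]);

	/* its VHPT lives at a fixed offset inside that slot */
	return vcpu_base + offsetof(struct kvm_vcpu_data, vcpu_vhpt);
}

which is exactly what VHPT_BASE(id) expands to.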
/*IO section definitions*/ /*IO section definitions*/
#define IOREQ_READ 1 #define IOREQ_READ 1
@@ -389,6 +440,7 @@ struct kvm_vcpu_arch {
unsigned long opcode; unsigned long opcode;
unsigned long cause; unsigned long cause;
char log_buf[VMM_LOG_LEN];
union context host; union context host;
union context guest; union context guest;
}; };
@@ -403,14 +455,13 @@ struct kvm_sal_data {
}; };
struct kvm_arch { struct kvm_arch {
spinlock_t dirty_log_lock;
unsigned long vm_base; unsigned long vm_base;
unsigned long metaphysical_rr0; unsigned long metaphysical_rr0;
unsigned long metaphysical_rr4; unsigned long metaphysical_rr4;
unsigned long vmm_init_rr; unsigned long vmm_init_rr;
unsigned long vhpt_base;
unsigned long vtlb_base;
unsigned long vpd_base;
spinlock_t dirty_log_lock;
struct kvm_ioapic *vioapic; struct kvm_ioapic *vioapic;
struct kvm_vm_stat stat; struct kvm_vm_stat stat;
struct kvm_sal_data rdv_sal_data; struct kvm_sal_data rdv_sal_data;
@@ -512,7 +563,7 @@ struct kvm_pt_regs {
static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
{ {
return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1; return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
} }
typedef int kvm_vmm_entry(void); typedef int kvm_vmm_entry(void);
@@ -531,5 +582,6 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu); void kvm_sal_emul(struct kvm_vcpu *vcpu);
static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {} static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
#endif /* __ASSEMBLY__*/
#endif #endif
@@ -60,7 +60,7 @@ obj-$(CONFIG_KVM) += kvm.o
CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127 CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \ kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
vtlb.o process.o vtlb.o process.o kvm_lib.o
#Add link memcpy and memset to avoid possible structure assignment error #Add link memcpy and memset to avoid possible structure assignment error
kvm-intel-objs += memcpy.o memset.o kvm-intel-objs += memcpy.o memset.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
@@ -24,19 +24,10 @@
#include <linux/autoconf.h> #include <linux/autoconf.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/kbuild.h>
#include "vcpu.h" #include "vcpu.h"
#define task_struct kvm_vcpu
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : :)
#define OFFSET(_sym, _str, _mem) \
DEFINE(_sym, offsetof(_str, _mem));
void foo(void) void foo(void)
{ {
DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu)); DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
......
@@ -180,7 +180,6 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) { switch (ext) {
case KVM_CAP_IRQCHIP: case KVM_CAP_IRQCHIP:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_MP_STATE: case KVM_CAP_MP_STATE:
r = 1; r = 1;
@@ -439,7 +438,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
expires = div64_u64(itc_diff, cyc_per_usec); expires = div64_u64(itc_diff, cyc_per_usec);
kt = ktime_set(0, 1000 * expires); kt = ktime_set(0, 1000 * expires);
down_read(&vcpu->kvm->slots_lock);
vcpu->arch.ht_active = 1; vcpu->arch.ht_active = 1;
hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
@@ -452,7 +450,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vcpu->arch.mp_state = vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE; KVM_MP_STATE_RUNNABLE;
up_read(&vcpu->kvm->slots_lock);
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR; return -EINTR;
@@ -476,6 +473,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
return 1; return 1;
} }
static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
printk("VMM: %s", vcpu->arch.log_buf);
return 1;
}
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run) = { struct kvm_run *kvm_run) = {
[EXIT_REASON_VM_PANIC] = handle_vm_error, [EXIT_REASON_VM_PANIC] = handle_vm_error,
@@ -487,6 +491,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
[EXIT_REASON_IPI] = handle_ipi, [EXIT_REASON_IPI] = handle_ipi,
[EXIT_REASON_PTC_G] = handle_global_purge, [EXIT_REASON_PTC_G] = handle_global_purge,
[EXIT_REASON_DEBUG] = handle_vcpu_debug,
}; };
@@ -698,27 +703,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return r; return r;
} }
/*
* Allocate 16M memory for every vm to hold its specific data.
* Its memory map is defined in kvm_host.h.
*/
static struct kvm *kvm_alloc_kvm(void) static struct kvm *kvm_alloc_kvm(void)
{ {
struct kvm *kvm; struct kvm *kvm;
uint64_t vm_base; uint64_t vm_base;
BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
if (!vm_base) if (!vm_base)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
/* Zero all pages before use! */
memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
kvm = (struct kvm *)(vm_base +
kvm = (struct kvm *)(vm_base + KVM_VM_OFS); offsetof(struct kvm_vm_data, kvm_vm_struct));
kvm->arch.vm_base = vm_base; kvm->arch.vm_base = vm_base;
printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
return kvm; return kvm;
} }
@@ -760,21 +762,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
static void kvm_init_vm(struct kvm *kvm) static void kvm_init_vm(struct kvm *kvm)
{ {
long vm_base;
BUG_ON(!kvm); BUG_ON(!kvm);
kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
kvm->arch.vmm_init_rr = VMM_INIT_RR; kvm->arch.vmm_init_rr = VMM_INIT_RR;
vm_base = kvm->arch.vm_base;
if (vm_base) {
kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
}
/* /*
*Fill P2M entries for MMIO/IO ranges *Fill P2M entries for MMIO/IO ranges
*/ */
@@ -838,9 +831,8 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{ {
int i;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
int r; int i;
vcpu_load(vcpu); vcpu_load(vcpu);
@@ -857,18 +849,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vpd->vpr = regs->vpd.vpr; vpd->vpr = regs->vpd.vpr;
r = -EFAULT; memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
sizeof(union context));
if (r)
goto out;
r = copy_from_user(vcpu + 1, regs->saved_stack +
sizeof(struct kvm_vcpu),
IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
if (r)
goto out;
vcpu->arch.exit_data =
((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
RESTORE_REGS(mp_state); RESTORE_REGS(mp_state);
RESTORE_REGS(vmm_rr); RESTORE_REGS(vmm_rr);
@@ -902,9 +883,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
set_bit(KVM_REQ_RESUME, &vcpu->requests); set_bit(KVM_REQ_RESUME, &vcpu->requests);
vcpu_put(vcpu); vcpu_put(vcpu);
r = 0;
out: return 0;
return r;
} }
long kvm_arch_vm_ioctl(struct file *filp, long kvm_arch_vm_ioctl(struct file *filp,
@@ -1166,10 +1146,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Set entry address for first run.*/ /*Set entry address for first run.*/
regs->cr_iip = PALE_RESET_ENTRY; regs->cr_iip = PALE_RESET_ENTRY;
/*Initilize itc offset for vcpus*/ /*Initialize itc offset for vcpus*/
itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
for (i = 0; i < MAX_VCPU_NUM; i++) { for (i = 0; i < KVM_MAX_VCPUS; i++) {
v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset; v->arch.itc_offset = itc_offset;
v->arch.last_itc = 0; v->arch.last_itc = 0;
} }
@@ -1183,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.apic->vcpu = vcpu; vcpu->arch.apic->vcpu = vcpu;
p_ctx->gr[1] = 0; p_ctx->gr[1] = 0;
p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET); p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
p_ctx->gr[13] = (unsigned long)vmm_vcpu; p_ctx->gr[13] = (unsigned long)vmm_vcpu;
p_ctx->psr = 0x1008522000UL; p_ctx->psr = 0x1008522000UL;
p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1218,12 +1199,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.hlt_timer.function = hlt_timer_fn; vcpu->arch.hlt_timer.function = hlt_timer_fn;
vcpu->arch.last_run_cpu = -1; vcpu->arch.last_run_cpu = -1;
vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id); vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
vcpu->arch.vsa_base = kvm_vsa_base; vcpu->arch.vsa_base = kvm_vsa_base;
vcpu->arch.__gp = kvm_vmm_gp; vcpu->arch.__gp = kvm_vmm_gp;
vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id); vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id); vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
init_ptce_info(vcpu); init_ptce_info(vcpu);
r = 0; r = 0;
@@ -1273,12 +1254,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
int r; int r;
int cpu; int cpu;
BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
r = -EINVAL;
if (id >= KVM_MAX_VCPUS) {
printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
KVM_MAX_VCPUS);
goto fail;
}
r = -ENOMEM; r = -ENOMEM;
if (!vm_base) { if (!vm_base) {
printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
goto fail; goto fail;
} }
vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id); vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
vcpu_data[id].vcpu_struct));
vcpu->kvm = kvm; vcpu->kvm = kvm;
cpu = get_cpu(); cpu = get_cpu();
@@ -1374,9 +1365,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{ {
int i;
int r;
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
int i;
vcpu_load(vcpu); vcpu_load(vcpu);
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
@@ -1391,14 +1382,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->vpd.vpsr = vpd->vpsr; regs->vpd.vpsr = vpd->vpsr;
regs->vpd.vpr = vpd->vpr; regs->vpd.vpr = vpd->vpr;
r = -EFAULT; memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
sizeof(union context));
if (r)
goto out;
r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
if (r)
goto out;
SAVE_REGS(mp_state); SAVE_REGS(mp_state);
SAVE_REGS(vmm_rr); SAVE_REGS(vmm_rr);
memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
@@ -1426,10 +1411,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
SAVE_REGS(metaphysical_saved_rr4); SAVE_REGS(metaphysical_saved_rr4);
SAVE_REGS(fp_psr); SAVE_REGS(fp_psr);
SAVE_REGS(saved_gp); SAVE_REGS(saved_gp);
vcpu_put(vcpu); vcpu_put(vcpu);
r = 0; return 0;
out:
return r;
} }
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
@@ -1457,6 +1441,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
unsigned long base_gfn = memslot->base_gfn; unsigned long base_gfn = memslot->base_gfn;
if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
return -ENOMEM;
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
pfn = gfn_to_pfn(kvm, base_gfn + i); pfn = gfn_to_pfn(kvm, base_gfn + i);
if (!kvm_is_mmio_pfn(pfn)) { if (!kvm_is_mmio_pfn(pfn)) {
@@ -1631,8 +1618,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot; struct kvm_memory_slot *memslot;
int r, i; int r, i;
long n, base; long n, base;
unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+ KVM_MEM_DIRTY_LOG_OFS); offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
r = -EINVAL; r = -EINVAL;
if (log->slot >= KVM_MEMORY_SLOTS) if (log->slot >= KVM_MEMORY_SLOTS)
......
/*
* kvm_lib.c: Compile some libraries for kvm-intel module.
*
* Just include kernel's library, and disable symbols export.
* Copyright (C) 2008, Intel Corporation.
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#undef CONFIG_MODULES
#include "../../../lib/vsprintf.c"
#include "../../../lib/ctype.c"
@@ -24,6 +24,8 @@
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/kregs.h> #include <asm/kregs.h>
#include <asm/kvm_host.h>
#include "asm-offsets.h" #include "asm-offsets.h"
#define KVM_MINSTATE_START_SAVE_MIN \ #define KVM_MINSTATE_START_SAVE_MIN \
@@ -33,7 +35,7 @@
addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
;; \ ;; \
lfetch.fault.excl.nt1 [r22]; \ lfetch.fault.excl.nt1 [r22]; \
addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \
mov r23 = ar.bspstore; /* save ar.bspstore */ \ mov r23 = ar.bspstore; /* save ar.bspstore */ \
;; \ ;; \
mov ar.bspstore = r22; /* switch to kernel RBS */\ mov ar.bspstore = r22; /* switch to kernel RBS */\
......
@@ -27,7 +27,8 @@
*/ */
static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
{ {
return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS); return (uint64_t *)(kvm->arch.vm_base +
offsetof(struct kvm_vm_data, kvm_p2m));
} }
static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
......
@@ -66,31 +66,25 @@ void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
switch (addr) { switch (addr) {
case PIB_OFST_INTA: case PIB_OFST_INTA:
/*panic_domain(NULL, "Undefined write on PIB INTA\n");*/ panic_vm(v, "Undefined write on PIB INTA\n");
panic_vm(v);
break; break;
case PIB_OFST_XTP: case PIB_OFST_XTP:
if (length == 1) { if (length == 1) {
vlsapic_write_xtp(v, val); vlsapic_write_xtp(v, val);
} else { } else {
/*panic_domain(NULL, panic_vm(v, "Undefined write on PIB XTP\n");
"Undefined write on PIB XTP\n");*/
panic_vm(v);
} }
break; break;
default: default:
if (PIB_LOW_HALF(addr)) { if (PIB_LOW_HALF(addr)) {
/*lower half */ /*Lower half */
if (length != 8) if (length != 8)
/*panic_domain(NULL, panic_vm(v, "Can't LHF write with size %ld!\n",
"Can't LHF write with size %ld!\n", length);
length);*/
panic_vm(v);
else else
vlsapic_write_ipi(v, addr, val); vlsapic_write_ipi(v, addr, val);
} else { /* upper half } else { /*Upper half */
printk("IPI-UHF write %lx\n",addr);*/ panic_vm(v, "IPI-UHF write %lx\n", addr);
panic_vm(v);
} }
break; break;
} }
@@ -108,22 +102,18 @@ unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
if (length == 1) /* 1 byte load */ if (length == 1) /* 1 byte load */
; /* There is no i8259, there is no INTA access*/ ; /* There is no i8259, there is no INTA access*/
else else
/*panic_domain(NULL,"Undefined read on PIB INTA\n"); */ panic_vm(v, "Undefined read on PIB INTA\n");
panic_vm(v);
break; break;
case PIB_OFST_XTP: case PIB_OFST_XTP:
if (length == 1) { if (length == 1) {
result = VLSAPIC_XTP(v); result = VLSAPIC_XTP(v);
/* printk("read xtp %lx\n", result); */
} else { } else {
/*panic_domain(NULL, panic_vm(v, "Undefined read on PIB XTP\n");
"Undefined read on PIB XTP\n");*/
panic_vm(v);
} }
break; break;
default: default:
panic_vm(v); panic_vm(v, "Undefined addr access for lsapic!\n");
break; break;
} }
return result; return result;
@@ -162,7 +152,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
/* it's necessary to ensure zero extending */ /* it's necessary to ensure zero extending */
*dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); *dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
} else } else
panic_vm(vcpu); panic_vm(vcpu, "Unhandled mmio access returned!\n");
out: out:
local_irq_restore(psr); local_irq_restore(psr);
return ; return ;
@@ -324,7 +314,9 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
return; return;
} else { } else {
inst_type = -1; inst_type = -1;
panic_vm(vcpu); panic_vm(vcpu, "Unsupported MMIO access instruction! \
Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
bundle.i64[0], bundle.i64[1]);
} }
size = 1 << size; size = 1 << size;
@@ -335,7 +327,7 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
if (inst_type == SL_INTEGER) if (inst_type == SL_INTEGER)
vcpu_set_gr(vcpu, inst.M1.r1, data, 0); vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
else else
panic_vm(vcpu); panic_vm(vcpu, "Unsupported instruction type!\n");
} }
vcpu_increment_iip(vcpu); vcpu_increment_iip(vcpu);
......
@@ -527,7 +527,8 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
vector = vec2off[vec]; vector = vec2off[vec];
if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
panic_vm(vcpu); panic_vm(vcpu, "Interruption with vector :0x%lx occurs "
"with psr.ic = 0\n", vector);
return; return;
} }
@@ -586,7 +587,7 @@ static void set_pal_call_result(struct kvm_vcpu *vcpu)
vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
} else } else
panic_vm(vcpu); panic_vm(vcpu, "Mis-set for exit reason!\n");
} }
static void set_sal_call_data(struct kvm_vcpu *vcpu) static void set_sal_call_data(struct kvm_vcpu *vcpu)
@@ -614,7 +615,7 @@ static void set_sal_call_result(struct kvm_vcpu *vcpu)
vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
} else } else
panic_vm(vcpu); panic_vm(vcpu, "Mis-set for exit reason!\n");
} }
void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
@@ -680,7 +681,7 @@ static void generate_exirq(struct kvm_vcpu *vcpu)
vpsr = VCPU(vcpu, vpsr); vpsr = VCPU(vcpu, vpsr);
isr = vpsr & IA64_PSR_RI; isr = vpsr & IA64_PSR_RI;
if (!(vpsr & IA64_PSR_IC)) if (!(vpsr & IA64_PSR_IC))
panic_vm(vcpu); panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n");
reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
} }
@@ -941,8 +942,20 @@ static void vcpu_do_resume(struct kvm_vcpu *vcpu)
ia64_set_pta(vcpu->arch.vhpt.pta.val); ia64_set_pta(vcpu->arch.vhpt.pta.val);
} }
static void vmm_sanity_check(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
panic_vm(vcpu, "Failed to do vmm sanity check,"
"it maybe caused by crashed vmm!!\n\n");
}
}
static void kvm_do_resume_op(struct kvm_vcpu *vcpu) static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{ {
vmm_sanity_check(vcpu); /*Guarantee vcpu is running on a healthy vmm!*/
if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
vcpu_do_resume(vcpu); vcpu_do_resume(vcpu);
return; return;
@@ -968,3 +981,11 @@ void vmm_transition(struct kvm_vcpu *vcpu)
1, 0, 0, 0, 0, 0); 1, 0, 0, 0, 0, 0);
kvm_do_resume_op(vcpu); kvm_do_resume_op(vcpu);
} }
void vmm_panic_handler(u64 vec)
{
struct kvm_vcpu *vcpu = current_vcpu;
vmm_sanity = 0;
panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
vec2off[vec]);
}
@@ -816,8 +816,9 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
unsigned long vitv = VCPU(vcpu, itv); unsigned long vitv = VCPU(vcpu, itv);
if (vcpu->vcpu_id == 0) { if (vcpu->vcpu_id == 0) {
for (i = 0; i < MAX_VCPU_NUM; i++) { for (i = 0; i < KVM_MAX_VCPUS; i++) {
v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
VMX(v, itc_offset) = itc_offset; VMX(v, itc_offset) = itc_offset;
VMX(v, last_itc) = 0; VMX(v, last_itc) = 0;
} }
@@ -1650,7 +1651,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
* Otherwise panic * Otherwise panic
*/ */
if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
panic_vm(vcpu); panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
& vpsr.is=0\n");
/* /*
* For those IA64_PSR bits: id/da/dd/ss/ed/ia * For those IA64_PSR bits: id/da/dd/ss/ed/ia
@@ -2103,7 +2105,7 @@ void kvm_init_all_rr(struct kvm_vcpu *vcpu)
if (is_physical_mode(vcpu)) { if (is_physical_mode(vcpu)) {
if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
panic_vm(vcpu); panic_vm(vcpu, "Machine Status conflicts!\n");
ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
ia64_dv_serialize_data(); ia64_dv_serialize_data();
@@ -2152,10 +2154,70 @@ int vmm_entry(void)
return 0; return 0;
} }
void panic_vm(struct kvm_vcpu *v) static void kvm_show_registers(struct kvm_pt_regs *regs)
{ {
unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
struct kvm_vcpu *vcpu = current_vcpu;
if (vcpu != NULL)
printk("vcpu 0x%p vcpu %d\n",
vcpu, vcpu->vcpu_id);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
regs->cr_ipsr, regs->cr_ifs, ip);
printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
regs->ar_rnat, regs->ar_bspstore, regs->pr);
printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
regs->b6, regs->b7);
printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
regs->f6.u.bits[1], regs->f6.u.bits[0],
regs->f7.u.bits[1], regs->f7.u.bits[0]);
printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
regs->f8.u.bits[1], regs->f8.u.bits[0],
regs->f9.u.bits[1], regs->f9.u.bits[0]);
printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
regs->f10.u.bits[1], regs->f10.u.bits[0],
regs->f11.u.bits[1], regs->f11.u.bits[0]);
printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
regs->r2, regs->r3);
printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
regs->r9, regs->r10);
printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
regs->r12, regs->r13);
printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
regs->r15, regs->r16);
printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
regs->r18, regs->r19);
printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
regs->r21, regs->r22);
printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
regs->r24, regs->r25);
printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
regs->r27, regs->r28);
printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
regs->r30, regs->r31);
}
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
va_list args;
char buf[256];
struct kvm_pt_regs *regs = vcpu_regs(v);
struct exit_ctl_data *p = &v->arch.exit_data; struct exit_ctl_data *p = &v->arch.exit_data;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printk(buf);
kvm_show_registers(regs);
p->exit_reason = EXIT_REASON_VM_PANIC; p->exit_reason = EXIT_REASON_VM_PANIC;
vmm_transition(v); vmm_transition(v);
/*Never to return*/ /*Never to return*/
......
@@ -737,9 +737,12 @@ void kvm_init_vtlb(struct kvm_vcpu *v);
void kvm_init_vhpt(struct kvm_vcpu *v); void kvm_init_vhpt(struct kvm_vcpu *v);
void thash_init(struct thash_cb *hcb, u64 sz); void thash_init(struct thash_cb *hcb, u64 sz);
void panic_vm(struct kvm_vcpu *v); void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
u64 arg4, u64 arg5, u64 arg6, u64 arg7); u64 arg4, u64 arg5, u64 arg6, u64 arg7);
extern long vmm_sanity;
#endif #endif
#endif /* __VCPU_H__ */ #endif /* __VCPU_H__ */
@@ -20,6 +20,7 @@
*/ */
#include<linux/kernel.h>
#include<linux/module.h> #include<linux/module.h>
#include<asm/fpswa.h> #include<asm/fpswa.h>
@@ -31,6 +32,8 @@ MODULE_LICENSE("GPL");
extern char kvm_ia64_ivt; extern char kvm_ia64_ivt;
extern fpswa_interface_t *vmm_fpswa_interface; extern fpswa_interface_t *vmm_fpswa_interface;
long vmm_sanity = 1;
struct kvm_vmm_info vmm_info = { struct kvm_vmm_info vmm_info = {
.module = THIS_MODULE, .module = THIS_MODULE,
.vmm_entry = vmm_entry, .vmm_entry = vmm_entry,
@@ -62,5 +65,31 @@ void vmm_spin_unlock(spinlock_t *lock)
{ {
_vmm_raw_spin_unlock(lock); _vmm_raw_spin_unlock(lock);
} }
static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
long psr;
local_irq_save(psr);
p->exit_reason = EXIT_REASON_DEBUG;
vmm_transition(vcpu);
local_irq_restore(psr);
}
asmlinkage int printk(const char *fmt, ...)
{
struct kvm_vcpu *vcpu = current_vcpu;
va_list args;
int r;
memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
va_start(args, fmt);
r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
va_end(args);
vcpu_debug_exit(vcpu);
return r;
}
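The printk() override above is the VMM-side half of a small logging channel; the host-side half is the handle_vcpu_debug() handler added earlier in this patch. A rough sketch of the round trip, using only names introduced here:

/*
 * VMM context (no host console available):
 *   printk(fmt, ...)
 *     -> vsnprintf() into vcpu->arch.log_buf (VMM_LOG_LEN bytes)
 *     -> vcpu_debug_exit(): exit_reason = EXIT_REASON_DEBUG, vmm_transition()
 *
 * Host context:
 *   kvm_vti_exit_handlers[EXIT_REASON_DEBUG] == handle_vcpu_debug
 *     -> printk("VMM: %s", vcpu->arch.log_buf) on the real console
 */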
module_init(kvm_vmm_init) module_init(kvm_vmm_init)
module_exit(kvm_vmm_exit) module_exit(kvm_vmm_exit)
/* /*
* /ia64/kvm_ivt.S * arch/ia64/kvm/vmm_ivt.S
* *
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com>
@@ -70,32 +70,39 @@
# define PSR_DEFAULT_BITS 0 # define PSR_DEFAULT_BITS 0
#endif #endif
#define KVM_FAULT(n) \ #define KVM_FAULT(n) \
kvm_fault_##n:; \ kvm_fault_##n:; \
mov r19=n;; \ mov r19=n;; \
br.sptk.many kvm_fault_##n; \ br.sptk.many kvm_vmm_panic; \
;; \ ;; \
#define KVM_REFLECT(n) \ #define KVM_REFLECT(n) \
mov r31=pr; \ mov r31=pr; \
mov r19=n; /* prepare to save predicates */ \ mov r19=n; /* prepare to save predicates */ \
mov r29=cr.ipsr; \ mov r29=cr.ipsr; \
;; \ ;; \
tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
(p7)br.sptk.many kvm_dispatch_reflection; \ (p7) br.sptk.many kvm_dispatch_reflection; \
br.sptk.many kvm_panic; \ br.sptk.many kvm_vmm_panic; \
GLOBAL_ENTRY(kvm_panic) GLOBAL_ENTRY(kvm_vmm_panic)
br.sptk.many kvm_panic KVM_SAVE_MIN_WITH_COVER_R19
alloc r14=ar.pfs,0,0,1,0
mov out0=r15
adds r3=8,r2 // set up second base pointer
;; ;;
END(kvm_panic) ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
//(p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
mov rp=r14
;;
br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)
.section .text.ivt,"ax" .section .text.ivt,"ax"
@@ -108,7 +115,6 @@ ENTRY(kvm_vhpt_miss)
KVM_FAULT(0) KVM_FAULT(0)
END(kvm_vhpt_miss) END(kvm_vhpt_miss)
.org kvm_ia64_ivt+0x400 .org kvm_ia64_ivt+0x400
//////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21) // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
@@ -117,7 +123,7 @@ ENTRY(kvm_itlb_miss)
mov r29=cr.ipsr; mov r29=cr.ipsr;
;; ;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT; tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6) br.sptk kvm_alt_itlb_miss (p6) br.sptk kvm_alt_itlb_miss
mov r19 = 1 mov r19 = 1
br.sptk kvm_itlb_miss_dispatch br.sptk kvm_itlb_miss_dispatch
KVM_FAULT(1); KVM_FAULT(1);
@@ -131,7 +137,7 @@ ENTRY(kvm_dtlb_miss)
mov r29=cr.ipsr; mov r29=cr.ipsr;
;; ;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT; tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6)br.sptk kvm_alt_dtlb_miss (p6) br.sptk kvm_alt_dtlb_miss
br.sptk kvm_dtlb_miss_dispatch br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss) END(kvm_dtlb_miss)
@@ -233,7 +239,7 @@ ENTRY(kvm_break_fault)
;; ;;
KVM_SAVE_MIN_WITH_COVER_R19 KVM_SAVE_MIN_WITH_COVER_R19
;; ;;
alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
mov out0=cr.ifa mov out0=cr.ifa
mov out2=cr.isr // FIXME: pity to make this slow access twice mov out2=cr.isr // FIXME: pity to make this slow access twice
mov out3=cr.iim // FIXME: pity to make this slow access twice mov out3=cr.iim // FIXME: pity to make this slow access twice
@@ -421,9 +427,9 @@ ENTRY(kvm_virtual_exirq)
kvm_dispatch_vexirq: kvm_dispatch_vexirq:
cmp.eq p6,p0 = 1,r30 cmp.eq p6,p0 = 1,r30
;; ;;
(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
;; ;;
(p6)ld8 r1 = [r29] (p6) ld8 r1 = [r29]
;; ;;
KVM_SAVE_MIN_WITH_COVER_R19 KVM_SAVE_MIN_WITH_COVER_R19
alloc r14=ar.pfs,0,0,1,0 alloc r14=ar.pfs,0,0,1,0
@@ -449,13 +455,11 @@ END(kvm_virtual_exirq)
KVM_FAULT(14) KVM_FAULT(14)
// this code segment is from 2.6.16.13 // this code segment is from 2.6.16.13
.org kvm_ia64_ivt+0x3c00 .org kvm_ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved // 0x3c00 Entry 15 (size 64 bundles) Reserved
KVM_FAULT(15) KVM_FAULT(15)
.org kvm_ia64_ivt+0x4000 .org kvm_ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved // 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -612,13 +616,13 @@ ENTRY(kvm_virtualization_fault)
cmp.eq p10,p0=EVENT_SSM,r24 cmp.eq p10,p0=EVENT_SSM,r24
cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
cmp.eq p12,p0=EVENT_THASH,r24 cmp.eq p12,p0=EVENT_THASH,r24
(p6) br.dptk.many kvm_asm_mov_from_ar (p6) br.dptk.many kvm_asm_mov_from_ar
(p7) br.dptk.many kvm_asm_mov_from_rr (p7) br.dptk.many kvm_asm_mov_from_rr
(p8) br.dptk.many kvm_asm_mov_to_rr (p8) br.dptk.many kvm_asm_mov_to_rr
(p9) br.dptk.many kvm_asm_rsm (p9) br.dptk.many kvm_asm_rsm
(p10) br.dptk.many kvm_asm_ssm (p10) br.dptk.many kvm_asm_ssm
(p11) br.dptk.many kvm_asm_mov_to_psr (p11) br.dptk.many kvm_asm_mov_to_psr
(p12) br.dptk.many kvm_asm_thash (p12) br.dptk.many kvm_asm_thash
;; ;;
kvm_virtualization_fault_back: kvm_virtualization_fault_back:
adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
@@ -633,7 +637,7 @@ kvm_virtualization_fault_back:
st8 [r17] = r25 st8 [r17] = r25
;; ;;
cmp.ne p6,p0=EVENT_RFI, r24 cmp.ne p6,p0=EVENT_RFI, r24
(p6) br.sptk kvm_dispatch_virtualization_fault (p6) br.sptk kvm_dispatch_virtualization_fault
;; ;;
adds r18=VMM_VPD_BASE_OFFSET,r21 adds r18=VMM_VPD_BASE_OFFSET,r21
;; ;;
@@ -644,9 +648,9 @@ kvm_virtualization_fault_back:
ld8 r18=[r18] ld8 r18=[r18]
;; ;;
tbit.z p6,p0=r18,63 tbit.z p6,p0=r18,63
(p6) br.sptk kvm_dispatch_virtualization_fault (p6) br.sptk kvm_dispatch_virtualization_fault
;; ;;
//if vifs.v=1 desert current register frame //if vifs.v=1 desert current register frame
alloc r18=ar.pfs,0,0,0,0 alloc r18=ar.pfs,0,0,0,0
br.sptk kvm_dispatch_virtualization_fault br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault) END(kvm_virtualization_fault)
@@ -849,7 +853,7 @@ ENTRY(kvm_itlb_miss_dispatch)
END(kvm_itlb_miss_dispatch) END(kvm_itlb_miss_dispatch)
ENTRY(kvm_dispatch_reflection) ENTRY(kvm_dispatch_reflection)
/* /*
* Input: * Input:
* psr.ic: off * psr.ic: off
* r19: intr type (offset into ivt, see ia64_int.h) * r19: intr type (offset into ivt, see ia64_int.h)
@@ -886,7 +890,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
;; ;;
KVM_SAVE_MIN_WITH_COVER_R19 KVM_SAVE_MIN_WITH_COVER_R19
;; ;;
alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
mov out0=r13 //vcpu mov out0=r13 //vcpu
adds r3=8,r2 // set up second base pointer adds r3=8,r2 // set up second base pointer
;; ;;
@@ -910,7 +914,6 @@ ENTRY(kvm_dispatch_interrupt)
KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
;; ;;
alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
//mov out0=cr.ivr // pass cr.ivr as first arg
adds r3=8,r2 // set up second base pointer for SAVE_REST adds r3=8,r2 // set up second base pointer for SAVE_REST
;; ;;
ssm psr.ic ssm psr.ic
@@ -927,9 +930,6 @@ ENTRY(kvm_dispatch_interrupt)
br.call.sptk.many b6=kvm_ia64_handle_irq br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt) END(kvm_dispatch_interrupt)
GLOBAL_ENTRY(ia64_leave_nested) GLOBAL_ENTRY(ia64_leave_nested)
rsm psr.i rsm psr.i
;; ;;
@@ -1058,13 +1058,10 @@ GLOBAL_ENTRY(ia64_leave_nested)
rfi rfi
END(ia64_leave_nested) END(ia64_leave_nested)
GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
/* /*
* work.need_resched etc. mustn't get changed * work.need_resched etc. mustn't get changed
*by this CPU before it returns to *by this CPU before it returns to
;;
* user- or fsys-mode, hence we disable interrupts early on: * user- or fsys-mode, hence we disable interrupts early on:
*/ */
adds r2 = PT(R4)+16,r12 adds r2 = PT(R4)+16,r12
@@ -1286,13 +1283,11 @@ GLOBAL_ENTRY(ia64_vmm_entry)
mov r24=r22 mov r24=r22
mov r25=r18 mov r25=r18
tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
(p1) br.cond.sptk.few kvm_vps_resume_normal (p1) br.cond.sptk.few kvm_vps_resume_normal
(p2) br.cond.sptk.many kvm_vps_resume_handler (p2) br.cond.sptk.many kvm_vps_resume_handler
;; ;;
END(ia64_vmm_entry) END(ia64_vmm_entry)
/* /*
* extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
* u64 arg3, u64 arg4, u64 arg5, * u64 arg3, u64 arg4, u64 arg5,
@@ -1333,7 +1328,7 @@ hostret = r24
mov b6=entry mov b6=entry
br.cond.sptk b6 // call the service br.cond.sptk b6 // call the service
2: 2:
// Architectural sequence for enabling interrupts if necessary // Architectural sequence for enabling interrupts if necessary
(p7) ssm psr.ic (p7) ssm psr.ic
;; ;;
(p7) srlz.i (p7) srlz.i
......
@@ -183,8 +183,8 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
u64 i, dirty_pages = 1; u64 i, dirty_pages = 1;
u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE) void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
+ KVM_MEM_DIRTY_LOG_OFS;
dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
vmm_spin_lock(lock); vmm_spin_lock(lock);
......
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#ifndef __ASM_PPC_DISASSEMBLE_H__
#define __ASM_PPC_DISASSEMBLE_H__
#include <linux/types.h>
static inline unsigned int get_op(u32 inst)
{
return inst >> 26;
}
static inline unsigned int get_xop(u32 inst)
{
return (inst >> 1) & 0x3ff;
}
static inline unsigned int get_sprn(u32 inst)
{
return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}
static inline unsigned int get_dcrn(u32 inst)
{
return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}
static inline unsigned int get_rt(u32 inst)
{
return (inst >> 21) & 0x1f;
}
static inline unsigned int get_rs(u32 inst)
{
return (inst >> 21) & 0x1f;
}
static inline unsigned int get_ra(u32 inst)
{
return (inst >> 16) & 0x1f;
}
static inline unsigned int get_rb(u32 inst)
{
return (inst >> 11) & 0x1f;
}
static inline unsigned int get_rc(u32 inst)
{
return inst & 0x1;
}
static inline unsigned int get_ws(u32 inst)
{
return (inst >> 11) & 0x1f;
}
static inline unsigned int get_d(u32 inst)
{
return inst & 0xffff;
}
#endif /* __ASM_PPC_DISASSEMBLE_H__ */
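A small usage sketch for the accessors above (a hypothetical helper, not part of the patch); the bit positions follow the usual big-endian PowerPC numbering:

static void example_decode(u32 inst)
{
	unsigned int op  = get_op(inst);	/* primary opcode, bits 0-5 */
	unsigned int xop = get_xop(inst);	/* extended opcode, bits 21-30 */
	unsigned int rt  = get_rt(inst);	/* target register field */
	unsigned int ra  = get_ra(inst);	/* base register field */
	unsigned int d   = get_d(inst);		/* 16-bit displacement */

	/* A D-form load such as "lwz rt, d(ra)" is fully described by
	 * op/rt/ra/d; X-form instructions additionally need xop. */
	(void)op; (void)xop; (void)rt; (void)ra; (void)d;
}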
@@ -17,67 +17,45 @@
* Authors: Hollis Blanchard <hollisb@us.ibm.com> * Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/ */
#include <linux/errno.h> #ifndef __ASM_44X_H__
#define __ASM_44X_H__
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/kvm_ppc.h>
unsigned long kvmppc_booke_handlers; #define PPC44x_TLB_SIZE 64
static int kvmppc_booke_init(void) /* If the guest is expecting it, this can be as large as we like; we'd just
{ * need to find some way of advertising it. */
unsigned long ivor[16]; #define KVM44x_GUEST_TLB_SIZE 64
unsigned long max_ivor = 0;
int i;
/* We install our own exception handlers by hijacking IVPR. IVPR must struct kvmppc_44x_shadow_ref {
* be 16-bit aligned, so we need a 64KB allocation. */ struct page *page;
kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, u16 gtlb_index;
VCPU_SIZE_ORDER); u8 writeable;
if (!kvmppc_booke_handlers) u8 tid;
return -ENOMEM; };
/* XXX make sure our handlers are smaller than Linux's */ struct kvmppc_vcpu_44x {
/* Unmodified copy of the guest's TLB. */
struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
/* Copy our interrupt handlers to match host IVORs. That way we don't /* References to guest pages in the hardware TLB. */
* have to swap the IVORs on every guest/host transition. */ struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
ivor[0] = mfspr(SPRN_IVOR0);
ivor[1] = mfspr(SPRN_IVOR1);
ivor[2] = mfspr(SPRN_IVOR2);
ivor[3] = mfspr(SPRN_IVOR3);
ivor[4] = mfspr(SPRN_IVOR4);
ivor[5] = mfspr(SPRN_IVOR5);
ivor[6] = mfspr(SPRN_IVOR6);
ivor[7] = mfspr(SPRN_IVOR7);
ivor[8] = mfspr(SPRN_IVOR8);
ivor[9] = mfspr(SPRN_IVOR9);
ivor[10] = mfspr(SPRN_IVOR10);
ivor[11] = mfspr(SPRN_IVOR11);
ivor[12] = mfspr(SPRN_IVOR12);
ivor[13] = mfspr(SPRN_IVOR13);
ivor[14] = mfspr(SPRN_IVOR14);
ivor[15] = mfspr(SPRN_IVOR15);
for (i = 0; i < 16; i++) { /* State of the shadow TLB at guest context switch time. */
if (ivor[i] > max_ivor) struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
max_ivor = ivor[i]; u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
memcpy((void *)kvmppc_booke_handlers + ivor[i], struct kvm_vcpu vcpu;
kvmppc_handlers_start + i * kvmppc_handler_len, };
kvmppc_handler_len);
}
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
}
static void __exit kvmppc_booke_exit(void)
{ {
free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
kvm_exit();
} }
module_init(kvmppc_booke_init) void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
module_exit(kvmppc_booke_exit) void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
#endif /* __ASM_44X_H__ */
@@ -64,27 +64,58 @@ struct kvm_vcpu_stat {
u32 halt_wakeup; u32 halt_wakeup;
}; };
struct tlbe { struct kvmppc_44x_tlbe {
u32 tid; /* Only the low 8 bits are used. */ u32 tid; /* Only the low 8 bits are used. */
u32 word0; u32 word0;
u32 word1; u32 word1;
u32 word2; u32 word2;
}; };
struct kvm_arch { enum kvm_exit_types {
MMIO_EXITS,
DCR_EXITS,
SIGNAL_EXITS,
ITLB_REAL_MISS_EXITS,
ITLB_VIRT_MISS_EXITS,
DTLB_REAL_MISS_EXITS,
DTLB_VIRT_MISS_EXITS,
SYSCALL_EXITS,
ISI_EXITS,
DSI_EXITS,
EMULATED_INST_EXITS,
EMULATED_MTMSRWE_EXITS,
EMULATED_WRTEE_EXITS,
EMULATED_MTSPR_EXITS,
EMULATED_MFSPR_EXITS,
EMULATED_MTMSR_EXITS,
EMULATED_MFMSR_EXITS,
EMULATED_TLBSX_EXITS,
EMULATED_TLBWE_EXITS,
EMULATED_RFI_EXITS,
DEC_EXITS,
EXT_INTR_EXITS,
HALT_WAKEUP,
USR_PR_INST,
FP_UNAVAIL,
DEBUG_EXITS,
TIMEINGUEST,
__NUMBER_OF_KVM_EXIT_TYPES
}; };
struct kvm_vcpu_arch { /* allow access to big endian 32bit upper/lower parts and 64bit var */
/* Unmodified copy of the guest's TLB. */ struct kvmppc_exit_timing {
struct tlbe guest_tlb[PPC44x_TLB_SIZE]; union {
/* TLB that's actually used when the guest is running. */ u64 tv64;
struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; struct {
/* Pages which are referenced in the shadow TLB. */ u32 tbu, tbl;
struct page *shadow_pages[PPC44x_TLB_SIZE]; } tv32;
};
};
/* Track which TLB entries we've modified in the current exit. */ struct kvm_arch {
u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; };
struct kvm_vcpu_arch {
u32 host_stack; u32 host_stack;
u32 host_pid; u32 host_pid;
u32 host_dbcr0; u32 host_dbcr0;
@@ -94,32 +125,32 @@ struct kvm_vcpu_arch {
u32 host_msr; u32 host_msr;
u64 fpr[32]; u64 fpr[32];
u32 gpr[32]; ulong gpr[32];
u32 pc; ulong pc;
u32 cr; u32 cr;
u32 ctr; ulong ctr;
u32 lr; ulong lr;
u32 xer; ulong xer;
u32 msr; ulong msr;
u32 mmucr; u32 mmucr;
u32 sprg0; ulong sprg0;
u32 sprg1; ulong sprg1;
u32 sprg2; ulong sprg2;
u32 sprg3; ulong sprg3;
u32 sprg4; ulong sprg4;
u32 sprg5; ulong sprg5;
u32 sprg6; ulong sprg6;
u32 sprg7; ulong sprg7;
u32 srr0; ulong srr0;
u32 srr1; ulong srr1;
u32 csrr0; ulong csrr0;
u32 csrr1; ulong csrr1;
u32 dsrr0; ulong dsrr0;
u32 dsrr1; ulong dsrr1;
u32 dear; ulong dear;
u32 esr; ulong esr;
u32 dec; u32 dec;
u32 decar; u32 decar;
u32 tbl; u32 tbl;
...@@ -127,7 +158,7 @@ struct kvm_vcpu_arch { ...@@ -127,7 +158,7 @@ struct kvm_vcpu_arch {
u32 tcr; u32 tcr;
u32 tsr; u32 tsr;
u32 ivor[16]; u32 ivor[16];
u32 ivpr; ulong ivpr;
u32 pir; u32 pir;
u32 shadow_pid; u32 shadow_pid;
...@@ -140,9 +171,22 @@ struct kvm_vcpu_arch { ...@@ -140,9 +171,22 @@ struct kvm_vcpu_arch {
u32 dbcr0; u32 dbcr0;
u32 dbcr1; u32 dbcr1;
#ifdef CONFIG_KVM_EXIT_TIMING
struct kvmppc_exit_timing timing_exit;
struct kvmppc_exit_timing timing_last_enter;
u32 last_exit_type;
u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_last_exit;
struct dentry *debugfs_exit_timing;
#endif
u32 last_inst; u32 last_inst;
u32 fault_dear; ulong fault_dear;
u32 fault_esr; ulong fault_esr;
gpa_t paddr_accessed; gpa_t paddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */ u8 io_gpr; /* GPR used as IO source/target */
......
...@@ -29,11 +29,6 @@ ...@@ -29,11 +29,6 @@
#include <linux/kvm_types.h> #include <linux/kvm_types.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
struct kvm_tlb {
struct tlbe guest_tlb[PPC44x_TLB_SIZE];
struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
};
enum emulation_result { enum emulation_result {
EMULATE_DONE, /* no further processing */ EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
...@@ -41,9 +36,6 @@ enum emulation_result { ...@@ -41,9 +36,6 @@ enum emulation_result {
EMULATE_FAIL, /* can't emulate this instruction */ EMULATE_FAIL, /* can't emulate this instruction */
}; };
extern const unsigned char exception_priority[];
extern const unsigned char priority_exception[];
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern char kvmppc_handlers_start[]; extern char kvmppc_handlers_start[];
extern unsigned long kvmppc_handler_len; extern unsigned long kvmppc_handler_len;
...@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_emulate_instruction(struct kvm_run *run, extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu); struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
u64 asid, u32 flags); u64 asid, u32 flags, u32 max_bytes,
extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int gtlb_idx);
gva_t eend, u32 asid);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
/* XXX Book E specific */ /* Core-specific hooks */
extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
{ extern int kvmppc_core_check_processor_compat(void);
unsigned int priority = exception_priority[exception]; extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
set_bit(priority, &vcpu->arch.pending_exceptions); struct kvm_translation *tr);
}
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
{
unsigned int priority = exception_priority[exception]; extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
clear_bit(priority, &vcpu->arch.pending_exceptions); extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
}
extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
/* Helper function for "full" MSR writes. No need to call this if only EE is extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
* changing. */ extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
{ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) struct kvm_interrupt *irq);
kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.msr = new_msr; unsigned int op, int *advance);
extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
if (vcpu->arch.msr & MSR_WE) extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
kvm_vcpu_block(vcpu);
} extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
if (vcpu->arch.pid != new_pid) {
vcpu->arch.pid = new_pid;
vcpu->arch.swap_pid = 1;
}
}
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
......
...@@ -56,6 +56,7 @@ ...@@ -56,6 +56,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern unsigned int tlb_44x_hwater; extern unsigned int tlb_44x_hwater;
extern unsigned int tlb_44x_index;
typedef struct { typedef struct {
unsigned int id; unsigned int id;
......
...@@ -23,9 +23,6 @@ ...@@ -23,9 +23,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
#ifdef CONFIG_KVM
#include <linux/kvm_host.h>
#endif
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#include <linux/time.h> #include <linux/time.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
...@@ -51,6 +48,9 @@ ...@@ -51,6 +48,9 @@
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/alpaca.h> #include <asm/iseries/alpaca.h>
#endif #endif
#ifdef CONFIG_KVM
#include <asm/kvm_44x.h>
#endif
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h" #include "head_booke.h"
...@@ -357,12 +357,10 @@ int main(void) ...@@ -357,12 +357,10 @@ int main(void)
DEFINE(PTE_SIZE, sizeof(pte_t)); DEFINE(PTE_SIZE, sizeof(pte_t));
#ifdef CONFIG_KVM #ifdef CONFIG_KVM
DEFINE(TLBE_BYTES, sizeof(struct tlbe)); DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
...@@ -385,5 +383,16 @@ int main(void) ...@@ -385,5 +383,16 @@ int main(void)
DEFINE(PTE_T_LOG2, PTE_T_LOG2); DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif #endif
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbl));
DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
arch.timing_last_enter.tv32.tbu));
DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
arch.timing_last_enter.tv32.tbl));
#endif
return 0; return 0;
} }
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_44x.h>
#include <asm/kvm_ppc.h>
#include "44x_tlb.h"
/* Note: clearing MSR[DE] just means that the debug interrupt will not be
* delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
* If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
* will be delivered as an "imprecise debug event" (which is indicated by
 * DBSR[IDE]).
*/
static void kvm44x_disable_debug_interrupts(void)
{
mtmsr(mfmsr() & ~MSR_DE);
}
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
kvm44x_disable_debug_interrupts();
mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
mtmsr(vcpu->arch.host_msr);
}
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
struct kvm_guest_debug *dbg = &vcpu->guest_debug;
u32 dbcr0 = 0;
vcpu->arch.host_msr = mfmsr();
kvm44x_disable_debug_interrupts();
/* Save host debug register state. */
vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
/* set registers up for guest */
if (dbg->bp[0]) {
mtspr(SPRN_IAC1, dbg->bp[0]);
dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
}
if (dbg->bp[1]) {
mtspr(SPRN_IAC2, dbg->bp[1]);
dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
}
if (dbg->bp[2]) {
mtspr(SPRN_IAC3, dbg->bp[2]);
dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
}
if (dbg->bp[3]) {
mtspr(SPRN_IAC4, dbg->bp[3]);
dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
}
mtspr(SPRN_DBCR0, dbcr0);
mtspr(SPRN_DBCR1, 0);
mtspr(SPRN_DBCR2, 0);
}
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_44x_tlb_load(vcpu);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_44x_tlb_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
{
int r;
if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
r = 0;
else
r = -ENOTSUPP;
return r;
}
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
int i;
tlbe->tid = 0;
tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
tlbe->word1 = 0;
tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
tlbe++;
tlbe->tid = 0;
tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
tlbe->word1 = 0xef600000;
tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
| PPC44x_TLB_I | PPC44x_TLB_G;
/* Since the guest can directly access the timebase, it must know the
* real timebase frequency. Accordingly, it must see the state of
* CCR1[TCS]. */
vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
vcpu_44x->shadow_refs[i].gtlb_index = -1;
return 0;
}
/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe;
int index;
gva_t eaddr;
u8 pid;
u8 as;
eaddr = tr->linear_address;
pid = (tr->linear_address >> 32) & 0xff;
as = (tr->linear_address >> 40) & 0x1;
index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
if (index == -1) {
tr->valid = 0;
return 0;
}
gtlbe = &vcpu_44x->guest_tlb[index];
tr->physical_address = tlb_xlate(gtlbe, eaddr);
/* XXX what does "writeable" and "usermode" even mean? */
tr->valid = 1;
return 0;
}
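As the decode above shows, this handler treats the translation ioctl's linear_address as a packed AS|PID|EADDR value: the effective address in the low 32 bits, the PID in bits 32-39 and the address-space bit at bit 40. A minimal sketch of how a caller could build that encoding (illustrative only, not part of the patch; the layout is inferred from the unpacking code above):

#include <stdint.h>

/* Pack AS, PID and a 32-bit effective address the way
 * kvmppc_core_vcpu_translate() unpacks them (assumed layout). */
static inline uint64_t pack_44x_xlate_addr(uint8_t as, uint8_t pid, uint32_t eaddr)
{
	return ((uint64_t)(as & 0x1) << 40) |
	       ((uint64_t)pid << 32) |
	       (uint64_t)eaddr;
}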
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_44x *vcpu_44x;
struct kvm_vcpu *vcpu;
int err;
vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu_44x) {
err = -ENOMEM;
goto out;
}
vcpu = &vcpu_44x->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_vcpu;
return vcpu;
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
out:
return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
static int kvmppc_44x_init(void)
{
int r;
r = kvmppc_booke_init();
if (r)
return r;
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
}
static void kvmppc_44x_exit(void)
{
kvmppc_booke_exit();
}
module_init(kvmppc_44x_init);
module_exit(kvmppc_44x_exit);
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include <asm/kvm_ppc.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/disassemble.h>
#include <asm/kvm_44x.h>
#include "timing.h"
#include "booke.h"
#include "44x_tlb.h"
#define OP_RFI 19
#define XOP_RFI 50
#define XOP_MFMSR 83
#define XOP_WRTEE 131
#define XOP_MTMSR 146
#define XOP_WRTEEI 163
#define XOP_MFDCR 323
#define XOP_MTDCR 451
#define XOP_TLBSX 914
#define XOP_ICCCI 966
#define XOP_TLBWE 978
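The switch below keys on the primary and extended opcode fields of the trapped instruction. The accessors come from asm/disassemble.h; on PowerPC the primary opcode is the top six bits of the word and, for these 19/31 forms, the extended opcode sits in bits 21-30. A sketch of the two extractors the XOP_* constants above are matched against (illustrative; the real definitions live in asm/disassemble.h):

/* Assumes kernel types (u32) are available. */
static inline unsigned int get_op(u32 inst)
{
	return inst >> 26;		/* primary opcode: bits 0-5 */
}

static inline unsigned int get_xop(u32 inst)
{
	return (inst >> 1) & 0x3ff;	/* extended opcode: bits 21-30 */
}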
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.srr0;
kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int dcrn;
int ra;
int rb;
int rc;
int rs;
int rt;
int ws;
switch (get_op(inst)) {
case OP_RFI:
switch (get_xop(inst)) {
case XOP_RFI:
kvmppc_emul_rfi(vcpu);
kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
*advance = 0;
break;
default:
emulated = EMULATE_FAIL;
break;
}
break;
case 31:
switch (get_xop(inst)) {
case XOP_MFMSR:
rt = get_rt(inst);
vcpu->arch.gpr[rt] = vcpu->arch.msr;
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
break;
case XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (vcpu->arch.gpr[rs] & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
case XOP_WRTEEI:
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (inst & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
case XOP_MFDCR:
dcrn = get_dcrn(inst);
rt = get_rt(inst);
/* The guest may access CPR0 registers to determine the timebase
* frequency, and it must know the real host frequency because it
* can directly access the timebase registers.
*
* It would be possible to emulate those accesses in userspace,
* but userspace can really only figure out the end frequency.
* We could decompose that into the factors that compute it, but
* that's tricky math, and it's easier to just report the real
* CPR0 values.
*/
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
break;
case DCRN_CPR0_CONFIG_DATA:
local_irq_disable();
mtdcr(DCRN_CPR0_CONFIG_ADDR,
vcpu->arch.cpr0_cfgaddr);
vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
local_irq_enable();
break;
default:
run->dcr.dcrn = dcrn;
run->dcr.data = 0;
run->dcr.is_write = 0;
vcpu->arch.io_gpr = rt;
vcpu->arch.dcr_needed = 1;
kvmppc_account_exit(vcpu, DCR_EXITS);
emulated = EMULATE_DO_DCR;
}
break;
case XOP_MTDCR:
dcrn = get_dcrn(inst);
rs = get_rs(inst);
/* emulate some access in kernel */
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
break;
default:
run->dcr.dcrn = dcrn;
run->dcr.data = vcpu->arch.gpr[rs];
run->dcr.is_write = 1;
vcpu->arch.dcr_needed = 1;
kvmppc_account_exit(vcpu, DCR_EXITS);
emulated = EMULATE_DO_DCR;
}
break;
case XOP_TLBWE:
ra = get_ra(inst);
rs = get_rs(inst);
ws = get_ws(inst);
emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
break;
case XOP_TLBSX:
rt = get_rt(inst);
ra = get_ra(inst);
rb = get_rb(inst);
rc = get_rc(inst);
emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
break;
case XOP_ICCCI:
break;
default:
emulated = EMULATE_FAIL;
}
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
switch (sprn) {
case SPRN_MMUCR:
vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
case SPRN_PID:
kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
case SPRN_CCR0:
vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
case SPRN_CCR1:
vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
case SPRN_DEAR:
vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
case SPRN_ESR:
vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
case SPRN_DBCR1:
vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
case SPRN_TSR:
vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
case SPRN_TCR:
vcpu->arch.tcr = vcpu->arch.gpr[rs];
kvmppc_emulate_dec(vcpu);
break;
/* Note: SPRG4-7 are user-readable. These values are
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG5:
vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG6:
vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG7:
vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
case SPRN_IVPR:
vcpu->arch.ivpr = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR0:
vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR1:
vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR2:
vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR3:
vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR4:
vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR5:
vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR6:
vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR7:
vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR8:
vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR9:
vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR10:
vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR11:
vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR12:
vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR13:
vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR14:
vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR15:
vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
break;
default:
return EMULATE_FAIL;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
return EMULATE_DONE;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
switch (sprn) {
/* 440 */
case SPRN_MMUCR:
vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
case SPRN_CCR0:
vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
case SPRN_CCR1:
vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
/* Book E */
case SPRN_PID:
vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
case SPRN_IVPR:
vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
case SPRN_DEAR:
vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
case SPRN_ESR:
vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
case SPRN_DBCR0:
vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
case SPRN_DBCR1:
vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
case SPRN_IVOR0:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
break;
case SPRN_IVOR1:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
break;
case SPRN_IVOR2:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
break;
case SPRN_IVOR3:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
break;
case SPRN_IVOR4:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
break;
case SPRN_IVOR5:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
break;
case SPRN_IVOR6:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
break;
case SPRN_IVOR7:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
break;
case SPRN_IVOR8:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
break;
case SPRN_IVOR9:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
break;
case SPRN_IVOR10:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
break;
case SPRN_IVOR11:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
break;
case SPRN_IVOR12:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
break;
case SPRN_IVOR13:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
break;
case SPRN_IVOR14:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
break;
case SPRN_IVOR15:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
default:
return EMULATE_FAIL;
}
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
return EMULATE_DONE;
}
...@@ -25,48 +25,52 @@ ...@@ -25,48 +25,52 @@
extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned int pid, unsigned int as); unsigned int pid, unsigned int as);
extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
u8 rc);
extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
/* TLB helper functions */ /* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe) static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
{ {
return (tlbe->word0 >> 4) & 0xf; return (tlbe->word0 >> 4) & 0xf;
} }
static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe)
{ {
return tlbe->word0 & 0xfffffc00; return tlbe->word0 & 0xfffffc00;
} }
static inline gva_t get_tlb_bytes(const struct tlbe *tlbe) static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe)
{ {
unsigned int pgsize = get_tlb_size(tlbe); unsigned int pgsize = get_tlb_size(tlbe);
return 1 << 10 << (pgsize << 1); return 1 << 10 << (pgsize << 1);
} }
static inline gva_t get_tlb_end(const struct tlbe *tlbe) static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe)
{ {
return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
} }
static inline u64 get_tlb_raddr(const struct tlbe *tlbe) static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe)
{ {
u64 word1 = tlbe->word1; u64 word1 = tlbe->word1;
return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
} }
static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe)
{ {
return tlbe->tid & 0xff; return tlbe->tid & 0xff;
} }
static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe)
{ {
return (tlbe->word0 >> 8) & 0x1; return (tlbe->word0 >> 8) & 0x1;
} }
static inline unsigned int get_tlb_v(const struct tlbe *tlbe) static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe)
{ {
return (tlbe->word0 >> 9) & 0x1; return (tlbe->word0 >> 9) & 0x1;
} }
...@@ -81,7 +85,7 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) ...@@ -81,7 +85,7 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
return (vcpu->arch.mmucr >> 16) & 0x1; return (vcpu->arch.mmucr >> 16) & 0x1;
} }
static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr) static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr)
{ {
unsigned int pgmask = get_tlb_bytes(tlbe) - 1; unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
......
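The size helpers in this header encode the 440 TLB page-size field as bytes = 1 KiB << (2 * SIZE), i.e. 1 KiB times 4^SIZE. A quick worked check of that formula (a sketch, not part of the patch):

/* Mirrors get_tlb_bytes(): size 0 -> 1 KiB, 2 -> 16 KiB,
 * 4 -> 256 KiB, and a 16 MiB page corresponds to size 7. */
static inline unsigned long tlb_44x_size_to_bytes(unsigned int size)
{
	return 1UL << 10 << (size << 1);
}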
...@@ -15,27 +15,33 @@ menuconfig VIRTUALIZATION ...@@ -15,27 +15,33 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION if VIRTUALIZATION
config KVM config KVM
bool "Kernel-based Virtual Machine (KVM) support" bool
depends on 44x && EXPERIMENTAL
select PREEMPT_NOTIFIERS select PREEMPT_NOTIFIERS
select ANON_INODES select ANON_INODES
# We can only run on Book E hosts so far
select KVM_BOOKE_HOST config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
select KVM
---help--- ---help---
Support hosting virtualized guest machines. You will also Support running unmodified 440 guest kernels in virtual machines on
need to select one or more of the processor modules below. 440 host processors.
This module provides access to the hardware capabilities through This module provides access to the hardware capabilities through
a character device node named /dev/kvm. a character device node named /dev/kvm.
If unsure, say N. If unsure, say N.
config KVM_BOOKE_HOST config KVM_EXIT_TIMING
bool "KVM host support for Book E PowerPC processors" bool "Detailed exit timing"
depends on KVM && 44x depends on KVM
---help--- ---help---
Provides host support for KVM on Book E PowerPC processors. Currently Calculate elapsed time for every exit/enter cycle. A per-vcpu
this works on 440 processors only. report is available in debugfs kvm/vm#_vcpu#_timing.
The overhead is relatively small, however it is not recommended for
production environments.
If unsure, say N.
config KVM_TRACE config KVM_TRACE
bool "KVM trace support" bool "KVM trace support"
......
...@@ -8,10 +8,16 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) ...@@ -8,10 +8,16 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o kvm-objs := $(common-objs-y) powerpc.o emulate.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM) += kvm.o
AFLAGS_booke_interrupts.o := -I$(obj) AFLAGS_booke_interrupts.o := -I$(obj)
kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o kvm-440-objs := \
obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o booke.o \
booke_interrupts.o \
44x.o \
44x_tlb.o \
44x_emulate.o
obj-$(CONFIG_KVM_440) += kvm-440.o
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#ifndef __KVM_BOOKE_H__
#define __KVM_BOOKE_H__
#include <linux/types.h>
#include <linux/kvm_host.h>
#include "timing.h"
/* interrupt priority ordering */
#define BOOKE_IRQPRIO_DATA_STORAGE 0
#define BOOKE_IRQPRIO_INST_STORAGE 1
#define BOOKE_IRQPRIO_ALIGNMENT 2
#define BOOKE_IRQPRIO_PROGRAM 3
#define BOOKE_IRQPRIO_FP_UNAVAIL 4
#define BOOKE_IRQPRIO_SYSCALL 5
#define BOOKE_IRQPRIO_AP_UNAVAIL 6
#define BOOKE_IRQPRIO_DTLB_MISS 7
#define BOOKE_IRQPRIO_ITLB_MISS 8
#define BOOKE_IRQPRIO_MACHINE_CHECK 9
#define BOOKE_IRQPRIO_DEBUG 10
#define BOOKE_IRQPRIO_CRITICAL 11
#define BOOKE_IRQPRIO_WATCHDOG 12
#define BOOKE_IRQPRIO_EXTERNAL 13
#define BOOKE_IRQPRIO_FIT 14
#define BOOKE_IRQPRIO_DECREMENTER 15
/* Helper function for "full" MSR writes. No need to call this if only EE is
* changing. */
static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
vcpu->arch.msr = new_msr;
if (vcpu->arch.msr & MSR_WE) {
kvm_vcpu_block(vcpu);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
}
}
#endif /* __KVM_BOOKE_H__ */
...@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host) ...@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host)
li r6, 1 li r6, 1
slw r6, r6, r5 slw r6, r6, r5
#ifdef CONFIG_KVM_EXIT_TIMING
/* save exit time */
1:
mfspr r7, SPRN_TBRU
mfspr r8, SPRN_TBRL
mfspr r9, SPRN_TBRU
cmpw r9, r7
bne 1b
stw r8, VCPU_TIMING_EXIT_TBL(r4)
stw r9, VCPU_TIMING_EXIT_TBU(r4)
#endif
/* Save the faulting instruction and all GPRs for emulation. */ /* Save the faulting instruction and all GPRs for emulation. */
andi. r7, r6, NEED_INST_MASK andi. r7, r6, NEED_INST_MASK
beq ..skip_inst_copy beq ..skip_inst_copy
...@@ -335,54 +347,6 @@ lightweight_exit: ...@@ -335,54 +347,6 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4) lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3 mtspr SPRN_PID, r3
/* Prevent all asynchronous TLB updates. */
mfmsr r5
lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
andc r6, r5, r6
mtmsr r6
/* Load the guest mappings, leaving the host's "pinned" kernel mappings
* in place. */
mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
li r5, PPC44x_TLB_SIZE
lis r5, tlb_44x_hwater@ha
lwz r5, tlb_44x_hwater@l(r5)
mtctr r5
addi r9, r4, VCPU_SHADOW_TLB
addi r5, r4, VCPU_SHADOW_MOD
li r3, 0
1:
lbzx r7, r3, r5
cmpwi r7, 0
beq 3f
/* Load guest entry. */
mulli r11, r3, TLBE_BYTES
add r11, r11, r9
lwz r7, 0(r11)
mtspr SPRN_MMUCR, r7
lwz r7, 4(r11)
tlbwe r7, r3, PPC44x_TLB_PAGEID
lwz r7, 8(r11)
tlbwe r7, r3, PPC44x_TLB_XLAT
lwz r7, 12(r11)
tlbwe r7, r3, PPC44x_TLB_ATTRIB
3:
addi r3, r3, 1 /* Increment index. */
bdnz 1b
mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
/* Clear bitmap of modified TLB entries */
li r5, PPC44x_TLB_SIZE>>2
mtctr r5
addi r5, r4, VCPU_SHADOW_MOD - 4
li r6, 0
1:
stwu r6, 4(r5)
bdnz 1b
iccci 0, 0 /* XXX hack */ iccci 0, 0 /* XXX hack */
/* Load some guest volatiles. */ /* Load some guest volatiles. */
...@@ -423,6 +387,18 @@ lightweight_exit: ...@@ -423,6 +387,18 @@ lightweight_exit:
lwz r3, VCPU_SPRG7(r4) lwz r3, VCPU_SPRG7(r4)
mtspr SPRN_SPRG7, r3 mtspr SPRN_SPRG7, r3
#ifdef CONFIG_KVM_EXIT_TIMING
/* save enter time */
1:
mfspr r6, SPRN_TBRU
mfspr r7, SPRN_TBRL
mfspr r8, SPRN_TBRU
cmpw r8, r6
bne 1b
stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif
/* Finish loading guest volatiles and jump to guest. */ /* Finish loading guest volatiles and jump to guest. */
lwz r3, VCPU_CTR(r4) lwz r3, VCPU_CTR(r4)
mtctr r3 mtctr r3
......
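The timing hooks added above sample the 64-bit timebase with the usual TBU/TBL/TBU sequence: read the upper half, the lower half, then the upper half again, and retry if the upper half changed in between. The same idea in C, matching the tv32/tv64 union in struct kvmppc_exit_timing (a sketch assuming kernel headers such as asm/reg.h; not part of the patch):

static inline u64 read_timebase_consistent(void)
{
	u32 tbu, tbl, tbu2;

	do {
		tbu  = mfspr(SPRN_TBRU);	/* upper half */
		tbl  = mfspr(SPRN_TBRL);	/* lower half */
		tbu2 = mfspr(SPRN_TBRU);	/* upper half again */
	} while (tbu != tbu2);			/* low word wrapped, retry */

	return ((u64)tbu << 32) | tbl;
}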
...@@ -28,9 +28,9 @@ ...@@ -28,9 +28,9 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/kvm_ppc.h> #include <asm/kvm_ppc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h" #include "../mm/mmu_decl.h"
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{ {
return gfn; return gfn;
...@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void) ...@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void)
void kvm_arch_check_processor_compat(void *rtn) void kvm_arch_check_processor_compat(void *rtn)
{ {
int r; *(int *)rtn = kvmppc_core_check_processor_compat();
if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
r = 0;
else
r = -ENOTSUPP;
*(int *)rtn = r;
} }
struct kvm *kvm_arch_create_vm(void) struct kvm *kvm_arch_create_vm(void)
...@@ -144,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext) ...@@ -144,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext)
int r; int r;
switch (ext) { switch (ext) {
case KVM_CAP_USER_MEMORY:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO: case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET; r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break; break;
...@@ -179,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm) ...@@ -179,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
int err; vcpu = kvmppc_core_vcpu_create(kvm, id);
kvmppc_create_vcpu_debugfs(vcpu, id);
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
goto out;
}
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_vcpu;
return vcpu; return vcpu;
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
return ERR_PTR(err);
} }
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{ {
kvm_vcpu_uninit(vcpu); kvmppc_remove_vcpu_debugfs(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu); kvmppc_core_vcpu_free(vcpu);
} }
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
...@@ -212,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) ...@@ -212,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{ {
unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER]; return kvmppc_core_pending_dec(vcpu);
return test_bit(priority, &vcpu->arch.pending_exceptions);
} }
static void kvmppc_decrementer_func(unsigned long data) static void kvmppc_decrementer_func(unsigned long data)
{ {
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER); kvmppc_core_queue_dec(vcpu);
if (waitqueue_active(&vcpu->wq)) { if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq); wake_up_interruptible(&vcpu->wq);
...@@ -242,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) ...@@ -242,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
kvmppc_core_destroy_mmu(vcpu); kvmppc_core_destroy_mmu(vcpu);
} }
/* Note: clearing MSR[DE] just means that the debug interrupt will not be
* delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
* If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
* will be delivered as an "imprecise debug event" (which is indicated by
* DBSR[IDE].
*/
static void kvmppc_disable_debug_interrupts(void)
{
mtmsr(mfmsr() & ~MSR_DE);
}
static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
{
kvmppc_disable_debug_interrupts();
mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
mtmsr(vcpu->arch.host_msr);
}
static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
{
struct kvm_guest_debug *dbg = &vcpu->guest_debug;
u32 dbcr0 = 0;
vcpu->arch.host_msr = mfmsr();
kvmppc_disable_debug_interrupts();
/* Save host debug register state. */
vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
/* set registers up for guest */
if (dbg->bp[0]) {
mtspr(SPRN_IAC1, dbg->bp[0]);
dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
}
if (dbg->bp[1]) {
mtspr(SPRN_IAC2, dbg->bp[1]);
dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
}
if (dbg->bp[2]) {
mtspr(SPRN_IAC3, dbg->bp[2]);
dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
}
if (dbg->bp[3]) {
mtspr(SPRN_IAC4, dbg->bp[3]);
dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
}
mtspr(SPRN_DBCR0, dbcr0);
mtspr(SPRN_DBCR1, 0);
mtspr(SPRN_DBCR2, 0);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ {
int i;
if (vcpu->guest_debug.enabled) if (vcpu->guest_debug.enabled)
kvmppc_load_guest_debug_registers(vcpu); kvmppc_core_load_guest_debugstate(vcpu);
/* Mark every guest entry in the shadow TLB entry modified, so that they kvmppc_core_vcpu_load(vcpu, cpu);
* will all be reloaded on the next vcpu run (instead of being
* demand-faulted). */
for (i = 0; i <= tlb_44x_hwater; i++)
kvmppc_tlbe_set_modified(vcpu, i);
} }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{ {
if (vcpu->guest_debug.enabled) if (vcpu->guest_debug.enabled)
kvmppc_restore_host_debug_state(vcpu); kvmppc_core_load_host_debugstate(vcpu);
/* Don't leave guest TLB entries resident when being de-scheduled. */ /* Don't leave guest TLB entries resident when being de-scheduled. */
/* XXX It would be nice to differentiate between heavyweight exit and /* XXX It would be nice to differentiate between heavyweight exit and
* sched_out here, since we could avoid the TLB flush for heavyweight * sched_out here, since we could avoid the TLB flush for heavyweight
* exits. */ * exits. */
_tlbil_all(); _tlbil_all();
kvmppc_core_vcpu_put(vcpu);
} }
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
...@@ -355,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, ...@@ -355,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
struct kvm_run *run) struct kvm_run *run)
{ {
u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
*gpr = run->dcr.data; *gpr = run->dcr.data;
} }
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run) struct kvm_run *run)
{ {
u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
if (run->mmio.len > sizeof(*gpr)) { if (run->mmio.len > sizeof(*gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
...@@ -460,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -460,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu->arch.dcr_needed = 0; vcpu->arch.dcr_needed = 0;
} }
kvmppc_check_and_deliver_interrupts(vcpu); kvmppc_core_deliver_interrupts(vcpu);
local_irq_disable(); local_irq_disable();
kvm_guest_enter(); kvm_guest_enter();
...@@ -478,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -478,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{ {
kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL); kvmppc_core_queue_external(vcpu, irq);
if (waitqueue_active(&vcpu->wq)) { if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq); wake_up_interruptible(&vcpu->wq);
......
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
*/
#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm-generic/div64.h>
#include "timing.h"
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
int i;
/* pause guest execution to avoid concurrent updates */
local_irq_disable();
mutex_lock(&vcpu->mutex);
vcpu->arch.last_exit_type = 0xDEAD;
for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
vcpu->arch.timing_count_type[i] = 0;
vcpu->arch.timing_max_duration[i] = 0;
vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
vcpu->arch.timing_sum_duration[i] = 0;
vcpu->arch.timing_sum_quad_duration[i] = 0;
}
vcpu->arch.timing_last_exit = 0;
vcpu->arch.timing_exit.tv64 = 0;
vcpu->arch.timing_last_enter.tv64 = 0;
mutex_unlock(&vcpu->mutex);
local_irq_enable();
}
static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
u64 old;
do_div(duration, tb_ticks_per_usec);
if (unlikely(duration > 0xFFFFFFFF)) {
printk(KERN_ERR"%s - duration too big -> overflow"
" duration %lld type %d exit #%d\n",
__func__, duration, type,
vcpu->arch.timing_count_type[type]);
return;
}
vcpu->arch.timing_count_type[type]++;
/* sum */
old = vcpu->arch.timing_sum_duration[type];
vcpu->arch.timing_sum_duration[type] += duration;
if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
printk(KERN_ERR"%s - wrap adding sum of durations"
" old %lld new %lld type %d exit # of type %d\n",
__func__, old, vcpu->arch.timing_sum_duration[type],
type, vcpu->arch.timing_count_type[type]);
}
/* square sum */
old = vcpu->arch.timing_sum_quad_duration[type];
vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
printk(KERN_ERR"%s - wrap adding sum of squared durations"
" old %lld new %lld type %d exit # of type %d\n",
__func__, old,
vcpu->arch.timing_sum_quad_duration[type],
type, vcpu->arch.timing_count_type[type]);
}
/* set min/max */
if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
vcpu->arch.timing_min_duration[type] = duration;
if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
vcpu->arch.timing_max_duration[type] = duration;
}
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
u64 exit = vcpu->arch.timing_last_exit;
u64 enter = vcpu->arch.timing_last_enter.tv64;
/* save exit time, used next exit when the reenter time is known */
vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
return; /* skip incomplete cycle (e.g. after reset) */
/* update statistics for average and standard deviation */
add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
/* enter -> timing_last_exit is time spent in guest - log this too */
add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
TIMEINGUEST);
}
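The per-type count, sum and sum of squares kept above are enough to recover the mean and standard deviation offline from the debugfs report. A minimal post-processing sketch (userspace, hypothetical helper, not part of the patch; durations are in microseconds after the do_div() above):

#include <math.h>

static void exit_timing_mean_stddev(unsigned long long count,
				    unsigned long long sum,
				    unsigned long long sum_sq,
				    double *mean, double *stddev)
{
	if (!count) {
		*mean = *stddev = 0.0;
		return;
	}
	*mean = (double)sum / count;
	*stddev = sqrt((double)sum_sq / count - *mean * *mean);
}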
static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
[MMIO_EXITS] = "MMIO",
[DCR_EXITS] = "DCR",
[SIGNAL_EXITS] = "SIGNAL",
[ITLB_REAL_MISS_EXITS] = "ITLBREAL",
[ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
[DTLB_REAL_MISS_EXITS] = "DTLBREAL",
[DTLB_VIRT_MISS_EXITS] = "DTLBVIRT",
[SYSCALL_EXITS] = "SYSCALL",
[ISI_EXITS] = "ISI",
[DSI_EXITS] = "DSI",
[EMULATED_INST_EXITS] = "EMULINST",
[EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT",
[EMULATED_WRTEE_EXITS] = "EMUL_WRTEE",
[EMULATED_MTSPR_EXITS] = "EMUL_MTSPR",
[EMULATED_MFSPR_EXITS] = "EMUL_MFSPR",
[EMULATED_MTMSR_EXITS] = "EMUL_MTMSR",
[EMULATED_MFMSR_EXITS] = "EMUL_MFMSR",
[EMULATED_TLBSX_EXITS] = "EMUL_TLBSX",
[EMULATED_TLBWE_EXITS] = "EMUL_TLBWE",
[EMULATED_RFI_EXITS] = "EMUL_RFI",
[DEC_EXITS] = "DEC",
[EXT_INTR_EXITS] = "EXTINT",
[HALT_WAKEUP] = "HALT",
[USR_PR_INST] = "USR_PR_INST",
[FP_UNAVAIL] = "FP_UNAVAIL",
[DEBUG_EXITS] = "DEBUG",
[TIMEINGUEST] = "TIMEINGUEST"
};
static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
struct kvm_vcpu *vcpu = m->private;
int i;
seq_printf(m, "%s", "type count min max sum sum_squared\n");
for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
kvm_exit_names[i],
vcpu->arch.timing_count_type[i],
vcpu->arch.timing_min_duration[i],
vcpu->arch.timing_max_duration[i],
vcpu->arch.timing_sum_duration[i],
vcpu->arch.timing_sum_quad_duration[i]);
}
return 0;
}
/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
int err = -EINVAL;
char c;
if (count > 1) {
goto done;
}
if (get_user(c, user_buf)) {
err = -EFAULT;
goto done;
}
if (c == 'c') {
struct seq_file *seqf = (struct seq_file *)file->private_data;
struct kvm_vcpu *vcpu = seqf->private;
/* Write does not affect our buffers previously generated with
* show. seq_file is locked here to prevent races of init with
* a show call */
mutex_lock(&seqf->lock);
kvmppc_init_timing_stats(vcpu);
mutex_unlock(&seqf->lock);
err = count;
}
done:
return err;
}
static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}
static struct file_operations kvmppc_exit_timing_fops = {
.owner = THIS_MODULE,
.open = kvmppc_exit_timing_open,
.read = seq_read,
.write = kvmppc_exit_timing_write,
.llseek = seq_lseek,
.release = single_release,
};
void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
{
static char dbg_fname[50];
struct dentry *debugfs_file;
snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
current->pid, id);
debugfs_file = debugfs_create_file(dbg_fname, 0666,
kvm_debugfs_dir, vcpu,
&kvmppc_exit_timing_fops);
if (!debugfs_file) {
printk(KERN_ERR"%s: error creating debugfs file %s\n",
__func__, dbg_fname);
return;
}
vcpu->arch.debugfs_exit_timing = debugfs_file;
}
void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.debugfs_exit_timing) {
debugfs_remove(vcpu->arch.debugfs_exit_timing);
vcpu->arch.debugfs_exit_timing = NULL;
}
}
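The file created above appears under the kvm debugfs directory (typically /sys/kernel/debug/kvm/vm<pid>_vcpu<id>_timing, per the snprintf() format). Reading it returns the table produced by kvmppc_exit_timing_show(); writing the single character 'c' resets the statistics. A small userspace sketch of that interaction (path and error handling are illustrative, not part of the patch):

#include <stdio.h>

static int dump_and_clear_exit_timing(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);		/* print the per-exit-type table */
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return -1;
	fputc('c', f);				/* a single 'c' clears the counters */
	fclose(f);
	return 0;
}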
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
*/
#ifndef __POWERPC_KVM_EXITTIMING_H__
#define __POWERPC_KVM_EXITTIMING_H__
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
#ifdef CONFIG_KVM_EXIT_TIMING
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
{
vcpu->arch.last_exit_type = type;
}
#else
/* if exit timing is not configured there is no need to build the c file */
static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
unsigned int id) {}
static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
#endif /* CONFIG_KVM_EXIT_TIMING */
/* account the exit in kvm_stats */
static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
{
/* type has to be known at build time for optimization */
BUILD_BUG_ON(__builtin_constant_p(type));
switch (type) {
case EXT_INTR_EXITS:
vcpu->stat.ext_intr_exits++;
break;
case DEC_EXITS:
vcpu->stat.dec_exits++;
break;
case EMULATED_INST_EXITS:
vcpu->stat.emulated_inst_exits++;
break;
case DCR_EXITS:
vcpu->stat.dcr_exits++;
break;
case DSI_EXITS:
vcpu->stat.dsi_exits++;
break;
case ISI_EXITS:
vcpu->stat.isi_exits++;
break;
case SYSCALL_EXITS:
vcpu->stat.syscall_exits++;
break;
case DTLB_REAL_MISS_EXITS:
vcpu->stat.dtlb_real_miss_exits++;
break;
case DTLB_VIRT_MISS_EXITS:
vcpu->stat.dtlb_virt_miss_exits++;
break;
case MMIO_EXITS:
vcpu->stat.mmio_exits++;
break;
case ITLB_REAL_MISS_EXITS:
vcpu->stat.itlb_real_miss_exits++;
break;
case ITLB_VIRT_MISS_EXITS:
vcpu->stat.itlb_virt_miss_exits++;
break;
case SIGNAL_EXITS:
vcpu->stat.signal_exits++;
break;
}
}
/* wrapper to set exit time and account for it in kvm_stats */
static inline void kvmppc_account_exit(struct kvm_vcpu *vcpu, int type)
{
kvmppc_set_exit_type(vcpu, type);
kvmppc_account_exit_stat(vcpu, type);
}
#endif /* __POWERPC_KVM_EXITTIMING_H__ */
...@@ -113,8 +113,6 @@ long kvm_arch_dev_ioctl(struct file *filp, ...@@ -113,8 +113,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
int kvm_dev_ioctl_check_extension(long ext) int kvm_dev_ioctl_check_extension(long ext)
{ {
switch (ext) { switch (ext) {
case KVM_CAP_USER_MEMORY:
return 1;
default: default:
return 0; return 0;
} }
...@@ -185,8 +183,6 @@ struct kvm *kvm_arch_create_vm(void) ...@@ -185,8 +183,6 @@ struct kvm *kvm_arch_create_vm(void)
debug_register_view(kvm->arch.dbf, &debug_sprintf_view); debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created"); VM_EVENT(kvm, 3, "%s", "vm created");
try_module_get(THIS_MODULE);
return kvm; return kvm;
out_nodbf: out_nodbf:
free_page((unsigned long)(kvm->arch.sca)); free_page((unsigned long)(kvm->arch.sca));
...@@ -196,13 +192,33 @@ struct kvm *kvm_arch_create_vm(void) ...@@ -196,13 +192,33 @@ struct kvm *kvm_arch_create_vm(void)
return ERR_PTR(rc); return ERR_PTR(rc);
} }
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
free_page((unsigned long)(vcpu->arch.sie_block));
kvm_vcpu_uninit(vcpu);
kfree(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_destroy(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
}
void kvm_arch_destroy_vm(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm)
{ {
debug_unregister(kvm->arch.dbf); kvm_free_vcpus(kvm);
kvm_free_physmem(kvm); kvm_free_physmem(kvm);
free_page((unsigned long)(kvm->arch.sca)); free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
kfree(kvm); kfree(kvm);
module_put(THIS_MODULE);
} }
/* Section: vcpu related */ /* Section: vcpu related */
...@@ -213,8 +229,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -213,8 +229,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{ {
/* kvm common code refers to this, but does'nt call it */ /* Nothing todo */
BUG();
} }
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
...@@ -308,8 +323,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, ...@@ -308,8 +323,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
vcpu->arch.sie_block); vcpu->arch.sie_block);
try_module_get(THIS_MODULE);
return vcpu; return vcpu;
out_free_cpu: out_free_cpu:
kfree(vcpu); kfree(vcpu);
...@@ -317,14 +330,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, ...@@ -317,14 +330,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
return ERR_PTR(rc); return ERR_PTR(rc);
} }
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
free_page((unsigned long)(vcpu->arch.sie_block));
kfree(vcpu);
module_put(THIS_MODULE);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{ {
/* kvm common code refers to this, but never calls it */ /* kvm common code refers to this, but never calls it */
......
...@@ -123,6 +123,7 @@ struct decode_cache { ...@@ -123,6 +123,7 @@ struct decode_cache {
u8 ad_bytes; u8 ad_bytes;
u8 rex_prefix; u8 rex_prefix;
struct operand src; struct operand src;
struct operand src2;
struct operand dst; struct operand dst;
bool has_seg_override; bool has_seg_override;
u8 seg_override; u8 seg_override;
...@@ -146,16 +147,12 @@ struct x86_emulate_ctxt { ...@@ -146,16 +147,12 @@ struct x86_emulate_ctxt {
/* Register state before/after emulation. */ /* Register state before/after emulation. */
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
/* Linear faulting address (if emulating a page-faulting instruction) */
unsigned long eflags; unsigned long eflags;
/* Emulated execution mode, represented by an X86EMUL_MODE value. */ /* Emulated execution mode, represented by an X86EMUL_MODE value. */
int mode; int mode;
u32 cs_base; u32 cs_base;
/* decode cache */ /* decode cache */
struct decode_cache decode; struct decode_cache decode;
}; };
...@@ -170,7 +167,7 @@ struct x86_emulate_ctxt { ...@@ -170,7 +167,7 @@ struct x86_emulate_ctxt {
#define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
/* Host execution mode. */ /* Host execution mode. */
#if defined(__i386__) #if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64) #elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
......
...@@ -57,6 +57,31 @@ struct mtrr_gentry { ...@@ -57,6 +57,31 @@ struct mtrr_gentry {
}; };
#endif /* !__i386__ */ #endif /* !__i386__ */
struct mtrr_var_range {
u32 base_lo;
u32 base_hi;
u32 mask_lo;
u32 mask_hi;
};
/* In the Intel processor's MTRR interface, the MTRR type is always held in
an 8 bit field: */
typedef u8 mtrr_type;
#define MTRR_NUM_FIXED_RANGES 88
#define MTRR_MAX_VAR_RANGES 256
struct mtrr_state_type {
struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
unsigned char enabled;
unsigned char have_fixed;
mtrr_type def_type;
};
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
/* These are the various ioctls */ /* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
......
...@@ -14,14 +14,6 @@ ...@@ -14,14 +14,6 @@
#include <asm/pat.h> #include <asm/pat.h>
#include "mtrr.h" #include "mtrr.h"
struct mtrr_state {
struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
unsigned char have_fixed;
mtrr_type def_type;
};
struct fixed_range_block { struct fixed_range_block {
int base_msr; /* start address of an MTRR block */ int base_msr; /* start address of an MTRR block */
int ranges; /* number of MTRRs in this block */ int ranges; /* number of MTRRs in this block */
...@@ -35,10 +27,12 @@ static struct fixed_range_block fixed_range_blocks[] = { ...@@ -35,10 +27,12 @@ static struct fixed_range_block fixed_range_blocks[] = {
}; };
static unsigned long smp_changes_mask; static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set; static int mtrr_state_set;
u64 mtrr_tom2; u64 mtrr_tom2;
struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
#undef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr." #define MODULE_PARAM_PREFIX "mtrr."
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/msr.h> #include <asm/msr.h>
#include "svm.h" #include <asm/svm.h>
static const u32 host_save_user_msrs[] = { static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......