Commit b209749f authored by Avi Kivity

KVM: local APIC TPR access reporting facility

Add a facility to report on accesses to the local apic tpr even if the
local apic is emulated in the kernel.  This is basically a hack that
allows userspace to patch Windows, which tends to bang on the tpr a lot.
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 565f1fbd
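
From userspace, the facility is driven per-vcpu: enable it with the new KVM_TPR_ACCESS_REPORTING ioctl (advertised by KVM_CAP_VAPIC), then watch for KVM_EXIT_TPR_ACCESS exits from KVM_RUN, which carry the guest rip and whether the access was a write. A minimal consumer sketch, not part of this commit; it assumes vcpu_fd and the mmap'ed struct kvm_run were set up in the usual way, and the helper names are illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>

/* Illustrative helpers; only the ioctl, the struct and the exit reason
 * used below come from this commit's ABI. */
static int enable_tpr_reporting(int vcpu_fd)
{
	struct kvm_tpr_access_ctl tac;

	memset(&tac, 0, sizeof tac);
	tac.enabled = 1;		/* non-zero flags are rejected with -EINVAL */
	return ioctl(vcpu_fd, KVM_TPR_ACCESS_REPORTING, &tac);
}

static void handle_tpr_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_TPR_ACCESS)
		return;
	/* This is where userspace would patch the guest's TPR accesses. */
	printf("tpr %s at rip 0x%llx\n",
	       run->tpr_access.is_write ? "write" : "read",
	       (unsigned long long)run->tpr_access.rip);
}
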
@@ -551,6 +551,23 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	return tmcct;
 }
 
+static void __report_tpr_access(struct kvm_lapic *apic, bool write)
+{
+	struct kvm_vcpu *vcpu = apic->vcpu;
+	struct kvm_run *run = vcpu->run;
+
+	set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
+	kvm_x86_ops->cache_regs(vcpu);
+	run->tpr_access.rip = vcpu->arch.rip;
+	run->tpr_access.is_write = write;
+}
+
+static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
+{
+	if (apic->vcpu->arch.tpr_access_reporting)
+		__report_tpr_access(apic, write);
+}
+
 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 {
 	u32 val = 0;
@@ -568,6 +585,9 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 		val = apic_get_tmcct(apic);
 		break;
+	case APIC_TASKPRI:
+		report_tpr_access(apic, false);
+		/* fall thru */
 	default:
 		apic_update_ppr(apic);
 		val = apic_get_reg(apic, offset);
@@ -677,6 +697,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
 		break;
 	case APIC_TASKPRI:
+		report_tpr_access(apic, true);
 		apic_set_tpr(apic, val & 0xff);
 		break;
......
@@ -684,6 +684,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_SET_TSS_ADDR:
 	case KVM_CAP_EXT_CPUID:
+	case KVM_CAP_VAPIC:
 		r = 1;
 		break;
 	default:
@@ -1055,6 +1056,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
+					   struct kvm_tpr_access_ctl *tac)
+{
+	if (tac->flags)
+		return -EINVAL;
+	vcpu->arch.tpr_access_reporting = !!tac->enabled;
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -1148,6 +1158,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	case KVM_SET_MSRS:
 		r = msr_io(vcpu, argp, do_set_msr, 0);
 		break;
+	case KVM_TPR_ACCESS_REPORTING: {
+		struct kvm_tpr_access_ctl tac;
+
+		r = -EFAULT;
+		if (copy_from_user(&tac, argp, sizeof tac))
+			goto out;
+		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
+		if (r)
+			goto out;
+		r = -EFAULT;
+		if (copy_to_user(argp, &tac, sizeof tac))
+			goto out;
+		r = 0;
+		break;
+	};
 	default:
 		r = -EINVAL;
 	}
......
@@ -211,6 +211,7 @@ struct kvm_vcpu_arch {
 	int mp_state;
 	int sipi_vector;
 	u64 ia32_misc_enable_msr;
+	bool tpr_access_reporting;
 
 	struct kvm_mmu mmu;
......
@@ -72,6 +72,7 @@ struct kvm_irqchip {
 #define KVM_EXIT_FAIL_ENTRY 9
 #define KVM_EXIT_INTR 10
 #define KVM_EXIT_SET_TPR 11
+#define KVM_EXIT_TPR_ACCESS 12
 
 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
 struct kvm_run {
@@ -130,6 +131,12 @@ struct kvm_run {
 			__u32 longmode;
 			__u32 pad;
 		} hypercall;
+		/* KVM_EXIT_TPR_ACCESS */
+		struct {
+			__u64 rip;
+			__u32 is_write;
+			__u32 pad;
+		} tpr_access;
 		/* Fix the size of the union. */
 		char padding[256];
 	};
@@ -202,6 +209,13 @@ struct kvm_signal_mask {
 	__u8 sigset[0];
 };
 
+/* for KVM_TPR_ACCESS_REPORTING */
+struct kvm_tpr_access_ctl {
+	__u32 enabled;
+	__u32 flags;
+	__u32 reserved[8];
+};
+
 #define KVMIO 0xAE
 
 /*
@@ -229,6 +243,7 @@ struct kvm_signal_mask {
 #define KVM_CAP_USER_MEMORY 3
 #define KVM_CAP_SET_TSS_ADDR 4
 #define KVM_CAP_EXT_CPUID 5
+#define KVM_CAP_VAPIC 6
 
 /*
  * ioctls for VM fds
@@ -274,5 +289,7 @@ struct kvm_signal_mask {
 #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state)
 #define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2)
 #define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
+/* Available with KVM_CAP_VAPIC */
+#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
 
 #endif
@@ -35,7 +35,7 @@
  * vcpu->requests bit members
  */
 #define KVM_REQ_TLB_FLUSH 0
+#define KVM_REQ_REPORT_TPR_ACCESS 2
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
......