Commit a1efbe77 authored by Jan Kiszka, committed by Avi Kivity

KVM: x86: Add support for saving&restoring debug registers

So far user space was not able to save and restore debug registers for
migration or after reset. Plug this hole.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 48005f64
@@ -718,6 +718,37 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
the flags field to signal that interrupt.shadow contains a valid state and
shall be written into the VCPU.
4.32 KVM_GET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_debugregs (out)
Returns: 0 on success, -1 on error
Reads debug registers from the vcpu.
struct kvm_debugregs {
__u64 db[4];
__u64 dr6;
__u64 dr7;
__u64 flags;
__u64 reserved[9];
};
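
For illustration only, a minimal userspace sketch of reading this state; it assumes vcpu_fd is a vcpu file descriptor obtained via KVM_CREATE_VCPU and that KVM_CAP_DEBUGREGS has already been confirmed with KVM_CHECK_EXTENSION (the helper name dump_debugregs is made up for this example, not part of the patch):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Hypothetical helper: dump the guest debug registers of one vcpu. */
static int dump_debugregs(int vcpu_fd)
{
	struct kvm_debugregs dbg;
	int i;

	if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) < 0) {
		perror("KVM_GET_DEBUGREGS");
		return -1;
	}
	for (i = 0; i < 4; i++)
		printf("db%d = 0x%llx\n", i, (unsigned long long)dbg.db[i]);
	printf("dr6 = 0x%llx, dr7 = 0x%llx\n",
	       (unsigned long long)dbg.dr6, (unsigned long long)dbg.dr7);
	return 0;
}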
4.33 KVM_SET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_debugregs (in)
Returns: 0 on success, -1 on error
Writes debug registers into the vcpu.
See KVM_GET_DEBUGREGS for the data structure. The flags field is currently
unused and must be cleared on entry.
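
A complementary, purely illustrative sketch: restoring a previously captured state into a destination vcpu, e.g. after migration or reset, with flags cleared as the interface requires (restore_debugregs and the saved-state pointer are assumed names, not part of this patch):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: write a previously captured state back into a vcpu. */
static int restore_debugregs(int vcpu_fd, const struct kvm_debugregs *saved)
{
	struct kvm_debugregs dbg;

	memcpy(&dbg, saved, sizeof(dbg));
	dbg.flags = 0;				/* must be cleared on entry */
	memset(dbg.reserved, 0, sizeof(dbg.reserved));

	return ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
}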
5. The kvm_run structure
......
@@ -21,6 +21,7 @@
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -289,4 +290,13 @@ struct kvm_vcpu_events {
	__u32 reserved[10];
};
/* for KVM_GET/SET_DEBUGREGS */
struct kvm_debugregs {
__u64 db[4];
__u64 dr6;
__u64 dr7;
__u64 flags;
__u64 reserved[9];
};
#endif /* _ASM_X86_KVM_H */
@@ -1548,6 +1548,7 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_HYPERV_VAPIC:
	case KVM_CAP_HYPERV_SPIN:
	case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
		r = 1;
		break;
@@ -2165,6 +2166,36 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
	return 0;
}
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
vcpu_load(vcpu);
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
vcpu_put(vcpu);
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
if (dbgregs->flags)
return -EINVAL;
vcpu_load(vcpu);
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
vcpu->arch.dr7 = dbgregs->dr7;
vcpu_put(vcpu);
return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
@@ -2343,6 +2374,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
case KVM_GET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
r = -EFAULT;
if (copy_to_user(argp, &dbgregs,
sizeof(struct kvm_debugregs)))
break;
r = 0;
break;
}
case KVM_SET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
r = -EFAULT;
if (copy_from_user(&dbgregs, argp,
sizeof(struct kvm_debugregs)))
break;
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
break;
}
	default:
		r = -EINVAL;
	}
......
@@ -503,6 +503,9 @@ struct kvm_ioeventfd {
#define KVM_CAP_PCI_SEGMENT 47
#define KVM_CAP_PPC_PAIRED_SINGLES 48
#define KVM_CAP_INTR_SHADOW 49
#ifdef __KVM_HAVE_DEBUGREGS
#define KVM_CAP_DEBUGREGS 50
#endif
#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
#ifdef KVM_CAP_IRQ_ROUTING
@@ -690,6 +693,9 @@ struct kvm_clock_data {
/* Available with KVM_CAP_VCPU_EVENTS */
#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
/* Available with KVM_CAP_DEBUGREGS */
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
......