Commit 2d5b5a66 authored by Sheng Yang, committed by Avi Kivity

KVM: x86: XSAVE/XRSTOR live migration support

This patch enables save/restore of the vcpu's xsave state.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 69b61833
@@ -922,6 +922,80 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
is vcpu 0.
4.41 KVM_GET_XSAVE

Capability: KVM_CAP_XSAVE
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_xsave (out)
Returns: 0 on success, -1 on error

struct kvm_xsave {
	__u32 region[1024];
};

This ioctl copies the current vcpu's xsave struct to userspace.
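For illustration only, a minimal save-side sketch (not part of the patch): kvm_fd is assumed to be an open /dev/kvm descriptor, vcpu_fd a vcpu file descriptor from KVM_CREATE_VCPU, and save_vcpu_xsave is a hypothetical helper name.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: fetch the vcpu's xsave image on the migration source. */
static int save_vcpu_xsave(int kvm_fd, int vcpu_fd, struct kvm_xsave *image)
{
	/* The ioctl is only defined when the host advertises KVM_CAP_XSAVE. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE) <= 0)
		return -1;
	/* Copies the vcpu's 4096-byte xsave region into *image. */
	return ioctl(vcpu_fd, KVM_GET_XSAVE, image);
}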
4.42 KVM_SET_XSAVE

Capability: KVM_CAP_XSAVE
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_xsave (in)
Returns: 0 on success, -1 on error

struct kvm_xsave {
	__u32 region[1024];
};

This ioctl copies a userspace-supplied xsave struct to the kernel, setting the vcpu's xsave state.
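The matching restore side, again as a hedged sketch using the same headers and file-descriptor assumptions as above; restore_vcpu_xsave is a hypothetical helper name.

/* Hypothetical helper: load a previously saved xsave image on the destination. */
static int restore_vcpu_xsave(int vcpu_fd, struct kvm_xsave *image)
{
	/*
	 * Per this patch, the kernel rejects the image with -EINVAL if it
	 * carries xstate components the destination cannot hold (anything
	 * beyond FP/SSE when the host lacks XSAVE).
	 */
	return ioctl(vcpu_fd, KVM_SET_XSAVE, image);
}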
4.43 KVM_GET_XCRS

Capability: KVM_CAP_XCRS
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_xcrs (out)
Returns: 0 on success, -1 on error

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};

This ioctl copies the current vcpu's xcrs to userspace.
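A sketch of reading XCR0 back on the save side (hypothetical helper, same assumptions as above; with this patch an XSAVE-capable host reports exactly one register, XCR0, and a host without XSAVE reports nr_xcrs == 0):

/* Hypothetical helper: extract XCR0 from the vcpu for migration. */
static int save_vcpu_xcr0(int vcpu_fd, __u64 *xcr0)
{
	struct kvm_xcrs xcrs;
	__u32 i;

	if (ioctl(vcpu_fd, KVM_GET_XCRS, &xcrs) < 0)
		return -1;
	for (i = 0; i < xcrs.nr_xcrs; i++)
		if (xcrs.xcrs[i].xcr == 0) {	/* register index 0 is XCR0 */
			*xcr0 = xcrs.xcrs[i].value;
			return 0;
		}
	return -1;	/* no XCR0 reported, e.g. host without XSAVE */
}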
4.44 KVM_SET_XCRS

Capability: KVM_CAP_XCRS
Architectures: x86
Type: vcpu ioctl
Parameters: struct kvm_xcrs (in)
Returns: 0 on success, -1 on error

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};

This ioctl sets the vcpu's xcrs to the values supplied by userspace.
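And the restore side, as a sketch (hypothetical helper; this patch accepts only XCR0, so a single entry is passed and flags must be zero):

#include <string.h>

/* Hypothetical helper: program the guest's XCR0 on the destination. */
static int restore_vcpu_xcr0(int vcpu_fd, __u64 xcr0)
{
	struct kvm_xcrs xcrs;

	memset(&xcrs, 0, sizeof(xcrs));	/* flags and padding must be zero */
	xcrs.nr_xcrs = 1;
	xcrs.xcrs[0].xcr = 0;		/* XCR0 */
	xcrs.xcrs[0].value = xcr0;
	return ioctl(vcpu_fd, KVM_SET_XCRS, &xcrs);
}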
5. The kvm_run structure

Application code obtains a pointer to the kvm_run structure by
...
@@ -22,6 +22,8 @@
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -299,4 +301,24 @@ struct kvm_debugregs {
	__u64 reserved[9];
};
/* for KVM_CAP_XSAVE */
struct kvm_xsave {
	__u32 region[1024];
};

#define KVM_MAX_XCRS	16

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};
#endif /* _ASM_X86_KVM_H */
@@ -13,8 +13,11 @@
#define FXSAVE_SIZE	512
-#define XSTATE_YMM_SIZE 256
-#define XSTATE_YMM_OFFSET (512 + 64)
+#define XSAVE_HDR_SIZE	    64
+#define XSAVE_HDR_OFFSET    FXSAVE_SIZE
+#define XSAVE_YMM_SIZE	    256
+#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/*
 * These are the features that the OS can handle currently.
...
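The new constants mirror the architectural XSAVE area layout: the 512-byte legacy FXSAVE image at offset 0, the 64-byte XSAVE header (which holds xstate_bv) at offset 512, and the extended YMM state right after it at offset 576. That is also why the KVM code further down indexes kvm_xsave.region, an array of __u32, at XSAVE_HDR_OFFSET / sizeof(u32) to reach xstate_bv. As a throwaway illustration (these macro names are made up and are not part of the patch):

/* Illustration only: word indices into struct kvm_xsave's __u32 region[]. */
#define XSAVE_FXSAVE_WORD	0			/* legacy FXSAVE image            */
#define XSAVE_HDR_WORD		(512 / 4)		/* XSAVE header, i.e. xstate_bv   */
#define XSAVE_YMM_WORD		((512 + 64) / 4)	/* extended YMM state at byte 576 */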
@@ -1680,6 +1680,7 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_PCI_SEGMENT:
	case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
	case KVM_CAP_XSAVE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
@@ -1703,6 +1704,9 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	case KVM_CAP_XCRS:
		r = cpu_has_xsave;
		break;
	default:
		r = 0;
		break;
@@ -2355,6 +2359,77 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
	return 0;
}
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
					 struct kvm_xsave *guest_xsave)
{
	if (cpu_has_xsave)
		memcpy(guest_xsave->region,
			&vcpu->arch.guest_fpu.state->xsave,
			sizeof(struct xsave_struct));
	else {
		memcpy(guest_xsave->region,
			&vcpu->arch.guest_fpu.state->fxsave,
			sizeof(struct i387_fxsave_struct));
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
			XSTATE_FPSSE;
	}
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
					struct kvm_xsave *guest_xsave)
{
	u64 xstate_bv =
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];

	if (cpu_has_xsave)
		memcpy(&vcpu->arch.guest_fpu.state->xsave,
			guest_xsave->region, sizeof(struct xsave_struct));
	else {
		if (xstate_bv & ~XSTATE_FPSSE)
			return -EINVAL;
		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
			guest_xsave->region, sizeof(struct i387_fxsave_struct));
	}
	return 0;
}
static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
					struct kvm_xcrs *guest_xcrs)
{
	if (!cpu_has_xsave) {
		guest_xcrs->nr_xcrs = 0;
		return;
	}

	guest_xcrs->nr_xcrs = 1;
	guest_xcrs->flags = 0;
	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{
	int i, r = 0;

	if (!cpu_has_xsave)
		return -EINVAL;

	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
		return -EINVAL;

	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
		/* Only support XCR0 currently */
		if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
				guest_xcrs->xcrs[0].value);
			break;
		}
	if (r)
		r = -EINVAL;
	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
@@ -2556,6 +2631,70 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
	case KVM_GET_XSAVE: {
		struct kvm_xsave *xsave;

		xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
		r = -ENOMEM;
		if (!xsave)
			break;

		kvm_vcpu_ioctl_x86_get_xsave(vcpu, xsave);

		r = -EFAULT;
		if (copy_to_user(argp, xsave, sizeof(struct kvm_xsave)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XSAVE: {
		struct kvm_xsave *xsave;

		xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
		r = -ENOMEM;
		if (!xsave)
			break;

		r = -EFAULT;
		if (copy_from_user(xsave, argp, sizeof(struct kvm_xsave)))
			break;

		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, xsave);
		break;
	}
	case KVM_GET_XCRS: {
		struct kvm_xcrs *xcrs;

		xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
		r = -ENOMEM;
		if (!xcrs)
			break;

		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, xcrs);

		r = -EFAULT;
		if (copy_to_user(argp, xcrs,
				 sizeof(struct kvm_xcrs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XCRS: {
		struct kvm_xcrs *xcrs;

		xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
		r = -ENOMEM;
		if (!xcrs)
			break;

		r = -EFAULT;
		if (copy_from_user(xcrs, argp,
				   sizeof(struct kvm_xcrs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, xcrs);
		break;
	}
	default:
		r = -EINVAL;
	}
...
@@ -524,6 +524,12 @@ struct kvm_enable_cap {
#define KVM_CAP_PPC_OSI 52
#define KVM_CAP_PPC_UNSET_IRQ 53
#define KVM_CAP_ENABLE_CAP 54
#ifdef __KVM_HAVE_XSAVE
#define KVM_CAP_XSAVE 55
#endif
#ifdef __KVM_HAVE_XCRS
#define KVM_CAP_XCRS 56
#endif
#ifdef KVM_CAP_IRQ_ROUTING
@@ -714,6 +720,12 @@ struct kvm_clock_data {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
/* Available with KVM_CAP_XSAVE */
#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave)
/* Available with KVM_CAP_XCRS */
#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
...