Commit b0b42197 authored by Paolo Bonzini

KVM: x86: start moving SMM-related functions to new files

Create a new header and source with code related to system management
mode emulation.  Entry and exit will move there too; for now,
opportunistically rename put_smstate to PUT_SMSTATE while moving
it to smm.h, and adjust the SMM state saving code.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-2-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d08b4858
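
The macros this commit moves (and renames) treat the 512-byte SMM state-save area as a flat buffer: architectural offsets in the 0x7e00-0x7fff range (relative to SMBASE + 0x8000) are rebased by subtracting 0x7e00 before the typed access. A minimal standalone sketch of that addressing scheme follows; this is not kernel code, and the buffer, register values, and main() are invented for illustration:

/*
 * Standalone model of GET_SMSTATE/PUT_SMSTATE: architectural offsets
 * 0x7e00..0x7fff index a 512-byte state-save buffer after subtracting
 * the 0x7e00 bias.  Offsets and types below follow the commit; the
 * values are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#define GET_SMSTATE(type, buf, offset) \
	(*(type *)((buf) + (offset) - 0x7e00))

#define PUT_SMSTATE(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = val

int main(void)
{
	char buf[512];	/* one SMM state-save area */

	memset(buf, 0, sizeof(buf));

	/* 64-bit save-state revision id, as in enter_smm_save_state_64() */
	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020064);
	/* EFER lives at offset 0x7ed0 in the 64-bit layout */
	PUT_SMSTATE(u64, buf, 0x7ed0, 0x500);	/* illustrative value only */

	printf("revision id: %#x\n", GET_SMSTATE(u32, buf, 0x7efc));
	printf("efer:        %#llx\n",
	       (unsigned long long)GET_SMSTATE(u64, buf, 0x7ed0));
	return 0;
}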
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2087,12 +2087,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #endif
 }
 
-#define put_smstate(type, buf, offset, val) \
-	*(type *)((buf) + (offset) - 0x7e00) = val
-
-#define GET_SMSTATE(type, buf, offset) \
-	(*(type *)((buf) + (offset) - 0x7e00))
-
 int kvm_cpu_dirty_log_size(void);
 
 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -20,6 +20,7 @@ endif
 
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
 kvm-$(CONFIG_KVM_XEN) += xen.o
+kvm-y += smm.o
 
 kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 	       vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -30,6 +30,7 @@
 #include "tss.h"
 #include "mmu.h"
 #include "pmu.h"
+#include "smm.h"
 
 /*
  * Operand types
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -200,9 +200,4 @@ static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hflags & HF_GUEST_MASK;
 }
 
-static inline bool is_smm(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.hflags & HF_SMM_MASK;
-}
-
 #endif
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -42,6 +42,7 @@
 #include "x86.h"
 #include "cpuid.h"
 #include "hyperv.h"
+#include "smm.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -1170,9 +1171,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		break;
 
 	case APIC_DM_SMI:
-		result = 1;
-		kvm_make_request(KVM_REQ_SMI, vcpu);
-		kvm_vcpu_kick(vcpu);
+		if (!kvm_inject_smi(vcpu)) {
+			kvm_vcpu_kick(vcpu);
+			result = 1;
+		}
 		break;
 
 	case APIC_DM_NMI:
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -7,7 +7,7 @@
 #include <linux/kvm_host.h>
 
 #include "hyperv.h"
-#include "kvm_cache_regs.h"
+#include "smm.h"
 
 #define KVM_APIC_INIT 0
 #define KVM_APIC_SIPI 1
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -22,6 +22,7 @@
 #include "tdp_mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
+#include "smm.h"
 #include "kvm_emulate.h"
 #include "cpuid.h"
 #include "spte.h"
--- /dev/null
+++ b/arch/x86/kvm/smm.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/kvm_host.h>
+#include "x86.h"
+#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
+#include "smm.h"
+#include "trace.h"
+
+void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
+{
+	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
+
+	if (entering_smm) {
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	} else {
+		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+
+		/* Process a latched INIT or SMI, if any. */
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+		/*
+		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+		 * on SMM exit we still need to reload them from
+		 * guest memory
+		 */
+		vcpu->arch.pdptrs_from_userspace = false;
+	}
+
+	kvm_mmu_reset_context(vcpu);
+}
+
+void process_smi(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.smi_pending = true;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
--- /dev/null
+++ b/arch/x86/kvm/smm.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_KVM_SMM_H
+#define ASM_KVM_SMM_H
+
+#define GET_SMSTATE(type, buf, offset) \
+	(*(type *)((buf) + (offset) - 0x7e00))
+
+#define PUT_SMSTATE(type, buf, offset, val) \
+	*(type *)((buf) + (offset) - 0x7e00) = val
+
+static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
+{
+	kvm_make_request(KVM_REQ_SMI, vcpu);
+	return 0;
+}
+
+static inline bool is_smm(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_SMM_MASK;
+}
+
+void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm);
+void process_smi(struct kvm_vcpu *vcpu);
+
+#endif
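
The header above is self-contained enough to model in user space. Here is a small sketch of the hflags transitions that kvm_smm_changed() performs; the toy_vcpu struct and main() are invented for the demo, and the mask values assume the kernel's (1 << 6) and (1 << 7) HF_* bit assignments of the time:

/* User-space model of kvm_smm_changed()'s flag bookkeeping (illustrative). */
#include <stdbool.h>
#include <stdio.h>

#define HF_SMM_MASK		(1 << 6)	/* assumed kernel values */
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

struct toy_vcpu {
	unsigned long hflags;
};

static bool is_smm(struct toy_vcpu *vcpu)
{
	return vcpu->hflags & HF_SMM_MASK;
}

static void smm_changed(struct toy_vcpu *vcpu, bool entering_smm)
{
	if (entering_smm) {
		vcpu->hflags |= HF_SMM_MASK;
	} else {
		/* leaving SMM also clears the "NMI taken inside SMM" flag */
		vcpu->hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
	}
}

int main(void)
{
	struct toy_vcpu vcpu = { .hflags = 0 };

	smm_changed(&vcpu, true);
	printf("in SMM: %d\n", is_smm(&vcpu));		/* 1 */

	vcpu.hflags |= HF_SMM_INSIDE_NMI_MASK;	/* pretend an NMI hit in SMM */
	smm_changed(&vcpu, false);
	printf("in SMM: %d, hflags: %#lx\n",
	       is_smm(&vcpu), vcpu.hflags);		/* 0, 0 */
	return 0;
}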
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "lapic.h"
 #include "svm.h"
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -6,6 +6,7 @@
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "pmu.h"
@@ -4407,9 +4408,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 		return 0;
 
 	/* FED8h - SVM Guest */
-	put_smstate(u64, smstate, 0x7ed8, 1);
+	PUT_SMSTATE(u64, smstate, 0x7ed8, 1);
 	/* FEE0h - SVM Guest VMCB Physical Address */
-	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+	PUT_SMSTATE(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -16,6 +16,7 @@
 #include "trace.h"
 #include "vmx.h"
 #include "x86.h"
+#include "smm.h"
 
 static bool __read_mostly enable_shadow_vmcs = 1;
 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -66,6 +66,7 @@
 #include "vmcs12.h"
 #include "vmx.h"
 #include "x86.h"
+#include "smm.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -30,6 +30,7 @@
 #include "hyperv.h"
 #include "lapic.h"
 #include "xen.h"
+#include "smm.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -119,7 +120,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
-static void process_smi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
@@ -4889,13 +4889,6 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
-{
-	kvm_make_request(KVM_REQ_SMI, vcpu);
-
-	return 0;
-}
-
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -5118,8 +5111,6 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
-
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
@@ -5572,7 +5563,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_SMI: {
-		r = kvm_vcpu_ioctl_smi(vcpu);
+		r = kvm_inject_smi(vcpu);
 		break;
 	}
 	case KVM_SET_CPUID: {
@@ -8569,29 +8560,6 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
-{
-	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
-
-	if (entering_smm) {
-		vcpu->arch.hflags |= HF_SMM_MASK;
-	} else {
-		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
-
-		/* Process a latched INIT or SMI, if any. */
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-		/*
-		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
-		 * on SMM exit we still need to reload them from
-		 * guest memory
-		 */
-		vcpu->arch.pdptrs_from_userspace = false;
-	}
-
-	kvm_mmu_reset_context(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 				unsigned long *db)
 {
@@ -10088,16 +10056,16 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
 	int offset;
 
 	kvm_get_segment(vcpu, &seg, n);
-	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7fa8 + n * 4, seg.selector);
 
 	if (n < 3)
 		offset = 0x7f84 + n * 12;
 	else
 		offset = 0x7f2c + (n - 3) * 12;
 
-	put_smstate(u32, buf, offset + 8, seg.base);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, offset + 8, seg.base);
+	PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
+	PUT_SMSTATE(u32, buf, offset, enter_smm_get_segment_flags(&seg));
 }
 
 #ifdef CONFIG_X86_64
@@ -10111,10 +10079,10 @@ static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 	offset = 0x7e00 + n * 16;
 
 	flags = enter_smm_get_segment_flags(&seg) >> 8;
-	put_smstate(u16, buf, offset, seg.selector);
-	put_smstate(u16, buf, offset + 2, flags);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u64, buf, offset + 8, seg.base);
+	PUT_SMSTATE(u16, buf, offset, seg.selector);
+	PUT_SMSTATE(u16, buf, offset + 2, flags);
+	PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
+	PUT_SMSTATE(u64, buf, offset + 8, seg.base);
 }
 #endif
@@ -10125,47 +10093,47 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	unsigned long val;
 	int i;
 
-	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
-	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
-	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
-	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
 
 	for (i = 0; i < 8; i++)
-		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
+		PUT_SMSTATE(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
 
 	kvm_get_dr(vcpu, 6, &val);
-	put_smstate(u32, buf, 0x7fcc, (u32)val);
+	PUT_SMSTATE(u32, buf, 0x7fcc, (u32)val);
 	kvm_get_dr(vcpu, 7, &val);
-	put_smstate(u32, buf, 0x7fc8, (u32)val);
+	PUT_SMSTATE(u32, buf, 0x7fc8, (u32)val);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-	put_smstate(u32, buf, 0x7fc4, seg.selector);
-	put_smstate(u32, buf, 0x7f64, seg.base);
-	put_smstate(u32, buf, 0x7f60, seg.limit);
-	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, 0x7fc4, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7f64, seg.base);
+	PUT_SMSTATE(u32, buf, 0x7f60, seg.limit);
+	PUT_SMSTATE(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-	put_smstate(u32, buf, 0x7fc0, seg.selector);
-	put_smstate(u32, buf, 0x7f80, seg.base);
-	put_smstate(u32, buf, 0x7f7c, seg.limit);
-	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, 0x7fc0, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7f80, seg.base);
+	PUT_SMSTATE(u32, buf, 0x7f7c, seg.limit);
+	PUT_SMSTATE(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
 
 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7f74, dt.address);
-	put_smstate(u32, buf, 0x7f70, dt.size);
+	PUT_SMSTATE(u32, buf, 0x7f74, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7f70, dt.size);
 
 	static_call(kvm_x86_get_idt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7f58, dt.address);
-	put_smstate(u32, buf, 0x7f54, dt.size);
+	PUT_SMSTATE(u32, buf, 0x7f58, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7f54, dt.size);
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_32(vcpu, buf, i);
 
-	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
 
 	/* revision id */
-	put_smstate(u32, buf, 0x7efc, 0x00020000);
-	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
+	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020000);
+	PUT_SMSTATE(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
 #ifdef CONFIG_X86_64
@@ -10177,46 +10145,46 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 	int i;
 
 	for (i = 0; i < 16; i++)
-		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
+		PUT_SMSTATE(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
 
-	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
-	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f78, kvm_rip_read(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
 
 	kvm_get_dr(vcpu, 6, &val);
-	put_smstate(u64, buf, 0x7f68, val);
+	PUT_SMSTATE(u64, buf, 0x7f68, val);
 	kvm_get_dr(vcpu, 7, &val);
-	put_smstate(u64, buf, 0x7f60, val);
+	PUT_SMSTATE(u64, buf, 0x7f60, val);
 
-	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
-	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
-	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
 
-	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
+	PUT_SMSTATE(u32, buf, 0x7f00, vcpu->arch.smbase);
 
 	/* revision id */
-	put_smstate(u32, buf, 0x7efc, 0x00020064);
+	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020064);
 
-	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
+	PUT_SMSTATE(u64, buf, 0x7ed0, vcpu->arch.efer);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-	put_smstate(u16, buf, 0x7e90, seg.selector);
-	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e94, seg.limit);
-	put_smstate(u64, buf, 0x7e98, seg.base);
+	PUT_SMSTATE(u16, buf, 0x7e90, seg.selector);
+	PUT_SMSTATE(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
+	PUT_SMSTATE(u32, buf, 0x7e94, seg.limit);
+	PUT_SMSTATE(u64, buf, 0x7e98, seg.base);
 
 	static_call(kvm_x86_get_idt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e84, dt.size);
-	put_smstate(u64, buf, 0x7e88, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7e84, dt.size);
+	PUT_SMSTATE(u64, buf, 0x7e88, dt.address);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-	put_smstate(u16, buf, 0x7e70, seg.selector);
-	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e74, seg.limit);
-	put_smstate(u64, buf, 0x7e78, seg.base);
+	PUT_SMSTATE(u16, buf, 0x7e70, seg.selector);
+	PUT_SMSTATE(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
+	PUT_SMSTATE(u32, buf, 0x7e74, seg.limit);
+	PUT_SMSTATE(u64, buf, 0x7e78, seg.base);
 
 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e64, dt.size);
-	put_smstate(u64, buf, 0x7e68, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7e64, dt.size);
+	PUT_SMSTATE(u64, buf, 0x7e68, dt.address);
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
@@ -10302,12 +10270,6 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-static void process_smi(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.smi_pending = true;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
-}
-
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap)
 {
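
As a closing illustration, the 16-byte per-segment record that enter_smm_save_seg_64() emits at 0x7e00 + n * 16 (selector u16 at +0, flags u16 at +2, limit u32 at +4, base u64 at +8) can be exercised standalone. The helper name, segment values, and main() below are invented for the demo; only the offsets and field widths follow the function above:

/* Standalone model of the 64-bit per-segment save-state layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#define PUT_SMSTATE(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = val

static void save_seg_64(char *buf, int n, u16 selector, u16 flags,
			u32 limit, u64 base)
{
	int offset = 0x7e00 + n * 16;	/* one 16-byte record per segment */

	PUT_SMSTATE(u16, buf, offset, selector);
	PUT_SMSTATE(u16, buf, offset + 2, flags);
	PUT_SMSTATE(u32, buf, offset + 4, limit);
	PUT_SMSTATE(u64, buf, offset + 8, base);
}

int main(void)
{
	char buf[512];
	int n;

	memset(buf, 0, sizeof(buf));
	for (n = 0; n < 6; n++)	/* ES, CS, SS, DS, FS, GS in KVM's order */
		save_seg_64(buf, n, 0x10 + n * 8, 0x93, 0xfffff, 0);

	/* the selector of segment 1 sits at byte 0x7e10 - 0x7e00 = 0x10 */
	printf("seg 1 selector: %#x\n", *(u16 *)(buf + 0x10));
	return 0;
}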