Commit ad312c7c authored by Zhang Xiantao, committed by Avi Kivity

KVM: Portability: Introduce kvm_vcpu_arch

Move all the architecture-specific fields in kvm_vcpu into a new struct
kvm_vcpu_arch.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 682c59a3
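
The mechanical effect of the patch: the x86-only members of struct kvm_vcpu move into a new struct kvm_vcpu_arch, embedded as an "arch" member, so every access such as vcpu->cr0 becomes vcpu->arch.cr0. A trimmed-down sketch of the split (only a couple of representative fields; the real structs carry many more):

    /* Before: arch-specific state sat directly in kvm_vcpu. */
    struct kvm_vcpu {
        KVM_VCPU_COMM;          /* common, arch-independent fields */
        unsigned long cr0;      /* x86-only state follows ... */
        unsigned long cr4;
        /* ... */
    };

    /* After: the same state lives behind a single member. */
    struct kvm_vcpu_arch {
        unsigned long cr0;
        unsigned long cr4;
        /* ... */
    };

    struct kvm_vcpu {
        KVM_VCPU_COMM;
        struct kvm_vcpu_arch arch;
    };

The hunks below are the resulting mechanical rename across the ioapic, the vcpu fault handler, the MMU, and the instruction emulator.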
@@ -158,7 +158,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 	if (dest_mode == 0) {	/* Physical mode. */
 		if (dest == 0xFF) {	/* Broadcast. */
 			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
+				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
 					mask |= 1 << i;
 			return mask;
 		}
@@ -166,8 +166,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 			vcpu = kvm->vcpus[i];
 			if (!vcpu)
 				continue;
-			if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
-				if (vcpu->apic)
+			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
+				if (vcpu->arch.apic)
 					mask = 1 << i;
 				break;
 			}
@@ -177,8 +177,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 			vcpu = kvm->vcpus[i];
 			if (!vcpu)
 				continue;
-			if (vcpu->apic &&
-			    kvm_apic_match_logical_addr(vcpu->apic, dest))
+			if (vcpu->arch.apic &&
+			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
 				mask |= 1 << vcpu->vcpu_id;
 		}
 	ioapic_debug("mask %x\n", mask);
@@ -670,7 +670,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (vmf->pgoff == 0)
 		page = virt_to_page(vcpu->run);
 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
-		page = virt_to_page(vcpu->pio_data);
+		page = virt_to_page(vcpu->arch.pio_data);
 	else
 		return VM_FAULT_SIGBUS;
 	get_page(page);
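
For context: this fault handler backs the mmap() of a vcpu fd. Page 0 of that mapping is the kvm_run structure and page KVM_PIO_PAGE_OFFSET is the PIO data buffer, which now lives under vcpu->arch. A minimal userspace sketch of reaching those pages (error handling elided; the ioctl and offset constants are the standard KVM ABI):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Map a vcpu's shared pages; faults on the mapping are
     * served by kvm_vcpu_fault() above. */
    struct kvm_run *map_vcpu_run(int kvm_fd, int vcpu_fd)
    {
        /* Size covers the run page plus the PIO data page. */
        long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, vcpu_fd, 0);
    }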
This diff is collapsed.
@@ -129,11 +129,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
 	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
 walk:
-	walker->level = vcpu->mmu.root_level;
-	pte = vcpu->cr3;
+	walker->level = vcpu->arch.mmu.root_level;
+	pte = vcpu->arch.cr3;
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
-		pte = vcpu->pdptrs[(addr >> 30) & 3];
+		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
 		if (!is_present_pte(pte))
 			goto not_present;
 		--walker->level;
@@ -275,10 +275,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	if (!is_present_pte(walker->ptes[walker->level - 1]))
 		return NULL;
-	shadow_addr = vcpu->mmu.root_hpa;
-	level = vcpu->mmu.shadow_root_level;
+	shadow_addr = vcpu->arch.mmu.root_hpa;
+	level = vcpu->arch.mmu.shadow_root_level;
 	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
 		shadow_addr &= PT64_BASE_ADDR_MASK;
 		--level;
 	}
@@ -380,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
@@ -390,7 +390,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		 shadow_pte, *shadow_pte, write_pt);
 	if (!write_pt)
-		vcpu->last_pt_write_count = 0; /* reset fork detector */
+		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
 	/*
 	 * mmio: emulate if accessible, otherwise its a guest fault.
This diff is collapsed.
@@ -92,8 +92,7 @@ enum {
 
 #include "x86_emulate.h"
 
-struct kvm_vcpu {
-	KVM_VCPU_COMM;
+struct kvm_vcpu_arch {
 	u64 host_tsc;
 	int interrupt_window_open;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
@@ -130,7 +129,6 @@ struct kvm_vcpu {
 
 	int last_pt_write_count;
 	u64 *last_pte_updated;
-
 	struct i387_fxsave_struct host_fx_image;
 	struct i387_fxsave_struct guest_fx_image;
@@ -159,12 +157,17 @@ struct kvm_vcpu {
 	int cpuid_nent;
 	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
 
 	/* emulate context */
 	struct x86_emulate_ctxt emulate_ctxt;
 };
 
+struct kvm_vcpu {
+	KVM_VCPU_COMM;
+	struct kvm_vcpu_arch arch;
+};
+
 struct descriptor_table {
 	u16 limit;
 	unsigned long base;
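
KVM_VCPU_COMM is the pre-existing macro holding the architecture-independent members; after this patch struct kvm_vcpu is nothing but that macro plus the arch struct. A hypothetical, heavily abbreviated expansion for illustration (listing only fields the hunks on this page actually use; the real macro defines many more):

    #define KVM_VCPU_COMM                                          \
        struct kvm *kvm;        /* owning VM */                    \
        int vcpu_id;            /* used by the ioapic code */      \
        struct kvm_run *run     /* page 0 of the vcpu mmap */

Generic code keeps touching only these common members; anything x86-specific now has to go through vcpu->arch.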
@@ -339,7 +342,7 @@ static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
-	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
 		return 0;
 
 	return kvm_mmu_load(vcpu);
@@ -348,7 +351,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	return vcpu->shadow_efer & EFER_LME;
+	return vcpu->arch.shadow_efer & EFER_LME;
 #else
 	return 0;
 #endif
@@ -356,17 +359,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr4 & X86_CR4_PAE;
+	return vcpu->arch.cr4 & X86_CR4_PAE;
 }
 
 static inline int is_pse(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr4 & X86_CR4_PSE;
+	return vcpu->arch.cr4 & X86_CR4_PSE;
 }
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & X86_CR0_PG;
+	return vcpu->arch.cr0 & X86_CR0_PG;
 }
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -489,8 +492,8 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 
 static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->mp_state == VCPU_MP_STATE_RUNNABLE
-	       || vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
+	       || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
 }
 
 #endif
@@ -769,8 +769,8 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* Shadow copy of register state. Committed on successful emulation. */
 	memset(c, 0, sizeof(struct decode_cache));
-	c->eip = ctxt->vcpu->rip;
-	memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs);
+	c->eip = ctxt->vcpu->arch.rip;
+	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL:
@@ -1226,7 +1226,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	 * modify them.
 	 */
-	memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs);
+	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 	saved_eip = c->eip;
 
 	if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
@@ -1235,7 +1235,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	if (c->rep_prefix && (c->d & String)) {
 		/* All REP prefixes have the same first termination condition */
 		if (c->regs[VCPU_REGS_RCX] == 0) {
-			ctxt->vcpu->rip = c->eip;
+			ctxt->vcpu->arch.rip = c->eip;
 			goto done;
 		}
 		/* The second termination condition only applies for REPE
@@ -1249,17 +1249,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		    (c->b == 0xae) || (c->b == 0xaf)) {
 			if ((c->rep_prefix == REPE_PREFIX) &&
 			    ((ctxt->eflags & EFLG_ZF) == 0)) {
-				ctxt->vcpu->rip = c->eip;
+				ctxt->vcpu->arch.rip = c->eip;
 				goto done;
 			}
 			if ((c->rep_prefix == REPNE_PREFIX) &&
 			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
-				ctxt->vcpu->rip = c->eip;
+				ctxt->vcpu->arch.rip = c->eip;
 				goto done;
 			}
 		}
 		c->regs[VCPU_REGS_RCX]--;
-		c->eip = ctxt->vcpu->rip;
+		c->eip = ctxt->vcpu->arch.rip;
 	}
 
 	if (c->src.type == OP_MEM) {
@@ -1628,7 +1628,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		c->dst.type = OP_NONE;	/* Disable writeback. */
 		break;
 	case 0xf4:	/* hlt */
-		ctxt->vcpu->halt_request = 1;
+		ctxt->vcpu->arch.halt_request = 1;
 		goto done;
 	case 0xf5:	/* cmc */
 		/* complement carry flag from eflags reg */
@@ -1665,8 +1665,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		goto done;
 
 	/* Commit shadow register state. */
-	memcpy(ctxt->vcpu->regs, c->regs, sizeof c->regs);
-	ctxt->vcpu->rip = c->eip;
+	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
+	ctxt->vcpu->arch.rip = c->eip;
 
 done:
 	if (rc == X86EMUL_UNHANDLEABLE) {
@@ -1783,7 +1783,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
 			kvm_inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->rip;
+			c->eip = ctxt->vcpu->arch.rip;
 		}
 		rc = X86EMUL_CONTINUE;
 		c->dst.type = OP_NONE;
@@ -1793,7 +1793,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
 			kvm_inject_gp(ctxt->vcpu, 0);
-			c->eip = ctxt->vcpu->rip;
+			c->eip = ctxt->vcpu->arch.rip;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
 			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
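
One pattern worth noting in the emulator hunks: x86_decode_insn() takes a shadow copy of rip and the register file into the decode cache, x86_emulate_insn() mutates only that copy, and the results are written back to the vcpu (now via vcpu->arch) only when emulation succeeds. Schematically (a sketch of the flow, not the kernel code):

    /* decode: snapshot guest state into the decode cache */
    c->eip = ctxt->vcpu->arch.rip;
    memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

    /* emulate: mutate only c->eip and c->regs ... */

    /* commit: reached only on the successful path */
    memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
    ctxt->vcpu->arch.rip = c->eip;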