Commit cbdd1bea authored by Christian Ehrhardt, committed by Avi Kivity

KVM: Rename kvm_arch_ops to kvm_x86_ops

This patch just renames the current (misnamed) _arch namings to _x86 to
ensure better readability once a real arch layer is introduced.
Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 0d8d2bd4
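
For readers unfamiliar with the pattern being renamed here: kvm_x86_ops is a table of function pointers through which the arch-neutral KVM core calls into whichever vendor backend (SVM or VMX) registered itself at module init. The following is a minimal, self-contained sketch of that registration-and-dispatch pattern in plain C; the struct fields, the toy kvm_init_x86()/kvm_exit_x86() bodies, and the fake SVM backend are simplified stand-ins for illustration, not the kernel's actual implementations.

#include <stdio.h>

/* Toy stand-in for struct kvm_vcpu. */
struct kvm_vcpu { int id; };

/* Simplified ops table mirroring the renamed kvm_x86_ops:
 * the core only sees function pointers, never SVM/VMX symbols. */
struct kvm_x86_ops {
	int  (*cpu_has_kvm_support)(void);
	void (*tlb_flush)(struct kvm_vcpu *vcpu);
};

/* The single registered backend, like the real kvm_x86_ops pointer. */
static struct kvm_x86_ops *kvm_x86_ops;

/* Toy kvm_init_x86(): the real one also does hardware setup etc. */
static int kvm_init_x86(struct kvm_x86_ops *ops)
{
	if (!ops->cpu_has_kvm_support())
		return -1;
	kvm_x86_ops = ops;
	return 0;
}

static void kvm_exit_x86(void)
{
	kvm_x86_ops = NULL;
}

/* A "vendor module" (think svm.c or vmx.c) fills in the table. */
static int has_fake_svm(void) { return 1; }

static void fake_svm_tlb_flush(struct kvm_vcpu *vcpu)
{
	printf("flushing TLB for vcpu %d\n", vcpu->id);
}

static struct kvm_x86_ops fake_svm_x86_ops = {
	.cpu_has_kvm_support = has_fake_svm,
	.tlb_flush           = fake_svm_tlb_flush,
};

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };

	if (kvm_init_x86(&fake_svm_x86_ops))
		return 1;
	/* Core code dispatches through the table, as mmu.c does below. */
	kvm_x86_ops->tlb_flush(&vcpu);
	kvm_exit_x86();
	return 0;
}
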
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -441,7 +441,7 @@ struct descriptor_table {
 	unsigned long base;
 } __attribute__((packed));
 
-struct kvm_arch_ops {
+struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
 	void (*hardware_enable)(void *dummy);      /* __init */
@@ -499,7 +499,7 @@ struct kvm_arch_ops {
 	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
 };
 
-extern struct kvm_arch_ops *kvm_arch_ops;
+extern struct kvm_x86_ops *kvm_x86_ops;
 
 /* The guest did something we don't support. */
 #define pr_unimpl(vcpu, fmt, ...)					\
@@ -515,9 +515,9 @@ extern struct kvm_arch_ops *kvm_arch_ops;
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
 		  struct module *module);
-void kvm_exit_arch(void);
+void kvm_exit_x86(void);
 
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
drivers/kvm/kvm_main.c: diff collapsed (not shown)
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -966,7 +966,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.tlb_flush;
-	kvm_arch_ops->tlb_flush(vcpu);
+	kvm_x86_ops->tlb_flush(vcpu);
 }
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
@@ -979,7 +979,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 			       u64 addr,
 			       u32 err_code)
 {
-	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
+	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -1073,7 +1073,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	mmu_alloc_roots(vcpu);
-	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
 out:
 	mutex_unlock(&vcpu->kvm->lock);
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -274,7 +274,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 		access_bits &= ~PT_WRITABLE_MASK;
 		if (is_writeble_pte(spte)) {
 			spte &= ~PT_WRITABLE_MASK;
-			kvm_arch_ops->tlb_flush(vcpu);
+			kvm_x86_ops->tlb_flush(vcpu);
 		}
 		if (write_fault)
 			*ptwrite = 1;
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1741,7 +1741,7 @@ static void svm_check_processor_compat(void *rtn)
 	*(int *)rtn = 0;
 }
 
-static struct kvm_arch_ops svm_arch_ops = {
+static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
 	.hardware_setup = svm_hardware_setup,
@@ -1794,13 +1794,13 @@ static struct kvm_arch_ops svm_arch_ops = {
 
 static int __init svm_init(void)
 {
-	return kvm_init_arch(&svm_arch_ops, sizeof(struct vcpu_svm),
+	return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
 			THIS_MODULE);
 }
 
 static void __exit svm_exit(void)
 {
-	kvm_exit_arch();
+	kvm_exit_x86();
 }
 
 module_init(svm_init)
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2548,7 +2548,7 @@ static void __init vmx_check_processor_compat(void *rtn)
 	}
 }
 
-static struct kvm_arch_ops vmx_arch_ops = {
+static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
 	.hardware_setup = hardware_setup,
@@ -2627,7 +2627,7 @@ static int __init vmx_init(void)
 	memset(iova, 0xff, PAGE_SIZE);
 	kunmap(vmx_io_bitmap_b);
 
-	r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+	r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		goto out1;
 
@@ -2645,7 +2645,7 @@ static void __exit vmx_exit(void)
 	__free_page(vmx_io_bitmap_b);
 	__free_page(vmx_io_bitmap_a);
 
-	kvm_exit_arch();
+	kvm_exit_x86();
 }
 
 module_init(vmx_init)
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1432,7 +1432,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 			| ((u64)_regs[VCPU_REGS_RDX] << 32);
 		rc = kvm_set_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
-			kvm_arch_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
 			_eip = ctxt->vcpu->rip;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -1441,7 +1441,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		/* rdmsr */
 		rc = kvm_get_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
-			kvm_arch_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
 			_eip = ctxt->vcpu->rip;
 		} else {
 			_regs[VCPU_REGS_RAX] = (u32)msr_data;