Commit 9931be3f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add "arch" to common utils that have arch implementations

Add "arch" into the name of utility functions that are declared in common
code, but (surprise!) have arch-specific implementations.  Shuffle code
around so that all such helpers' declarations are bundled together.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b8592448
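The shape of the change is easiest to see in miniature: the common header keeps the old helper name as a static inline wrapper that forwards to a new *_arch_* function, and each architecture provides the implementation. Below is a small standalone sketch of that pattern; the function names match the patch, but struct kvm_vm is only forward-declared and the dump body and main() are illustrative stand-ins, not the selftest code.

#include <stdio.h>
#include <stdint.h>

struct kvm_vm;  /* forward declaration stands in for the selftest VM type */

/* Per-architecture implementation; each arch would define its own. */
void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
                    uint8_t indent)
{
        /* Placeholder body; the real selftest code prints register state. */
        fprintf(stream, "%*svcpu %u: (state dump goes here)\n",
                indent, "", (unsigned int)vcpuid);
}

/* Common wrapper keeps existing call sites (vcpu_dump()) unchanged. */
static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
                             uint8_t indent)
{
        vcpu_arch_dump(stream, vm, vcpuid, indent);
}

int main(void)
{
        vcpu_dump(stdout, NULL, 0, 2);  /* vm is unused by the stub */
        return 0;
}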
@@ -95,23 +95,6 @@ struct kvm_vm {
 struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid);
-/*
- * Virtual Translation Tables Dump
- *
- * Input Args:
- *   stream - Output FILE stream
- *   vm - Virtual Machine
- *   indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps to the FILE stream given by @stream, the contents of all the
- * virtual translation tables for the VM given by @vm.
- */
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
 struct userspace_mem_region *
 memslot2region(struct kvm_vm *vm, uint32_t memslot);
@@ -291,25 +274,6 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
 	return fd;
 }
-/*
- * VM VCPU Dump
- *
- * Input Args:
- *   stream - Output FILE stream
- *   vm - Virtual Machine
- *   vcpuid - VCPU ID
- *   indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the current state of the VCPU specified by @vcpuid, within the VM
- * given by @vm, to the FILE stream given by @stream.
- */
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
-	       uint8_t indent);
 void vm_create_irqchip(struct kvm_vm *vm);
 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
@@ -336,23 +300,6 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
-/*
- * Address Guest Virtual to Guest Physical
- *
- * Input Args:
- *   vm - Virtual Machine
- *   gva - VM virtual address
- *
- * Output Args: None
- *
- * Return:
- *   Equivalent VM physical address
- *
- * Returns the VM physical address of the translated VM virtual
- * address given by @gva.
- */
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
@@ -569,26 +516,6 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
 const char *exit_reason_str(unsigned int exit_reason);
-void virt_pgd_alloc(struct kvm_vm *vm);
-/*
- * VM Virtual Page Map
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vaddr - VM Virtual Address
- *   paddr - VM Physical Address
- *   memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
- * Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
- */
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
@@ -657,16 +584,6 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
-/*
- * Adds a vCPU with reasonable defaults (e.g. a stack)
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vcpuid - The id of the VCPU to add to the VM.
- *   guest_code - The vCPU's entry point
- */
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
 unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
@@ -705,4 +622,121 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid);
+
+/*
+ * VM VCPU Dump
+ *
+ * Input Args:
+ *   stream - Output FILE stream
+ *   vm - Virtual Machine
+ *   vcpuid - VCPU ID
+ *   indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps the current state of the VCPU specified by @vcpuid, within the VM
+ * given by @vm, to the FILE stream given by @stream.
+ */
+void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+		    uint8_t indent);
+
+static inline void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+			     uint8_t indent)
+{
+	vcpu_arch_dump(stream, vm, vcpuid, indent);
+}
+
+/*
+ * Adds a vCPU with reasonable defaults (e.g. a stack)
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpuid - The id of the VCPU to add to the VM.
+ *   guest_code - The vCPU's entry point
+ */
+void vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
+
+static inline void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
+					void *guest_code)
+{
+	vm_arch_vcpu_add(vm, vcpuid, guest_code);
+}
+
+void virt_arch_pgd_alloc(struct kvm_vm *vm);
+
+static inline void virt_pgd_alloc(struct kvm_vm *vm)
+{
+	virt_arch_pgd_alloc(vm);
+}
+
+/*
+ * VM Virtual Page Map
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vaddr - VM Virtual Address
+ *   paddr - VM Physical Address
+ *   memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within @vm, creates a virtual translation for the page starting
+ * at @vaddr to the page starting at @paddr.
+ */
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
+
+static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+	virt_arch_pg_map(vm, vaddr, paddr);
+}
+
+/*
+ * Address Guest Virtual to Guest Physical
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   gva - VM virtual address
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Equivalent VM physical address
+ *
+ * Returns the VM physical address of the translated VM virtual
+ * address given by @gva.
+ */
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
+
+static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+	return addr_arch_gva2gpa(vm, gva);
+}
+
+/*
+ * Virtual Translation Tables Dump
+ *
+ * Input Args:
+ *   stream - Output FILE stream
+ *   vm - Virtual Machine
+ *   indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps to the FILE stream given by @stream, the contents of all the
+ * virtual translation tables for the VM given by @vm.
+ */
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+
+static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+{
+	virt_arch_dump(stream, vm, indent);
+}
+
 #endif /* SELFTEST_KVM_UTIL_BASE_H */
@@ -74,7 +74,7 @@ static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
 	return 1 << (vm->page_shift - 3);
 }
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
 	if (!vm->pgd_created) {
 		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
@@ -131,14 +131,14 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
 	_virt_pg_map(vm, vaddr, paddr, attr_idx);
 }
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
 	uint64_t *ptep;
@@ -195,7 +195,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
 #endif
 }
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
 	int level = 4 - (vm->pgtable_levels - 1);
 	uint64_t pgd, *ptep;
@@ -303,7 +303,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 	set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
 }
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
 	uint64_t pstate, pc;
@@ -330,7 +330,7 @@ void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
 	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
 }
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+void vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 {
 	aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
 }
...
@@ -53,7 +53,7 @@ static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
 	return (gva & pte_index_mask[level]) >> pte_index_shift[level];
 }
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
 	if (!vm->pgd_created) {
 		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
@@ -64,7 +64,7 @@ void virt_pgd_alloc(struct kvm_vm *vm)
 	}
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	uint64_t *ptep, next_ppn;
 	int level = vm->pgtable_levels - 1;
@@ -108,7 +108,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
 }
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
 	uint64_t *ptep;
 	int level = vm->pgtable_levels - 1;
@@ -159,7 +159,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 #endif
 }
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
 	int level = vm->pgtable_levels - 1;
 	uint64_t pgd, *ptep;
@@ -201,7 +201,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
 	set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
 }
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
 	struct kvm_riscv_core core;
@@ -274,7 +274,7 @@ static void __aligned(16) guest_unexp_trap(void)
 		  0, 0, 0, 0, 0, 0);
 }
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+void vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 {
 	int r;
 	size_t stack_size = vm->page_size == 4096 ?
...
@@ -10,7 +10,7 @@
 #define PAGES_PER_REGION 4
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
 	vm_paddr_t paddr;
@@ -46,7 +46,7 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
 		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
 {
 	int ri, idx;
 	uint64_t *entry;
@@ -85,7 +85,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
 	entry[idx] = gpa;
 }
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
 	int ri, idx;
 	uint64_t *entry;
@@ -146,7 +146,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 	}
 }
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
 	if (!vm->pgd_created)
 		return;
@@ -154,7 +154,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	virt_dump_region(stream, vm, indent, vm->pgd);
 }
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+void vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 {
 	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
 	uint64_t stack_vaddr;
@@ -205,7 +205,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 	va_end(ap);
 }
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
 	struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
...
@@ -109,7 +109,7 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
 	}
 }
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
 		    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -207,7 +207,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
 }
@@ -302,7 +302,7 @@ void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
 	*(uint64_t *)new_pte = pte;
 }
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
 	uint64_t *pml4e, *pml4e_start;
 	uint64_t *pdpe, *pdpe_start;
@@ -483,7 +483,7 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
 	kvm_seg_fill_gdt_64bit(vm, segp);
 }
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
 	uint16_t index[4];
 	uint64_t *pml4e, *pdpe, *pde;
@@ -632,7 +632,7 @@ void vm_xsave_req_perm(int bit)
 		    bitmask);
 }
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+void vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 {
 	struct kvm_mp_state mp_state;
 	struct kvm_regs regs;
@@ -873,7 +873,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 	va_end(ap);
 }
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
 {
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
...