Commit 9da0e4d5 authored by Paolo Bonzini

KVM: x86: work on all available address spaces

This patch has no semantic change, but it prepares for the introduction
of a second address space for system management mode.

A new function x86_set_memory_region (and the "slots_lock taken"
counterpart __x86_set_memory_region) is introduced in order to
operate on all address spaces when adding or deleting private
memory slots.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 54bf36aa
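
The heart of the change is the slot-id encoding used by __x86_set_memory_region below: the address-space index is folded into the upper 16 bits of the memslot id, so a single logical private slot is replicated once per address space. The following standalone sketch illustrates just that encoding; the helper names and the printf harness are illustrative, only the "slot | (i << 16)" scheme comes from the patch:

#include <stdio.h>

#define KVM_ADDRESS_SPACE_NUM 2	/* illustrative; 2 once SMM adds its address space */

/* Mirror of "m.slot |= i << 16" in __x86_set_memory_region. */
static unsigned int encode_slot(unsigned int slot, unsigned int as_id)
{
	return slot | (as_id << 16);
}

static unsigned int slot_as_id(unsigned int id) { return id >> 16; }
static unsigned int slot_index(unsigned int id) { return id & 0xffff; }

int main(void)
{
	unsigned int as_id, slot = 3;	/* some private memslot id */

	for (as_id = 0; as_id < KVM_ADDRESS_SPACE_NUM; as_id++) {
		unsigned int id = encode_slot(slot, as_id);

		printf("address space %u: id 0x%05x (index %u)\n",
		       slot_as_id(id), id, slot_index(id));
	}
	return 0;
}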
arch/x86/include/asm/kvm_host.h
@@ -1189,4 +1189,9 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
 
+int __x86_set_memory_region(struct kvm *kvm,
+			    const struct kvm_userspace_memory_region *mem);
+int x86_set_memory_region(struct kvm *kvm,
+			  const struct kvm_userspace_memory_region *mem);
+
 #endif /* _ASM_X86_KVM_HOST_H */
arch/x86/kvm/mmu.c
@@ -1503,9 +1503,10 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	struct slot_rmap_walk_iterator iterator;
 	int ret = 0;
+	int i;
 
-	slots = kvm_memslots(kvm);
-
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(memslot, slots) {
 			unsigned long hva_start, hva_end;
 			gfn_t gfn_start, gfn_end;
@@ -1523,11 +1524,13 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
 			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
-					 PT_MAX_HUGEPAGE_LEVEL, gfn_start, gfn_end - 1,
+						 PT_MAX_HUGEPAGE_LEVEL,
+						 gfn_start, gfn_end - 1,
 						 &iterator)
 				ret |= handler(kvm, iterator.rmap, memslot,
 					       iterator.gfn, iterator.level, data);
 		}
+	}
 
 	return ret;
 }
@@ -4536,10 +4539,11 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	int i;
 
-	slots = kvm_memslots(kvm);
-
 	spin_lock(&kvm->mmu_lock);
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(memslot, slots) {
 			gfn_t start, end;
 
@@ -4552,6 +4556,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 					PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
 					start, end - 1, true);
 		}
+	}
 
 	spin_unlock(&kvm->mmu_lock);
 }
@@ -4907,11 +4912,14 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 	unsigned int nr_pages = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	int i;
 
-	slots = kvm_memslots(kvm);
-
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+
 		kvm_for_each_memslot(memslot, slots)
 			nr_pages += memslot->npages;
+	}
 
 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
 	nr_mmu_pages = max(nr_mmu_pages,
...
arch/x86/kvm/vmx.c
@@ -4115,7 +4115,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	kvm_userspace_mem.flags = 0;
 	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
 	if (r)
 		goto out;
 
@@ -4150,7 +4150,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	kvm_userspace_mem.guest_phys_addr =
 		kvm->arch.ept_identity_map_addr;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
 
 	return r;
 }
@@ -4956,7 +4956,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 		.flags = 0,
 	};
 
-	ret = kvm_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, &tss_mem);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
...
arch/x86/kvm/x86.c
@@ -7963,6 +7963,40 @@ void kvm_arch_sync_events(struct kvm *kvm)
 	kvm_free_pit(kvm);
 }
 
+int __x86_set_memory_region(struct kvm *kvm,
+			    const struct kvm_userspace_memory_region *mem)
+{
+	int i, r;
+
+	/* Called with kvm->slots_lock held.  */
+	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		struct kvm_userspace_memory_region m = *mem;
+
+		m.slot |= i << 16;
+		r = __kvm_set_memory_region(kvm, &m);
+		if (r < 0)
+			return r;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__x86_set_memory_region);
+
+int x86_set_memory_region(struct kvm *kvm,
+			  const struct kvm_userspace_memory_region *mem)
+{
+	int r;
+
+	mutex_lock(&kvm->slots_lock);
+	r = __x86_set_memory_region(kvm, mem);
+	mutex_unlock(&kvm->slots_lock);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(x86_set_memory_region);
+
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	if (current->mm == kvm->mm) {
@@ -7974,13 +8008,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		struct kvm_userspace_memory_region mem;
 		memset(&mem, 0, sizeof(mem));
 		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-		kvm_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, &mem);
 
 		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-		kvm_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, &mem);
 
 		mem.slot = TSS_PRIVATE_MEMSLOT;
-		kvm_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, &mem);
 	}
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
...
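
For callers, x86_set_memory_region behaves like the kvm_set_memory_region calls it replaces: fill in a kvm_userspace_memory_region once and the helper installs it in every address space, and a memory_size of 0 deletes the slot, as kvm_arch_destroy_vm does above. A minimal sketch of the calling pattern, assuming the kernel context of this commit; the function itself and its values are illustrative, not part of the patch:

/* Illustrative only: assumes struct kvm, PAGE_SIZE, TSS_PRIVATE_MEMSLOT
 * and x86_set_memory_region as introduced by this commit. */
static int example_private_slot(struct kvm *kvm, unsigned long addr)
{
	struct kvm_userspace_memory_region mem = {
		.slot = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,	/* create (or move) the slot */
	};
	int r;

	/* Takes kvm->slots_lock itself; call __x86_set_memory_region
	 * instead when the lock is already held. */
	r = x86_set_memory_region(kvm, &mem);
	if (r)
		return r;

	mem.memory_size = 0;			/* a zero size deletes the slot */
	return x86_set_memory_region(kvm, &mem);
}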