Commit 71a4c30b authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: Refactor error handling for setting memory region

Replace a big pile o' gotos with returns to make it more obvious what
error code is being returned, and to prepare for refactoring the
functional, i.e. post-checks, portion of __kvm_set_memory_region().
Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bd0e96fd
...@@ -1005,34 +1005,33 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -1005,34 +1005,33 @@ int __kvm_set_memory_region(struct kvm *kvm,
r = check_memory_region_flags(mem); r = check_memory_region_flags(mem);
if (r) if (r)
goto out; return r;
r = -EINVAL;
as_id = mem->slot >> 16; as_id = mem->slot >> 16;
id = (u16)mem->slot; id = (u16)mem->slot;
/* General sanity checks */ /* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1)) if (mem->memory_size & (PAGE_SIZE - 1))
goto out; return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1)) if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out; return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */ /* We can read the guest memory with __xxx_user() later on. */
if ((id < KVM_USER_MEM_SLOTS) && if ((id < KVM_USER_MEM_SLOTS) &&
((mem->userspace_addr & (PAGE_SIZE - 1)) || ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr, !access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))) mem->memory_size)))
goto out; return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
goto out; return -EINVAL;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out; return -EINVAL;
slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT;
if (npages > KVM_MEM_MAX_NR_PAGES) if (npages > KVM_MEM_MAX_NR_PAGES)
goto out; return -EINVAL;
new = old = *slot; new = old = *slot;
...@@ -1049,20 +1048,18 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -1049,20 +1048,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
if ((new.userspace_addr != old.userspace_addr) || if ((new.userspace_addr != old.userspace_addr) ||
(npages != old.npages) || (npages != old.npages) ||
((new.flags ^ old.flags) & KVM_MEM_READONLY)) ((new.flags ^ old.flags) & KVM_MEM_READONLY))
goto out; return -EINVAL;
if (base_gfn != old.base_gfn) if (base_gfn != old.base_gfn)
change = KVM_MR_MOVE; change = KVM_MR_MOVE;
else if (new.flags != old.flags) else if (new.flags != old.flags)
change = KVM_MR_FLAGS_ONLY; change = KVM_MR_FLAGS_ONLY;
else { /* Nothing to change. */ else /* Nothing to change. */
r = 0; return 0;
goto out;
}
} }
} else { } else {
if (!old.npages) if (!old.npages)
goto out; return -EINVAL;
change = KVM_MR_DELETE; change = KVM_MR_DELETE;
new.base_gfn = 0; new.base_gfn = 0;
...@@ -1071,29 +1068,29 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -1071,29 +1068,29 @@ int __kvm_set_memory_region(struct kvm *kvm,
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
/* Check for overlaps */ /* Check for overlaps */
r = -EEXIST;
kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
if (slot->id == id) if (slot->id == id)
continue; continue;
if (!((base_gfn + npages <= slot->base_gfn) || if (!((base_gfn + npages <= slot->base_gfn) ||
(base_gfn >= slot->base_gfn + slot->npages))) (base_gfn >= slot->base_gfn + slot->npages)))
goto out; return -EEXIST;
} }
} }
r = -ENOMEM;
/* Allocate/free page dirty bitmap as needed */ /* Allocate/free page dirty bitmap as needed */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL; new.dirty_bitmap = NULL;
else if (!new.dirty_bitmap) { else if (!new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0) r = kvm_create_dirty_bitmap(&new);
goto out; if (r)
return r;
} }
slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
if (!slots) if (!slots) {
r = -ENOMEM;
goto out_bitmap; goto out_bitmap;
}
memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
...@@ -1144,7 +1141,6 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -1144,7 +1141,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
out_bitmap: out_bitmap:
if (new.dirty_bitmap && !old.dirty_bitmap) if (new.dirty_bitmap && !old.dirty_bitmap)
kvm_destroy_dirty_bitmap(&new); kvm_destroy_dirty_bitmap(&new);
out:
return r; return r;
} }
EXPORT_SYMBOL_GPL(__kvm_set_memory_region); EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment