Commit 0f9bdef3 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Wait 'til the bitter end to initialize the "new" memslot

Initialize the "new" memslot in the !DELETE path only after the various
sanity checks have passed.  This will allow a future commit to allocate
@new dynamically without having to copy a memslot, and without having to
deal with freeing @new in error paths and in the "nothing to change" path
that's hiding in the sanity checks.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <a084d0531ca3a826a7f861eb2b08b5d1c06ef265.1638817641.git.maciej.szmigiero@oracle.com>
parent 44401a20
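
For readers skimming the diff below, here is a minimal user-space sketch of the "validate first, initialize last" ordering this commit sets up. It assumes the anticipated follow-up change that allocates @new dynamically (e.g. via kzalloc()); the struct and helper names are illustrative stand-ins, not KVM's actual API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a memslot; not KVM's real structure. */
struct slot {
	uint64_t base_gfn;
	uint64_t npages;
	uint32_t flags;
};

/* Hypothetical stand-in for kvm_set_memslot(); takes ownership of @new. */
static int commit_slot(struct slot *new)
{
	/* ... install the slot; the callee owns and frees it from here ... */
	free(new);
	return 0;
}

static int set_slot(uint64_t gpa, uint64_t size, uint32_t flags)
{
	struct slot *new;

	/*
	 * All sanity checks run before anything is allocated, so every
	 * early return needs no cleanup at all ...
	 */
	if (!size || (gpa & 0xfffULL) || (size & 0xfffULL))
		return -EINVAL;
	if (gpa + size < gpa)		/* overflow */
		return -EINVAL;

	/* ... and the allocation + initialization come at the bitter end. */
	new = calloc(1, sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->base_gfn = gpa >> 12;
	new->npages = size >> 12;
	new->flags = flags;

	return commit_slot(new);
}

int main(void)
{
	printf("valid:   %d\n", set_slot(0x100000, 0x200000, 0));
	printf("invalid: %d\n", set_slot(0x100123, 0x200000, 0)); /* -EINVAL, nothing leaked */
	return 0;
}

With the checks hoisted above the allocation, a later kzalloc()-based version needs to free @new only on paths that run after the allocation, which is exactly the simplification the commit message describes.
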
@@ -1843,6 +1843,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot new;
 	struct kvm_memslots *slots;
 	enum kvm_mr_change change;
+	unsigned long npages;
+	gfn_t base_gfn;
 	int as_id, id;
 	int r;
 
@@ -1869,6 +1871,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		return -EINVAL;
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		return -EINVAL;
+	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
+		return -EINVAL;
 
 	slots = __kvm_memslots(kvm, as_id);
 
@@ -1892,15 +1896,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE);
 	}
 
-	new.as_id = as_id;
-	new.id = id;
-	new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
-	new.npages = mem->memory_size >> PAGE_SHIFT;
-	new.flags = mem->flags;
-	new.userspace_addr = mem->userspace_addr;
-
-	if (new.npages > KVM_MEM_MAX_NR_PAGES)
-		return -EINVAL;
+	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
+	npages = (mem->memory_size >> PAGE_SHIFT);
 
 	if (!old || !old->npages) {
 		change = KVM_MR_CREATE;
@@ -1909,27 +1906,33 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		 * To simplify KVM internals, the total number of pages across
 		 * all memslots must fit in an unsigned long.
 		 */
-		if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages)
+		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
			return -EINVAL;
 	} else { /* Modify an existing slot. */
-		if ((new.userspace_addr != old->userspace_addr) ||
-		    (new.npages != old->npages) ||
-		    ((new.flags ^ old->flags) & KVM_MEM_READONLY))
+		if ((mem->userspace_addr != old->userspace_addr) ||
+		    (npages != old->npages) ||
+		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
 			return -EINVAL;
 
-		if (new.base_gfn != old->base_gfn)
+		if (base_gfn != old->base_gfn)
 			change = KVM_MR_MOVE;
-		else if (new.flags != old->flags)
+		else if (mem->flags != old->flags)
 			change = KVM_MR_FLAGS_ONLY;
 		else /* Nothing to change. */
 			return 0;
 	}
 
 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
-	    kvm_check_memslot_overlap(slots, id, new.base_gfn,
-				      new.base_gfn + new.npages))
+	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
 		return -EEXIST;
 
+	new.as_id = as_id;
+	new.id = id;
+	new.base_gfn = base_gfn;
+	new.npages = npages;
+	new.flags = mem->flags;
+	new.userspace_addr = mem->userspace_addr;
+
 	return kvm_set_memslot(kvm, old, &new, change);
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);