Commit 414de7ab authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Drop kvm_arch_create_memslot()

Remove kvm_arch_create_memslot() now that all arch implementations are
effectively nops.  Removing kvm_arch_create_memslot() eliminates the
possibility for arch specific code to allocate memory prior to setting
a memslot, which sets the stage for simplifying kvm_free_memslot().

Cc: Janosch Frank <frankja@linux.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0dab98b7
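
Context for the change: with the hook gone, an architecture that genuinely needs per-memslot allocations is expected to perform them in kvm_arch_prepare_memory_region(), which the common code calls before the new slot is installed (x86 made this move in the parent commit). A minimal sketch of that pattern, assuming a hypothetical arch_data field in the arch portion of struct kvm_memory_slot:

	int kvm_arch_prepare_memory_region(struct kvm *kvm,
					   struct kvm_memory_slot *memslot,
					   const struct kvm_userspace_memory_region *mem,
					   enum kvm_mr_change change)
	{
		/* Only a brand-new slot needs fresh metadata. */
		if (change == KVM_MR_CREATE) {
			/* arch_data is illustrative, not a real field. */
			memslot->arch.arch_data = kvcalloc(memslot->npages,
							   sizeof(*memslot->arch.arch_data),
							   GFP_KERNEL_ACCOUNT);
			if (!memslot->arch.arch_data)
				return -ENOMEM;
		}

		return 0;
	}

Failing here is safe: the common code has not yet committed the slot, so there is no generic state to unwind beyond what the arch itself allocated.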
arch/mips/kvm/mips.c
@@ -188,12 +188,6 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
 	return -ENOIOCTLCMD;
 }
 
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
-}
-
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	/* Flush whole GPA */
arch/powerpc/kvm/powerpc.c
@@ -691,12 +691,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 	kvmppc_core_free_memslot(kvm, free, dont);
 }
 
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
-}
-
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   const struct kvm_userspace_memory_region *mem,
arch/s390/kvm/kvm-s390.c
@@ -4507,12 +4507,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
-}
-
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
arch/x86/kvm/x86.c
@@ -9873,12 +9873,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 	kvm_page_track_free_memslot(free, dont);
 }
 
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
-}
-
 static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
 				      unsigned long npages)
 {
include/linux/kvm_host.h
@@ -671,8 +671,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			    const struct kvm_userspace_memory_region *mem);
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages);
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
virt/kvm/arm/mmu.c
@@ -2354,12 +2354,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 {
 }
 
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
-}
-
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 }
virt/kvm/kvm_main.c
@@ -1040,12 +1040,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.base_gfn = base_gfn;
 	new.npages = npages;
 	new.flags = mem->flags;
+	new.userspace_addr = mem->userspace_addr;
 
 	if (npages) {
 		if (!old.npages)
 			change = KVM_MR_CREATE;
 		else { /* Modify an existing slot. */
-			if ((mem->userspace_addr != old.userspace_addr) ||
+			if ((new.userspace_addr != old.userspace_addr) ||
 			    (npages != old.npages) ||
 			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
 				goto out;
@@ -1080,22 +1081,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		}
 	}
 
-	/* Free page dirty bitmap if unneeded */
-	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
-		new.dirty_bitmap = NULL;
-
 	r = -ENOMEM;
-	if (change == KVM_MR_CREATE) {
-		new.userspace_addr = mem->userspace_addr;
 
-		if (kvm_arch_create_memslot(kvm, &new, npages))
-			goto out;
-	}
-
-	/* Allocate page dirty bitmap if needed */
-	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
+	/* Allocate/free page dirty bitmap as needed */
+	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
+		new.dirty_bitmap = NULL;
+	else if (!new.dirty_bitmap) {
 		if (kvm_create_dirty_bitmap(&new) < 0)
-			goto out_free;
+			goto out;
 	}
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
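
Note also that the bitmap failure path in the second hunk changes from goto out_free to goto out: with kvm_arch_create_memslot() gone, nothing is allocated before the dirty bitmap, so there is nothing extra to unwind on failure. For reference, a sketch of the kvm_create_dirty_bitmap() helper as it looked around this commit (reconstructed from memory, so details may differ):

	static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
	{
		/* Two bitmaps: one live, one handed back by KVM_GET_DIRTY_LOG. */
		unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

		memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
		if (!memslot->dirty_bitmap)
			return -ENOMEM;

		return 0;
	}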