Commit 47ae31e2 authored by Takuya Yoshikawa, committed by Marcelo Tosatti

KVM: set_memory_region: Drop user_alloc from set_memory_region()

Except for ia64's stale KVM_SET_MEMORY_REGION support, this argument is only
used for sanity checks in __kvm_set_memory_region(), which can easily
be changed to use the slot id instead.
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 462fce46
...@@ -942,24 +942,6 @@ long kvm_arch_vm_ioctl(struct file *filp, ...@@ -942,24 +942,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
int r = -ENOTTY; int r = -ENOTTY;
switch (ioctl) { switch (ioctl) {
case KVM_SET_MEMORY_REGION: {
struct kvm_memory_region kvm_mem;
struct kvm_userspace_memory_region kvm_userspace_mem;
r = -EFAULT;
if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
goto out;
kvm_userspace_mem.slot = kvm_mem.slot;
kvm_userspace_mem.flags = kvm_mem.flags;
kvm_userspace_mem.guest_phys_addr =
kvm_mem.guest_phys_addr;
kvm_userspace_mem.memory_size = kvm_mem.memory_size;
r = kvm_vm_ioctl_set_memory_region(kvm,
&kvm_userspace_mem, false);
if (r)
goto out;
break;
}
case KVM_CREATE_IRQCHIP: case KVM_CREATE_IRQCHIP:
r = -EFAULT; r = -EFAULT;
r = kvm_ioapic_init(kvm); r = kvm_ioapic_init(kvm);
......
...@@ -3694,7 +3694,7 @@ static int alloc_apic_access_page(struct kvm *kvm) ...@@ -3694,7 +3694,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
kvm_userspace_mem.flags = 0; kvm_userspace_mem.flags = 0;
kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
kvm_userspace_mem.memory_size = PAGE_SIZE; kvm_userspace_mem.memory_size = PAGE_SIZE;
r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false); r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
if (r) if (r)
goto out; goto out;
...@@ -3724,7 +3724,7 @@ static int alloc_identity_pagetable(struct kvm *kvm) ...@@ -3724,7 +3724,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
kvm_userspace_mem.guest_phys_addr = kvm_userspace_mem.guest_phys_addr =
kvm->arch.ept_identity_map_addr; kvm->arch.ept_identity_map_addr;
kvm_userspace_mem.memory_size = PAGE_SIZE; kvm_userspace_mem.memory_size = PAGE_SIZE;
r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false); r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
if (r) if (r)
goto out; goto out;
...@@ -4364,7 +4364,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) ...@@ -4364,7 +4364,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
.flags = 0, .flags = 0,
}; };
ret = kvm_set_memory_region(kvm, &tss_mem, false); ret = kvm_set_memory_region(kvm, &tss_mem);
if (ret) if (ret)
return ret; return ret;
kvm->arch.tss_addr = addr; kvm->arch.tss_addr = addr;
......
...@@ -453,11 +453,9 @@ id_to_memslot(struct kvm_memslots *slots, int id) ...@@ -453,11 +453,9 @@ id_to_memslot(struct kvm_memslots *slots, int id)
} }
int kvm_set_memory_region(struct kvm *kvm, int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem);
bool user_alloc);
int __kvm_set_memory_region(struct kvm *kvm, int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem);
bool user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free, void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont); struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
...@@ -553,9 +551,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, ...@@ -553,9 +551,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log); struct kvm_dirty_log *log);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct struct kvm_userspace_memory_region *mem);
kvm_userspace_memory_region *mem,
bool user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level); int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp, long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg); unsigned int ioctl, unsigned long arg);
......
...@@ -745,8 +745,7 @@ enum kvm_mr_change { ...@@ -745,8 +745,7 @@ enum kvm_mr_change {
* Must be called holding mmap_sem for write. * Must be called holding mmap_sem for write.
*/ */
int __kvm_set_memory_region(struct kvm *kvm, int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem)
bool user_alloc)
{ {
int r; int r;
gfn_t base_gfn; gfn_t base_gfn;
...@@ -767,7 +766,7 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -767,7 +766,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr & (PAGE_SIZE - 1)) if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out; goto out;
/* We can read the guest memory with __xxx_user() later on. */ /* We can read the guest memory with __xxx_user() later on. */
if (user_alloc && if ((mem->slot < KVM_USER_MEM_SLOTS) &&
((mem->userspace_addr & (PAGE_SIZE - 1)) || ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE, !access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr, (void __user *)(unsigned long)mem->userspace_addr,
...@@ -932,26 +931,23 @@ int __kvm_set_memory_region(struct kvm *kvm, ...@@ -932,26 +931,23 @@ int __kvm_set_memory_region(struct kvm *kvm,
EXPORT_SYMBOL_GPL(__kvm_set_memory_region); EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm, int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem)
bool user_alloc)
{ {
int r; int r;
mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc); r = __kvm_set_memory_region(kvm, mem);
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
return r; return r;
} }
EXPORT_SYMBOL_GPL(kvm_set_memory_region); EXPORT_SYMBOL_GPL(kvm_set_memory_region);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct struct kvm_userspace_memory_region *mem)
kvm_userspace_memory_region *mem,
bool user_alloc)
{ {
if (mem->slot >= KVM_USER_MEM_SLOTS) if (mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL; return -EINVAL;
return kvm_set_memory_region(kvm, mem, user_alloc); return kvm_set_memory_region(kvm, mem);
} }
int kvm_get_dirty_log(struct kvm *kvm, int kvm_get_dirty_log(struct kvm *kvm,
...@@ -2198,7 +2194,7 @@ static long kvm_vm_ioctl(struct file *filp, ...@@ -2198,7 +2194,7 @@ static long kvm_vm_ioctl(struct file *filp,
sizeof kvm_userspace_mem)) sizeof kvm_userspace_mem))
goto out; goto out;
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true); r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
break; break;
} }
case KVM_GET_DIRTY_LOG: { case KVM_GET_DIRTY_LOG: {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment