Commit 8c61af9e authored by Paolo Bonzini's avatar Paolo Bonzini

Merge branch 'annotations' of...

Merge branch 'annotations' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux into kvm-master
parents 70bcd708 7e988b10
...@@ -234,7 +234,7 @@ struct kvm_vcpu { ...@@ -234,7 +234,7 @@ struct kvm_vcpu {
int guest_fpu_loaded, guest_xcr0_loaded; int guest_fpu_loaded, guest_xcr0_loaded;
struct swait_queue_head wq; struct swait_queue_head wq;
struct pid *pid; struct pid __rcu *pid;
int sigset_active; int sigset_active;
sigset_t sigset; sigset_t sigset;
struct kvm_vcpu_stat stat; struct kvm_vcpu_stat stat;
...@@ -390,7 +390,7 @@ struct kvm { ...@@ -390,7 +390,7 @@ struct kvm {
spinlock_t mmu_lock; spinlock_t mmu_lock;
struct mutex slots_lock; struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */ struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/* /*
...@@ -404,7 +404,7 @@ struct kvm { ...@@ -404,7 +404,7 @@ struct kvm {
int last_boosted_vcpu; int last_boosted_vcpu;
struct list_head vm_list; struct list_head vm_list;
struct mutex lock; struct mutex lock;
struct kvm_io_bus *buses[KVM_NR_BUSES]; struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD #ifdef CONFIG_HAVE_KVM_EVENTFD
struct { struct {
spinlock_t lock; spinlock_t lock;
...@@ -473,6 +473,12 @@ struct kvm { ...@@ -473,6 +473,12 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \ #define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
/*
 * Look up the I/O bus for @idx on @kvm.
 *
 * The buses array is __rcu-annotated; dereferencing is legal either
 * inside an srcu read-side critical section on kvm->srcu, or while
 * holding kvm->slots_lock (the write-side lock), which is exactly
 * what srcu_dereference_check() verifies here.
 */
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	struct kvm_io_bus *bus;

	bus = srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				     lockdep_is_held(&kvm->slots_lock));
	return bus;
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{ {
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
...@@ -562,9 +568,8 @@ void kvm_put_kvm(struct kvm *kvm); ...@@ -562,9 +568,8 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{ {
return rcu_dereference_check(kvm->memslots[as_id], return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
srcu_read_lock_held(&kvm->srcu) lockdep_is_held(&kvm->slots_lock));
|| lockdep_is_held(&kvm->slots_lock));
} }
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
......
...@@ -825,7 +825,7 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm, ...@@ -825,7 +825,7 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
if (ret < 0) if (ret < 0)
goto unlock_fail; goto unlock_fail;
kvm->buses[bus_idx]->ioeventfd_count++; kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
list_add_tail(&p->list, &kvm->ioeventfds); list_add_tail(&p->list, &kvm->ioeventfds);
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
...@@ -848,6 +848,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -848,6 +848,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
{ {
struct _ioeventfd *p, *tmp; struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd; struct eventfd_ctx *eventfd;
struct kvm_io_bus *bus;
int ret = -ENOENT; int ret = -ENOENT;
eventfd = eventfd_ctx_fdget(args->fd); eventfd = eventfd_ctx_fdget(args->fd);
...@@ -870,8 +871,9 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -870,8 +871,9 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
continue; continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
if (kvm->buses[bus_idx]) bus = kvm_get_bus(kvm, bus_idx);
kvm->buses[bus_idx]->ioeventfd_count--; if (bus)
bus->ioeventfd_count--;
ioeventfd_release(p); ioeventfd_release(p);
ret = 0; ret = 0;
break; break;
......
...@@ -230,7 +230,7 @@ int kvm_set_irq_routing(struct kvm *kvm, ...@@ -230,7 +230,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
} }
mutex_lock(&kvm->irq_lock); mutex_lock(&kvm->irq_lock);
old = kvm->irq_routing; old = rcu_dereference_protected(kvm->irq_routing, 1);
rcu_assign_pointer(kvm->irq_routing, new); rcu_assign_pointer(kvm->irq_routing, new);
kvm_irq_routing_update(kvm); kvm_irq_routing_update(kvm);
kvm_arch_irq_routing_update(kvm); kvm_arch_irq_routing_update(kvm);
......
...@@ -299,7 +299,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init); ...@@ -299,7 +299,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{ {
put_pid(vcpu->pid); /*
* no need for rcu_read_lock as VCPU_RUN is the only place that
* will change the vcpu->pid pointer and on uninit all file
* descriptors are already gone.
*/
put_pid(rcu_dereference_protected(vcpu->pid, 1));
kvm_arch_vcpu_uninit(vcpu); kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run); free_page((unsigned long)vcpu->run);
} }
...@@ -680,8 +685,8 @@ static struct kvm *kvm_create_vm(unsigned long type) ...@@ -680,8 +685,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (init_srcu_struct(&kvm->irq_srcu)) if (init_srcu_struct(&kvm->irq_srcu))
goto out_err_no_irq_srcu; goto out_err_no_irq_srcu;
for (i = 0; i < KVM_NR_BUSES; i++) { for (i = 0; i < KVM_NR_BUSES; i++) {
kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), rcu_assign_pointer(kvm->buses[i],
GFP_KERNEL); kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
if (!kvm->buses[i]) if (!kvm->buses[i])
goto out_err; goto out_err;
} }
...@@ -706,9 +711,10 @@ static struct kvm *kvm_create_vm(unsigned long type) ...@@ -706,9 +711,10 @@ static struct kvm *kvm_create_vm(unsigned long type)
hardware_disable_all(); hardware_disable_all();
out_err_no_disable: out_err_no_disable:
for (i = 0; i < KVM_NR_BUSES; i++) for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm->buses[i]); kfree(rcu_access_pointer(kvm->buses[i]));
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, kvm->memslots[i]); kvm_free_memslots(kvm,
rcu_dereference_protected(kvm->memslots[i], 1));
kvm_arch_free_vm(kvm); kvm_arch_free_vm(kvm);
mmdrop(current->mm); mmdrop(current->mm);
return ERR_PTR(r); return ERR_PTR(r);
...@@ -741,8 +747,11 @@ static void kvm_destroy_vm(struct kvm *kvm) ...@@ -741,8 +747,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
spin_unlock(&kvm_lock); spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm); kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) { for (i = 0; i < KVM_NR_BUSES; i++) {
if (kvm->buses[i]) struct kvm_io_bus *bus;
kvm_io_bus_destroy(kvm->buses[i]);
bus = rcu_dereference_protected(kvm->buses[i], 1);
if (bus)
kvm_io_bus_destroy(bus);
kvm->buses[i] = NULL; kvm->buses[i] = NULL;
} }
kvm_coalesced_mmio_free(kvm); kvm_coalesced_mmio_free(kvm);
...@@ -754,7 +763,8 @@ static void kvm_destroy_vm(struct kvm *kvm) ...@@ -754,7 +763,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_arch_destroy_vm(kvm); kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm); kvm_destroy_devices(kvm);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, kvm->memslots[i]); kvm_free_memslots(kvm,
rcu_dereference_protected(kvm->memslots[i], 1));
cleanup_srcu_struct(&kvm->irq_srcu); cleanup_srcu_struct(&kvm->irq_srcu);
cleanup_srcu_struct(&kvm->srcu); cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm); kvm_arch_free_vm(kvm);
...@@ -2557,13 +2567,14 @@ static long kvm_vcpu_ioctl(struct file *filp, ...@@ -2557,13 +2567,14 @@ static long kvm_vcpu_ioctl(struct file *filp,
if (r) if (r)
return r; return r;
switch (ioctl) { switch (ioctl) {
case KVM_RUN: case KVM_RUN: {
struct pid *oldpid;
r = -EINVAL; r = -EINVAL;
if (arg) if (arg)
goto out; goto out;
if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { oldpid = rcu_access_pointer(vcpu->pid);
if (unlikely(oldpid != current->pids[PIDTYPE_PID].pid)) {
/* The thread running this VCPU changed. */ /* The thread running this VCPU changed. */
struct pid *oldpid = vcpu->pid;
struct pid *newpid = get_task_pid(current, PIDTYPE_PID); struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
rcu_assign_pointer(vcpu->pid, newpid); rcu_assign_pointer(vcpu->pid, newpid);
...@@ -2574,6 +2585,7 @@ static long kvm_vcpu_ioctl(struct file *filp, ...@@ -2574,6 +2585,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r); trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break; break;
}
case KVM_GET_REGS: { case KVM_GET_REGS: {
struct kvm_regs *kvm_regs; struct kvm_regs *kvm_regs;
...@@ -3569,7 +3581,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, ...@@ -3569,7 +3581,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
{ {
struct kvm_io_bus *new_bus, *bus; struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx]; bus = kvm_get_bus(kvm, bus_idx);
if (!bus) if (!bus)
return -ENOMEM; return -ENOMEM;
...@@ -3598,7 +3610,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -3598,7 +3610,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
int i; int i;
struct kvm_io_bus *new_bus, *bus; struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx]; bus = kvm_get_bus(kvm, bus_idx);
if (!bus) if (!bus)
return; return;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.