Commit 7411be04 authored by Junaid Shahid, committed by Stefan Bader

kvm: Convert kvm_lock to a mutex

There does not seem to be any particular need for kvm_lock to be a
spinlock, so convert it to a mutex so that sleepable functions (in
particular cond_resched()) can be called while holding it.
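
A minimal sketch of the resulting pattern (illustration only; example_lock, example_vm and walk_vms() are hypothetical names, not code from this patch): the lock definition moves from DEFINE_SPINLOCK() to DEFINE_MUTEX(), each spin_lock()/spin_unlock() on kvm_lock becomes mutex_lock()/mutex_unlock(), and the critical section may now sleep, e.g. via cond_resched().

/* Sketch only: illustrates the conversion pattern, not code from the patch. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static DEFINE_MUTEX(example_lock);	/* was: static DEFINE_SPINLOCK(example_lock); */
static LIST_HEAD(example_vm_list);

struct example_vm {
	struct list_head vm_list;
};

static void walk_vms(void)
{
	struct example_vm *vm;

	mutex_lock(&example_lock);	/* was: spin_lock(&example_lock); */
	list_for_each_entry(vm, &example_vm_list, vm_list) {
		/*
		 * Legal only because example_lock is now a mutex:
		 * cond_resched() may sleep, which a spinlock forbids.
		 */
		cond_resched();
	}
	mutex_unlock(&example_lock);	/* was: spin_unlock(&example_lock); */
}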
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

CVE-2018-12207

(backported from commit 0d9ce162)
[tyhicks: Backport to 4.4
 - kvm_hyperv_tsc_notifier() does not exist
 - Adjust for surrounding code changes in kvm-s390.c and kvm_main.c]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 5e50df2d
@@ -132,7 +132,7 @@ See the comments in spte_has_volatile_bits() and mmu_spte_update().
 ------------
 Name:		kvm_lock
-Type:		spinlock_t
+Type:		mutex
 Arch:		any
 Protects:	- vm_list
......
@@ -1107,12 +1107,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
 		goto out_err;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	sca_offset += 16;
 	if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
 		sca_offset = 0;
 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	sprintf(debug_name, "kvm-%u", current->pid);
......
@@ -4831,7 +4831,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx;
@@ -4881,7 +4881,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		break;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return freed;
 }
......
@@ -5851,17 +5851,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val
 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
 				continue;
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-			if (vcpu->cpu != smp_processor_id())
+			if (vcpu->cpu != raw_smp_processor_id())
 				send_ipi = 1;
 		}
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	if (freq->old < freq->new && send_ipi) {
 		/*
@@ -6019,12 +6019,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 	int i;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 	atomic_set(&kvm_guest_has_master_clock, 0);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
......
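
The hunk above contains the one change that is not purely mechanical: with kvm_lock as a mutex, the section in kvmclock_cpufreq_notifier() is no longer guaranteed to run with preemption disabled, so smp_processor_id() is replaced by raw_smp_processor_id(). A minimal sketch of why (example_lock and example_running_on() are hypothetical, not from the patch):

/* Sketch only: illustrates the smp_processor_id() -> raw_smp_processor_id() change. */
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_lock);

static bool example_running_on(int target_cpu)
{
	bool match;

	mutex_lock(&example_lock);
	/*
	 * Holding a mutex does not disable preemption, so plain
	 * smp_processor_id() here would trip the "using smp_processor_id()
	 * in preemptible" check under CONFIG_DEBUG_PREEMPT. The raw variant
	 * skips that check; the caller must tolerate a stale answer if the
	 * task migrates right after the read.
	 */
	match = (raw_smp_processor_id() == target_cpu);
	mutex_unlock(&example_lock);

	return match;
}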
@@ -151,7 +151,7 @@ static inline bool is_error_page(struct page *page)
 extern struct kmem_cache *kvm_vcpu_cache;
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 struct kvm_io_range {
......
@@ -84,7 +84,7 @@ module_param(halt_poll_ns_shrink, int, S_IRUGO);
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
@@ -595,9 +595,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	preempt_notifier_inc();
@@ -650,9 +650,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		if (kvm->buses[i])
@@ -3436,10 +3436,10 @@ static int vm_stat_get(void *_offset, u64 *val)
 	struct kvm *kvm;
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		*val += *(u32 *)((void *)kvm + offset);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
@@ -3453,12 +3453,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	int i;
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			*val += *(u32 *)((void *)vcpu + offset);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
......