Commit 5a3d883a authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: switch to get_tod_clock() and fix STP sync races

Nobody except early.c uses store_tod_clock() to handle the
cc; if we ever got a cc != 0, we would be in bigger trouble anyway.

Let's replace all users with get_tod_clock(). Returning a cc
from an ioctl seemed odd either way.

We can now also easily move the get_tod_clock() call into the
preempt_disable() section. This is in fact necessary to make
STP sync work as expected: otherwise the host TOD could change
in between, and we would end up with a wrong epoch calculation.
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 238293b1
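
To see why the TOD read has to happen inside the preemption-disabled section, here is a minimal userspace model of the race (all names are stand-ins of my own, not kernel code): if an STP sync steps the host clock between sampling the TOD and computing the epoch, the guest clock ends up skewed by the step.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins modelling the flow in kvm_s390_set_tod_low(). */
static uint64_t host_tod = 1000;        /* models the hardware TOD clock */

static uint64_t read_host_tod(void)     /* models get_tod_clock() */
{
	return host_tod;
}

int main(void)
{
	uint64_t gtod = 5000;           /* TOD value requested for the guest */

	/* Racy pattern (old code): sample the host TOD early ... */
	uint64_t snapshot = read_host_tod();
	host_tod += 64;                 /* ... then an STP sync steps the clock */
	uint64_t racy_epoch = gtod - snapshot;

	/* Fixed pattern: read the TOD right where the epoch is computed,
	 * with nothing allowed to intervene (preempt_disable() in the kernel). */
	uint64_t fixed_epoch = gtod - read_host_tod();

	/* The guest sees host_tod + epoch; only the fixed epoch yields 5000. */
	printf("racy:  guest sees %llu\n",
	       (unsigned long long)(host_tod + racy_epoch));
	printf("fixed: guest sees %llu\n",
	       (unsigned long long)(host_tod + fixed_epoch));
	return 0;
}
```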
arch/s390/kvm/kvm-s390.c
@@ -523,19 +523,14 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	struct kvm_vcpu *cur_vcpu;
 	unsigned int vcpu_idx;
-	u64 host_tod, gtod;
-	int r;
+	u64 gtod;
 
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	r = store_tod_clock(&host_tod);
-	if (r)
-		return r;
-
 	mutex_lock(&kvm->lock);
 	preempt_disable();
-	kvm->arch.epoch = gtod - host_tod;
+	kvm->arch.epoch = gtod - get_tod_clock();
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
@@ -581,15 +576,10 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 host_tod, gtod;
-	int r;
+	u64 gtod;
 
-	r = store_tod_clock(&host_tod);
-	if (r)
-		return r;
-
 	preempt_disable();
-	gtod = host_tod + kvm->arch.epoch;
+	gtod = get_tod_clock() + kvm->arch.epoch;
 	preempt_enable();
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
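
Both hunks above rely on the same invariant: the SIE epoch is the offset added to the host TOD whenever the guest reads its clock, i.e. guest_tod = host_tod + epoch at any instant. Setting the clock therefore stores gtod - host_tod, and reading it returns host_tod + epoch. A self-contained sketch of that arithmetic, with invented example values:

```c
#include <stdint.h>
#include <assert.h>

/* Minimal model of the epoch arithmetic in the two hunks above. */
static uint64_t epoch;

/* Models kvm_s390_set_tod_low(): make the guest clock read 'gtod'. */
static void set_guest_tod(uint64_t host_tod, uint64_t gtod)
{
	epoch = gtod - host_tod;   /* u64 arithmetic wraps modulo 2^64 */
}

/* Models kvm_s390_get_tod_low(): what the guest currently observes. */
static uint64_t get_guest_tod(uint64_t host_tod)
{
	return host_tod + epoch;
}

int main(void)
{
	set_guest_tod(1000, 5000);             /* guest runs 4000 ticks ahead */
	assert(get_guest_tod(1000) == 5000);   /* right after setting */
	assert(get_guest_tod(1064) == 5064);   /* both clocks advance in step */

	set_guest_tod(9000, 2000);             /* guest behind: epoch wraps */
	assert(get_guest_tod(9000) == 2000);
	return 0;
}
```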
arch/s390/kvm/priv.c
@@ -34,7 +34,7 @@
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu *cpup;
-	s64 hostclk, val;
+	s64 val;
 	int i, rc;
 	ar_t ar;
 	u64 op2;
@@ -49,15 +49,11 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	if (store_tod_clock(&hostclk)) {
-		kvm_s390_set_psw_cc(vcpu, 3);
-		return 0;
-	}
 	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-	val = (val - hostclk) & ~0x3fUL;
 
 	mutex_lock(&vcpu->kvm->lock);
 	preempt_disable();
+	val = (val - get_tod_clock()) & ~0x3fUL;
 	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
 		cpup->arch.sie_block->epoch = val;
 	preempt_enable();
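
In handle_set_clock() (the intercept for the guest's SET CLOCK instruction), the same epoch computation now also happens inside the preempt_disable() section, and the result is masked with ~0x3fUL, which clears the low six bits of the computed epoch (TOD bits 58-63); the mask itself predates this commit and is kept unchanged. A trivial standalone illustration of the mask, with an arbitrary value of my own choosing:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The mask from the hunk above: ~0x3f clears the low six bits
	 * (TOD bits 58-63) of the computed epoch. Example value invented. */
	uint64_t diff = 0x0123456789abcdf7ULL;
	uint64_t epoch = diff & ~0x3fULL;

	printf("diff  = %#018llx\n", (unsigned long long)diff);
	printf("epoch = %#018llx\n", (unsigned long long)epoch); /* ...cdc0 */
	return 0;
}
```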