Commit ee1a15e3 authored by Paolo Bonzini

Merge tag 'kvm-s390-master-4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: fixes for multiple epoch facility

We have certain cases where the multiple epoch facility is broken:
- timer wakeup during an epoch change
- CPU hotplug
- the SCK instruction
- STP sync checks
Fix those.
parents 6c62cc43 0e7def5f
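
Background, for orientation: under SIE the guest TOD is the host TOD plus a per-VCPU epoch offset held in the SIE control block, so whenever the host clock is stepped (an STP sync check) KVM must shift every epoch by the opposite amount; with the multiple epoch facility (facility 139) the offset additionally carries an 8-bit epoch index, epdx. Below is a toy model of that compensation, with illustrative names only (not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Toy model: the guest TOD is the host TOD plus the epoch from the
 * SIE control block, evaluated modulo 2^64. */
int main(void)
{
        uint64_t host_tod = 1000;
        uint64_t epoch = (uint64_t)-200;        /* guest runs 200 behind */

        printf("guest TOD: %llu\n",
               (unsigned long long)(host_tod + epoch));  /* 800 */

        /* STP steps the host TOD forward by 50; adding -50 to the
         * epoch hides the jump from the guest. */
        host_tod += 50;
        epoch += (uint64_t)-50;
        printf("guest TOD: %llu\n",
               (unsigned long long)(host_tod + epoch));  /* still 800 */
        return 0;
}

The hunks below extend exactly this compensation to the (epoch, epdx) pair and funnel every TOD-setting path through a single helper.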
arch/s390/kvm/interrupt.c

@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-        if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+        const u64 ckc = vcpu->arch.sie_block->ckc;
+
+        if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+                if ((s64)ckc >= (s64)now)
+                        return 0;
+        } else if (ckc >= now) {
                 return 0;
+        }
         return ckc_interrupts_enabled(vcpu);
 }
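
The 0x0020000000000000ul mask tests CR0 bit 10 (s390 counts bits from the most significant end), which with the multiple epoch facility selects signed comparison between the clock comparator and the TOD. A minimal user-space sketch of the predicate above; the helper name and sample values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* CR0 bit 10 in s390 MSB-first numbering == bit 53 from the LSB. */
#define CR0_CKC_SIGN 0x0020000000000000UL

/* Mirrors the predicate in ckc_irq_pending(): returns 1 when the
 * clock comparator lies in the past, i.e. the interrupt is due. */
static int ckc_fired(uint64_t cr0, uint64_t ckc, uint64_t now)
{
        if (cr0 & CR0_CKC_SIGN)
                return (int64_t)ckc < (int64_t)now;     /* signed */
        return ckc < now;                               /* unsigned */
}

int main(void)
{
        /* A comparator just past the 64-bit midpoint: unsigned it is
         * far in the future, signed it is far in the past. */
        uint64_t ckc = 0x8000000000000001UL;
        uint64_t now = 0x7fffffffffffffffUL;

        printf("unsigned: %d\n", ckc_fired(0, ckc, now));            /* 0 */
        printf("signed:   %d\n", ckc_fired(CR0_CKC_SIGN, ckc, now)); /* 1 */
        return 0;
}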
@@ -1047,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
-        u64 now, cputm, sltime = 0;
+        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+        const u64 ckc = vcpu->arch.sie_block->ckc;
+        u64 cputm, sltime = 0;
 
         if (ckc_interrupts_enabled(vcpu)) {
-                now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-                sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-                /* already expired or overflow? */
-                if (!sltime || vcpu->arch.sie_block->ckc <= now)
+                if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+                        if ((s64)now < (s64)ckc)
+                                sltime = tod_to_ns((s64)ckc - (s64)now);
+                } else if (now < ckc) {
+                        sltime = tod_to_ns(ckc - now);
+                }
+                /* already expired */
+                if (!sltime)
                         return 0;
                 if (cpu_timer_interrupts_enabled(vcpu)) {
                         cputm = kvm_s390_get_cpu_timer(vcpu);
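
For reference, tod_to_ns() scales TOD units to nanoseconds: bit 51 of the TOD ticks once per microsecond, so one unit of the 64-bit value is 1000/4096 ns. The helper below reproduces the arch/s390/include/asm/timex.h implementation of this kernel generation as a self-contained sketch; the split at bit 9 keeps the multiply by 125 from overflowing 64 bits:

#include <assert.h>
#include <stdint.h>

/* 1 TOD unit = 1000/4096 ns = 125/512 ns. */
static uint64_t tod_to_ns(uint64_t todval)
{
        return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
        assert(tod_to_ns(0x1000) == 1000);        /* 4096 units = 1 us    */
        assert(tod_to_ns(0x1000000) == 4096000);  /* 2^24 units = 4096 us */
        return 0;
}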
arch/s390/kvm/kvm-s390.c

@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
                               unsigned long end);
 
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+        u8 delta_idx = 0;
+
+        /*
+         * The TOD jumps by delta, we have to compensate this by adding
+         * -delta to the epoch.
+         */
+        delta = -delta;
+
+        /* sign-extension - we're adding to signed values below */
+        if ((s64)delta < 0)
+                delta_idx = -1;
+
+        scb->epoch += delta;
+        if (scb->ecd & ECD_MEF) {
+                scb->epdx += delta_idx;
+                if (scb->epoch < delta)
+                        scb->epdx += 1;
+        }
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
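
The new helper amounts to a 72-bit signed addition spread across epoch (64 bits) and epdx (8 bits): delta_idx = -1 sign-extends a negative summand into the high byte, and the unsigned wraparound test scb->epoch < delta supplies the carry. A stand-alone sketch of the same arithmetic; the struct is a hypothetical stand-in for the sie_block fields, and the ECD_MEF guard is dropped:

#include <stdint.h>
#include <stdio.h>

struct epoch72 {                /* stand-in for sie_block->epoch/epdx */
        uint64_t epoch;
        uint8_t  epdx;
};

/* Same scheme as kvm_clock_sync_scb(): add -delta to the 72-bit epoch. */
static void epoch_compensate(struct epoch72 *e, uint64_t delta)
{
        uint8_t delta_idx = 0;

        delta = -delta;                 /* the TOD jumped by delta */
        if ((int64_t)delta < 0)
                delta_idx = -1;         /* sign-extend into the high byte */

        e->epoch += delta;
        e->epdx += delta_idx;
        if (e->epoch < delta)           /* unsigned wrap => carry out */
                e->epdx += 1;
}

int main(void)
{
        struct epoch72 e = { .epoch = 0, .epdx = 0 };

        /* TOD jumps forward by 5: epoch becomes -5, epdx sign-extends. */
        epoch_compensate(&e, 5);
        printf("epoch=%llx epdx=%x\n",
               (unsigned long long)e.epoch,
               (unsigned)e.epdx);       /* fffffffffffffffb ff */

        /* TOD jumps back by 5: epoch returns to 0, the carry restores epdx. */
        epoch_compensate(&e, (uint64_t)-5);
        printf("epoch=%llx epdx=%x\n",
               (unsigned long long)e.epoch,
               (unsigned)e.epdx);       /* 0 0 */
        return 0;
}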
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
         unsigned long long *delta = v;
 
         list_for_each_entry(kvm, &vm_list, vm_list) {
-                kvm->arch.epoch -= *delta;
                 kvm_for_each_vcpu(i, vcpu, kvm) {
-                        vcpu->arch.sie_block->epoch -= *delta;
+                        kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+                        if (i == 0) {
+                                kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+                                kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+                        }
                         if (vcpu->arch.cputm_enabled)
                                 vcpu->arch.cputm_start += *delta;
                         if (vcpu->arch.vsie_block)
-                                vcpu->arch.vsie_block->epoch -= *delta;
+                                kvm_clock_sync_scb(vcpu->arch.vsie_block,
+                                                   *delta);
                 }
         }
         return NOTIFY_OK;
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
         if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                 return -EFAULT;
 
-        if (test_kvm_facility(kvm, 139))
-                kvm_s390_set_tod_clock_ext(kvm, &gtod);
-        else if (gtod.epoch_idx == 0)
-                kvm_s390_set_tod_clock(kvm, gtod.tod);
-        else
+        if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
                 return -EINVAL;
+        kvm_s390_set_tod_clock(kvm, &gtod);
 
         VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
                  gtod.epoch_idx, gtod.tod);
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-        u64 gtod;
+        struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+        if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+                           sizeof(gtod.tod)))
                 return -EFAULT;
 
-        kvm_s390_set_tod_clock(kvm, gtod);
-        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+        kvm_s390_set_tod_clock(kvm, &gtod);
+        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
         return 0;
 }
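
All three attribute setters now build the same argument, so the epoch index is validated and applied in exactly one place. For reference, the UAPI type they fill in; reproduced from arch/s390/include/uapi/asm/kvm.h of this kernel generation, so treat the exact layout as indicative:

/* from arch/s390/include/uapi/asm/kvm.h (KVM_S390_VM_TOD_EXT) */
struct kvm_s390_vm_tod_clock {
        __u8  epoch_idx;
        __u64 tod;
};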
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
         mutex_lock(&vcpu->kvm->lock);
         preempt_disable();
         vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+        vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
         preempt_enable();
         mutex_unlock(&vcpu->kvm->lock);
         if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
         return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-                                const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+                            const struct kvm_s390_vm_tod_clock *gtod)
 {
         struct kvm_vcpu *vcpu;
         struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
         get_tod_clock_ext((char *)&htod);
 
         kvm->arch.epoch = gtod->tod - htod.tod;
-        kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-        if (kvm->arch.epoch > gtod->tod)
-                kvm->arch.epdx -= 1;
+        kvm->arch.epdx = 0;
+        if (test_kvm_facility(kvm, 139)) {
+                kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+                if (kvm->arch.epoch > gtod->tod)
+                        kvm->arch.epdx -= 1;
+        }
 
         kvm_s390_vcpu_block_all(kvm);
         kvm_for_each_vcpu(i, vcpu, kvm) {
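
Here kvm->arch.epoch = gtod->tod - htod.tod is evaluated modulo 2^64; the subtraction wraps exactly when htod.tod > gtod->tod, and the wrap is detectable afterwards as kvm->arch.epoch > gtod->tod, which is why the epoch index then takes a borrow. A small numeric check of that equivalence (plain C, no kernel types):

#include <assert.h>
#include <stdint.h>

/* Borrow detection as in kvm_s390_set_tod_clock(): epoch = gtod - htod
 * wraps below zero exactly when the unsigned result exceeds gtod. */
static int sub_borrows(uint64_t gtod, uint64_t htod)
{
        uint64_t epoch = gtod - htod;

        return epoch > gtod;    /* 1 => decrement the epoch index */
}

int main(void)
{
        assert(sub_borrows(100, 7) == 0);       /* 100 - 7: no wrap      */
        assert(sub_borrows(7, 100) == 1);       /* 7 - 100 wraps, borrow */
        assert(sub_borrows(7, 7) == 0);
        return 0;
}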
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
         mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-        struct kvm_vcpu *vcpu;
-        int i;
-
-        mutex_lock(&kvm->lock);
-        preempt_disable();
-        kvm->arch.epoch = tod - get_tod_clock();
-        kvm_s390_vcpu_block_all(kvm);
-        kvm_for_each_vcpu(i, vcpu, kvm)
-                vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-        kvm_s390_vcpu_unblock_all(kvm);
-        preempt_enable();
-        mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
arch/s390/kvm/kvm-s390.h

@@ -281,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-                                const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+                            const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
arch/s390/kvm/priv.c

@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+        struct kvm_s390_vm_tod_clock gtod = { 0 };
         int rc;
         u8 ar;
-        u64 op2, val;
+        u64 op2;
 
         vcpu->stat.instruction_sck++;
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
         op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
         if (op2 & 7)    /* Operand must be on a doubleword boundary */
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-        rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+        rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
         if (rc)
                 return kvm_s390_inject_prog_cond(vcpu, rc);
 
-        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-        kvm_s390_set_tod_clock(vcpu->kvm, val);
+        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+        kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
 
         kvm_s390_set_psw_cc(vcpu, 0);
         return 0;