Commit 24fe0195 authored by Pierre Morel, committed by Janosch Frank

KVM: s390: guest support for topology function

We report a topology change to the guest for any CPU hotplug.

The reporting to the guest is done using the Multiprocessor
Topology-Change-Report (MTCR) bit of the utility entry in the guest's
SCA, which is cleared during the interpretation of PTF.

On every vCPU creation we set the MTCR bit to let the guest know,
the next time it issues the PTF instruction with function code 2,
that the topology changed and that it should use the STSI(15.1.x)
instruction to get the topology details.
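
For reference (not part of this patch), the guest-side counterpart is
the PTF instruction itself. The sketch below shows how a guest kernel
typically polls for a pending report with PTF function code 2, roughly
mirroring the ptf() helper in the guest's own topology code: a
condition code of 1 means a topology-change report was pending and has
now been cleared, so the guest should re-read the topology with
STSI(15.1.x). topology_changed() is only an illustrative wrapper.

#define PTF_CHECK	2	/* PTF function code 2: check for pending report */

static inline int ptf(unsigned long fc)
{
	int cc;

	/* PTF: opcode 0xb9a2, function code taken from the register in %1 */
	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (fc)
		: "cc");
	return cc;
}

/* illustrative wrapper: true if a topology-change report was pending */
static int topology_changed(void)
{
	return ptf(PTF_CHECK) == 1;
}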

STSI(15.1.x) gives information on the CPU configuration topology.
Let's accept the interception of STSI with function code 15 and let
the userland part of the hypervisor handle it when userland supports
the CPU topology facility.
Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
Reviewed-by: Nico Boehr <nrb@linux.ibm.com>
Reviewed-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
Link: https://lore.kernel.org/r/20220714101824.101601-2-pmorel@linux.ibm.com
Message-Id: <20220714101824.101601-2-pmorel@linux.ibm.com>
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
parent 0130337e
@@ -95,19 +95,30 @@ union ipte_control {
 	};
 };
 
+union sca_utility {
+	__u16 val;
+	struct {
+		__u16 mtcr : 1;
+		__u16 reserved : 15;
+	};
+};
+
 struct bsca_block {
 	union ipte_control ipte_control;
 	__u64	reserved[5];
 	__u64	mcn;
-	__u64	reserved2;
+	union sca_utility utility;
+	__u8	reserved2[6];
 	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
 };
 
 struct esca_block {
 	union ipte_control ipte_control;
-	__u64	reserved1[7];
+	__u64	reserved1[6];
+	union sca_utility utility;
+	__u8	reserved2[6];
 	__u64	mcn[4];
-	__u64	reserved2[20];
+	__u64	reserved3[20];
 	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
 };
 
@@ -251,6 +262,7 @@ struct kvm_s390_sie_block {
 #define ECB_SPECI	0x08
 #define ECB_SRSI	0x04
 #define ECB_HOSTPROTINT	0x02
+#define ECB_PTF		0x01
 	__u8	ecb;			/* 0x0061 */
 #define ECB2_CMMA	0x80
 #define ECB2_IEP	0x20
......
@@ -1763,6 +1763,32 @@ static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
 	return ret;
 }
 
+/**
+ * kvm_s390_update_topology_change_report - update CPU topology change report
+ * @kvm: guest KVM description
+ * @val: set or clear the MTCR bit
+ *
+ * Updates the Multiprocessor Topology-Change-Report bit to signal
+ * the guest with a topology change.
+ * This is only relevant if the topology facility is present.
+ *
+ * The SCA version, bsca or esca, doesn't matter as offset is the same.
+ */
+static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
+{
+	union sca_utility new, old;
+	struct bsca_block *sca;
+
+	read_lock(&kvm->arch.sca_lock);
+	sca = kvm->arch.sca;
+	do {
+		old = READ_ONCE(sca->utility);
+		new = old;
+		new.mtcr = val;
+	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
+	read_unlock(&kvm->arch.sca_lock);
+}
+
 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	int ret;
@@ -3172,6 +3198,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_clear_async_pf_completion_queue(vcpu);
 	if (!kvm_is_ucontrol(vcpu->kvm))
 		sca_del_vcpu(vcpu);
+	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
 
 	if (kvm_is_ucontrol(vcpu->kvm))
 		gmap_remove(vcpu->arch.gmap);
@@ -3574,6 +3601,8 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
 	if (test_kvm_facility(vcpu->kvm, 9))
 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
+	if (test_kvm_facility(vcpu->kvm, 11))
+		vcpu->arch.sie_block->ecb |= ECB_PTF;
 	if (test_kvm_facility(vcpu->kvm, 73))
 		vcpu->arch.sie_block->ecb |= ECB_TE;
 	if (!kvm_is_ucontrol(vcpu->kvm))
@@ -3707,6 +3736,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	rc = kvm_s390_vcpu_setup(vcpu);
 	if (rc)
 		goto out_ucontrol_uninit;
+
+	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
 	return 0;
 
 out_ucontrol_uninit:
......
@@ -873,10 +873,18 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	if (fc > 3) {
-		kvm_s390_set_psw_cc(vcpu, 3);
-		return 0;
-	}
+	/* Bailout forbidden function codes */
+	if (fc > 3 && fc != 15)
+		goto out_no_data;
+
+	/*
+	 * fc 15 is provided only with
+	 *   - PTF/CPU topology support through facility 15
+	 *   - KVM_CAP_S390_USER_STSI
+	 */
+	if (fc == 15 && (!test_kvm_facility(vcpu->kvm, 11) ||
+			 !vcpu->kvm->arch.user_stsi))
+		goto out_no_data;
 
 	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
 	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
@@ -910,6 +918,10 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 			goto out_no_data;
 		handle_stsi_3_2_2(vcpu, (void *) mem);
 		break;
+	case 15: /* fc 15 is fully handled in userspace */
+		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
+		trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
+		return -EREMOTE;
 	}
 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
 		memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
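
(For context, not part of this patch: the -EREMOTE above surfaces in
userspace as a KVM_EXIT_S390_STSI exit. Below is a minimal sketch of
how a VMM might pick up the fc 15 case; the struct kvm_run s390_stsi
fields are the existing uapi ones, while build_sysib_15_1_x() is a
hypothetical VMM helper that builds the SYSIB 15.1.x block and writes
it to guest memory at the reported address.)

#include <errno.h>
#include <linux/kvm.h>

/* hypothetical VMM helper: build the SYSIB 15.1.x and store it in the guest */
int build_sysib_15_1_x(__u64 addr, __u8 ar, __u8 sel1, __u16 sel2);

/* called when KVM_RUN returns with run->exit_reason == KVM_EXIT_S390_STSI */
static int handle_stsi_exit(struct kvm_run *run)
{
	/* this sketch only handles the fc 15 case added above */
	if (run->s390_stsi.fc != 15)
		return -ENOSYS;

	return build_sysib_15_1_x(run->s390_stsi.addr, run->s390_stsi.ar,
				  run->s390_stsi.sel1, run->s390_stsi.sel2);
}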
......
@@ -503,6 +503,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* Host-protection-interruption introduced with ESOP */
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
+	/*
+	 * CPU Topology
+	 * This facility only uses the utility field of the SCA and none of
+	 * the cpu entries that are problematic with the other interpretation
+	 * facilities so we can pass it through
+	 */
+	if (test_kvm_facility(vcpu->kvm, 11))
+		scb_s->ecb |= scb_o->ecb & ECB_PTF;
 	/* transactional execution */
 	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
 		/* remap the prefix is tx is toggled on */
......