Commit d7a8bea3 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Cleanups for 4.18

- cleanups for nested, clock handling, crypto, storage keys and
control register bits
parents 5eec43a1 2c8180e8
arch/s390/include/asm/ctl_reg.h
@@ -10,8 +10,20 @@
 #include <linux/const.h>
 
+#define CR0_CLOCK_COMPARATOR_SIGN	_BITUL(63 - 10)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK	_BITUL(63 - 49)
+#define CR0_EXTERNAL_CALL_SUBMASK	_BITUL(63 - 50)
+#define CR0_CLOCK_COMPARATOR_SUBMASK	_BITUL(63 - 52)
+#define CR0_CPU_TIMER_SUBMASK		_BITUL(63 - 53)
+#define CR0_SERVICE_SIGNAL_SUBMASK	_BITUL(63 - 54)
+#define CR0_UNUSED_56			_BITUL(63 - 56)
+#define CR0_INTERRUPT_KEY_SUBMASK	_BITUL(63 - 57)
+#define CR0_MEASUREMENT_ALERT_SUBMASK	_BITUL(63 - 58)
+
 #define CR2_GUARDED_STORAGE		_BITUL(63 - 59)
 
+#define CR14_UNUSED_32			_BITUL(63 - 32)
+#define CR14_UNUSED_33			_BITUL(63 - 33)
 #define CR14_CHANNEL_REPORT_SUBMASK	_BITUL(63 - 35)
 #define CR14_RECOVERY_SUBMASK		_BITUL(63 - 36)
 #define CR14_DEGRADATION_SUBMASK	_BITUL(63 - 37)
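A note on the bit arithmetic (illustration only, not part of the commit): s390 numbers control-register bits MSB-first, so bit n of a 64-bit register is 1UL << (63 - n). The following self-contained userspace sketch, with _BITUL stood in by an equivalent of its uapi/linux/const.h definition, checks that the new names carry exactly the values of the magic constants they replace later in this diff:

/*
 * Self-contained check (illustration only): the named CR0/CR14 bits
 * equal the literal masks removed from the KVM code below.
 */
#include <assert.h>

#define _BITUL(x) (1UL << (x))	/* stand-in equivalent to uapi/linux/const.h */

int main(void)
{
	/* the magic constants replaced in interrupt.c and kvm-s390.c */
	static_assert(_BITUL(63 - 10) == 0x0020000000000000UL, "CR0_CLOCK_COMPARATOR_SIGN");
	static_assert(_BITUL(63 - 49) == 0x4000UL, "CR0_EMERGENCY_SIGNAL_SUBMASK");
	static_assert(_BITUL(63 - 50) == 0x2000UL, "CR0_EXTERNAL_CALL_SUBMASK");
	static_assert(_BITUL(63 - 52) == 0x800UL, "CR0_CLOCK_COMPARATOR_SUBMASK");
	static_assert(_BITUL(63 - 53) == 0x400UL, "CR0_CPU_TIMER_SUBMASK");
	static_assert(_BITUL(63 - 54) == 0x200UL, "CR0_SERVICE_SIGNAL_SUBMASK");
	/* the old literal reset values in kvm_s390_vcpu_initial_reset() */
	static_assert((_BITUL(63 - 56) | _BITUL(63 - 57) | _BITUL(63 - 58)) == 0xE0UL,
		      "gcr[0] reset value");
	static_assert((_BITUL(63 - 32) | _BITUL(63 - 33) | _BITUL(63 - 38)) == 0xC2000000UL,
		      "gcr[14] reset value");
	return 0;
}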
arch/s390/include/asm/kvm_host.h
@@ -812,6 +812,7 @@ struct kvm_arch{
 	int use_irqchip;
 	int use_cmma;
 	int use_pfmfi;
+	int use_skf;
 	int user_cpu_state_ctrl;
 	int user_sigp;
 	int user_stsi;
arch/s390/include/asm/mmu.h
@@ -21,7 +21,7 @@ typedef struct {
 	/* The mmu context uses extended page tables. */
 	unsigned int has_pgste:1;
 	/* The mmu context uses storage keys. */
-	unsigned int use_skey:1;
+	unsigned int uses_skeys:1;
 	/* The mmu context uses CMM. */
 	unsigned int uses_cmm:1;
 } mm_context_t;
arch/s390/include/asm/mmu_context.h
@@ -30,7 +30,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		test_thread_flag(TIF_PGSTE) ||
 		(current->mm && current->mm->context.alloc_pgste);
 	mm->context.has_pgste = 0;
-	mm->context.use_skey = 0;
+	mm->context.uses_skeys = 0;
 	mm->context.uses_cmm = 0;
 #endif
 	switch (mm->context.asce_limit) {
arch/s390/include/asm/pgtable.h
@@ -507,10 +507,10 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
  * faults should no longer be backed by zero pages
  */
 #define mm_forbids_zeropage mm_has_pgste
-static inline int mm_use_skey(struct mm_struct *mm)
+static inline int mm_uses_skeys(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
-	if (mm->context.use_skey)
+	if (mm->context.uses_skeys)
 		return 1;
 #endif
 	return 0;
arch/s390/kvm/guestdbg.c
@@ -153,7 +153,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
 	if (guestdbg_sstep_enabled(vcpu)) {
 		/* disable timer (clock-comparator) interrupts */
-		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
+		vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
 		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
 		vcpu->arch.sie_block->gcr[10] = 0;
 		vcpu->arch.sie_block->gcr[11] = -1UL;
arch/s390/kvm/interrupt.c
@@ -159,7 +159,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 {
 	if (psw_extint_disabled(vcpu) ||
-	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
 		return 0;
 	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
 		/* No timer interrupts when single stepping */
@@ -172,7 +172,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	const u64 ckc = vcpu->arch.sie_block->ckc;
 
-	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
 		if ((s64)ckc >= (s64)now)
 			return 0;
 	} else if (ckc >= now) {
@@ -184,7 +184,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
 {
 	return !psw_extint_disabled(vcpu) &&
-	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
 }
 
 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
@@ -285,15 +285,15 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 		active_mask &= ~IRQ_PEND_IO_MASK;
 	else
 		active_mask = disable_iscs(vcpu, active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
@@ -1042,7 +1042,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
 	/* external call pending and deliverable */
 	if (kvm_s390_ext_call_pending(vcpu) &&
 	    !psw_extint_disabled(vcpu) &&
-	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
 		return 1;
 
 	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
@@ -1062,7 +1062,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 	u64 cputm, sltime = 0;
 
 	if (ckc_interrupts_enabled(vcpu)) {
-		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
 			if ((s64)now < (s64)ckc)
 				sltime = tod_to_ns((s64)ckc - (s64)now);
 		} else if (now < ckc) {
arch/s390/kvm/kvm-s390.c
@@ -791,11 +791,21 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
 
-static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	int i;
 
+	kvm_s390_vcpu_block_all(kvm);
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_crypto_setup(vcpu);
+
+	kvm_s390_vcpu_unblock_all(kvm);
+}
+
+static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+{
 	if (!test_kvm_facility(kvm, 76))
 		return -EINVAL;
@@ -832,10 +842,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -ENXIO;
 	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_s390_vcpu_crypto_setup(vcpu);
-		exit_sie(vcpu);
-	}
+	kvm_s390_vcpu_crypto_reset_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
@@ -1033,8 +1040,8 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 	return ret;
 }
 
-static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
-				       struct kvm_s390_vm_tod_clock *gtod)
+static void kvm_s390_get_tod_clock(struct kvm *kvm,
+				   struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_s390_tod_clock_ext htod;
@@ -1043,10 +1050,12 @@ static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	gtod->tod = htod.tod + kvm->arch.epoch;
-	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
-	if (gtod->tod < htod.tod)
-		gtod->epoch_idx += 1;
+	gtod->epoch_idx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
+		if (gtod->tod < htod.tod)
+			gtod->epoch_idx += 1;
+	}
 
 	preempt_enable();
 }
@@ -1056,12 +1065,7 @@ static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_vm_tod_clock gtod;
 
 	memset(&gtod, 0, sizeof(gtod));
-
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_get_tod_clock_ext(kvm, &gtod);
-	else
-		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
-
+	kvm_s390_get_tod_clock(kvm, &gtod);
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
@@ -1493,7 +1497,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 		return -EINVAL;
 
 	/* Is this guest using storage keys? */
-	if (!mm_use_skey(current->mm))
+	if (!mm_uses_skeys(current->mm))
 		return KVM_S390_GET_SKEYS_NONE;
 
 	/* Enforce sane limit on memory allocation */
@@ -1982,10 +1986,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	rc = -ENOMEM;
 
-	kvm->arch.use_esca = 0; /* start with basic SCA */
 	if (!sclp.has_64bscao)
 		alloc_flags |= GFP_DMA;
 	rwlock_init(&kvm->arch.sca_lock);
+	/* start with basic SCA */
 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
 	if (!kvm->arch.sca)
 		goto out_err;
@@ -2036,8 +2040,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_s390_crypto_init(kvm);
 
 	mutex_init(&kvm->arch.float_int.ais_lock);
-	kvm->arch.float_int.simm = 0;
-	kvm->arch.float_int.nimm = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -2063,11 +2065,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.gmap->pfault_enabled = 0;
 	}
 
-	kvm->arch.css_support = 0;
-	kvm->arch.use_irqchip = 0;
 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
-	kvm->arch.epoch = 0;
+	kvm->arch.use_skf = sclp.has_skey;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
 	kvm_s390_vsie_init(kvm);
 	kvm_s390_gisa_init(kvm);
@@ -2433,8 +2432,12 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->ckc = 0UL;
 	vcpu->arch.sie_block->todpr = 0;
 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
-	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
-	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
+					CR0_INTERRUPT_KEY_SUBMASK |
+					CR0_MEASUREMENT_ALERT_SUBMASK;
+	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
+					CR14_UNUSED_33 |
+					CR14_EXTERNAL_DAMAGE_SUBMASK;
 	/* make sure the new fpc will be lazily loaded */
 	save_fpu_regs();
 	current->thread.fpu.fpc = 0;
@@ -3192,7 +3195,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 		return 0;
 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
 		return 0;
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
 		return 0;
 	if (!vcpu->arch.gmap->pfault_enabled)
 		return 0;
arch/s390/kvm/kvm-s390.h
@@ -410,4 +410,17 @@ static inline int kvm_s390_use_sca_entries(void)
 }
 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
 				     struct mcck_volatile_info *mcck_info);
+
+/**
+ * kvm_s390_vcpu_crypto_reset_all
+ *
+ * Reset the crypto attributes for each vcpu. This can be done while the
+ * vcpus are running, as each vcpu is removed from SIE before its crypto
+ * attributes are reset and is restored to SIE afterward.
+ *
+ * Note: kvm->lock must be held while calling this function.
+ *
+ * @kvm: the KVM guest
+ */
+void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);
 #endif
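Usage sketch (hypothetical, for illustration): the kernel-doc above makes kvm->lock part of the calling contract, and kvm_s390_vm_set_crypto() in this diff follows exactly this pattern. The helper name example_crypto_reset is invented here:

/* Hypothetical caller, honoring the documented locking rule. */
static void example_crypto_reset(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);		/* required by kvm_s390_vcpu_crypto_reset_all() */
	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
}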
arch/s390/kvm/priv.c
@@ -205,24 +205,28 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
 {
-	int rc = 0;
+	int rc;
 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
 
 	trace_kvm_s390_skey_related_inst(vcpu);
-	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
+	/* Already enabled? */
+	if (vcpu->kvm->arch.use_skf &&
+	    !(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
 	    !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
-		return rc;
+		return 0;
 
 	rc = s390_enable_skey();
 	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
-	if (!rc) {
-		if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
-			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
-		else
-			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
-					     ICTL_RRBE);
-	}
-	return rc;
+	if (rc)
+		return rc;
+
+	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
+		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
+	if (!vcpu->kvm->arch.use_skf)
+		sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+	else
+		sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+	return 0;
 }
 
 static int try_handle_skey(struct kvm_vcpu *vcpu)
@@ -232,7 +236,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 	rc = kvm_s390_skey_check_enable(vcpu);
 	if (rc)
 		return rc;
-	if (sclp.has_skey) {
+	if (vcpu->kvm->arch.use_skf) {
 		/* with storage-key facility, SIE interprets it for us */
 		kvm_s390_retry_instr(vcpu);
 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
arch/s390/kvm/vsie.c
@@ -557,7 +557,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 	if (gpa) {
-		if (!(gpa & ~0x1fffUL))
+		if (gpa < 2 * PAGE_SIZE)
 			rc = set_validity_icpt(scb_s, 0x0038U);
 		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 			rc = set_validity_icpt(scb_s, 0x0011U);
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 	if (gpa && (scb_s->ecb & ECB_TE)) {
-		if (!(gpa & ~0x1fffU)) {
+		if (gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x0080U);
 			goto unpin;
 		}
@@ -594,7 +594,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
-		if (!(gpa & ~0x1fffUL)) {
+		if (gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x1310U);
 			goto unpin;
 		}
@@ -613,7 +613,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
-		if (!(gpa & ~0x1fffUL)) {
+		if (gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x0043U);
 			goto unpin;
 		}
@@ -632,7 +632,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
-		if (!gpa || !(gpa & ~0x1fffUL)) {
+		if (!gpa || gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x10b0U);
 			goto unpin;
 		}
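The pin_blocks() edits above are pure readability changes: with the s390 4 KiB page size, clearing the low 13 bits of gpa yields zero exactly when gpa points into the low two pages (the prefix area). A small userspace sketch (PAGE_SIZE assumed to be 4096 here, as on s390) demonstrating the equivalence:

/* Illustration only: the old mask test and the new comparison agree. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL	/* assumption: s390 page size */

int main(void)
{
	for (uint64_t gpa = 0; gpa < 0x4000; gpa += 0x100)
		assert((!(gpa & ~0x1fffUL)) == (gpa < 2 * PAGE_SIZE));
	return 0;
}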
arch/s390/mm/gmap.c
@@ -2184,14 +2184,14 @@ int s390_enable_skey(void)
 	int rc = 0;
 
 	down_write(&mm->mmap_sem);
-	if (mm_use_skey(mm))
+	if (mm_uses_skeys(mm))
 		goto out_up;
 
-	mm->context.use_skey = 1;
+	mm->context.uses_skeys = 1;
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
 				MADV_UNMERGEABLE, &vma->vm_flags)) {
-			mm->context.use_skey = 0;
+			mm->context.uses_skeys = 0;
 			rc = -ENOMEM;
 			goto out_up;
 		}
arch/s390/mm/pgtable.c
@@ -158,7 +158,7 @@ static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 #ifdef CONFIG_PGSTE
 	unsigned long address, bits, skey;
 
-	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
+	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 		return pgste;
 	address = pte_val(pte) & PAGE_MASK;
 	skey = (unsigned long) page_get_storage_key(address);
@@ -180,7 +180,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 	unsigned long address;
 	unsigned long nkey;
 
-	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
+	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 		return;
 	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 	address = pte_val(entry) & PAGE_MASK;