Commit 8fff5e37 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-20150122' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

KVM: s390: fixes and features for kvm/next (3.20)

1. Generic
- sparse warning (make function static)
- optimize locking
- bugfixes for interrupt injection
- fix MVPG addressing modes

2. hrtimer/wakeup fun
A recent change can cause KVM hangs if adjtime is used in the host.
The hrtimer might wake up too early or too late. Waking up too early
is fatal: vcpu_block will see that the wakeup condition is not met and
sleep again, and the CPU might never be woken up again. This series
addresses that problem. An adjclock slowing down the host clock will
still result in too-late wakeups; that will require more work.
In addition we also change the hrtimer from REALTIME to MONOTONIC
to avoid similar problems with timedatectl set-time.

3. sigp rework
We will move all "slow" sigps to QEMU (protected with a capability that
can be enabled) to avoid several races between concurrent SIGP orders.

4. Optimize the shadow page table
Provide an interface to announce the maximum guest size. The kernel
will use that to make the page table 2, 3, 4, or (theoretically) 5
levels deep.

5. Provide an interface to set the guest TOD
We now use two vm attributes instead of two one_regs, as one_regs are
vcpu ioctls and we do not want to call them from other threads; a
usage sketch follows at the end of this message.

6. Protected key functions
The real HMC allows enabling/disabling the protected key CPACF
functions. Let's provide an implementation plus an interface for QEMU
to activate the protected key instructions.
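
A minimal userspace sketch for item 5 (hypothetical helper, error
handling omitted; assumes a vm_fd from KVM_CREATE_VM and <linux/kvm.h>
definitions from this series; the kernel reads a u64 TOD value from
attr->addr, see kvm_s390_set_tod_low below):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* set the (low) guest TOD value; callable from any thread */
    static int set_guest_tod(int vm_fd, uint64_t tod)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_S390_VM_TOD,
                    .attr  = KVM_S390_VM_TOD_LOW,
                    /* kernel copies sizeof(u64) bytes from this address */
                    .addr  = (uint64_t)(unsigned long)&tod,
            };

            return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }

Unlike the old one_reg variant this needs no vcpu fd, which is what
makes it usable from, e.g., a migration thread.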
parents 1c6007d5 0eb135ff
@@ -2315,7 +2315,7 @@ struct kvm_s390_interrupt {
 type can be one of the following:
-KVM_S390_SIGP_STOP (vcpu) - sigp restart
+KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm
 KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
 KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
 KVM_S390_RESTART (vcpu) - restart
@@ -3228,3 +3228,23 @@ userspace from doing that.
 If the hcall number specified is not one that has an in-kernel
 implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
 error.
+
+7.2 KVM_CAP_S390_USER_SIGP
+
+Architectures: s390
+Parameters: none
+
+This capability controls which SIGP orders will be handled completely in user
+space. With this capability enabled, all fast orders will be handled completely
+in the kernel:
+- SENSE
+- SENSE RUNNING
+- EXTERNAL CALL
+- EMERGENCY SIGNAL
+- CONDITIONAL EMERGENCY SIGNAL
+
+All other orders will be handled completely in user space.
+
+Only privileged operation exceptions will be checked for in the kernel (or even
+in the hardware prior to interception). If this capability is not enabled, the
+old way of handling SIGP orders is used (partially in kernel and user space).
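
An illustrative userspace sketch of enabling this capability (not part
of the patch; hypothetical helper, error handling omitted, assumes a
VM fd and standard <linux/kvm.h> definitions):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int enable_user_sigp(int vm_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_S390_USER_SIGP;
            /* from now on, "slow" SIGP orders exit to user space */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }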
@@ -24,3 +24,17 @@ Returns: 0
 Clear the CMMA status for all guest pages, so any pages the guest marked
 as unused are again used and may not be reclaimed by the host.
+
+1.3. ATTRIBUTE KVM_S390_VM_MEM_LIMIT_SIZE
+Parameters: in attr->addr the address for the new limit of guest memory
+Returns: -EFAULT if the given address is not accessible
+         -EINVAL if the virtual machine is of type UCONTROL
+         -E2BIG if the given guest memory is too big for that machine
+         -EBUSY if a vcpu is already defined
+         -ENOMEM if not enough memory is available for a new shadow guest mapping
+          0 otherwise
+
+Allows userspace to query the actual limit and set a new limit for
+the maximum guest memory size. The limit will be rounded up to
+2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
+the number of page table levels.
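
Again as illustration only (hypothetical helper, error handling
simplified): userspace could query the current limit and request a
smaller one before creating any vcpu:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int shrink_guest_mem_limit(int vm_fd, uint64_t new_limit)
    {
            uint64_t cur;
            struct kvm_device_attr attr = {
                    .group = KVM_S390_VM_MEM_CTRL,
                    .attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
                    .addr  = (uint64_t)(unsigned long)&cur,
            };

            if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
                    return -1;      /* kernel reports gmap->asce_end */
            if (new_limit > cur)
                    return -1;      /* the kernel would return -E2BIG */
            attr.addr = (uint64_t)(unsigned long)&new_limit;
            /* gmap_alloc rounds up and picks the number of levels */
            return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }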
@@ -249,9 +249,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	return ERR_PTR(err);
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
...
@@ -832,9 +832,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
...
@@ -623,9 +623,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	return vcpu;
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
...
@@ -35,11 +35,13 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
 
-#define SIGP_CTRL_C		0x00800000
+#define SIGP_CTRL_C		0x80
+#define SIGP_CTRL_SCN_MASK	0x3f
 
 struct sca_entry {
-	atomic_t ctrl;
-	__u32	reserved;
+	__u8	reserved0;
+	__u8	sigp_ctrl;
+	__u16	reserved[3];
 	__u64	sda;
 	__u64	reserved2[2];
 } __attribute__((packed));
@@ -132,7 +134,9 @@ struct kvm_s390_sie_block {
 	__u8	reserved60;		/* 0x0060 */
 	__u8	ecb;			/* 0x0061 */
 	__u8	ecb2;			/* 0x0062 */
-	__u8	reserved63[1];		/* 0x0063 */
+#define ECB3_AES 0x04
+#define ECB3_DEA 0x08
+	__u8	ecb3;			/* 0x0063 */
 	__u32	scaol;			/* 0x0064 */
 	__u8	reserved68[4];		/* 0x0068 */
 	__u32	todpr;			/* 0x006c */
@@ -378,14 +382,11 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_extcall_info extcall;
 		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_stop_info stop;
 		struct kvm_s390_mchk_info mchk;
 	};
 };
 
-/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP	(1<<0)
-#define ACTION_STOP_ON_STOP	(1<<1)
-
 struct kvm_s390_irq_payload {
 	struct kvm_s390_io_info io;
 	struct kvm_s390_ext_info ext;
@@ -393,6 +394,7 @@ struct kvm_s390_irq_payload {
 	struct kvm_s390_emerg_info emerg;
 	struct kvm_s390_extcall_info extcall;
 	struct kvm_s390_prefix_info prefix;
+	struct kvm_s390_stop_info stop;
 	struct kvm_s390_mchk_info mchk;
 };
@@ -401,7 +403,6 @@ struct kvm_s390_local_interrupt {
 	struct kvm_s390_float_interrupt *float_int;
 	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
-	unsigned int action_bits;
 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_irq_payload irq;
 	unsigned long pending_irqs;
@@ -470,7 +471,6 @@ struct kvm_vcpu_arch {
 	};
 	struct gmap *gmap;
 	struct kvm_guestdbg_info_arch guestdbg;
-#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
 	unsigned long pfault_token;
 	unsigned long pfault_select;
 	unsigned long pfault_compare;
@@ -507,10 +507,14 @@ struct s390_io_adapter {
 struct kvm_s390_crypto {
 	struct kvm_s390_crypto_cb *crycb;
 	__u32 crycbd;
+	__u8 aes_kw;
+	__u8 dea_kw;
 };
 
 struct kvm_s390_crypto_cb {
-	__u8	reserved00[128];		/* 0x0000 */
+	__u8	reserved00[72];			/* 0x0000 */
+	__u8	dea_wrapping_key_mask[24];	/* 0x0048 */
+	__u8	aes_wrapping_key_mask[32];	/* 0x0060 */
 };
 
 struct kvm_arch{
@@ -523,12 +527,14 @@ struct kvm_arch{
 	int use_irqchip;
 	int use_cmma;
 	int user_cpu_state_ctrl;
+	int user_sigp;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
 	spinlock_t start_stop_lock;
 	struct kvm_s390_crypto crypto;
+	u64 epoch;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
...
@@ -31,7 +31,8 @@ struct sclp_cpu_entry {
 	u8 reserved0[2];
 	u8 : 3;
 	u8 siif : 1;
-	u8 : 4;
+	u8 sigpif : 1;
+	u8 : 3;
 	u8 reserved2[10];
 	u8 type;
 	u8 reserved1;
@@ -66,6 +67,7 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 unsigned long sclp_get_hsa_size(void);
 void sclp_early_detect(void);
 int sclp_has_siif(void);
+int sclp_has_sigpif(void);
 unsigned int sclp_get_ibc(void);
 
 #endif /* _ASM_S390_SCLP_H */
...
@@ -57,10 +57,23 @@ struct kvm_s390_io_adapter_req {
 
 /* kvm attr_group on vm fd */
 #define KVM_S390_VM_MEM_CTRL		0
+#define KVM_S390_VM_TOD			1
+#define KVM_S390_VM_CRYPTO		2
 
 /* kvm attributes for mem_ctrl */
 #define KVM_S390_VM_MEM_ENABLE_CMMA	0
 #define KVM_S390_VM_MEM_CLR_CMMA	1
+#define KVM_S390_VM_MEM_LIMIT_SIZE	2
+
+/* kvm attributes for KVM_S390_VM_TOD */
+#define KVM_S390_VM_TOD_LOW		0
+#define KVM_S390_VM_TOD_HIGH		1
+
+/* kvm attributes for crypto */
+#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW	0
+#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW	1
+#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW	2
+#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW	3
 
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
@@ -107,6 +120,9 @@ struct kvm_guest_debug_arch {
 	struct kvm_hw_breakpoint __user *hw_bp;
 };
 
+/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
+#define KVM_S390_PFAULT_TOKEN_INVALID	0xffffffffffffffffULL
+
 #define KVM_SYNC_PREFIX (1UL << 0)
 #define KVM_SYNC_GPRS (1UL << 1)
 #define KVM_SYNC_ACRS (1UL << 2)
...
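
With the uapi additions above, userspace can hand the optional stop
flags to the kernel through the existing KVM_S390_INTERRUPT vcpu ioctl;
parm is copied into irq.u.stop.flags by s390int_to_s390irq() (see
interrupt.c below). A sketch (hypothetical helper, error handling
omitted; KVM_S390_STOP_FLAG_STORE_STATUS is defined by this series in
the s390 uapi header, not shown in this excerpt):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int inject_stop_and_store(int vcpu_fd)
    {
            struct kvm_s390_interrupt irq = {
                    .type = KVM_S390_SIGP_STOP,
                    .parm = KVM_S390_STOP_FLAG_STORE_STATUS,
            };

            return ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
    }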
@@ -68,18 +68,27 @@ static int handle_noop(struct kvm_vcpu *vcpu)
 
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc = 0;
-	unsigned int action_bits;
+	uint8_t flags, stop_pending;
 
 	vcpu->stat.exit_stop_request++;
-	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-	action_bits = vcpu->arch.local_int.action_bits;
+	/* delay the stop if any non-stop irq is pending */
+	if (kvm_s390_vcpu_has_irq(vcpu, 1))
+		return 0;
 
-	if (!(action_bits & ACTION_STOP_ON_STOP))
+	/* avoid races with the injection/SIGP STOP code */
+	spin_lock(&li->lock);
+	flags = li->irq.stop.flags;
+	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
+	spin_unlock(&li->lock);
+
+	trace_kvm_s390_stop_request(stop_pending, flags);
+	if (!stop_pending)
 		return 0;
 
-	if (action_bits & ACTION_STORE_ON_STOP) {
+	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
 		if (rc)
@@ -279,11 +288,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 		irq.type = KVM_S390_INT_CPU_TIMER;
 		break;
 	case EXT_IRQ_EXTERNAL_CALL:
-		if (kvm_s390_si_ext_call_pending(vcpu))
-			return 0;
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
 		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
-		break;
+		rc = kvm_s390_inject_vcpu(vcpu, &irq);
+		/* ignore if another external call is already pending */
+		if (rc == -EBUSY)
+			return 0;
+		return rc;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -307,17 +318,19 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
 	/* Make sure that the source is paged-in */
-	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
-	if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
+				     &srcaddr, 0);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
 	if (rc != 0)
 		return rc;
 
 	/* Make sure that the destination is paged-in */
-	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
-	if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
				     &dstaddr, 1);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
 	if (rc != 0)
 		return rc;
...
@@ -19,6 +19,7 @@
 #include <linux/bitmap.h>
 #include <asm/asm-offsets.h>
 #include <asm/uaccess.h>
+#include <asm/sclp.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -159,6 +160,12 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
 
+	/*
+	 * STOP irqs will never be actively delivered. They are triggered via
+	 * intercept requests and cleared when the stop intercept is performed.
+	 */
+	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
+
 	return active_mask;
 }
@@ -186,9 +193,6 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 			    LCTL_CR10 | LCTL_CR11);
 		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
 	}
-
-	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
-		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -216,11 +220,18 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->lctl |= LCTL_CR14;
 }
 
+static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
+{
+	if (kvm_s390_is_stop_irq_pending(vcpu))
+		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+}
+
 /* Set interception request for non-deliverable local interrupts */
 static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
 {
 	set_intercept_indicators_ext(vcpu);
 	set_intercept_indicators_mchk(vcpu);
+	set_intercept_indicators_stop(vcpu);
 }
 
 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
@@ -392,18 +403,6 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	return rc ? -EFAULT : 0;
 }
 
-static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
-{
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
-	vcpu->stat.deliver_stop_signal++;
-	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
-					 0, 0);
-
-	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
-	clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
-	return 0;
-}
-
 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -705,7 +704,6 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
 	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
 	[IRQ_PEND_RESTART]        = __deliver_restart,
-	[IRQ_PEND_SIGP_STOP]      = __deliver_stop,
 	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
 	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
 };
@@ -738,21 +736,20 @@ static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
 	return rc;
 }
 
-/* Check whether SIGP interpretation facility has an external call pending */
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
+/* Check whether an external call is pending (deliverable or not) */
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
-	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
-	if (!psw_extint_disabled(vcpu) &&
-	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
-	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
-	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
-		return 1;
+	if (!sclp_has_sigpif())
+		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 
-	return 0;
+	return (sigp_ctrl & SIGP_CTRL_C) &&
+	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
 }
 
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
 {
 	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
 	struct kvm_s390_interrupt_info *inti;
@@ -773,7 +770,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	if (!rc && kvm_cpu_has_pending_timer(vcpu))
 		rc = 1;
 
-	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
+	/* external call pending and deliverable */
+	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+	    !psw_extint_disabled(vcpu) &&
+	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+		rc = 1;
+
+	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
 		rc = 1;
 
 	return rc;
@@ -804,14 +807,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
-	__set_cpu_idle(vcpu);
 	if (!ckc_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+		__set_cpu_idle(vcpu);
 		goto no_timer;
 	}
 
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+	/* underflow */
+	if (vcpu->arch.sie_block->ckc < now)
+		return 0;
+
+	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
@@ -820,7 +829,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	__unset_cpu_idle(vcpu);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
+	hrtimer_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
@@ -840,10 +849,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
+	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	kvm_s390_vcpu_wakeup(vcpu);
+	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
+	/*
+	 * If the monotonic clock runs faster than the tod clock we might be
+	 * woken up too early and have to go back to sleep to avoid deadlocks.
+	 */
+	if (vcpu->arch.sie_block->ckc > now &&
+	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+		return HRTIMER_RESTART;
+	kvm_s390_vcpu_wakeup(vcpu);
 	return HRTIMER_NORESTART;
 }
@@ -859,8 +878,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 
 	/* clear pending external calls set by sigp interpretation facility */
 	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
-	atomic_clear_mask(SIGP_CTRL_C,
-			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
+	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
@@ -984,18 +1002,43 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	return 0;
 }
 
-int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
+{
+	unsigned char new_val, old_val;
+	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
+	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
+	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
+		/* another external call is pending */
+		return -EBUSY;
+	}
+	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	return 0;
+}
+
+static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+	uint16_t src_id = irq->u.extcall.code;
 
 	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
-		   irq->u.extcall.code);
+		   src_id);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
-				   irq->u.extcall.code, 0, 2);
+				   src_id, 0, 2);
+
+	/* sending vcpu invalid */
+	if (src_id >= KVM_MAX_VCPUS ||
+	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+		return -EINVAL;
 
+	if (sclp_has_sigpif())
+		return __inject_extcall_sigpif(vcpu, src_id);
+
+	if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+		return -EBUSY;
 	*extcall = irq->u.extcall;
-	set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
@@ -1006,23 +1049,41 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
 
 	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
-		   prefix->address);
+		   irq->u.prefix.address);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
-				   prefix->address, 0, 2);
+				   irq->u.prefix.address, 0, 2);
+
+	if (!is_vcpu_stopped(vcpu))
+		return -EBUSY;
 
 	*prefix = irq->u.prefix;
 	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
 	return 0;
 }
 
+#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	struct kvm_s390_stop_info *stop = &li->irq.stop;
+	int rc = 0;
 
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
 
-	li->action_bits |= ACTION_STOP_ON_STOP;
-	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
+		return -EINVAL;
+
+	if (is_vcpu_stopped(vcpu)) {
+		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
+			rc = kvm_s390_store_status_unloaded(vcpu,
+						KVM_S390_STORE_STATUS_NOADDR);
+		return rc;
+	}
+
+	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
+		return -EBUSY;
+	stop->flags = irq->u.stop.flags;
+	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
 	return 0;
 }
@@ -1042,14 +1103,13 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 				   struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
 
 	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
 		   irq->u.emerg.code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
-				   emerg->code, 0, 2);
+				   irq->u.emerg.code, 0, 2);
 
-	set_bit(emerg->code, li->sigp_emerg_pending);
+	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
@@ -1061,9 +1121,9 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
 
 	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
-		   mchk->mcic);
+		   irq->u.mchk.mcic);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
-				   mchk->mcic, 2);
+				   irq->u.mchk.mcic, 2);
 
 	/*
 	 * Because repressible machine checks can be indicated along with
@@ -1121,7 +1181,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 
 	if ((!schid && !cr6) || (schid && cr6))
 		return NULL;
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	inti = NULL;
@@ -1149,7 +1208,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 	if (list_empty(&fi->list))
 		atomic_set(&fi->active, 0);
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 	return inti;
 }
@@ -1162,7 +1220,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	int sigcpu;
 	int rc = 0;
 
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
@@ -1213,7 +1270,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 	return rc;
 }
@@ -1221,6 +1277,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		       struct kvm_s390_interrupt *s390int)
 {
 	struct kvm_s390_interrupt_info *inti;
+	int rc;
 
 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
 	if (!inti)
@@ -1239,7 +1296,6 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		inti->ext.ext_params = s390int->parm;
 		break;
 	case KVM_S390_INT_PFAULT_DONE:
-		inti->type = s390int->type;
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_MCHK:
@@ -1268,7 +1324,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
 				 2);
 
-	return __inject_vm(kvm, inti);
+	rc = __inject_vm(kvm, inti);
+	if (rc)
+		kfree(inti);
+	return rc;
 }
 
 void kvm_s390_reinject_io_int(struct kvm *kvm,
@@ -1290,13 +1349,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	case KVM_S390_SIGP_SET_PREFIX:
 		irq->u.prefix.address = s390int->parm;
 		break;
+	case KVM_S390_SIGP_STOP:
+		irq->u.stop.flags = s390int->parm;
+		break;
 	case KVM_S390_INT_EXTERNAL_CALL:
-		if (irq->u.extcall.code & 0xffff0000)
+		if (s390int->parm & 0xffff0000)
 			return -EINVAL;
 		irq->u.extcall.code = s390int->parm;
 		break;
 	case KVM_S390_INT_EMERGENCY:
-		if (irq->u.emerg.code & 0xffff0000)
+		if (s390int->parm & 0xffff0000)
 			return -EINVAL;
 		irq->u.emerg.code = s390int->parm;
 		break;
@@ -1307,6 +1369,23 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	return 0;
 }
 
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+}
+
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+	spin_lock(&li->lock);
+	li->irq.stop.flags = 0;
+	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+	spin_unlock(&li->lock);
+}
+
 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1363,7 +1442,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
 	struct kvm_s390_float_interrupt *fi;
 	struct kvm_s390_interrupt_info *n, *inti = NULL;
 
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	list_for_each_entry_safe(inti, n, &fi->list, list) {
@@ -1373,7 +1451,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
 	fi->irq_count = 0;
 	atomic_set(&fi->active, 0);
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
@@ -1413,7 +1490,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
 	int ret = 0;
 	int n = 0;
 
-	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
@@ -1432,7 +1508,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
 	}
 	spin_unlock(&fi->lock);
-	mutex_unlock(&kvm->lock);
 
 	return ret < 0 ? ret : n;
 }
...
@@ -22,6 +22,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <asm/asm-offsets.h>
@@ -166,6 +167,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_IRQCHIP:
 	case KVM_CAP_VM_ATTRIBUTES:
 	case KVM_CAP_MP_STATE:
+	case KVM_CAP_S390_USER_SIGP:
 		r = 1;
 		break;
 	case KVM_CAP_NR_VCPUS:
@@ -254,6 +256,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		kvm->arch.use_irqchip = 1;
 		r = 0;
 		break;
+	case KVM_CAP_S390_USER_SIGP:
+		kvm->arch.user_sigp = 1;
+		r = 0;
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -261,7 +267,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	return r;
 }
 
-static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_MEM_LIMIT_SIZE:
+		ret = 0;
+		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+	return ret;
+}
+
+static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	int ret;
 	unsigned int idx;
@@ -283,6 +306,190 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 		mutex_unlock(&kvm->lock);
 		ret = 0;
 		break;
+	case KVM_S390_VM_MEM_LIMIT_SIZE: {
+		unsigned long new_limit;
+
+		if (kvm_is_ucontrol(kvm))
+			return -EINVAL;
+
+		if (get_user(new_limit, (u64 __user *)attr->addr))
+			return -EFAULT;
+
+		if (new_limit > kvm->arch.gmap->asce_end)
+			return -E2BIG;
+
+		ret = -EBUSY;
+		mutex_lock(&kvm->lock);
+		if (atomic_read(&kvm->online_vcpus) == 0) {
+			/* gmap_alloc will round the limit up */
+			struct gmap *new = gmap_alloc(current->mm, new_limit);
+
+			if (!new) {
+				ret = -ENOMEM;
+			} else {
+				gmap_free(kvm->arch.gmap);
+				new->private = kvm;
+				kvm->arch.gmap = new;
+				ret = 0;
+			}
+		}
+		mutex_unlock(&kvm->lock);
+		break;
+	}
+	default:
+		ret = -ENXIO;
+		break;
+	}
+	return ret;
+}
+
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
+
+static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	if (!test_vfacility(76))
+		return -EINVAL;
+
+	mutex_lock(&kvm->lock);
+	switch (attr->attr) {
+	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+		get_random_bytes(
+			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		kvm->arch.crypto.aes_kw = 1;
+		break;
+	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+		get_random_bytes(
+			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		kvm->arch.crypto.dea_kw = 1;
+		break;
+	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+		kvm->arch.crypto.aes_kw = 0;
+		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
+			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		break;
+	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+		kvm->arch.crypto.dea_kw = 0;
+		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
+			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		break;
+	default:
+		mutex_unlock(&kvm->lock);
+		return -ENXIO;
+	}
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_s390_vcpu_crypto_setup(vcpu);
+		exit_sie(vcpu);
+	}
+	mutex_unlock(&kvm->lock);
+	return 0;
+}
+
+static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	u8 gtod_high;
+
+	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
+			   sizeof(gtod_high)))
+		return -EFAULT;
+
+	if (gtod_high != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_vcpu *cur_vcpu;
+	unsigned int vcpu_idx;
+	u64 host_tod, gtod;
+	int r;
+
+	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+		return -EFAULT;
+
+	r = store_tod_clock(&host_tod);
+	if (r)
+		return r;
+
+	mutex_lock(&kvm->lock);
+	kvm->arch.epoch = gtod - host_tod;
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+		exit_sie(cur_vcpu);
+	}
+	mutex_unlock(&kvm->lock);
+	return 0;
+}
+
+static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret;
+
+	if (attr->flags)
+		return -EINVAL;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_TOD_HIGH:
+		ret = kvm_s390_set_tod_high(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD_LOW:
+		ret = kvm_s390_set_tod_low(kvm, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+	return ret;
+}
+
+static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	u8 gtod_high = 0;
+
+	if (copy_to_user((void __user *)attr->addr, &gtod_high,
+			 sizeof(gtod_high)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	u64 host_tod, gtod;
+	int r;
+
+	r = store_tod_clock(&host_tod);
+	if (r)
+		return r;
+
+	gtod = host_tod + kvm->arch.epoch;
+	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	int ret;
+
+	if (attr->flags)
+		return -EINVAL;
+
+	switch (attr->attr) {
+	case KVM_S390_VM_TOD_HIGH:
+		ret = kvm_s390_get_tod_high(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD_LOW:
+		ret = kvm_s390_get_tod_low(kvm, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -296,7 +503,13 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	switch (attr->group) {
 	case KVM_S390_VM_MEM_CTRL:
-		ret = kvm_s390_mem_control(kvm, attr);
+		ret = kvm_s390_set_mem_control(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD:
+		ret = kvm_s390_set_tod(kvm, attr);
+		break;
+	case KVM_S390_VM_CRYPTO:
+		ret = kvm_s390_vm_set_crypto(kvm, attr);
 		break;
 	default:
 		ret = -ENXIO;
@@ -308,7 +521,21 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	return -ENXIO;
+	int ret;
+
+	switch (attr->group) {
+	case KVM_S390_VM_MEM_CTRL:
+		ret = kvm_s390_get_mem_control(kvm, attr);
+		break;
+	case KVM_S390_VM_TOD:
+		ret = kvm_s390_get_tod(kvm, attr);
+		break;
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
 }
 
 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -320,6 +547,31 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 		switch (attr->attr) {
 		case KVM_S390_VM_MEM_ENABLE_CMMA:
 		case KVM_S390_VM_MEM_CLR_CMMA:
+		case KVM_S390_VM_MEM_LIMIT_SIZE:
+			ret = 0;
+			break;
+		default:
+			ret = -ENXIO;
+			break;
+		}
+		break;
+	case KVM_S390_VM_TOD:
+		switch (attr->attr) {
+		case KVM_S390_VM_TOD_LOW:
+		case KVM_S390_VM_TOD_HIGH:
+			ret = 0;
+			break;
+		default:
+			ret = -ENXIO;
+			break;
+		}
+		break;
+	case KVM_S390_VM_CRYPTO:
+		switch (attr->attr) {
+		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
 			ret = 0;
 			break;
 		default:
@@ -414,6 +666,10 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
 				  CRYCB_FORMAT1;
 
+	/* Disable AES/DEA protected key functions by default */
+	kvm->arch.crypto.aes_kw = 0;
+	kvm->arch.crypto.dea_kw = 0;
+
 	return 0;
 }
@@ -477,6 +733,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.css_support = 0;
 	kvm->arch.use_irqchip = 0;
+	kvm->arch.epoch = 0;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
@@ -546,25 +803,30 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 }
 
 /* Section: vcpu related */
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-	kvm_clear_async_pf_completion_queue(vcpu);
-	if (kvm_is_ucontrol(vcpu->kvm)) {
-		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
-		if (!vcpu->arch.gmap)
-			return -ENOMEM;
-		vcpu->arch.gmap->private = vcpu->kvm;
-		return 0;
-	}
+	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
+	if (!vcpu->arch.gmap)
+		return -ENOMEM;
+	vcpu->arch.gmap->private = vcpu->kvm;
 
-	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+	return 0;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
 				    KVM_SYNC_GPRS |
 				    KVM_SYNC_ACRS |
 				    KVM_SYNC_CRS |
 				    KVM_SYNC_ARCH0 |
 				    KVM_SYNC_PFAULT;
+	if (kvm_is_ucontrol(vcpu->kvm))
+		return __kvm_ucontrol_vcpu_init(vcpu);
+
 	return 0;
 }
@@ -615,9 +877,13 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	kvm_s390_clear_local_irqs(vcpu);
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	mutex_lock(&vcpu->kvm->lock);
+	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	mutex_unlock(&vcpu->kvm->lock);
+	if (!kvm_is_ucontrol(vcpu->kvm))
+		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 }
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@@ -625,6 +891,13 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 	if (!test_vfacility(76))
 		return;
 
+	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+
+	if (vcpu->kvm->arch.crypto.aes_kw)
+		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+	if (vcpu->kvm->arch.crypto.dea_kw)
+		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
+
 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 }
@@ -658,9 +931,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->ecb |= 0x10;
 
 	vcpu->arch.sie_block->ecb2  = 8;
-	vcpu->arch.sie_block->eca   = 0xD1002000U;
+	vcpu->arch.sie_block->eca   = 0xC1002000U;
 	if (sclp_has_siif())
 		vcpu->arch.sie_block->eca |= 1;
+	if (sclp_has_sigpif())
+		vcpu->arch.sie_block->eca |= 0x10000000U;
 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
 				      ICTL_TPROT;
@@ -670,7 +945,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		if (rc)
 			return rc;
 	}
-	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
@@ -741,7 +1016,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return kvm_cpu_has_interrupt(vcpu);
+	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
 void s390_vcpu_block(struct kvm_vcpu *vcpu)
@@ -869,6 +1144,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
 	case KVM_REG_S390_PFTOKEN:
 		r = get_user(vcpu->arch.pfault_token,
 			     (u64 __user *)reg->addr);
+		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+			kvm_clear_async_pf_completion_queue(vcpu);
 		break;
 	case KVM_REG_S390_PFCOMPARE:
 		r = get_user(vcpu->arch.pfault_compare,
@@ -1176,7 +1453,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 		return 0;
 	if (psw_extint_disabled(vcpu))
 		return 0;
-	if (kvm_cpu_has_interrupt(vcpu))
+	if (kvm_s390_vcpu_has_irq(vcpu, 0))
 		return 0;
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
 		return 0;
@@ -1341,6 +1618,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+			kvm_clear_async_pf_completion_queue(vcpu);
 	}
 	kvm_run->kvm_dirty_regs = 0;
 }
@@ -1559,15 +1838,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
-	/* Need to lock access to action_bits to avoid a SIGP race condition */
-	spin_lock(&vcpu->arch.local_int.lock);
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
-
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
-	vcpu->arch.local_int.action_bits &=
-		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	kvm_s390_clear_stop_irq(vcpu);
 
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
...
@@ -228,11 +228,13 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 			struct kvm_s390_irq *s390irq);
 
 /* implemented in interrupt.c */
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
 extern struct kvm_device_ops kvm_flic_ops;
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
 
 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
...
...@@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, ...@@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
struct kvm_s390_local_interrupt *li; struct kvm_s390_local_interrupt *li;
int cpuflags; int cpuflags;
int rc; int rc;
int ext_call_pending;
li = &dst_vcpu->arch.local_int; li = &dst_vcpu->arch.local_int;
cpuflags = atomic_read(li->cpuflags); cpuflags = atomic_read(li->cpuflags);
if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED))) ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
rc = SIGP_CC_ORDER_CODE_ACCEPTED; rc = SIGP_CC_ORDER_CODE_ACCEPTED;
else { else {
*reg &= 0xffffffff00000000UL; *reg &= 0xffffffff00000000UL;
if (cpuflags & CPUSTAT_ECALL_PEND) if (ext_call_pending)
*reg |= SIGP_STATUS_EXT_CALL_PENDING; *reg |= SIGP_STATUS_EXT_CALL_PENDING;
if (cpuflags & CPUSTAT_STOPPED) if (cpuflags & CPUSTAT_STOPPED)
*reg |= SIGP_STATUS_STOPPED; *reg |= SIGP_STATUS_STOPPED;
...@@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, ...@@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
} }
static int __sigp_external_call(struct kvm_vcpu *vcpu, static int __sigp_external_call(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu) struct kvm_vcpu *dst_vcpu, u64 *reg)
{ {
struct kvm_s390_irq irq = { struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EXTERNAL_CALL, .type = KVM_S390_INT_EXTERNAL_CALL,
@@ -105,45 +107,31 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu,
 	int rc;

 	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
-	if (!rc)
+	if (rc == -EBUSY) {
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
+		return SIGP_CC_STATUS_STORED;
+	} else if (rc == 0) {
 		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
 			   dst_vcpu->vcpu_id);
+	}

-	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
-{
-	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
-	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
-	spin_lock(&li->lock);
-	if (li->action_bits & ACTION_STOP_ON_STOP) {
-		/* another SIGP STOP is pending */
-		rc = SIGP_CC_BUSY;
-		goto out;
-	}
-	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
-		if ((action & ACTION_STORE_ON_STOP) != 0)
-			rc = -ESHUTDOWN;
-		goto out;
-	}
-	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
-	li->action_bits |= action;
-	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-	kvm_s390_vcpu_wakeup(dst_vcpu);
-out:
-	spin_unlock(&li->lock);
-	return rc;
-}
+	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
+}

 static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 {
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_STOP,
+	};
 	int rc;

-	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
-	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY)
+		rc = SIGP_CC_BUSY;
+	else if (rc == 0)
+		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
+			   dst_vcpu->vcpu_id);

 	return rc;
 }
@@ -151,21 +139,19 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
 					struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_STOP,
+		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
+	};
 	int rc;

-	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
-				ACTION_STORE_ON_STOP);
-	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
-		   dst_vcpu->vcpu_id);
-
-	if (rc == -ESHUTDOWN) {
-		/* If the CPU has already been stopped, we still have
-		 * to save the status when doing stop-and-store. This
-		 * has to be done after unlocking all spinlocks. */
-		rc = kvm_s390_store_status_unloaded(dst_vcpu,
-						KVM_S390_STORE_STATUS_NOADDR);
-	}
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY)
+		rc = SIGP_CC_BUSY;
+	else if (rc == 0)
+		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+			   dst_vcpu->vcpu_id);

 	return rc;
 }
@@ -197,41 +183,33 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 			     u32 address, u64 *reg)
 {
-	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_SET_PREFIX,
+		.u.prefix.address = address & 0x7fffe000u,
+	};
 	int rc;

-	li = &dst_vcpu->arch.local_int;
-
 	/*
 	 * Make sure the new value is valid memory. We only need to check the
 	 * first page, since address is 8k aligned and memory pieces are always
 	 * at least 1MB aligned and have at least a size of 1MB.
 	 */
-	address &= 0x7fffe000u;
-	if (kvm_is_error_gpa(vcpu->kvm, address)) {
+	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INVALID_PARAMETER;
 		return SIGP_CC_STATUS_STORED;
 	}

-	spin_lock(&li->lock);
-	/* cpu must be in stopped state */
-	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
-		rc = SIGP_CC_STATUS_STORED;
-		goto out_li;
+		return SIGP_CC_STATUS_STORED;
+	} else if (rc == 0) {
+		VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
+			   dst_vcpu->vcpu_id, irq.u.prefix.address);
 	}

-	li->irq.prefix.address = address;
-	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
-	kvm_s390_vcpu_wakeup(dst_vcpu);
-	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
-	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
-		   address);
-out_li:
-	spin_unlock(&li->lock);
 	return rc;
 }
@@ -242,9 +220,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
 	int flags;
 	int rc;

-	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock(&dst_vcpu->arch.local_int.lock);
 	if (!(flags & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -291,8 +267,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
 	/* handle (RE)START in user space */
 	int rc = -EOPNOTSUPP;

+	/* make sure we don't race with STOP irq injection */
 	spin_lock(&li->lock);
-	if (li->action_bits & ACTION_STOP_ON_STOP)
+	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
 		rc = SIGP_CC_BUSY;
 	spin_unlock(&li->lock);
@@ -333,7 +310,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 		break;
 	case SIGP_EXTERNAL_CALL:
 		vcpu->stat.instruction_sigp_external_call++;
-		rc = __sigp_external_call(vcpu, dst_vcpu);
+		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_EMERGENCY_SIGNAL:
 		vcpu->stat.instruction_sigp_emergency++;
@@ -394,6 +371,53 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 	return rc;
 }

+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+{
+	if (!vcpu->kvm->arch.user_sigp)
+		return 0;
+
+	switch (order_code) {
+	case SIGP_SENSE:
+	case SIGP_EXTERNAL_CALL:
+	case SIGP_EMERGENCY_SIGNAL:
+	case SIGP_COND_EMERGENCY_SIGNAL:
+	case SIGP_SENSE_RUNNING:
+		return 0;
+	/* update counters as we're directly dropping to user space */
+	case SIGP_STOP:
+		vcpu->stat.instruction_sigp_stop++;
+		break;
+	case SIGP_STOP_AND_STORE_STATUS:
+		vcpu->stat.instruction_sigp_stop_store_status++;
+		break;
+	case SIGP_STORE_STATUS_AT_ADDRESS:
+		vcpu->stat.instruction_sigp_store_status++;
+		break;
+	case SIGP_SET_PREFIX:
+		vcpu->stat.instruction_sigp_prefix++;
+		break;
+	case SIGP_START:
+		vcpu->stat.instruction_sigp_start++;
+		break;
+	case SIGP_RESTART:
+		vcpu->stat.instruction_sigp_restart++;
+		break;
+	case SIGP_INITIAL_CPU_RESET:
+		vcpu->stat.instruction_sigp_init_cpu_reset++;
+		break;
+	case SIGP_CPU_RESET:
+		vcpu->stat.instruction_sigp_cpu_reset++;
+		break;
+	default:
+		vcpu->stat.instruction_sigp_unknown++;
+	}
+
+	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
+		   order_code);
+
+	return 1;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -408,6 +432,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

 	order_code = kvm_s390_get_base_disp_rs(vcpu);
+	if (handle_sigp_order_in_user_space(vcpu, order_code))
+		return -EOPNOTSUPP;
 	if (r1 % 2)
 		parameter = vcpu->run->s.regs.gprs[r1];
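The handle_sigp_order_in_user_space() hook above only fires once user space has opted in via the new capability. A minimal sketch of the user-space side, assuming vm_fd is an open VM file descriptor (enabling a capability on a VM fd requires KVM_CAP_ENABLE_CAP_VM):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Opt in to handling the slow SIGP orders entirely in user space. */
static int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_USER_SIGP;	/* takes no arguments */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

After this, every order the kernel drops with -EOPNOTSUPP surfaces to the VMM as an instruction-intercept exit on the issuing vcpu, which then emulates the order itself.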
...
@@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets,
  * Trace point for a vcpu's stop requests.
  */
 TRACE_EVENT(kvm_s390_stop_request,
-	    TP_PROTO(unsigned int action_bits),
-	    TP_ARGS(action_bits),
+	    TP_PROTO(unsigned char stop_irq, unsigned char flags),
+	    TP_ARGS(stop_irq, flags),

 	    TP_STRUCT__entry(
-		    __field(unsigned int, action_bits)
+		    __field(unsigned char, stop_irq)
+		    __field(unsigned char, flags)
		    ),

 	    TP_fast_assign(
-		    __entry->action_bits = action_bits;
+		    __entry->stop_irq = stop_irq;
+		    __entry->flags = flags;
		    ),

-	    TP_printk("stop request, action_bits = %08x",
-		      __entry->action_bits)
+	    TP_printk("stop request, stop irq = %u, flags = %08x",
+		      __entry->stop_irq, __entry->flags)
	);
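With the reworked prototype, a call site now passes whether a stop IRQ is pending plus the stop flags, e.g. (illustrative only, not a line from this diff):

	trace_kvm_s390_stop_request(1, KVM_S390_STOP_FLAG_STORE_STATUS);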
...
@@ -7021,15 +7021,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return r;
 }

-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	int r;
 	struct msr_data msr;
 	struct kvm *kvm = vcpu->kvm;

-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	if (vcpu_load(vcpu))
+		return;
 	msr.data = 0x0;
 	msr.index = MSR_IA32_TSC;
 	msr.host_initiated = true;
@@ -7038,8 +7036,6 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
-
-	return r;
 }

 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
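Since no caller ever acted on the return value, the prototype change ripples through every architecture; where nothing needs to happen after vcpu creation, the stub presumably reduces to (sketch of the pattern, not a specific file from this diff):

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}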
...
@@ -49,6 +49,7 @@ static unsigned long sclp_hsa_size;
 static unsigned int sclp_max_cpu;
 static struct sclp_ipl_info sclp_ipl_info;
 static unsigned char sclp_siif;
+static unsigned char sclp_sigpif;
 static u32 sclp_ibc;

 u64 sclp_facilities;
@@ -131,6 +132,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
 		if (boot_cpu_address != cpue->address)
			continue;
		sclp_siif = cpue->siif;
+		sclp_sigpif = cpue->sigpif;
		break;
	}
@@ -172,6 +174,12 @@ int sclp_has_siif(void)
 }
 EXPORT_SYMBOL(sclp_has_siif);

+int sclp_has_sigpif(void)
+{
+	return sclp_sigpif;
+}
+EXPORT_SYMBOL(sclp_has_sigpif);
+
 unsigned int sclp_get_ibc(void)
 {
	return sclp_ibc;
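A plausible consumer of the new query, sketched from how sclp_has_siif() is already used: vcpu setup can set the SIGP-interpretation control in the SIE block so the hardware completes the fast orders without intercepting (the exact ECA bit shown is an assumption, not part of this diff):

	/* sketch: let SIE interpret fast SIGP orders when the facility exists */
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;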
...
@@ -661,7 +661,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 int kvm_arch_hardware_enable(void);
...
@@ -491,6 +491,11 @@ struct kvm_s390_emerg_info {
 	__u16 code;
 };

+#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
+
+struct kvm_s390_stop_info {
+	__u32 flags;
+};

 struct kvm_s390_mchk_info {
	__u64 cr14;
	__u64 mcic;
@@ -509,6 +514,7 @@ struct kvm_s390_irq {
		struct kvm_s390_emerg_info emerg;
		struct kvm_s390_extcall_info extcall;
		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_stop_info stop;
		struct kvm_s390_mchk_info mchk;
		char reserved[64];
	} u;
@@ -753,6 +759,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_FIXUP_HCALL 103
 #define KVM_CAP_PPC_ENABLE_HCALL 104
 #define KVM_CAP_CHECK_EXTENSION_VM 105
+#define KVM_CAP_S390_USER_SIGP 106
 #ifdef KVM_CAP_IRQ_ROUTING
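With the stop payload in the uapi union, a stop-and-store-status request is expressed as data rather than via in-kernel action bits. A hedged user-space sketch (the KVM_S390_IRQ vcpu ioctl used for injection is an assumption here; in-kernel code passes the same struct to kvm_s390_inject_vcpu()):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: queue a SIGP STOP that also stores the CPU status. */
static int inject_stop_and_store(int vcpu_fd)
{
	struct kvm_s390_irq irq;

	memset(&irq, 0, sizeof(irq));
	irq.type = KVM_S390_SIGP_STOP;
	irq.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS;
	return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);	/* ioctl name assumed */
}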
...