Commit 8d5fb0dc authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: introduce and use kvm_s390_test_cpuflags()

Use it just like kvm_s390_set_cpuflags() and kvm_s390_clear_cpuflags().
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20180123170531.13687-5-david@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 9daecfc6
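
The helper introduced by this commit returns true only when every requested bit is set in the VCPU's cpuflags word, so a mask with several CPUSTAT_* bits tests them all at once; the callers converted below pass a single bit, where this matches the old open-coded atomic_read() checks. As a minimal illustration of that semantic, here is a userspace sketch using C11 atomics; it is not kernel code, and the struct and flag values are stand-ins, not the real s390 definitions.

/* Userspace sketch of kvm_s390_test_cpuflags() semantics (not kernel code).
 * The struct and CPUSTAT-like values are illustrative stand-ins only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_CPUSTAT_STOPPED    0x1u    /* stand-in, not the real flag value */
#define FAKE_CPUSTAT_ECALL_PEND 0x2u    /* stand-in, not the real flag value */

struct fake_sie_block {
        atomic_uint cpuflags;
};

/* true only if *all* requested bits are set, mirroring the new helper */
static bool test_cpuflags(struct fake_sie_block *sie, unsigned int flags)
{
        return (atomic_load(&sie->cpuflags) & flags) == flags;
}

int main(void)
{
        struct fake_sie_block sie;

        atomic_init(&sie.cpuflags, FAKE_CPUSTAT_STOPPED);

        printf("%d\n", test_cpuflags(&sie, FAKE_CPUSTAT_STOPPED));    /* 1 */
        printf("%d\n", test_cpuflags(&sie, FAKE_CPUSTAT_ECALL_PEND)); /* 0 */
        /* with a multi-bit mask, every bit must be set */
        printf("%d\n", test_cpuflags(&sie, FAKE_CPUSTAT_STOPPED |
                                           FAKE_CPUSTAT_ECALL_PEND)); /* 0 */
        return 0;
}
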
@@ -36,7 +36,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
 	int c, scn;
 
-	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
 		return 0;
 
 	BUG_ON(!kvm_s390_use_sca_entries());
@@ -2875,7 +2875,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 
 static bool ibs_enabled(struct kvm_vcpu *vcpu)
 {
-	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
+	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
 }
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
@@ -57,9 +57,14 @@ static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
 	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
 }
 
+static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
+{
+	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
+}
+
 static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 {
-	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
+	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
 }
 
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
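
For context on the commit message's "use it just like kvm_s390_set_cpuflags() and kvm_s390_clear_cpuflags()": the clear helper's atomic_andnot() is visible in the hunk above, and the set helper is assumed here to use an atomic OR on the same cpuflags word. The sketch below mimics the trio with C11 atomics in userspace; the names and flag values are stand-ins, not the kernel's definitions.

/* Userspace mimic of the set/clear/test trio on a shared flags word.
 * atomic_fetch_or()/atomic_fetch_and() stand in for the kernel's
 * atomic_or()/atomic_andnot(); the flag values are made up.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <assert.h>

#define FAKE_CPUSTAT_STOPPED 0x1u
#define FAKE_CPUSTAT_IBS     0x2u

static atomic_uint cpuflags;

static void set_cpuflags(unsigned int flags)
{
        atomic_fetch_or(&cpuflags, flags);      /* set the requested bits */
}

static void clear_cpuflags(unsigned int flags)
{
        atomic_fetch_and(&cpuflags, ~flags);    /* clear the requested bits */
}

static bool test_cpuflags(unsigned int flags)
{
        return (atomic_load(&cpuflags) & flags) == flags;
}

int main(void)
{
        set_cpuflags(FAKE_CPUSTAT_STOPPED | FAKE_CPUSTAT_IBS);
        assert(test_cpuflags(FAKE_CPUSTAT_STOPPED | FAKE_CPUSTAT_IBS));

        clear_cpuflags(FAKE_CPUSTAT_IBS);
        assert(test_cpuflags(FAKE_CPUSTAT_STOPPED));
        assert(!test_cpuflags(FAKE_CPUSTAT_IBS));
        return 0;
}
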
@@ -209,13 +209,13 @@ int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
 
 	trace_kvm_s390_skey_related_inst(vcpu);
 	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
-	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
+	    !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
 		return rc;
 
 	rc = s390_enable_skey();
 	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
 	if (!rc) {
-		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
+		if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
 		else
 			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
@@ -20,19 +20,18 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 			u64 *reg)
 {
-	int cpuflags;
+	const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
 	int rc;
 	int ext_call_pending;
 
-	cpuflags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
 	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
-	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
+	if (!stopped && !ext_call_pending)
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	else {
 		*reg &= 0xffffffff00000000UL;
 		if (ext_call_pending)
 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
-		if (cpuflags & CPUSTAT_STOPPED)
+		if (stopped)
 			*reg |= SIGP_STATUS_STOPPED;
 		rc = SIGP_CC_STATUS_STORED;
 	}
@@ -205,11 +204,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
 				       struct kvm_vcpu *dst_vcpu,
 				       u32 addr, u64 *reg)
 {
-	int flags;
 	int rc;
 
-	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
-	if (!(flags & CPUSTAT_STOPPED)) {
+	if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
 		return SIGP_CC_STATUS_STORED;
@@ -236,8 +233,7 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu,
 		return SIGP_CC_STATUS_STORED;
 	}
 
-	if (atomic_read(&dst_vcpu->arch.sie_block->cpuflags) &
-	    CPUSTAT_RUNNING) {
+	if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
 		/* running */
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	} else {