Commit ede58222 authored by Paolo Bonzini

Merge tag 'kvm-arm64/for-3.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into kvm-next

A handful of fixes for KVM/arm64:

- A couple of basic fixes for running BE guests on a LE host
- A performance improvement for overcommitted VMs (same as the equivalent
  patch for ARM)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Conflicts:
	arch/arm/include/asm/kvm_emulate.h
	arch/arm64/include/asm/kvm_emulate.h
parents 6da8ae55 ce94fe93
arch/arm/include/asm/kvm_emulate.h

@@ -162,4 +162,50 @@ static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cp15[c0_MPIDR];
 }
 
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return be16_to_cpu(data & 0xffff);
+		default:
+			return be32_to_cpu(data);
+		}
+	}
+
+	return data;	/* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return cpu_to_be16(data & 0xffff);
+		default:
+			return cpu_to_be32(data);
+		}
+	}
+
+	return data;	/* Leave LE untouched */
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
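For illustration only, not part of the commit: when a guest runs big-endian, the byte pattern its store should place on the bus differs from what a little-endian host would write for the same register value, so the register value must be byteswapped before user space emulates the device. Below is a standalone userspace analogue of vcpu_data_guest_to_host(), a minimal sketch that substitutes glibc's <endian.h> helpers for the kernel's be16_to_cpu()/be32_to_cpu() and a plain flag for kvm_vcpu_is_be(); guest_is_be and data_guest_to_host are hypothetical names.

#include <endian.h>	/* be16toh/be32toh: glibc stand-ins for the kernel helpers */
#include <stdbool.h>
#include <stdio.h>

static bool guest_is_be = true;	/* hypothetical stand-in for kvm_vcpu_is_be(vcpu) */

/* Userspace analogue of vcpu_data_guest_to_host() above. */
static unsigned long data_guest_to_host(unsigned long data, unsigned int len)
{
	if (guest_is_be) {
		switch (len) {
		case 1:
			return data & 0xff;	/* a single byte has no order */
		case 2:
			return be16toh(data & 0xffff);
		default:
			return be32toh(data);
		}
	}
	return data;	/* LE guest on LE host: nothing to do */
}

int main(void)
{
	/* A BE guest does a 16-bit store of 0x1234. On a LE host the
	 * result 0x3412, written out with a plain memcpy, yields the
	 * bytes 0x12 0x34 in memory - exactly what the BE store meant. */
	printf("host writes 0x%lx\n", data_guest_to_host(0x1234, 2));
	return 0;
}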
arch/arm/kvm/mmio.c

@@ -23,6 +23,68 @@
 #include "trace.h"
 
+static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+{
+	void *datap = NULL;
+	union {
+		u8	byte;
+		u16	hword;
+		u32	word;
+		u64	dword;
+	} tmp;
+
+	switch (len) {
+	case 1:
+		tmp.byte = data;
+		datap = &tmp.byte;
+		break;
+	case 2:
+		tmp.hword = data;
+		datap = &tmp.hword;
+		break;
+	case 4:
+		tmp.word = data;
+		datap = &tmp.word;
+		break;
+	case 8:
+		tmp.dword = data;
+		datap = &tmp.dword;
+		break;
+	}
+
+	memcpy(buf, datap, len);
+}
+
+static unsigned long mmio_read_buf(char *buf, unsigned int len)
+{
+	unsigned long data = 0;
+	union {
+		u16	hword;
+		u32	word;
+		u64	dword;
+	} tmp;
+
+	switch (len) {
+	case 1:
+		data = buf[0];
+		break;
+	case 2:
+		memcpy(&tmp.hword, buf, len);
+		data = tmp.hword;
+		break;
+	case 4:
+		memcpy(&tmp.word, buf, len);
+		data = tmp.word;
+		break;
+	case 8:
+		memcpy(&tmp.dword, buf, len);
+		data = tmp.dword;
+		break;
+	}
+
+	return data;
+}
+
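A quick sanity check on the two helpers above, for illustration only: for every legal access width they should be exact inverses. The harness below assumes mmio_write_buf() and mmio_read_buf() are pasted in verbatim, with the kernel's u8/u16/u32/u64 mapped to their stdint equivalents, and a 64-bit unsigned long. One caveat on x86: the kernel builds ARM with unsigned char, so keep the one-byte test value below 0x80 or compile with -funsigned-char, otherwise the data = buf[0] path sign-extends.

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uint8_t  u8;	/* kernel fixed-width types mapped for userspace */
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* ... paste mmio_write_buf() and mmio_read_buf() from above here ... */

int main(void)
{
	char buf[8];
	unsigned int lens[] = { 1, 2, 4, 8 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int len = lens[i];
		/* truncate the test pattern to the width under test */
		unsigned long v = 0x0102030405060708UL &
				  (len == 8 ? ~0UL : (1UL << (len * 8)) - 1);

		mmio_write_buf(buf, len, v);
		assert(mmio_read_buf(buf, len) == v);	/* round trip is lossless */
	}
	return 0;
}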
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
  * @vcpu: The VCPU pointer
@@ -33,28 +95,27 @@
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	unsigned long *dest;
+	unsigned long data;
 	unsigned int len;
 	int mask;
 
 	if (!run->mmio.is_write) {
-		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		*dest = 0;
-
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
 			return -EINVAL;
 
-		memcpy(dest, run->mmio.data, len);
-
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-			       *((u64 *)run->mmio.data));
+		data = mmio_read_buf(run->mmio.data, len);
 
 		if (vcpu->arch.mmio_decode.sign_extend &&
 		    len < sizeof(unsigned long)) {
 			mask = 1U << ((len * 8) - 1);
-			*dest = (*dest ^ mask) - mask;
+			data = (data ^ mask) - mask;
 		}
+
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+			       data);
+		data = vcpu_data_host_to_guest(vcpu, data, len);
+		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
 	}
 
 	return 0;
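One step in the rewritten function deserves a remark: data = (data ^ mask) - mask sign-extends an N-byte value to the full register width without a branch. With mask set to the narrow value's sign bit, the XOR flips that bit, and the subtraction borrows through all upper bits exactly when the sign bit was set. A self-contained demo of the idiom, for illustration only (the helper name is made up for the example):

#include <assert.h>
#include <stdio.h>

/* Branch-free sign extension of an N-byte value, as used above. */
static unsigned long sign_extend(unsigned long data, unsigned int len)
{
	unsigned long mask = 1UL << ((len * 8) - 1);	/* sign bit of the narrow value */

	return (data ^ mask) - mask;
}

int main(void)
{
	/* a 1-byte load of 0x80 is -128 once sign-extended */
	assert(sign_extend(0x80, 1) == (unsigned long)-128);
	/* sign bit clear: the value passes through untouched */
	assert(sign_extend(0x7f, 1) == 0x7f);
	/* a 2-byte load of 0x8001 becomes 0xff...8001 */
	printf("0x%lx\n", sign_extend(0x8001, 2));
	return 0;
}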
@@ -105,6 +166,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa)
 {
 	struct kvm_exit_mmio mmio;
+	unsigned long data;
 	unsigned long rt;
 	int ret;
 
@@ -125,13 +187,15 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	}
 
 	rt = vcpu->arch.mmio_decode.rt;
+	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
 
 	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
 					 KVM_TRACE_MMIO_READ_UNSATISFIED,
 			mmio.len, fault_ipa,
-		       (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
+		       (mmio.is_write) ? data : 0);
 
 	if (mmio.is_write)
-		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
+		mmio_write_buf(mmio.data, mmio.len, data);
 
 	if (vgic_handle_mmio(vcpu, run, &mmio))
 		return 1;
arch/arm/kvm/psci.c

@@ -71,6 +71,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		vcpu_set_thumb(vcpu);
 	}
 
+	/* Propagate caller endianness */
+	if (kvm_vcpu_is_be(source_vcpu))
+		kvm_vcpu_set_be(vcpu);
+
 	*vcpu_pc(vcpu) = target_pc;
 	vcpu->arch.pause = false;
 	smp_mb();	/* Make sure the above is visible */
arch/arm64/include/asm/kvm_arm.h

@@ -63,6 +63,7 @@
  * TAC: Trap ACTLR
  * TSC: Trap SMC
  * TSW: Trap cache operations by set/way
+ * TWE: Trap WFE
  * TWI: Trap WFI
  * TIDCP: Trap L2CTLR/L2ECTLR
  * BSU_IS: Upgrade barriers to the inner shareable domain
@@ -72,8 +73,9 @@
  * FMO: Override CPSR.F and enable signaling with VF
  * SWIO: Turn set/way invalidates into set/way clean+invalidate
  */
-#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
-			 HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
+			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
+			 HCR_AMO | HCR_IMO | HCR_FMO | \
 			 HCR_SWIO | HCR_TIDCP | HCR_RW)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
@@ -242,4 +244,6 @@
 #define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10
 
+#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)
+
 #endif /* __ARM64_KVM_ARM_H__ */
arch/arm64/include/asm/kvm_emulate.h

@@ -182,4 +182,60 @@ static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
 	return vcpu_sys_reg(vcpu, MPIDR_EL1);
 }
 
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+	else
+		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_mode_is_32bit(vcpu))
+		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+
+	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return be16_to_cpu(data & 0xffff);
+		case 4:
+			return be32_to_cpu(data & 0xffffffff);
+		default:
+			return be64_to_cpu(data);
+		}
+	}
+
+	return data;	/* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+						    unsigned long data,
+						    unsigned int len)
+{
+	if (kvm_vcpu_is_be(vcpu)) {
+		switch (len) {
+		case 1:
+			return data & 0xff;
+		case 2:
+			return cpu_to_be16(data & 0xffff);
+		case 4:
+			return cpu_to_be32(data & 0xffffffff);
+		default:
+			return cpu_to_be64(data);
+		}
+	}
+
+	return data;	/* Leave LE untouched */
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
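The magic constant (1 << 25) in the two helpers above is SCTLR_EL1.EE, the bit that selects the endianness of data accesses at EL1: a 64-bit guest has no CPSR.E bit, so its endianness has to be read from (or set in) that system register instead. A minimal sketch of the same check with a named constant, for illustration only (the standalone helper is hypothetical, not kernel API):

#include <stdbool.h>
#include <stdint.h>

#define SCTLR_EL1_EE	(1UL << 25)	/* EE: endianness of EL1 data accesses */

/* Hypothetical standalone version of the AArch64 branch of kvm_vcpu_is_be(). */
static bool aarch64_guest_is_be(uint64_t sctlr_el1)
{
	return (sctlr_el1 & SCTLR_EL1_EE) != 0;
}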
arch/arm64/kvm/Kconfig

@@ -21,6 +21,7 @@ config KVM
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
 	select KVM_ARM_VGIC
arch/arm64/kvm/handle_exit.c

@@ -47,21 +47,29 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
+ *		    instruction executed by a guest
+ *
  * @vcpu: the vcpu pointer
  *
- * Simply call kvm_vcpu_block(), which will halt execution of
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	kvm_vcpu_block(vcpu);
+	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+		kvm_vcpu_on_spin(vcpu);
+	else
+		kvm_vcpu_block(vcpu);
+
 	return 1;
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-	[ESR_EL2_EC_WFI]	= kvm_handle_wfi,
+	[ESR_EL2_EC_WFI]	= kvm_handle_wfx,
 	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
 	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_access,
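The dispatch above keys off bit 0 of the ISS field in ESR_EL2, which the hardware sets for a trapped WFE and clears for WFI. The asymmetry is deliberate: WFI means the vcpu has nothing to do until an interrupt arrives, so it can sleep in kvm_vcpu_block(), while WFE is typically a spin-lock hint and the lock holder is probably another runnable vcpu, so merely yielding via kvm_vcpu_on_spin() keeps an overcommitted host from burning a whole timeslice spinning. For illustration only, the same decision over a raw ESR value, with stub actions in place of the KVM helpers:

#include <stdint.h>
#include <stdio.h>

#define ESR_EL2_EC_WFI_ISS_WFE	(1u << 0)	/* ISS bit 0: 1 = WFE, 0 = WFI */

/* Stub actions standing in for kvm_vcpu_on_spin()/kvm_vcpu_block(). */
static void yield_to_other_vcpus(void) { puts("WFE: yield, stay runnable"); }
static void block_until_irq(void)      { puts("WFI: sleep until IRQ/FIQ"); }

/* Same decision as kvm_handle_wfx(), applied to a raw ESR_EL2 value. */
static int handle_wfx(uint32_t esr)
{
	if (esr & ESR_EL2_EC_WFI_ISS_WFE)
		yield_to_other_vcpus();
	else
		block_until_irq();
	return 1;	/* handled; resume the guest */
}

int main(void)
{
	handle_wfx(0x0);	/* trapped WFI */
	handle_wfx(0x1);	/* trapped WFE */
	return 0;
}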