Commit ae98a4a9 authored by Marc Zyngier

Merge branch kvm-arm64/sysreg-cleanup-5.20 into kvmarm-master/next

* kvm-arm64/sysreg-cleanup-5.20:
  : .
  : Long overdue cleanup of the sysreg userspace access,
  : with extra scrubbing on the vgic side of things.
  : From the cover letter:
  :
  : "Schspa Shi recently reported[1] that some of the vgic code interacting
  : with userspace was reading uninitialised stack memory, and although
  : that read wasn't used any further, it prompted me to revisit this part
  : of the code.
  :
  : Needless to say, this area of the kernel is pretty crufty, and shows a
  : bunch of issues in other parts of the KVM/arm64 infrastructure. This
  : series tries to remedy a bunch of them:
  :
  : - Sanitise the way we deal with sysregs from userspace: at the moment,
  :   each and every .set_user/.get_user callback has to implement its own
  :   userspace accesses (directly or indirectly). It'd be much better if
  :   that was centralised so that we can reason about it.
  :
  : - Enforce that all AArch64 sysregs are 64bit. Always. This was sort of
  :   implied by the code, but it took some effort to convince myself that
  :   this was actually the case.
  :
  : - Move the vgic-v3 sysreg userspace accessors to the userspace
  :   callbacks instead of hijacking the vcpu trap callback. This allows
  :   us to reuse the sysreg infrastructure.
  :
  : - Consolidate userspace accesses for both GICv2, GICv3 and common code
  :   as much as possible.
  :
  : - Cleanup a bunch of not-very-useful helpers, tidy up some of the code
  :   as we touch it.
  :
  : [1] https://lore.kernel.org/r/m2h740zz1i.fsf@gmail.com"
  : .
  KVM: arm64: Get rid of outdated comments
  KVM: arm64: Descope kvm_arm_sys_reg_{get,set}_reg()
  KVM: arm64: Get rid of find_reg_by_id()
  KVM: arm64: vgic: Tidy-up calls to vgic_{get,set}_common_attr()
  KVM: arm64: vgic: Consolidate userspace access for base address setting
  KVM: arm64: vgic-v2: Add helper for legacy dist/cpuif base address setting
  KVM: arm64: vgic: Use {get,put}_user() instead of copy_{from,to}_user
  KVM: arm64: vgic-v2: Consolidate userspace access for MMIO registers
  KVM: arm64: vgic-v3: Consolidate userspace access for MMIO registers
  KVM: arm64: vgic-v3: Use u32 to manage the line level from userspace
  KVM: arm64: vgic-v3: Convert userspace accessors over to FIELD_GET/FIELD_PREP
  KVM: arm64: vgic-v3: Make the userspace accessors use sysreg API
  KVM: arm64: vgic-v3: Push user access into vgic_v3_cpu_sysregs_uaccess()
  KVM: arm64: vgic-v3: Simplify vgic_v3_has_cpu_sysregs_attr()
  KVM: arm64: Get rid of reg_from/to_user()
  KVM: arm64: Consolidate sysreg userspace accesses
  KVM: arm64: Rely on index_to_param() for size checks on userspace access
  KVM: arm64: Introduce generic get_user/set_user helpers for system registers
  KVM: arm64: Reorder handling of invariant sysregs from userspace
  KVM: arm64: Add get_reg_by_id() as a sys_reg_desc retrieving helper
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents aeb7942b 4274d427
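
For context on the "all sysregs are 64bit" point: after this series, every AArch64 sysreg moves to/from userspace as a single u64 through KVM_{GET,SET}_ONE_REG, with the copy done centrally in the kernel. A minimal userspace-side sketch (the vcpu fd setup and the register ID encoding are assumed, not shown in this diff):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Minimal sketch: read one sysreg as a single 64bit value. */
static int read_one_sysreg(int vcpu_fd, uint64_t id, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = id,     /* KVM_REG_ARM64_SYSREG | KVM_REG_SIZE_U64 | ... */
                .addr = (uint64_t)(uintptr_t)val,  /* kernel put_user()s here */
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}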
@@ -714,8 +714,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                               struct kvm_vcpu_events *events);
...
@@ -1420,18 +1420,11 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                         struct kvm_arm_device_addr *dev_addr)
 {
-        unsigned long dev_id, type;
-
-        dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
-                KVM_ARM_DEVICE_ID_SHIFT;
-        type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
-                KVM_ARM_DEVICE_TYPE_SHIFT;
-
-        switch (dev_id) {
+        switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
         case KVM_ARM_DEVICE_VGIC_V2:
                 if (!vgic_present)
                         return -ENXIO;
-                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
+                return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
         default:
                 return -ENODEV;
         }
...
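
The conversion above is the pattern that recurs throughout the series: open-coded mask-and-shift pairs are replaced with the <linux/bitfield.h> helpers, which derive the shift from the mask at compile time. A small self-contained illustration with a hypothetical field (DEMO_TYPE_MASK is not part of the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TYPE_MASK  GENMASK(31, 16)    /* hypothetical field, bits [31:16] */

/* FIELD_GET(mask, v) extracts the field; FIELD_PREP is its inverse. */
static u32 demo_extract(u64 id)
{
        return FIELD_GET(DEMO_TYPE_MASK, id);    /* == (id & mask) >> 16 */
}

static u64 demo_insert(u32 type)
{
        return FIELD_PREP(DEMO_TYPE_MASK, type); /* == ((u64)type << 16) & mask */
}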
@@ -34,18 +34,11 @@
 #include "trace.h"
 
 /*
- * All of this file is extremely similar to the ARM coproc.c, but the
- * types are different. My gut feeling is that it should be pretty
- * easy to merge, but that would be an ABI breakage -- again. VFP
- * would also need to be abstracted.
- *
  * For AArch32, we only take care of what is being trapped. Anything
  * that has to do with init and userspace access has to go via the
  * 64bit interface.
  */
 
-static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
-static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
@@ -321,16 +314,8 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
 }
 
 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                         const struct kvm_one_reg *reg, void __user *uaddr)
+                         u64 val)
 {
-        u64 id = sys_reg_to_index(rd);
-        u64 val;
-        int err;
-
-        err = reg_from_user(&val, uaddr, id);
-        if (err)
-                return err;
-
         /*
          * The only modifiable bit is the OSLK bit. Refuse the write if
          * userspace attempts to change any other bit in the register.
@@ -451,22 +436,16 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 }
 
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
-
-        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
         return 0;
 }
 
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 *val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
-
-        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
         return 0;
 }
 
@@ -493,23 +472,16 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 }
 
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
-
-        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
         return 0;
 }
 
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 *val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
-
-        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
         return 0;
 }
 
@@ -537,22 +509,16 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 }
 
 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
-
-        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
         return 0;
 }
 
 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 *val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
-
-        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
         return 0;
 }
 
@@ -579,22 +545,16 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 }
 
 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
-
-        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
         return 0;
 }
 
 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                   const struct kvm_one_reg *reg, void __user *uaddr)
+                   u64 *val)
 {
-        __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
-
-        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
-                return -EFAULT;
+        *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
         return 0;
 }
@@ -1227,16 +1187,9 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *rd,
-                               const struct kvm_one_reg *reg, void __user *uaddr)
+                               u64 val)
 {
-        const u64 id = sys_reg_to_index(rd);
         u8 csv2, csv3;
-        int err;
-        u64 val;
-
-        err = reg_from_user(&val, uaddr, id);
-        if (err)
-                return err;
 
         /*
          * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
@@ -1262,7 +1215,7 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
                 return -EINVAL;
 
         vcpu->kvm->arch.pfr0_csv2 = csv2;
-        vcpu->kvm->arch.pfr0_csv3 = csv3 ;
+        vcpu->kvm->arch.pfr0_csv3 = csv3;
 
         return 0;
 }
@@ -1275,27 +1228,17 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
  * to be changed.
  */
 static int __get_id_reg(const struct kvm_vcpu *vcpu,
-                        const struct sys_reg_desc *rd, void __user *uaddr,
+                        const struct sys_reg_desc *rd, u64 *val,
                         bool raz)
 {
-        const u64 id = sys_reg_to_index(rd);
-        const u64 val = read_id_reg(vcpu, rd, raz);
-
-        return reg_to_user(uaddr, &val, id);
+        *val = read_id_reg(vcpu, rd, raz);
+        return 0;
 }
 
 static int __set_id_reg(const struct kvm_vcpu *vcpu,
-                        const struct sys_reg_desc *rd, void __user *uaddr,
+                        const struct sys_reg_desc *rd, u64 val,
                         bool raz)
 {
-        const u64 id = sys_reg_to_index(rd);
-        int err;
-        u64 val;
-
-        err = reg_from_user(&val, uaddr, id);
-        if (err)
-                return err;
-
         /* This is what we mean by invariant: you can't change it. */
         if (val != read_id_reg(vcpu, rd, raz))
                 return -EINVAL;
@@ -1304,47 +1247,37 @@ static int __set_id_reg(const struct kvm_vcpu *vcpu,
 }
 
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                      const struct kvm_one_reg *reg, void __user *uaddr)
+                      u64 *val)
 {
         bool raz = sysreg_visible_as_raz(vcpu, rd);
 
-        return __get_id_reg(vcpu, rd, uaddr, raz);
+        return __get_id_reg(vcpu, rd, val, raz);
 }
 
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                      const struct kvm_one_reg *reg, void __user *uaddr)
+                      u64 val)
 {
         bool raz = sysreg_visible_as_raz(vcpu, rd);
 
-        return __set_id_reg(vcpu, rd, uaddr, raz);
+        return __set_id_reg(vcpu, rd, val, raz);
 }
 
 static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                          const struct kvm_one_reg *reg, void __user *uaddr)
+                          u64 val)
 {
-        return __set_id_reg(vcpu, rd, uaddr, true);
+        return __set_id_reg(vcpu, rd, val, true);
 }
 
 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                       const struct kvm_one_reg *reg, void __user *uaddr)
+                       u64 *val)
 {
-        const u64 id = sys_reg_to_index(rd);
-        const u64 val = 0;
-
-        return reg_to_user(uaddr, &val, id);
+        *val = 0;
+        return 0;
 }
 
 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                      const struct kvm_one_reg *reg, void __user *uaddr)
+                      u64 val)
 {
-        int err;
-        u64 val;
-
-        /* Perform the access even if we are going to ignore the value */
-        err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
-        if (err)
-                return err;
-
         return 0;
 }
@@ -2639,35 +2572,34 @@ static bool index_to_params(u64 id, struct sys_reg_params *params)
         }
 }
 
-const struct sys_reg_desc *find_reg_by_id(u64 id,
-                                          struct sys_reg_params *params,
+const struct sys_reg_desc *get_reg_by_id(u64 id,
                                           const struct sys_reg_desc table[],
                                           unsigned int num)
 {
-        if (!index_to_params(id, params))
+        struct sys_reg_params params;
+
+        if (!index_to_params(id, &params))
                 return NULL;
 
-        return find_reg(params, table, num);
+        return find_reg(&params, table, num);
 }
 
 /* Decode an index value, and find the sys_reg_desc entry. */
-static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
-                                                        u64 id)
+static const struct sys_reg_desc *
+id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
+                   const struct sys_reg_desc table[], unsigned int num)
 {
         const struct sys_reg_desc *r;
-        struct sys_reg_params params;
 
         /* We only do sys_reg for now. */
         if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                 return NULL;
 
-        if (!index_to_params(id, &params))
-                return NULL;
-
-        r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+        r = get_reg_by_id(id, table, num);
 
         /* Not saved in the sys_reg array and not otherwise accessible? */
-        if (r && !(r->reg || r->get_user))
+        if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
                 r = NULL;
 
         return r;
@@ -2707,48 +2639,30 @@ static struct sys_reg_desc invariant_sys_regs[] = {
         { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
 };
 
-static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
-{
-        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
-                return -EFAULT;
-        return 0;
-}
-
-static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
-{
-        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
-                return -EFAULT;
-        return 0;
-}
-
-static int get_invariant_sys_reg(u64 id, void __user *uaddr)
+static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
 {
-        struct sys_reg_params params;
         const struct sys_reg_desc *r;
 
-        r = find_reg_by_id(id, &params, invariant_sys_regs,
-                           ARRAY_SIZE(invariant_sys_regs));
+        r = get_reg_by_id(id, invariant_sys_regs,
+                          ARRAY_SIZE(invariant_sys_regs));
         if (!r)
                 return -ENOENT;
 
-        return reg_to_user(uaddr, &r->val, id);
+        return put_user(r->val, uaddr);
 }
 
-static int set_invariant_sys_reg(u64 id, void __user *uaddr)
+static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
 {
-        struct sys_reg_params params;
         const struct sys_reg_desc *r;
-        int err;
-        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+        u64 val;
 
-        r = find_reg_by_id(id, &params, invariant_sys_regs,
-                           ARRAY_SIZE(invariant_sys_regs));
+        r = get_reg_by_id(id, invariant_sys_regs,
+                          ARRAY_SIZE(invariant_sys_regs));
         if (!r)
                 return -ENOENT;
 
-        err = reg_from_user(&val, uaddr, id);
-        if (err)
-                return err;
+        if (get_user(val, uaddr))
+                return -EFAULT;
 
         /* This is what we mean by invariant: you can't change it. */
         if (r->val != val)
@@ -2839,54 +2753,86 @@ static int demux_c15_set(u64 id, void __user *uaddr)
         }
 }
 
-int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
+                         const struct sys_reg_desc table[], unsigned int num)
 {
+        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
         const struct sys_reg_desc *r;
-        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+        u64 val;
+        int ret;
+
+        r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
+        if (!r)
+                return -ENOENT;
+
+        if (r->get_user) {
+                ret = (r->get_user)(vcpu, r, &val);
+        } else {
+                val = __vcpu_sys_reg(vcpu, r->reg);
+                ret = 0;
+        }
+
+        if (!ret)
+                ret = put_user(val, uaddr);
+
+        return ret;
+}
+
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+        int err;
 
         if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                 return demux_c15_get(reg->id, uaddr);
 
-        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
-                return -ENOENT;
-
-        r = index_to_sys_reg_desc(vcpu, reg->id);
-        if (!r)
-                return get_invariant_sys_reg(reg->id, uaddr);
-
-        /* Check for regs disabled by runtime config */
-        if (sysreg_hidden(vcpu, r))
-                return -ENOENT;
+        err = get_invariant_sys_reg(reg->id, uaddr);
+        if (err != -ENOENT)
+                return err;
 
-        if (r->get_user)
-                return (r->get_user)(vcpu, r, reg, uaddr);
-
-        return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
+        return kvm_sys_reg_get_user(vcpu, reg,
+                                    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 }
 
-int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
+                         const struct sys_reg_desc table[], unsigned int num)
 {
+        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
         const struct sys_reg_desc *r;
-        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+        u64 val;
+        int ret;
+
+        if (get_user(val, uaddr))
+                return -EFAULT;
+
+        r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
+        if (!r)
+                return -ENOENT;
+
+        if (r->set_user) {
+                ret = (r->set_user)(vcpu, r, val);
+        } else {
+                __vcpu_sys_reg(vcpu, r->reg) = val;
+                ret = 0;
+        }
+
+        return ret;
+}
+
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+        void __user *uaddr = (void __user *)(unsigned long)reg->addr;
+        int err;
 
         if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                 return demux_c15_set(reg->id, uaddr);
 
-        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
-                return -ENOENT;
-
-        r = index_to_sys_reg_desc(vcpu, reg->id);
-        if (!r)
-                return set_invariant_sys_reg(reg->id, uaddr);
-
-        /* Check for regs disabled by runtime config */
-        if (sysreg_hidden(vcpu, r))
-                return -ENOENT;
-
-        if (r->set_user)
-                return (r->set_user)(vcpu, r, reg, uaddr);
-
-        return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
+        err = set_invariant_sys_reg(reg->id, uaddr);
+        if (err != -ENOENT)
+                return err;
+
+        return kvm_sys_reg_set_user(vcpu, reg,
+                                    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 }
 
 static unsigned int num_demux_regs(void)
...
@@ -75,9 +75,9 @@ struct sys_reg_desc {
         /* Custom get/set_user functions, fallback to generic if NULL */
         int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                        const struct kvm_one_reg *reg, void __user *uaddr);
+                        u64 *val);
         int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
-                        const struct kvm_one_reg *reg, void __user *uaddr);
+                        u64 val);
 
         /* Return mask of REG_* runtime visibility overrides */
         unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
@@ -190,11 +190,17 @@ find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
         return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
 }
 
-const struct sys_reg_desc *find_reg_by_id(u64 id,
-                                          struct sys_reg_params *params,
+const struct sys_reg_desc *get_reg_by_id(u64 id,
                                           const struct sys_reg_desc table[],
                                           unsigned int num);
 
+int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+
+int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
+                         const struct sys_reg_desc table[], unsigned int num);
+int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
+                         const struct sys_reg_desc table[], unsigned int num);
+
 #define AA32(_x)        .aarch32_map = AA32_##_x
 #define Op0(_x)         .Op0 = _x
 #define Op1(_x)         .Op1 = _x
...
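
With the params scratch space now internal, get_reg_by_id() also doubles as a cheap existence check; this is exactly how vgic_v3_has_cpu_sysregs_attr() is rewritten further down. A trivial sketch (demo_has_reg is illustrative only, not part of the patch):

/* Illustrative only: probe whether an ID is backed by a descriptor. */
static bool demo_has_reg(u64 id, const struct sys_reg_desc *table,
                         unsigned int num)
{
        return get_reg_by_id(id, table, num) != NULL;
}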
@@ -10,293 +10,357 @@
 #include "vgic/vgic.h"
 #include "sys_regs.h"
 
-static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                            const struct sys_reg_desc *r)
-{
-        u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
-        struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
-        struct vgic_vmcr vmcr;
-        u64 val;
-
-        vgic_get_vmcr(vcpu, &vmcr);
-        if (p->is_write) {
-                val = p->regval;
-
-                /*
-                 * Disallow restoring VM state if not supported by this
-                 * hardware.
-                 */
-                host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
-                                 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
-                if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
-                        return false;
-
-                vgic_v3_cpu->num_pri_bits = host_pri_bits;
-
-                host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
-                                ICC_CTLR_EL1_ID_BITS_SHIFT;
-                if (host_id_bits > vgic_v3_cpu->num_id_bits)
-                        return false;
-
-                vgic_v3_cpu->num_id_bits = host_id_bits;
-
-                host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
-                              ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
-                seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
-                        ICC_CTLR_EL1_SEIS_SHIFT;
-                if (host_seis != seis)
-                        return false;
-
-                host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
-                             ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
-                a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
-                if (host_a3v != a3v)
-                        return false;
-
-                /*
-                 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
-                 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
-                 */
-                vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
-                vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
-                vgic_set_vmcr(vcpu, &vmcr);
-        } else {
-                val = 0;
-                val |= (vgic_v3_cpu->num_pri_bits - 1) <<
-                        ICC_CTLR_EL1_PRI_BITS_SHIFT;
-                val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
-                val |= ((kvm_vgic_global_state.ich_vtr_el2 &
-                         ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
-                        ICC_CTLR_EL1_SEIS_SHIFT;
-                val |= ((kvm_vgic_global_state.ich_vtr_el2 &
-                         ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
-                        ICC_CTLR_EL1_A3V_SHIFT;
-                /*
-                 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
-                 * Extract it directly using ICC_CTLR_EL1 reg definitions.
-                 */
-                val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
-                val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-
-                p->regval = val;
-        }
-
-        return true;
-}
+static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 val)
+{
+        u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
+        struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+
+        /*
+         * Disallow restoring VM state if not supported by this
+         * hardware.
+         */
+        host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
+        if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
+                return -EINVAL;
+
+        vgic_v3_cpu->num_pri_bits = host_pri_bits;
+
+        host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
+        if (host_id_bits > vgic_v3_cpu->num_id_bits)
+                return -EINVAL;
+
+        vgic_v3_cpu->num_id_bits = host_id_bits;
+
+        host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
+        seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
+        if (host_seis != seis)
+                return -EINVAL;
+
+        host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
+        a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
+        if (host_a3v != a3v)
+                return -EINVAL;
+
+        /*
+         * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
+         * The vgic_set_vmcr() will convert to ICH_VMCR layout.
+         */
+        vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
+        vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
+        vgic_set_vmcr(vcpu, &vmcr);
+
+        return 0;
+}
+
+static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 *valp)
+{
+        struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
+        struct vgic_vmcr vmcr;
+        u64 val;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+
+        val = 0;
+        val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
+        val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
+        val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
+                          FIELD_GET(ICH_VTR_SEIS_MASK,
+                                    kvm_vgic_global_state.ich_vtr_el2));
+        val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
+                          FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
+        /*
+         * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
+         * Extract it directly using ICC_CTLR_EL1 reg definitions.
+         */
+        val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
+        val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);
+
+        *valp = val;
+
+        return 0;
+}
 
-static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                           const struct sys_reg_desc *r)
-{
-        struct vgic_vmcr vmcr;
-
-        vgic_get_vmcr(vcpu, &vmcr);
-        if (p->is_write) {
-                vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
-                vgic_set_vmcr(vcpu, &vmcr);
-        } else {
-                p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
-        }
-
-        return true;
-}
+static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                       u64 val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
+        vgic_set_vmcr(vcpu, &vmcr);
+
+        return 0;
+}
+
+static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                       u64 *val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        *val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);
+
+        return 0;
+}
 
-static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                            const struct sys_reg_desc *r)
-{
-        struct vgic_vmcr vmcr;
-
-        vgic_get_vmcr(vcpu, &vmcr);
-        if (p->is_write) {
-                vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
-                            ICC_BPR0_EL1_SHIFT;
-                vgic_set_vmcr(vcpu, &vmcr);
-        } else {
-                p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
-                             ICC_BPR0_EL1_MASK;
-        }
-
-        return true;
-}
+static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
+        vgic_set_vmcr(vcpu, &vmcr);
+
+        return 0;
+}
+
+static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 *val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        *val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);
+
+        return 0;
+}
 
-static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                            const struct sys_reg_desc *r)
-{
-        struct vgic_vmcr vmcr;
-
-        if (!p->is_write)
-                p->regval = 0;
-
-        vgic_get_vmcr(vcpu, &vmcr);
-        if (!vmcr.cbpr) {
-                if (p->is_write) {
-                        vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
-                                     ICC_BPR1_EL1_SHIFT;
-                        vgic_set_vmcr(vcpu, &vmcr);
-                } else {
-                        p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
-                                     ICC_BPR1_EL1_MASK;
-                }
-        } else {
-                if (!p->is_write)
-                        p->regval = min((vmcr.bpr + 1), 7U);
-        }
-
-        return true;
-}
+static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        if (!vmcr.cbpr) {
+                vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
+                vgic_set_vmcr(vcpu, &vmcr);
+        }
+
+        return 0;
+}
+
+static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 *val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        if (!vmcr.cbpr)
+                *val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
+        else
+                *val = min((vmcr.bpr + 1), 7U);
+
+        return 0;
+}
 
-static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                              const struct sys_reg_desc *r)
-{
-        struct vgic_vmcr vmcr;
-
-        vgic_get_vmcr(vcpu, &vmcr);
-        if (p->is_write) {
-                vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
-                               ICC_IGRPEN0_EL1_SHIFT;
-                vgic_set_vmcr(vcpu, &vmcr);
-        } else {
-                p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
-                             ICC_IGRPEN0_EL1_MASK;
-        }
-
-        return true;
-}
+static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                          u64 val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
+        vgic_set_vmcr(vcpu, &vmcr);
+
+        return 0;
+}
+
+static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                          u64 *val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        *val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);
+
+        return 0;
+}
 
-static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                              const struct sys_reg_desc *r)
-{
-        struct vgic_vmcr vmcr;
-
-        vgic_get_vmcr(vcpu, &vmcr);
-        if (p->is_write) {
-                vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
-                               ICC_IGRPEN1_EL1_SHIFT;
-                vgic_set_vmcr(vcpu, &vmcr);
-        } else {
-                p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
-                             ICC_IGRPEN1_EL1_MASK;
-        }
-
-        return true;
-}
+static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                          u64 val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
+        vgic_set_vmcr(vcpu, &vmcr);
+
+        return 0;
+}
+
+static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                          u64 *val)
+{
+        struct vgic_vmcr vmcr;
+
+        vgic_get_vmcr(vcpu, &vmcr);
+        *val = FIELD_GET(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);
+
+        return 0;
+}
 
-static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
-                                   struct sys_reg_params *p, u8 apr, u8 idx)
-{
-        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
-        uint32_t *ap_reg;
-
-        if (apr)
-                ap_reg = &vgicv3->vgic_ap1r[idx];
-        else
-                ap_reg = &vgicv3->vgic_ap0r[idx];
-
-        if (p->is_write)
-                *ap_reg = p->regval;
-        else
-                p->regval = *ap_reg;
-}
+static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
+{
+        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+        if (apr)
+                vgicv3->vgic_ap1r[idx] = val;
+        else
+                vgicv3->vgic_ap0r[idx] = val;
+}
+
+static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
+{
+        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+        if (apr)
+                return vgicv3->vgic_ap1r[idx];
+        else
+                return vgicv3->vgic_ap0r[idx];
+}
 
-static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                            const struct sys_reg_desc *r, u8 apr)
-{
-        u8 idx = r->Op2 & 3;
-
-        if (idx > vgic_v3_max_apr_idx(vcpu))
-                goto err;
-
-        vgic_v3_access_apr_reg(vcpu, p, apr, idx);
-        return true;
-err:
-        if (!p->is_write)
-                p->regval = 0;
-
-        return false;
-}
-
-static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                            const struct sys_reg_desc *r)
-{
-        return access_gic_aprn(vcpu, p, r, 0);
-}
-
-static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                            const struct sys_reg_desc *r)
-{
-        return access_gic_aprn(vcpu, p, r, 1);
-}
+static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 val)
+{
+        u8 idx = r->Op2 & 3;
+
+        if (idx > vgic_v3_max_apr_idx(vcpu))
+                return -EINVAL;
+
+        set_apr_reg(vcpu, val, 0, idx);
+        return 0;
+}
+
+static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 *val)
+{
+        u8 idx = r->Op2 & 3;
+
+        if (idx > vgic_v3_max_apr_idx(vcpu))
+                return -EINVAL;
+
+        *val = get_apr_reg(vcpu, 0, idx);
+        return 0;
+}
+
+static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 val)
+{
+        u8 idx = r->Op2 & 3;
+
+        if (idx > vgic_v3_max_apr_idx(vcpu))
+                return -EINVAL;
+
+        set_apr_reg(vcpu, val, 1, idx);
+        return 0;
+}
+
+static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                        u64 *val)
+{
+        u8 idx = r->Op2 & 3;
+
+        if (idx > vgic_v3_max_apr_idx(vcpu))
+                return -EINVAL;
+
+        *val = get_apr_reg(vcpu, 1, idx);
+        return 0;
+}
 
-static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-                           const struct sys_reg_desc *r)
-{
-        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
-
-        /* Validate SRE bit */
-        if (p->is_write) {
-                if (!(p->regval & ICC_SRE_EL1_SRE))
-                        return false;
-        } else {
-                p->regval = vgicv3->vgic_sre;
-        }
-
-        return true;
-}
+static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                       u64 val)
+{
+        /* Validate SRE bit */
+        if (!(val & ICC_SRE_EL1_SRE))
+                return -EINVAL;
+
+        return 0;
+}
+
+static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                       u64 *val)
+{
+        struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+        *val = vgicv3->vgic_sre;
+
+        return 0;
+}
 
 static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
-        { SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
-        { SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
-        { SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
-        { SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
-        { SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
-        { SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
-        { SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
-        { SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
-        { SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
-        { SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
-        { SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
-        { SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
-        { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
-        { SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
-        { SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
+        { SYS_DESC(SYS_ICC_PMR_EL1),
+          .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
+        { SYS_DESC(SYS_ICC_BPR0_EL1),
+          .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
+        { SYS_DESC(SYS_ICC_AP0R0_EL1),
+          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+        { SYS_DESC(SYS_ICC_AP0R1_EL1),
+          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+        { SYS_DESC(SYS_ICC_AP0R2_EL1),
+          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+        { SYS_DESC(SYS_ICC_AP0R3_EL1),
+          .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
+        { SYS_DESC(SYS_ICC_AP1R0_EL1),
+          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+        { SYS_DESC(SYS_ICC_AP1R1_EL1),
+          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+        { SYS_DESC(SYS_ICC_AP1R2_EL1),
+          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+        { SYS_DESC(SYS_ICC_AP1R3_EL1),
+          .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
+        { SYS_DESC(SYS_ICC_BPR1_EL1),
+          .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
+        { SYS_DESC(SYS_ICC_CTLR_EL1),
+          .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
+        { SYS_DESC(SYS_ICC_SRE_EL1),
+          .set_user = set_gic_sre, .get_user = get_gic_sre, },
+        { SYS_DESC(SYS_ICC_IGRPEN0_EL1),
+          .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
+        { SYS_DESC(SYS_ICC_IGRPEN1_EL1),
+          .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
 };
 
-int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
-                                 u64 *reg)
-{
-        struct sys_reg_params params;
-        u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
-
-        params.regval = *reg;
-        params.is_write = is_write;
-
-        if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
-                           ARRAY_SIZE(gic_v3_icc_reg_descs)))
+static u64 attr_to_id(u64 attr)
+{
+        return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
+                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
+                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
+                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
+                             FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
+}
+
+int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+        if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
+                          ARRAY_SIZE(gic_v3_icc_reg_descs)))
                 return 0;
 
         return -ENXIO;
 }
 
-int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
-                                u64 *reg)
-{
-        struct sys_reg_params params;
-        const struct sys_reg_desc *r;
-        u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
-
-        if (is_write)
-                params.regval = *reg;
-        params.is_write = is_write;
-
-        r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
-                           ARRAY_SIZE(gic_v3_icc_reg_descs));
-        if (!r)
-                return -ENXIO;
-
-        if (!r->access(vcpu, &params, r))
-                return -EINVAL;
-
-        if (!is_write)
-                *reg = params.regval;
-
-        return 0;
-}
+int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr,
+                                bool is_write)
+{
+        struct kvm_one_reg reg = {
+                .id = attr_to_id(attr->attr),
+                .addr = attr->addr,
+        };
+
+        if (is_write)
+                return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
+                                            ARRAY_SIZE(gic_v3_icc_reg_descs));
+        else
+                return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
+                                            ARRAY_SIZE(gic_v3_icc_reg_descs));
+}
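
attr_to_id() is what lets the device-attribute path and the ONE_REG path share one table: it repacks the Op0/Op1/CRn/CRm/Op2 fields carried in the attribute encoding into a standard KVM_REG_ARM64_SYSREG index. A sketch of the resulting round-trip (demo_attr_is_handled is illustrative only, not part of the patch):

/* Illustrative only: an attr is handled iff its repacked sysreg ID
 * resolves against the ICC descriptor table. */
static bool demo_attr_is_handled(u64 attr)
{
        return get_reg_by_id(attr_to_id(attr), gic_v3_icc_reg_descs,
                             ARRAY_SIZE(gic_v3_icc_reg_descs)) != NULL;
}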
@@ -41,11 +41,42 @@ static int vgic_check_type(struct kvm *kvm, int type_needed)
         return 0;
 }
 
+int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
+{
+        struct vgic_dist *vgic = &kvm->arch.vgic;
+        int r;
+
+        mutex_lock(&kvm->lock);
+        switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
+        case KVM_VGIC_V2_ADDR_TYPE_DIST:
+                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+                if (!r)
+                        r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
+                                               SZ_4K, KVM_VGIC_V2_DIST_SIZE);
+                if (!r)
+                        vgic->vgic_dist_base = dev_addr->addr;
+                break;
+        case KVM_VGIC_V2_ADDR_TYPE_CPU:
+                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+                if (!r)
+                        r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
+                                               SZ_4K, KVM_VGIC_V2_CPU_SIZE);
+                if (!r)
+                        vgic->vgic_cpu_base = dev_addr->addr;
+                break;
+        default:
+                r = -ENODEV;
+        }
+
+        mutex_unlock(&kvm->lock);
+
+        return r;
+}
+
 /**
  * kvm_vgic_addr - set or get vgic VM base addresses
  * @kvm: pointer to the vm struct
- * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
- * @addr: pointer to address value
+ * @attr: pointer to the attribute being retrieved/updated
  * @write: if true set the address in the VM address space, if false read the
  *          address
  *
@@ -57,15 +88,22 @@ static int vgic_check_type(struct kvm *kvm, int type_needed)
  * overlapping regions in case of a virtual GICv3 here, since we don't know
  * the number of VCPUs yet, so we defer this check to map_resources().
  */
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
 {
-        int r = 0;
+        u64 __user *uaddr = (u64 __user *)attr->addr;
         struct vgic_dist *vgic = &kvm->arch.vgic;
         phys_addr_t *addr_ptr, alignment, size;
         u64 undef_value = VGIC_ADDR_UNDEF;
+        u64 addr;
+        int r;
+
+        /* Reading a redistributor region addr implies getting the index */
+        if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
+                if (get_user(addr, uaddr))
+                        return -EFAULT;
 
         mutex_lock(&kvm->lock);
-        switch (type) {
+        switch (attr->attr) {
         case KVM_VGIC_V2_ADDR_TYPE_DIST:
                 r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                 addr_ptr = &vgic->vgic_dist_base;
@@ -91,7 +129,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
                 if (r)
                         break;
                 if (write) {
-                        r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
+                        r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
                         goto out;
                 }
                 rdreg = list_first_entry_or_null(&vgic->rd_regions,
@@ -111,14 +149,12 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
                 if (r)
                         break;
 
-                index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
+                index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
 
                 if (write) {
-                        gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
-                        u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
-                                        >> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
-                        u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
-                                        >> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;
+                        gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
+                        u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
+                        u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);
 
                         if (!count || flags)
                                 r = -EINVAL;
@@ -134,9 +170,9 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
                                 goto out;
                         }
 
-                        *addr = index;
-                        *addr |= rdreg->base;
-                        *addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
+                        addr = index;
+                        addr |= rdreg->base;
+                        addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                         goto out;
                 }
         default:
@@ -147,15 +183,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
                 goto out;
 
         if (write) {
-                r = vgic_check_iorange(kvm, *addr_ptr, *addr, alignment, size);
+                r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
                 if (!r)
-                        *addr_ptr = *addr;
+                        *addr_ptr = addr;
         } else {
-                *addr = *addr_ptr;
+                addr = *addr_ptr;
         }
 
 out:
         mutex_unlock(&kvm->lock);
 
+        if (!r && !write)
+                r = put_user(addr, uaddr);
+
         return r;
 }
@@ -165,17 +205,9 @@ static int vgic_set_common_attr(struct kvm_device *dev,
         int r;
 
         switch (attr->group) {
-        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
-                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
-                u64 addr;
-                unsigned long type = (unsigned long)attr->attr;
-
-                if (copy_from_user(&addr, uaddr, sizeof(addr)))
-                        return -EFAULT;
-
-                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
+        case KVM_DEV_ARM_VGIC_GRP_ADDR:
+                r = kvm_vgic_addr(dev->kvm, attr, true);
                 return (r == -ENODEV) ? -ENXIO : r;
-        }
         case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                 u32 val;
@@ -214,6 +246,24 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                         r = vgic_init(dev->kvm);
                         mutex_unlock(&dev->kvm->lock);
                         return r;
+                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
+                        /*
+                         * OK, this one isn't common at all, but we
+                         * want to handle all control group attributes
+                         * in a single place.
+                         */
+                        if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
+                                return -ENXIO;
+                        mutex_lock(&dev->kvm->lock);
+
+                        if (!lock_all_vcpus(dev->kvm)) {
+                                mutex_unlock(&dev->kvm->lock);
+                                return -EBUSY;
+                        }
+
+                        r = vgic_v3_save_pending_tables(dev->kvm);
+                        unlock_all_vcpus(dev->kvm);
+                        mutex_unlock(&dev->kvm->lock);
+                        return r;
                 }
                 break;
         }
@@ -228,22 +278,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
         int r = -ENXIO;
 
         switch (attr->group) {
-        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
-                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
-                u64 addr;
-                unsigned long type = (unsigned long)attr->attr;
-
-                if (copy_from_user(&addr, uaddr, sizeof(addr)))
-                        return -EFAULT;
-
-                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
-                if (r)
-                        return (r == -ENODEV) ? -ENXIO : r;
-
-                if (copy_to_user(uaddr, &addr, sizeof(addr)))
-                        return -EFAULT;
-                break;
-        }
+        case KVM_DEV_ARM_VGIC_GRP_ADDR:
+                r = kvm_vgic_addr(dev->kvm, attr, false);
+                return (r == -ENODEV) ? -ENXIO : r;
         case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
@@ -348,17 +385,18 @@ bool lock_all_vcpus(struct kvm *kvm)
  *
  * @dev: kvm device handle
  * @attr: kvm device attribute
- * @reg: address the value is read or written
  * @is_write: true if userspace is writing a register
  */
 static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                     struct kvm_device_attr *attr,
-                                    u32 *reg, bool is_write)
+                                    bool is_write)
 {
+        u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
         struct vgic_reg_attr reg_attr;
         gpa_t addr;
         struct kvm_vcpu *vcpu;
         int ret;
+        u32 val;
 
         ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
         if (ret)
@@ -367,6 +405,10 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
         vcpu = reg_attr.vcpu;
         addr = reg_attr.addr;
 
+        if (is_write)
+                if (get_user(val, uaddr))
+                        return -EFAULT;
+
         mutex_lock(&dev->kvm->lock);
 
         ret = vgic_init(dev->kvm);
@@ -380,10 +422,10 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
         switch (attr->group) {
         case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
-                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
+                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
                 break;
         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
-                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
+                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
                 break;
         default:
                 ret = -EINVAL;
@@ -393,57 +435,35 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
         unlock_all_vcpus(dev->kvm);
 out:
         mutex_unlock(&dev->kvm->lock);
+
+        if (!ret && !is_write)
+                ret = put_user(val, uaddr);
+
         return ret;
 }
 
 static int vgic_v2_set_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
 {
-        int ret;
-
-        ret = vgic_set_common_attr(dev, attr);
-        if (ret != -ENXIO)
-                return ret;
-
         switch (attr->group) {
         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
-        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
-                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-                u32 reg;
-
-                if (get_user(reg, uaddr))
-                        return -EFAULT;
-
-                return vgic_v2_attr_regs_access(dev, attr, &reg, true);
-        }
+        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+                return vgic_v2_attr_regs_access(dev, attr, true);
+        default:
+                return vgic_set_common_attr(dev, attr);
         }
-
-        return -ENXIO;
 }
 
 static int vgic_v2_get_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
 {
-        int ret;
-
-        ret = vgic_get_common_attr(dev, attr);
-        if (ret != -ENXIO)
-                return ret;
-
         switch (attr->group) {
         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
-        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
-                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-                u32 reg = 0;
-
-                ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
-                if (ret)
-                        return ret;
-                return put_user(reg, uaddr);
-        }
+        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+                return vgic_v2_attr_regs_access(dev, attr, false);
+        default:
+                return vgic_get_common_attr(dev, attr);
         }
-
-        return -ENXIO;
 }
 
 static int vgic_v2_has_attr(struct kvm_device *dev,
@@ -512,18 +532,18 @@ int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
  *
  * @dev: kvm device handle
  * @attr: kvm device attribute
- * @reg: address the value is read or written
 * @is_write: true if userspace is writing a register
 */
 static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                     struct kvm_device_attr *attr,
-                                    u64 *reg, bool is_write)
+                                    bool is_write)
 {
         struct vgic_reg_attr reg_attr;
         gpa_t addr;
         struct kvm_vcpu *vcpu;
+        bool uaccess;
+        u32 val;
         int ret;
-        u32 tmp32;
 
         ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
         if (ret)
@@ -532,6 +552,21 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
         vcpu = reg_attr.vcpu;
         addr = reg_attr.addr;
 
+        switch (attr->group) {
+        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
+                /* Sysregs uaccess is performed by the sysreg handling code */
+                uaccess = false;
+                break;
+        default:
+                uaccess = true;
+        }
+
+        if (uaccess && is_write) {
+                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
+
+                if (get_user(val, uaddr))
+                        return -EFAULT;
+        }
+
         mutex_lock(&dev->kvm->lock);
 
         if (unlikely(!vgic_initialized(dev->kvm))) {
@@ -546,29 +581,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
         switch (attr->group) {
         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
-                if (is_write)
-                        tmp32 = *reg;
-
-                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
-                if (!is_write)
-                        *reg = tmp32;
+                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
                 break;
         case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
-                if (is_write)
-                        tmp32 = *reg;
-
-                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
-                if (!is_write)
-                        *reg = tmp32;
+                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
                 break;
-        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
-                u64 regid;
-
-                regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
-                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
-                                                  regid, reg);
+        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
+                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
                 break;
-        }
         case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                 unsigned int info, intid;
@@ -578,7 +598,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                         intid = attr->attr &
                                 KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                         ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
-                                                              intid, reg);
+                                                              intid, &val);
                 } else {
                         ret = -EINVAL;
                 }
@@ -592,117 +612,41 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
         unlock_all_vcpus(dev->kvm);
 out:
         mutex_unlock(&dev->kvm->lock);
+
+        if (!ret && uaccess && !is_write) {
+                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
+
+                ret = put_user(val, uaddr);
+        }
+
         return ret;
 }
 
 static int vgic_v3_set_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
 {
-        int ret;
-
-        ret = vgic_set_common_attr(dev, attr);
-        if (ret != -ENXIO)
-                return ret;
-
         switch (attr->group) {
         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
-        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
-                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-                u32 tmp32;
-                u64 reg;
-
-                if (get_user(tmp32, uaddr))
-                        return -EFAULT;
-
-                reg = tmp32;
-                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
-        }
-        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
-                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
-                u64 reg;
-
-                if (get_user(reg, uaddr))
-                        return -EFAULT;
-
-                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
-        }
-        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
-                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-                u64 reg;
-                u32 tmp32;
-
-                if (get_user(tmp32, uaddr))
-                        return -EFAULT;
-
-                reg = tmp32;
-                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
-        }
-        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
-                int ret;
-
-                switch (attr->attr) {
-                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
-                        mutex_lock(&dev->kvm->lock);
-
-                        if (!lock_all_vcpus(dev->kvm)) {
-                                mutex_unlock(&dev->kvm->lock);
-                                return -EBUSY;
-                        }
-
-                        ret = vgic_v3_save_pending_tables(dev->kvm);
-                        unlock_all_vcpus(dev->kvm);
-                        mutex_unlock(&dev->kvm->lock);
-                        return ret;
-                }
-                break;
-        }
+        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
+        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
+        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
+                return vgic_v3_attr_regs_access(dev, attr, true);
+        default:
+                return vgic_set_common_attr(dev, attr);
         }
-
-        return -ENXIO;
 }
 
 static int vgic_v3_get_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
 {
-        int ret;
-
-        ret = vgic_get_common_attr(dev, attr);
-        if (ret != -ENXIO)
-                return ret;
-
         switch (attr->group) {
         case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
-        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
-                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-                u64 reg;
-                u32 tmp32;
-
-                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
-                if (ret)
-                        return ret;
-                tmp32 = reg;
-                return put_user(tmp32, uaddr);
-        }
-        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
-                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
-                u64 reg;
-
-                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
-                if (ret)
-                        return ret;
-                return put_user(reg, uaddr);
-        }
-        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
-                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-                u64 reg;
-                u32 tmp32;
-
-                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
-                if (ret)
-                        return ret;
-                tmp32 = reg;
-                return put_user(tmp32, uaddr);
-        }
-        }
-
-        return -ENXIO;
+        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
+        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
+        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
+                return vgic_v3_attr_regs_access(dev, attr, false);
+        default:
+                return vgic_get_common_attr(dev, attr);
         }
 }
 
 static int vgic_v3_has_attr(struct kvm_device *dev,
...
@@ -986,12 +986,8 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
                 iodev.base_addr = 0;
                 break;
         }
-        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
-                u64 reg, id;
-
-                id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
-                return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
-        }
+        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
+                return vgic_v3_has_cpu_sysregs_attr(vcpu, attr);
         default:
                 return -ENXIO;
         }
@@ -1158,7 +1154,7 @@ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 }
 
 int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
-                                    u32 intid, u64 *val)
+                                    u32 intid, u32 *val)
 {
         if (intid % 32)
                 return -EINVAL;
...
@@ -775,10 +775,10 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
         }
 }
 
-u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
+u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
 {
         int i;
-        u64 val = 0;
+        u32 val = 0;
         int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
 
         for (i = 0; i < 32; i++) {
@@ -798,7 +798,7 @@ u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
 }
 
 void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
-                                    const u64 val)
+                                    const u32 val)
 {
         int i;
         int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
...
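
The u64-to-u32 change matches what the ABI actually moves: one 32bit bitmap per access, bit i carrying the latched level of interrupt intid+i, with intid 32-aligned. A sketch (demo_line_level is illustrative only, not part of the patch):

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: test one interrupt's level in the 32bit bitmap
 * returned by vgic_read_irq_line_level_info(). */
static bool demo_line_level(u32 bitmap, unsigned int i)
{
        return bitmap & BIT(i);         /* i in [0, 31] */
}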
@@ -207,10 +207,10 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                  bool is_write, int offset, u32 *val);
 
-u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
+u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
 void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
-                                    const u64 val);
+                                    const u32 val);
 
 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
...
@@ -245,12 +245,11 @@ int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val);
 int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                            int offset, u32 *val);
-int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
-                                u64 id, u64 *val);
-int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
-                                 u64 *reg);
+int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr, bool is_write);
+int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
 int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
-                                    u32 intid, u64 *val);
+                                    u32 intid, u32 *val);
 int kvm_register_vgic_device(unsigned long type);
 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
...
@@ -364,7 +364,7 @@ struct vgic_cpu {
 extern struct static_key_false vgic_v2_cpuif_trap;
 extern struct static_key_false vgic_v3_cpuif_trap;
 
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
 void kvm_vgic_early_init(struct kvm *kvm);
 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
 int kvm_vgic_create(struct kvm *kvm, u32 type);
...