Commit ae98a4a9 authored by Marc Zyngier

Merge branch kvm-arm64/sysreg-cleanup-5.20 into kvmarm-master/next

* kvm-arm64/sysreg-cleanup-5.20:
  : .
  : Long overdue cleanup of the sysreg userspace access,
  : with extra scrubbing on the vgic side of things.
  : From the cover letter:
  :
  : "Schspa Shi recently reported[1] that some of the vgic code interacting
  : with userspace was reading uninitialised stack memory, and although
  : that read wasn't used any further, it prompted me to revisit this part
  : of the code.
  :
  : Needless to say, this area of the kernel is pretty crufty, and shows a
  : bunch of issues in other parts of the KVM/arm64 infrastructure. This
  : series tries to remedy a bunch of them:
  :
  : - Sanitise the way we deal with sysregs from userspace: at the moment,
  :   each and every .set_user/.get_user callback has to implement its own
  :   userspace accesses (directly or indirectly). It'd be much better if
  :   that was centralised so that we can reason about it.
  :
  : - Enforce that all AArch64 sysregs are 64bit. Always. This was sort of
  :   implied by the code, but it took some effort to convince myself that
  :   this was actually the case.
  :
  : - Move the vgic-v3 sysreg userspace accessors to the userspace
  :   callbacks instead of hijacking the vcpu trap callback. This allows
  :   us to reuse the sysreg infrastructure.
  :
  : - Consolidate userspace accesses for GICv2, GICv3 and common code
  :   as much as possible.
  :
  : - Clean up a bunch of not-very-useful helpers, tidy up some of the code
  :   as we touch it.
  :
  : [1] https://lore.kernel.org/r/m2h740zz1i.fsf@gmail.com"
  : .
  KVM: arm64: Get rid of outdated comments
  KVM: arm64: Descope kvm_arm_sys_reg_{get,set}_reg()
  KVM: arm64: Get rid of find_reg_by_id()
  KVM: arm64: vgic: Tidy-up calls to vgic_{get,set}_common_attr()
  KVM: arm64: vgic: Consolidate userspace access for base address setting
  KVM: arm64: vgic-v2: Add helper for legacy dist/cpuif base address setting
  KVM: arm64: vgic: Use {get,put}_user() instead of copy_{from,to}_user
  KVM: arm64: vgic-v2: Consolidate userspace access for MMIO registers
  KVM: arm64: vgic-v3: Consolidate userspace access for MMIO registers
  KVM: arm64: vgic-v3: Use u32 to manage the line level from userspace
  KVM: arm64: vgic-v3: Convert userspace accessors over to FIELD_GET/FIELD_PREP
  KVM: arm64: vgic-v3: Make the userspace accessors use sysreg API
  KVM: arm64: vgic-v3: Push user access into vgic_v3_cpu_sysregs_uaccess()
  KVM: arm64: vgic-v3: Simplify vgic_v3_has_cpu_sysregs_attr()
  KVM: arm64: Get rid of reg_from/to_user()
  KVM: arm64: Consolidate sysreg userspace accesses
  KVM: arm64: Rely on index_to_param() for size checks on userspace access
  KVM: arm64: Introduce generic get_user/set_user helpers for system registers
  KVM: arm64: Reorder handling of invariant sysregs from userspace
  KVM: arm64: Add get_reg_by_id() as a sys_reg_desc retrieving helper
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents aeb7942b 4274d427
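For orientation before the diff: the core of the series is that per-register .get_user/.set_user callbacks now traffic purely in u64 values, and the single userspace access happens in one central place. A condensed sketch of that shape (simplified illustration only; the real helpers are kvm_sys_reg_get_user()/kvm_sys_reg_set_user() in the sys_regs.c hunks below):

	/* Hypothetical, cut-down descriptor mirroring struct sys_reg_desc */
	struct example_reg_desc {
		int (*get_user)(struct kvm_vcpu *vcpu,
				const struct example_reg_desc *rd, u64 *val);
		int (*set_user)(struct kvm_vcpu *vcpu,
				const struct example_reg_desc *rd, u64 val);
	};

	/* One central userspace access instead of one per callback */
	static int example_get(struct kvm_vcpu *vcpu,
			       const struct example_reg_desc *rd,
			       u64 __user *uaddr)
	{
		u64 val;
		int ret = rd->get_user(vcpu, rd, &val);

		return ret ? ret : put_user(val, uaddr);
	}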
@@ -714,8 +714,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
......
@@ -1420,18 +1420,11 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
struct kvm_arm_device_addr *dev_addr)
{
unsigned long dev_id, type;
dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
KVM_ARM_DEVICE_ID_SHIFT;
type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
KVM_ARM_DEVICE_TYPE_SHIFT;
switch (dev_id) {
switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
case KVM_ARM_DEVICE_VGIC_V2:
if (!vgic_present)
return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
default:
return -ENODEV;
}
......
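The hunk above shows a conversion repeated throughout the series: open-coded mask-and-shift pairs are replaced by FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask at compile time. A minimal illustration with a hypothetical field (EXAMPLE_MASK is not a real kernel symbol):

	#include <linux/bitfield.h>

	#define EXAMPLE_MASK	GENMASK(31, 16)		/* hypothetical 16-bit field */

	u32 field = FIELD_GET(EXAMPLE_MASK, reg);	/* (reg & EXAMPLE_MASK) >> 16 */
	u32 out   = FIELD_PREP(EXAMPLE_MASK, field);	/* (field << 16) & EXAMPLE_MASK */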
@@ -34,18 +34,11 @@
#include "trace.h"
/*
* All of this file is extremely similar to the ARM coproc.c, but the
* types are different. My gut feeling is that it should be pretty
* easy to merge, but that would be an ABI breakage -- again. VFP
* would also need to be abstracted.
*
* For AArch32, we only take care of what is being trapped. Anything
* that has to do with init and userspace access has to go via the
* 64bit interface.
*/
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
@@ -321,16 +314,8 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
}
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
u64 id = sys_reg_to_index(rd);
u64 val;
int err;
err = reg_from_user(&val, uaddr, id);
if (err)
return err;
/*
* The only modifiable bit is the OSLK bit. Refuse the write if
* userspace attempts to change any other bit in the register.
@@ -451,22 +436,16 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
}
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
return 0;
}
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 *val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
return 0;
}
@@ -493,23 +472,16 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
}
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
return 0;
}
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 *val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
return 0;
}
@@ -537,22 +509,16 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
}
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
return 0;
}
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 *val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
return 0;
}
@@ -579,22 +545,16 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
}
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
return 0;
}
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 *val)
{
__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
return 0;
}
@@ -1227,16 +1187,9 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
const u64 id = sys_reg_to_index(rd);
u8 csv2, csv3;
int err;
u64 val;
err = reg_from_user(&val, uaddr, id);
if (err)
return err;
/*
* Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
@@ -1262,7 +1215,7 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
return -EINVAL;
vcpu->kvm->arch.pfr0_csv2 = csv2;
vcpu->kvm->arch.pfr0_csv3 = csv3 ;
vcpu->kvm->arch.pfr0_csv3 = csv3;
return 0;
}
@@ -1275,27 +1228,17 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
* to be changed.
*/
static int __get_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, void __user *uaddr,
const struct sys_reg_desc *rd, u64 *val,
bool raz)
{
const u64 id = sys_reg_to_index(rd);
const u64 val = read_id_reg(vcpu, rd, raz);
return reg_to_user(uaddr, &val, id);
*val = read_id_reg(vcpu, rd, raz);
return 0;
}
static int __set_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, void __user *uaddr,
const struct sys_reg_desc *rd, u64 val,
bool raz)
{
const u64 id = sys_reg_to_index(rd);
int err;
u64 val;
err = reg_from_user(&val, uaddr, id);
if (err)
return err;
/* This is what we mean by invariant: you can't change it. */
if (val != read_id_reg(vcpu, rd, raz))
return -EINVAL;
@@ -1304,47 +1247,37 @@ static int __set_id_reg(const struct kvm_vcpu *vcpu,
}
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 *val)
{
bool raz = sysreg_visible_as_raz(vcpu, rd);
return __get_id_reg(vcpu, rd, uaddr, raz);
return __get_id_reg(vcpu, rd, val, raz);
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
bool raz = sysreg_visible_as_raz(vcpu, rd);
return __set_id_reg(vcpu, rd, uaddr, raz);
return __set_id_reg(vcpu, rd, val, raz);
}
static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
return __set_id_reg(vcpu, rd, uaddr, true);
return __set_id_reg(vcpu, rd, val, true);
}
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 *val)
{
const u64 id = sys_reg_to_index(rd);
const u64 val = 0;
return reg_to_user(uaddr, &val, id);
*val = 0;
return 0;
}
static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
u64 val)
{
int err;
u64 val;
/* Perform the access even if we are going to ignore the value */
err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
if (err)
return err;
return 0;
}
@@ -2639,35 +2572,34 @@ static bool index_to_params(u64 id, struct sys_reg_params *params)
}
}
const struct sys_reg_desc *find_reg_by_id(u64 id,
struct sys_reg_params *params,
const struct sys_reg_desc table[],
unsigned int num)
const struct sys_reg_desc *get_reg_by_id(u64 id,
const struct sys_reg_desc table[],
unsigned int num)
{
if (!index_to_params(id, params))
struct sys_reg_params params;
if (!index_to_params(id, &params))
return NULL;
return find_reg(params, table, num);
return find_reg(&params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
u64 id)
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
const struct sys_reg_desc table[], unsigned int num)
{
const struct sys_reg_desc *r;
struct sys_reg_params params;
/* We only do sys_reg for now. */
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
return NULL;
if (!index_to_params(id, &params))
return NULL;
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
r = get_reg_by_id(id, table, num);
/* Not saved in the sys_reg array and not otherwise accessible? */
if (r && !(r->reg || r->get_user))
if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
r = NULL;
return r;
@@ -2707,48 +2639,30 @@ static struct sys_reg_desc invariant_sys_regs[] = {
{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
struct sys_reg_params params;
const struct sys_reg_desc *r;
r = find_reg_by_id(id, &params, invariant_sys_regs,
ARRAY_SIZE(invariant_sys_regs));
r = get_reg_by_id(id, invariant_sys_regs,
ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
return reg_to_user(uaddr, &r->val, id);
return put_user(r->val, uaddr);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
struct sys_reg_params params;
const struct sys_reg_desc *r;
int err;
u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
u64 val;
r = find_reg_by_id(id, &params, invariant_sys_regs,
ARRAY_SIZE(invariant_sys_regs));
r = get_reg_by_id(id, invariant_sys_regs,
ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
err = reg_from_user(&val, uaddr, id);
if (err)
return err;
if (get_user(val, uaddr))
return -EFAULT;
/* This is what we mean by invariant: you can't change it. */
if (r->val != val)
@@ -2839,54 +2753,86 @@ static int demux_c15_set(u64 id, void __user *uaddr)
}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num)
{
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
const struct sys_reg_desc *r;
u64 val;
int ret;
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
if (!r)
return -ENOENT;
if (r->get_user) {
ret = (r->get_user)(vcpu, r, &val);
} else {
val = __vcpu_sys_reg(vcpu, r->reg);
ret = 0;
}
if (!ret)
ret = put_user(val, uaddr);
return ret;
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_get(reg->id, uaddr);
if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
return -ENOENT;
err = get_invariant_sys_reg(reg->id, uaddr);
if (err != -ENOENT)
return err;
r = index_to_sys_reg_desc(vcpu, reg->id);
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
return kvm_sys_reg_get_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
/* Check for regs disabled by runtime config */
if (sysreg_hidden(vcpu, r))
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num)
{
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
const struct sys_reg_desc *r;
u64 val;
int ret;
if (get_user(val, uaddr))
return -EFAULT;
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
if (!r)
return -ENOENT;
if (r->get_user)
return (r->get_user)(vcpu, r, reg, uaddr);
if (r->set_user) {
ret = (r->set_user)(vcpu, r, val);
} else {
__vcpu_sys_reg(vcpu, r->reg) = val;
ret = 0;
}
return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
return ret;
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
const struct sys_reg_desc *r;
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_set(reg->id, uaddr);
if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
return -ENOENT;
r = index_to_sys_reg_desc(vcpu, reg->id);
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
/* Check for regs disabled by runtime config */
if (sysreg_hidden(vcpu, r))
return -ENOENT;
if (r->set_user)
return (r->set_user)(vcpu, r, reg, uaddr);
err = set_invariant_sys_reg(reg->id, uaddr);
if (err != -ENOENT)
return err;
return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
return kvm_sys_reg_set_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
static unsigned int num_demux_regs(void)
......
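With every AArch64 sysreg now fixed at 64 bits, the size-parameterised reg_from_user()/reg_to_user() wrappers removed above have nothing left to parameterise and collapse into plain get_user()/put_user(), which are additionally type-checked against the user pointer. Side by side, as a sketch:

	u64 val;

	/* before: copy size derived from the register encoding */
	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(id)))
		return -EFAULT;

	/* after: fixed 64-bit, type-checked access */
	if (get_user(val, uaddr))
		return -EFAULT;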
@@ -75,9 +75,9 @@ struct sys_reg_desc {
/* Custom get/set_user functions, fallback to generic if NULL */
int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr);
u64 *val);
int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr);
u64 val);
/* Return mask of REG_* runtime visibility overrides */
unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
@@ -190,10 +190,16 @@ find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
const struct sys_reg_desc *find_reg_by_id(u64 id,
struct sys_reg_params *params,
const struct sys_reg_desc table[],
unsigned int num);
const struct sys_reg_desc *get_reg_by_id(u64 id,
const struct sys_reg_desc table[],
unsigned int num);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num);
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num);
#define AA32(_x) .aarch32_map = AA32_##_x
#define Op0(_x) .Op0 = _x
......
@@ -10,293 +10,357 @@
#include "vgic/vgic.h"
#include "sys_regs.h"
static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
/*
* Disallow restoring VM state if not supported by this
* hardware.
*/
host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
return -EINVAL;
vgic_v3_cpu->num_pri_bits = host_pri_bits;
host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
if (host_id_bits > vgic_v3_cpu->num_id_bits)
return -EINVAL;
vgic_v3_cpu->num_id_bits = host_id_bits;
host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
if (host_seis != seis)
return -EINVAL;
host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
if (host_a3v != a3v)
return -EINVAL;
/*
* Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
* The vgic_set_vmcr() will convert to ICH_VMCR layout.
*/
vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
vgic_set_vmcr(vcpu, &vmcr);
return 0;
}
static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *valp)
{
struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
struct vgic_vmcr vmcr;
u64 val;
vgic_get_vmcr(vcpu, &vmcr);
if (p->is_write) {
val = p->regval;
/*
* Disallow restoring VM state if not supported by this
* hardware.
*/
host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
return false;
vgic_v3_cpu->num_pri_bits = host_pri_bits;
host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
ICC_CTLR_EL1_ID_BITS_SHIFT;
if (host_id_bits > vgic_v3_cpu->num_id_bits)
return false;
vgic_v3_cpu->num_id_bits = host_id_bits;
host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
ICC_CTLR_EL1_SEIS_SHIFT;
if (host_seis != seis)
return false;
host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
if (host_a3v != a3v)
return false;
/*
* Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
* The vgic_set_vmcr() will convert to ICH_VMCR layout.
*/
vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
val = 0;
val |= (vgic_v3_cpu->num_pri_bits - 1) <<
ICC_CTLR_EL1_PRI_BITS_SHIFT;
val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
val |= ((kvm_vgic_global_state.ich_vtr_el2 &
ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
ICC_CTLR_EL1_SEIS_SHIFT;
val |= ((kvm_vgic_global_state.ich_vtr_el2 &
ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
ICC_CTLR_EL1_A3V_SHIFT;
/*
* The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
* Extract it directly using ICC_CTLR_EL1 reg definitions.
*/
val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
p->regval = val;
}
val = 0;
val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
FIELD_GET(ICH_VTR_SEIS_MASK,
kvm_vgic_global_state.ich_vtr_el2));
val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
/*
* The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
* Extract it directly using ICC_CTLR_EL1 reg definitions.
*/
val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);
*valp = val;
return true;
return 0;
}
static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
if (p->is_write) {
vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
}
vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
vgic_set_vmcr(vcpu, &vmcr);
return true;
return 0;
}
static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
if (p->is_write) {
vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
ICC_BPR0_EL1_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
ICC_BPR0_EL1_MASK;
}
*val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);
return true;
return 0;
}
static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
struct vgic_vmcr vmcr;
if (!p->is_write)
p->regval = 0;
vgic_get_vmcr(vcpu, &vmcr);
vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
vgic_set_vmcr(vcpu, &vmcr);
return 0;
}
static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
if (!vmcr.cbpr) {
if (p->is_write) {
vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
ICC_BPR1_EL1_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
ICC_BPR1_EL1_MASK;
}
} else {
if (!p->is_write)
p->regval = min((vmcr.bpr + 1), 7U);
}
*val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);
return true;
return 0;
}
static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
if (p->is_write) {
vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
ICC_IGRPEN0_EL1_SHIFT;
if (!vmcr.cbpr) {
vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
vgic_set_vmcr(vcpu, &vmcr);
} else {
p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
ICC_IGRPEN0_EL1_MASK;
}
return true;
return 0;
}
static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
if (p->is_write) {
vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
ICC_IGRPEN1_EL1_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
ICC_IGRPEN1_EL1_MASK;
}
if (!vmcr.cbpr)
*val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
else
*val = min((vmcr.bpr + 1), 7U);
return 0;
}
static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
vgic_set_vmcr(vcpu, &vmcr);
return 0;
}
static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
*val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);
return true;
return 0;
}
static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
struct sys_reg_params *p, u8 apr, u8 idx)
static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
vgic_set_vmcr(vcpu, &vmcr);
return 0;
}
static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
struct vgic_vmcr vmcr;
vgic_get_vmcr(vcpu, &vmcr);
*val = FIELD_GET(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);
return 0;
}
static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
uint32_t *ap_reg;
if (apr)
ap_reg = &vgicv3->vgic_ap1r[idx];
vgicv3->vgic_ap1r[idx] = val;
else
ap_reg = &vgicv3->vgic_ap0r[idx];
vgicv3->vgic_ap0r[idx] = val;
}
static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
if (p->is_write)
*ap_reg = p->regval;
if (apr)
return vgicv3->vgic_ap1r[idx];
else
p->regval = *ap_reg;
return vgicv3->vgic_ap0r[idx];
}
static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
u8 idx = r->Op2 & 3;
if (idx > vgic_v3_max_apr_idx(vcpu))
return -EINVAL;
set_apr_reg(vcpu, val, 0, idx);
return 0;
}
static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r, u8 apr)
static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
u8 idx = r->Op2 & 3;
if (idx > vgic_v3_max_apr_idx(vcpu))
goto err;
return -EINVAL;
vgic_v3_access_apr_reg(vcpu, p, apr, idx);
return true;
err:
if (!p->is_write)
p->regval = 0;
*val = get_apr_reg(vcpu, 0, idx);
return false;
return 0;
}
static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
u8 idx = r->Op2 & 3;
if (idx > vgic_v3_max_apr_idx(vcpu))
return -EINVAL;
set_apr_reg(vcpu, val, 1, idx);
return 0;
}
static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
return access_gic_aprn(vcpu, p, r, 0);
u8 idx = r->Op2 & 3;
if (idx > vgic_v3_max_apr_idx(vcpu))
return -EINVAL;
*val = get_apr_reg(vcpu, 1, idx);
return 0;
}
static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 val)
{
return access_gic_aprn(vcpu, p, r, 1);
/* Validate SRE bit */
if (!(val & ICC_SRE_EL1_SRE))
return -EINVAL;
return 0;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
/* Validate SRE bit */
if (p->is_write) {
if (!(p->regval & ICC_SRE_EL1_SRE))
return false;
} else {
p->regval = vgicv3->vgic_sre;
}
*val = vgicv3->vgic_sre;
return true;
return 0;
}
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
{ SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
{ SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
{ SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
{ SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
{ SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
{ SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
{ SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
{ SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
{ SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
{ SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
{ SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
{ SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
{ SYS_DESC(SYS_ICC_PMR_EL1),
.set_user = set_gic_pmr, .get_user = get_gic_pmr, },
{ SYS_DESC(SYS_ICC_BPR0_EL1),
.set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
{ SYS_DESC(SYS_ICC_AP0R0_EL1),
.set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
{ SYS_DESC(SYS_ICC_AP0R1_EL1),
.set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
{ SYS_DESC(SYS_ICC_AP0R2_EL1),
.set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
{ SYS_DESC(SYS_ICC_AP0R3_EL1),
.set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
{ SYS_DESC(SYS_ICC_AP1R0_EL1),
.set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
{ SYS_DESC(SYS_ICC_AP1R1_EL1),
.set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
{ SYS_DESC(SYS_ICC_AP1R2_EL1),
.set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
{ SYS_DESC(SYS_ICC_AP1R3_EL1),
.set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
{ SYS_DESC(SYS_ICC_BPR1_EL1),
.set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
{ SYS_DESC(SYS_ICC_CTLR_EL1),
.set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
{ SYS_DESC(SYS_ICC_SRE_EL1),
.set_user = set_gic_sre, .get_user = get_gic_sre, },
{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
.set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
.set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
};
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
u64 *reg)
static u64 attr_to_id(u64 attr)
{
struct sys_reg_params params;
u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
params.regval = *reg;
params.is_write = is_write;
return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
}
if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
ARRAY_SIZE(gic_v3_icc_reg_descs)))
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
ARRAY_SIZE(gic_v3_icc_reg_descs)))
return 0;
return -ENXIO;
}
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
u64 *reg)
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr,
bool is_write)
{
struct sys_reg_params params;
const struct sys_reg_desc *r;
u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;
struct kvm_one_reg reg = {
.id = attr_to_id(attr->attr),
.addr = attr->addr,
};
if (is_write)
params.regval = *reg;
params.is_write = is_write;
r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
ARRAY_SIZE(gic_v3_icc_reg_descs));
if (!r)
return -ENXIO;
if (!r->access(vcpu, &params, r))
return -EINVAL;
if (!is_write)
*reg = params.regval;
return 0;
return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
ARRAY_SIZE(gic_v3_icc_reg_descs));
else
return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
ARRAY_SIZE(gic_v3_icc_reg_descs));
}
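attr_to_id() above rebuilds a standard KVM_REG_ARM64_SYSREG index from the Op0/Op1/CRn/CRm/Op2 fields of the device attribute, which is what lets the GICv3 CPU interface registers be served by the generic sysreg machinery. As a worked example, ICC_PMR_EL1 is encoded as Op0=3, Op1=0, CRn=4, CRm=6, Op2=0, so the index it yields matches what userspace builds with the uapi macro:

	/* ARM64_SYS_REG() already ORs in KVM_REG_SIZE_U64 */
	u64 id = ARM64_SYS_REG(3, 0, 4, 6, 0);	/* ICC_PMR_EL1 */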
@@ -41,11 +41,42 @@ static int vgic_check_type(struct kvm *kvm, int type_needed)
return 0;
}
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
struct vgic_dist *vgic = &kvm->arch.vgic;
int r;
mutex_lock(&kvm->lock);
switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
if (!r)
r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
SZ_4K, KVM_VGIC_V2_DIST_SIZE);
if (!r)
vgic->vgic_dist_base = dev_addr->addr;
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
if (!r)
r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
SZ_4K, KVM_VGIC_V2_CPU_SIZE);
if (!r)
vgic->vgic_cpu_base = dev_addr->addr;
break;
default:
r = -ENODEV;
}
mutex_unlock(&kvm->lock);
return r;
}
/**
* kvm_vgic_addr - set or get vgic VM base addresses
* @kvm: pointer to the vm struct
* @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
* @addr: pointer to address value
* @attr: pointer to the attribute being retrieved/updated
* @write: if true set the address in the VM address space, if false read the
* address
*
@@ -57,15 +88,22 @@ static int vgic_check_type(struct kvm *kvm, int type_needed)
* overlapping regions in case of a virtual GICv3 here, since we don't know
* the number of VCPUs yet, so we defer this check to map_resources().
*/
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
int r = 0;
u64 __user *uaddr = (u64 __user *)attr->addr;
struct vgic_dist *vgic = &kvm->arch.vgic;
phys_addr_t *addr_ptr, alignment, size;
u64 undef_value = VGIC_ADDR_UNDEF;
u64 addr;
int r;
/* Reading a redistributor region addr implies getting the index */
if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
if (get_user(addr, uaddr))
return -EFAULT;
mutex_lock(&kvm->lock);
switch (type) {
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
addr_ptr = &vgic->vgic_dist_base;
@@ -91,7 +129,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
if (r)
break;
if (write) {
r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
goto out;
}
rdreg = list_first_entry_or_null(&vgic->rd_regions,
@@ -111,14 +149,12 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
if (r)
break;
index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
if (write) {
gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
>> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
>> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;
gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);
if (!count || flags)
r = -EINVAL;
@@ -134,9 +170,9 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
goto out;
}
*addr = index;
*addr |= rdreg->base;
*addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
addr = index;
addr |= rdreg->base;
addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
goto out;
}
default:
@@ -147,15 +183,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
goto out;
if (write) {
r = vgic_check_iorange(kvm, *addr_ptr, *addr, alignment, size);
r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
if (!r)
*addr_ptr = *addr;
*addr_ptr = addr;
} else {
*addr = *addr_ptr;
addr = *addr_ptr;
}
out:
mutex_unlock(&kvm->lock);
if (!r && !write)
r = put_user(addr, uaddr);
return r;
}
@@ -165,17 +205,9 @@ static int vgic_set_common_attr(struct kvm_device *dev,
int r;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_ADDR: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 addr;
unsigned long type = (unsigned long)attr->attr;
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
r = kvm_vgic_addr(dev->kvm, type, &addr, true);
case KVM_DEV_ARM_VGIC_GRP_ADDR:
r = kvm_vgic_addr(dev->kvm, attr, true);
return (r == -ENODEV) ? -ENXIO : r;
}
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 val;
@@ -214,6 +246,24 @@ static int vgic_set_common_attr(struct kvm_device *dev,
r = vgic_init(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
/*
* OK, this one isn't common at all, but we
* want to handle all control group attributes
* in a single place.
*/
if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
return -ENXIO;
mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
r = vgic_v3_save_pending_tables(dev->kvm);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
}
break;
}
@@ -228,22 +278,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
int r = -ENXIO;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_ADDR: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 addr;
unsigned long type = (unsigned long)attr->attr;
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
r = kvm_vgic_addr(dev->kvm, type, &addr, false);
if (r)
return (r == -ENODEV) ? -ENXIO : r;
if (copy_to_user(uaddr, &addr, sizeof(addr)))
return -EFAULT;
break;
}
case KVM_DEV_ARM_VGIC_GRP_ADDR:
r = kvm_vgic_addr(dev->kvm, attr, false);
return (r == -ENODEV) ? -ENXIO : r;
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
@@ -348,17 +385,18 @@ bool lock_all_vcpus(struct kvm *kvm)
*
* @dev: kvm device handle
* @attr: kvm device attribute
* @reg: address the value is read or written
* @is_write: true if userspace is writing a register
*/
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
u32 *reg, bool is_write)
bool is_write)
{
u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
struct vgic_reg_attr reg_attr;
gpa_t addr;
struct kvm_vcpu *vcpu;
int ret;
u32 val;
ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
if (ret)
@@ -367,6 +405,10 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
vcpu = reg_attr.vcpu;
addr = reg_attr.addr;
if (is_write)
if (get_user(val, uaddr))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
ret = vgic_init(dev->kvm);
@@ -380,10 +422,10 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
break;
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
break;
default:
ret = -EINVAL;
@@ -393,57 +435,35 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write)
ret = put_user(val, uaddr);
return ret;
}
static int vgic_v2_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int ret;
ret = vgic_set_common_attr(dev, attr);
if (ret != -ENXIO)
return ret;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg;
if (get_user(reg, uaddr))
return -EFAULT;
return vgic_v2_attr_regs_access(dev, attr, &reg, true);
}
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
return vgic_v2_attr_regs_access(dev, attr, true);
default:
return vgic_set_common_attr(dev, attr);
}
return -ENXIO;
}
static int vgic_v2_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int ret;
ret = vgic_get_common_attr(dev, attr);
if (ret != -ENXIO)
return ret;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg = 0;
ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
if (ret)
return ret;
return put_user(reg, uaddr);
}
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
return vgic_v2_attr_regs_access(dev, attr, false);
default:
return vgic_get_common_attr(dev, attr);
}
return -ENXIO;
}
static int vgic_v2_has_attr(struct kvm_device *dev,
@@ -512,18 +532,18 @@ int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
*
* @dev: kvm device handle
* @attr: kvm device attribute
* @reg: address the value is read or written
* @is_write: true if userspace is writing a register
*/
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
u64 *reg, bool is_write)
bool is_write)
{
struct vgic_reg_attr reg_attr;
gpa_t addr;
struct kvm_vcpu *vcpu;
bool uaccess;
u32 val;
int ret;
u32 tmp32;
ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
if (ret)
@@ -532,6 +552,21 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
vcpu = reg_attr.vcpu;
addr = reg_attr.addr;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
/* Sysregs uaccess is performed by the sysreg handling code */
uaccess = false;
break;
default:
uaccess = true;
}
if (uaccess && is_write) {
u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
if (get_user(val, uaddr))
return -EFAULT;
}
mutex_lock(&dev->kvm->lock);
if (unlikely(!vgic_initialized(dev->kvm))) {
@@ -546,29 +581,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
if (is_write)
tmp32 = *reg;
ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
if (!is_write)
*reg = tmp32;
ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
break;
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
if (is_write)
tmp32 = *reg;
ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
if (!is_write)
*reg = tmp32;
ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
break;
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
u64 regid;
regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
regid, reg);
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
break;
}
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
unsigned int info, intid;
@@ -578,7 +598,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
intid = attr->attr &
KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
intid, reg);
intid, &val);
} else {
ret = -EINVAL;
}
@@ -592,117 +612,41 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) {
u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
ret = put_user(val, uaddr);
}
return ret;
}
static int vgic_v3_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int ret;
ret = vgic_set_common_attr(dev, attr);
if (ret != -ENXIO)
return ret;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 tmp32;
u64 reg;
if (get_user(tmp32, uaddr))
return -EFAULT;
reg = tmp32;
return vgic_v3_attr_regs_access(dev, attr, &reg, true);
}
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 reg;
if (get_user(reg, uaddr))
return -EFAULT;
return vgic_v3_attr_regs_access(dev, attr, &reg, true);
}
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u64 reg;
u32 tmp32;
if (get_user(tmp32, uaddr))
return -EFAULT;
reg = tmp32;
return vgic_v3_attr_regs_access(dev, attr, &reg, true);
}
case KVM_DEV_ARM_VGIC_GRP_CTRL: {
int ret;
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
ret = vgic_v3_save_pending_tables(dev->kvm);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
break;
}
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
return vgic_v3_attr_regs_access(dev, attr, true);
default:
return vgic_set_common_attr(dev, attr);
}
return -ENXIO;
}
static int vgic_v3_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
int ret;
ret = vgic_get_common_attr(dev, attr);
if (ret != -ENXIO)
return ret;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u64 reg;
u32 tmp32;
ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
if (ret)
return ret;
tmp32 = reg;
return put_user(tmp32, uaddr);
}
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 reg;
ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
if (ret)
return ret;
return put_user(reg, uaddr);
}
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u64 reg;
u32 tmp32;
ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
if (ret)
return ret;
tmp32 = reg;
return put_user(tmp32, uaddr);
}
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
return vgic_v3_attr_regs_access(dev, attr, false);
default:
return vgic_get_common_attr(dev, attr);
}
return -ENXIO;
}
static int vgic_v3_has_attr(struct kvm_device *dev,
......
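None of the above changes the uapi: userspace still reaches these paths through KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR on the vgic device fd. A minimal sketch of the caller side (vgic_fd is assumed to be a KVM_CREATE_DEVICE fd; error handling omitted):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_dist_base(int vgic_fd, uint64_t base)
	{
		struct kvm_device_attr attr = {
			.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
			.attr	= KVM_VGIC_V3_ADDR_TYPE_DIST,
			.addr	= (uint64_t)(uintptr_t)&base,
		};

		return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
	}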
@@ -986,12 +986,8 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
iodev.base_addr = 0;
break;
}
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
u64 reg, id;
id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
}
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
return vgic_v3_has_cpu_sysregs_attr(vcpu, attr);
default:
return -ENXIO;
}
@@ -1158,7 +1154,7 @@ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
}
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
u32 intid, u64 *val)
u32 intid, u32 *val)
{
if (intid % 32)
return -EINVAL;
......
@@ -775,10 +775,10 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
}
}
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
int i;
u64 val = 0;
u32 val = 0;
int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
for (i = 0; i < 32; i++) {
@@ -798,7 +798,7 @@ u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
}
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
const u64 val)
const u32 val)
{
int i;
int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
......
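The u64 to u32 narrowing above is not cosmetic: line-level state is exchanged 32 interrupts at a time, one bit per INTID, so only the low 32 bits ever carried information. Schematically (sketch; irq_is_level_high() stands in for the per-IRQ lookup the real code does):

	u32 val = 0;
	int i;

	for (i = 0; i < 32; i++)
		if (irq_is_level_high(vcpu, intid + i))
			val |= BIT(i);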
@@ -207,10 +207,10 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
bool is_write, int offset, u32 *val);
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
const u64 val);
const u32 val);
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
......
@@ -245,12 +245,11 @@ int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
u64 id, u64 *val);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
u64 *reg);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr, bool is_write);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
u32 intid, u64 *val);
u32 intid, u32 *val);
int kvm_register_vgic_device(unsigned long type);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
......
@@ -364,7 +364,7 @@ struct vgic_cpu {
extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
......