Commit 52110aa9 authored by Dave Martin, committed by Marc Zyngier

KVM: arm64/sve: Make register ioctl access errors more consistent

Currently, the way error codes are generated when processing the
SVE register access ioctls is a bit haphazard.

This patch refactors the code so that the behaviour is more
consistent: now, -EINVAL should be returned only for unrecognised
register IDs or when some other runtime error occurs.  -ENOENT is
returned for register IDs that are recognised, but whose
corresponding register (or slice) does not exist for the vcpu.

To this end, in {get,set}_sve_reg() we now delegate the
vcpu_has_sve() check down into {get,set}_sve_vls() and
sve_reg_to_region().  The KVM_REG_ARM64_SVE_VLS special case is
picked off first, then sve_reg_to_region() plays the role of
exhaustively validating or rejecting the register ID and (where
accepted) computing the applicable register region as before.

sve_reg_to_region() is rearranged so that -ENOENT or -EPERM is not
returned prematurely, before checking whether reg->id is in a
recognised range.

-EPERM is now only returned when an attempt is made to access an
actually existing register slice on an unfinalized vcpu.

Fixes: e1c9c983 ("KVM: arm64/sve: Add SVE support to register access ioctl interface")
Fixes: 9033bba4 ("KVM: arm64/sve: Add pseudo-register for the guest's vector lengths")
Suggested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent f8d4635a
...@@ -221,6 +221,9 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) ...@@ -221,6 +221,9 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
unsigned int max_vq, vq; unsigned int max_vq, vq;
u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)]; u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
if (!vcpu_has_sve(vcpu))
return -ENOENT;
if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl))) if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
return -EINVAL; return -EINVAL;
...@@ -242,6 +245,9 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) ...@@ -242,6 +245,9 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
unsigned int max_vq, vq; unsigned int max_vq, vq;
u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)]; u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
if (!vcpu_has_sve(vcpu))
return -ENOENT;
if (kvm_arm_vcpu_sve_finalized(vcpu)) if (kvm_arm_vcpu_sve_finalized(vcpu))
return -EPERM; /* too late! */ return -EPERM; /* too late! */
...@@ -304,7 +310,10 @@ struct sve_state_reg_region { ...@@ -304,7 +310,10 @@ struct sve_state_reg_region {
unsigned int upad; /* extra trailing padding in user memory */ unsigned int upad; /* extra trailing padding in user memory */
}; };
/* Get sanitised bounds for user/kernel SVE register copy */ /*
* Validate SVE register ID and get sanitised bounds for user/kernel SVE
* register copy
*/
static int sve_reg_to_region(struct sve_state_reg_region *region, static int sve_reg_to_region(struct sve_state_reg_region *region,
struct kvm_vcpu *vcpu, struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg) const struct kvm_one_reg *reg)
...@@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region, ...@@ -335,25 +344,30 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
/* Verify that we match the UAPI header: */ /* Verify that we match the UAPI header: */
BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES); BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
if ((reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT; reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) { if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) - reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET; SVE_SIG_REGS_OFFSET;
reqlen = KVM_SVE_ZREG_SIZE; reqlen = KVM_SVE_ZREG_SIZE;
maxlen = SVE_SIG_ZREG_SIZE(vq); maxlen = SVE_SIG_ZREG_SIZE(vq);
} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) { } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) - reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET; SVE_SIG_REGS_OFFSET;
reqlen = KVM_SVE_PREG_SIZE; reqlen = KVM_SVE_PREG_SIZE;
maxlen = SVE_SIG_PREG_SIZE(vq); maxlen = SVE_SIG_PREG_SIZE(vq);
} else { } else {
return -ENOENT; return -EINVAL;
} }
sve_state_size = vcpu_sve_state_size(vcpu); sve_state_size = vcpu_sve_state_size(vcpu);
...@@ -369,24 +383,22 @@ static int sve_reg_to_region(struct sve_state_reg_region *region, ...@@ -369,24 +383,22 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{ {
int ret;
struct sve_state_reg_region region; struct sve_state_reg_region region;
char __user *uptr = (char __user *)reg->addr; char __user *uptr = (char __user *)reg->addr;
if (!vcpu_has_sve(vcpu))
return -ENOENT;
/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
if (reg->id == KVM_REG_ARM64_SVE_VLS) if (reg->id == KVM_REG_ARM64_SVE_VLS)
return get_sve_vls(vcpu, reg); return get_sve_vls(vcpu, reg);
/* Otherwise, reg is an architectural SVE register... */ /* Try to interpret reg ID as an architectural SVE register... */
ret = sve_reg_to_region(&region, vcpu, reg);
if (ret)
return ret;
if (!kvm_arm_vcpu_sve_finalized(vcpu)) if (!kvm_arm_vcpu_sve_finalized(vcpu))
return -EPERM; return -EPERM;
if (sve_reg_to_region(&region, vcpu, reg))
return -ENOENT;
if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset, if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
region.klen) || region.klen) ||
clear_user(uptr + region.klen, region.upad)) clear_user(uptr + region.klen, region.upad))
...@@ -397,24 +409,22 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) ...@@ -397,24 +409,22 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{ {
int ret;
struct sve_state_reg_region region; struct sve_state_reg_region region;
const char __user *uptr = (const char __user *)reg->addr; const char __user *uptr = (const char __user *)reg->addr;
if (!vcpu_has_sve(vcpu))
return -ENOENT;
/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
if (reg->id == KVM_REG_ARM64_SVE_VLS) if (reg->id == KVM_REG_ARM64_SVE_VLS)
return set_sve_vls(vcpu, reg); return set_sve_vls(vcpu, reg);
/* Otherwise, reg is an architectural SVE register... */ /* Try to interpret reg ID as an architectural SVE register... */
ret = sve_reg_to_region(&region, vcpu, reg);
if (ret)
return ret;
if (!kvm_arm_vcpu_sve_finalized(vcpu)) if (!kvm_arm_vcpu_sve_finalized(vcpu))
return -EPERM; return -EPERM;
if (sve_reg_to_region(&region, vcpu, reg))
return -ENOENT;
if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr, if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
region.klen)) region.klen))
return -EFAULT; return -EFAULT;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment