Commit 85ed24da authored by Mark Brown's avatar Mark Brown Committed by Catalin Marinas

arm64/sme: Implement streaming SVE signal handling

When in streaming mode we have the same set of SVE registers as we do in
regular SVE mode with the exception of FFR and the use of the SME vector
length. Provide signal handling for these registers by taking one of the
reserved words in the SVE signal context as a flags field and defining a
flag which is set for streaming mode. When the flag is set the vector
length is set to the streaming mode vector length and we save and
restore streaming mode data. We support entering or leaving streaming
mode based on the value of the flag but do not support changing the
vector length; changing the vector length is not currently supported in
SVE signal handling.

We could instead allocate a separate record in the signal frame for the
streaming mode SVE context but this inflates the size of the maximal signal
frame required and adds complication when validating signal frames from
userspace, especially given the current structure of the code.

Any implementation of support for streaming mode vectors in signals will
have some potential for causing issues for applications that attempt to
handle SVE vectors in signals, use streaming mode but do not understand
streaming mode in their signal handling code, it is hard to identify a
case that is clearly better than any other - they all have cases where
they could cause unexpected register corruption or faults.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-19-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 40a8e87b
...@@ -190,6 +190,14 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread) ...@@ -190,6 +190,14 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
return thread_get_vl(thread, ARM64_VEC_SME); return thread_get_vl(thread, ARM64_VEC_SME);
} }
/*
 * Return the vector length currently in effect for @thread: the SME
 * (streaming) vector length when the system supports SME and the
 * thread's SVCR.SM bit is set, otherwise the regular SVE vector length.
 */
static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
{
	if (system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK))
		return thread_get_sme_vl(thread);
	else
		return thread_get_sve_vl(thread);
}
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type); unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type, void task_set_vl(struct task_struct *task, enum vec_type type,
unsigned long vl); unsigned long vl);
......
...@@ -134,9 +134,12 @@ struct extra_context { ...@@ -134,9 +134,12 @@ struct extra_context {
struct sve_context { struct sve_context {
struct _aarch64_ctx head; struct _aarch64_ctx head;
__u16 vl; __u16 vl;
__u16 __reserved[3]; __u16 flags;
__u16 __reserved[2];
}; };
#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#include <asm/sve_context.h> #include <asm/sve_context.h>
...@@ -186,9 +189,16 @@ struct sve_context { ...@@ -186,9 +189,16 @@ struct sve_context {
* sve_context.vl must equal the thread's current vector length when * sve_context.vl must equal the thread's current vector length when
* doing a sigreturn. * doing a sigreturn.
* *
* On systems with support for SME the SVE register state may reflect either
* streaming or non-streaming mode. In streaming mode the streaming mode
* vector length will be used and the flag SVE_SIG_FLAG_SM will be set in
* the flags field. It is permitted to enter or leave streaming mode in
* a signal return, applications should take care to ensure that any difference
* in vector length between the two modes is handled, including any resizing
* and movement of context blocks.
* *
* Note: for all these macros, the "vq" argument denotes the SVE * Note: for all these macros, the "vq" argument denotes the vector length
* vector length in quadwords (i.e., units of 128 bits). * in quadwords (i.e., units of 128 bits).
* *
* The correct way to obtain vq is to use sve_vq_from_vl(vl). The * The correct way to obtain vq is to use sve_vq_from_vl(vl). The
* result is valid if and only if sve_vl_valid(vl) is true. This is * result is valid if and only if sve_vl_valid(vl) is true. This is
......
...@@ -226,11 +226,17 @@ static int preserve_sve_context(struct sve_context __user *ctx) ...@@ -226,11 +226,17 @@ static int preserve_sve_context(struct sve_context __user *ctx)
{ {
int err = 0; int err = 0;
u16 reserved[ARRAY_SIZE(ctx->__reserved)]; u16 reserved[ARRAY_SIZE(ctx->__reserved)];
u16 flags = 0;
unsigned int vl = task_get_sve_vl(current); unsigned int vl = task_get_sve_vl(current);
unsigned int vq = 0; unsigned int vq = 0;
if (test_thread_flag(TIF_SVE)) if (thread_sm_enabled(&current->thread)) {
vl = task_get_sme_vl(current);
vq = sve_vq_from_vl(vl); vq = sve_vq_from_vl(vl);
flags |= SVE_SIG_FLAG_SM;
} else if (test_thread_flag(TIF_SVE)) {
vq = sve_vq_from_vl(vl);
}
memset(reserved, 0, sizeof(reserved)); memset(reserved, 0, sizeof(reserved));
...@@ -238,6 +244,7 @@ static int preserve_sve_context(struct sve_context __user *ctx) ...@@ -238,6 +244,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16), __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
&ctx->head.size, err); &ctx->head.size, err);
__put_user_error(vl, &ctx->vl, err); __put_user_error(vl, &ctx->vl, err);
__put_user_error(flags, &ctx->flags, err);
BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
...@@ -258,18 +265,28 @@ static int preserve_sve_context(struct sve_context __user *ctx) ...@@ -258,18 +265,28 @@ static int preserve_sve_context(struct sve_context __user *ctx)
static int restore_sve_fpsimd_context(struct user_ctxs *user) static int restore_sve_fpsimd_context(struct user_ctxs *user)
{ {
int err; int err;
unsigned int vq; unsigned int vl, vq;
struct user_fpsimd_state fpsimd; struct user_fpsimd_state fpsimd;
struct sve_context sve; struct sve_context sve;
if (__copy_from_user(&sve, user->sve, sizeof(sve))) if (__copy_from_user(&sve, user->sve, sizeof(sve)))
return -EFAULT; return -EFAULT;
if (sve.vl != task_get_sve_vl(current)) if (sve.flags & SVE_SIG_FLAG_SM) {
if (!system_supports_sme())
return -EINVAL;
vl = task_get_sme_vl(current);
} else {
vl = task_get_sve_vl(current);
}
if (sve.vl != vl)
return -EINVAL; return -EINVAL;
if (sve.head.size <= sizeof(*user->sve)) { if (sve.head.size <= sizeof(*user->sve)) {
clear_thread_flag(TIF_SVE); clear_thread_flag(TIF_SVE);
current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
goto fpsimd_only; goto fpsimd_only;
} }
...@@ -301,6 +318,9 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) ...@@ -301,6 +318,9 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (err) if (err)
return -EFAULT; return -EFAULT;
if (sve.flags & SVE_SIG_FLAG_SM)
current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
else
set_thread_flag(TIF_SVE); set_thread_flag(TIF_SVE);
fpsimd_only: fpsimd_only:
...@@ -393,7 +413,7 @@ static int parse_user_sigframe(struct user_ctxs *user, ...@@ -393,7 +413,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
break; break;
case SVE_MAGIC: case SVE_MAGIC:
if (!system_supports_sve()) if (!system_supports_sve() && !system_supports_sme())
goto invalid; goto invalid;
if (user->sve) if (user->sve)
...@@ -594,11 +614,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, ...@@ -594,11 +614,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
if (system_supports_sve()) { if (system_supports_sve()) {
unsigned int vq = 0; unsigned int vq = 0;
if (add_all || test_thread_flag(TIF_SVE)) { if (add_all || test_thread_flag(TIF_SVE) ||
int vl = sve_max_vl(); thread_sm_enabled(&current->thread)) {
int vl = max(sve_max_vl(), sme_max_vl());
if (!add_all) if (!add_all)
vl = task_get_sve_vl(current); vl = thread_get_cur_vl(&current->thread);
vq = sve_vq_from_vl(vl); vq = sve_vq_from_vl(vl);
} }
...@@ -649,8 +670,9 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, ...@@ -649,8 +670,9 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err); __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
} }
/* Scalable Vector Extension state, if present */ /* Scalable Vector Extension state (including streaming), if present */
if (system_supports_sve() && err == 0 && user->sve_offset) { if ((system_supports_sve() || system_supports_sme()) &&
err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx = struct sve_context __user *sve_ctx =
apply_user_offset(user, user->sve_offset); apply_user_offset(user, user->sve_offset);
err |= preserve_sve_context(sve_ctx); err |= preserve_sve_context(sve_ctx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.