Commit 1e570f51 authored by Dave Martin, committed by Will Deacon

arm64/sve: Eliminate data races on sve_default_vl

sve_default_vl can be modified via the /proc/sys/abi/sve_default_vl
sysctl concurrently with use, and modified concurrently by multiple
threads.

Adding a lock for this seems like overkill, and the case doesn't warrant
thinking any harder than necessary, so just define wrappers using
READ_ONCE()/WRITE_ONCE().

This will avoid the possibility of torn accesses and repeated loads
and stores.

There's no evidence yet that this is going wrong in practice: this
is just hygiene.  For generic sysctl users, it would be better to
build this kind of thing into the sysctl common code somehow.
Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Link: https://lore.kernel.org/r/1591808590-20210-3-git-send-email-Dave.Martin@arm.com
[will: move set_sve_default_vl() inside #ifdef to squash allnoconfig warning]
Signed-off-by: Will Deacon <will@kernel.org>
parent 9ba6a9ef
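
The change amounts to funnelling every access to the default vector length through a pair of trivial accessors. As a rough, self-contained illustration of the pattern only (a userspace sketch, not the kernel code: READ_ONCE()/WRITE_ONCE() are reduced here to volatile casts standing in for the real <linux/compiler.h> macros, and main() is just a hypothetical driver):

/*
 * Userspace sketch of the accessor pattern used in this patch.
 * READ_ONCE()/WRITE_ONCE() are simplified to volatile casts; the
 * kernel's versions do more, but the idea is the same: force the
 * compiler to perform exactly one access to the variable, so the
 * value cannot be silently re-loaded or cached within a use.
 */
#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static int __sve_default_vl = -1;	/* touched only via the helpers below */

static int get_sve_default_vl(void)
{
	return READ_ONCE(__sve_default_vl);
}

static void set_sve_default_vl(int val)
{
	WRITE_ONCE(__sve_default_vl, val);
}

int main(void)
{
	set_sve_default_vl(64);		/* e.g. a boot-time default */
	printf("default VL: %d bytes\n", get_sve_default_vl());
	return 0;
}

Because every reader and writer goes through these helpers, a naturally aligned int is accessed in one piece and the compiler cannot legally reload it mid-use, which is the hygiene the message above describes.
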
arch/arm64/kernel/fpsimd.c
@@ -12,6 +12,7 @@
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
@@ -344,7 +355,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 				  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
 	struct ctl_table tmp_table = {
 		.data = &vl,
 		.maxlen = sizeof(vl),
@@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 	if (!sve_vl_valid(vl))
 		return -EINVAL;
 
-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
 	return 0;
 }
 
@@ -868,7 +879,7 @@ void __init sve_setup(void)
 	 * For the default VL, pick the maximum supported value <= 64.
 	 * VL == 64 is guaranteed not to grow the signal frame.
 	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
 
 	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
 		      SVE_VQ_MAX);
@@ -889,7 +900,7 @@ void __init sve_setup(void)
 	pr_info("SVE: maximum available vector length %u bytes per vector\n",
 		sve_max_vl);
 	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());
 
 	/* KVM decides whether to support mismatched systems. Just warn here: */
 	if (sve_max_virtualisable_vl < sve_max_vl)
@@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void)
 		 * vector length configured: no kernel task can become a user
 		 * task without an exec and hence a call to this function.
 		 * By the time the first call to this function is made, all
-		 * early hardware probing is complete, so sve_default_vl
+		 * early hardware probing is complete, so __sve_default_vl
 		 * should be valid.
 		 * If a bug causes this to go wrong, we make some noise and
 		 * try to fudge thread.sve_vl to a safe value here.
 		 */
 		vl = current->thread.sve_vl_onexec ?
-			current->thread.sve_vl_onexec : sve_default_vl;
+			current->thread.sve_vl_onexec : get_sve_default_vl();
 
 		if (WARN_ON(!sve_vl_valid(vl)))
 			vl = SVE_VL_MIN;