Commit 7a474973 authored by Palmer Dabbelt

Merge patch series "RISC-V: hwprobe: Introduce which-cpus"

Andrew Jones <ajones@ventanamicro.com> says:

This series introduces a flag for the hwprobe syscall which effectively
reverses its behavior from getting the values of keys for a set of cpus
to getting the cpus for a set of key-value pairs.

* b4-shazam-merge:
  RISC-V: selftests: Add which-cpus hwprobe test
  RISC-V: hwprobe: Introduce which-cpus flag
  RISC-V: Move the hwprobe syscall to its own file
  RISC-V: hwprobe: Clarify cpus size parameter

Link: https://lore.kernel.org/r/20231122164700.127954-6-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents cbc91139 ef7d6abb
...@@ -12,7 +12,7 @@ is defined in <asm/hwprobe.h>::
    };

  long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
                         size_t cpusetsize, cpu_set_t *cpus,
                         unsigned int flags);

...@@ -20,12 +20,26 @@ set, and some flags. The key-value pairs are supplied with a count. Userspace
The arguments are split into three groups: an array of key-value pairs, a CPU
set, and some flags. The key-value pairs are supplied with a count. Userspace
must prepopulate the key field for each element, and the kernel will fill in the
value if the key is recognized. If a key is unknown to the kernel, its key field
will be cleared to -1, and its value set to 0. The CPU set is defined by
CPU_SET(3) with size ``cpusetsize`` bytes. For value-like keys (eg. vendor,
arch, impl), the returned value will only be valid if all CPUs in the given set
have the same value. Otherwise -1 will be returned. For boolean-like keys, the
value returned will be a logical AND of the values for the specified CPUs.
Usermode can supply NULL for ``cpus`` and 0 for ``cpusetsize`` as a shortcut for
all online CPUs. The currently supported flags are:

* :c:macro:`RISCV_HWPROBE_WHICH_CPUS`: This flag basically reverses the behavior
  of sys_riscv_hwprobe(). Instead of populating the values of keys for a given
  set of CPUs, the values of each key are given and the set of CPUs is reduced
  by sys_riscv_hwprobe() to only those which match each of the key-value pairs.
  How matching is done depends on the key type. For value-like keys, matching
  means to be the exact same as the value. For boolean-like keys, matching
  means the result of a logical AND of the pair's value with the CPU's value is
  exactly the same as the pair's value. Additionally, when ``cpus`` is an empty
  set, then it is initialized to all online CPUs which fit within it, i.e. the
  CPU set returned is the reduction of all the online CPUs which can be
  represented with a CPU set of size ``cpusetsize``.

All other flags are reserved for future compatibility and must be zero.

On success 0 is returned, on failure a negative error code is returned.
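
As a rough usage sketch (not part of this patch; it assumes a RISC-V Linux
system whose uapi headers provide the macros used below, and the choice of
Zba/Zbb is purely illustrative), userspace could ask which online CPUs
implement both Zba and Zbb like this::

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/hwprobe.h>

  int main(void)
  {
      struct riscv_hwprobe pair = {
          .key   = RISCV_HWPROBE_KEY_IMA_EXT_0,
          .value = RISCV_HWPROBE_EXT_ZBA | RISCV_HWPROBE_EXT_ZBB,
      };
      cpu_set_t cpus;
      long rc;

      /* An empty set asks the kernel to start from all online CPUs. */
      CPU_ZERO(&cpus);

      rc = syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpu_set_t),
                   &cpus, RISCV_HWPROBE_WHICH_CPUS);
      if (rc)
          return 1;

      /* On return, only CPUs having both Zba and Zbb remain set. */
      printf("%d matching CPUs\n", CPU_COUNT(&cpus));
      return 0;
  }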
......
...@@ -15,4 +15,28 @@ static inline bool riscv_hwprobe_key_is_valid(__s64 key)
	return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
}
static inline bool hwprobe_key_is_bitmask(__s64 key)
{
switch (key) {
case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
case RISCV_HWPROBE_KEY_IMA_EXT_0:
case RISCV_HWPROBE_KEY_CPUPERF_0:
return true;
}
return false;
}
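/*
 * Return true when @pair satisfies @other_pair: for bitmask keys, @pair must
 * have all of @other_pair's bits set; for value-like keys, the two values
 * must be identical.
 */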
static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
struct riscv_hwprobe *other_pair)
{
if (pair->key != other_pair->key)
return false;
if (hwprobe_key_is_bitmask(pair->key))
return (pair->value & other_pair->value) == other_pair->value;
return pair->value == other_pair->value;
}
#endif
...@@ -66,4 +66,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE	6
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
#define RISCV_HWPROBE_WHICH_CPUS (1 << 0)
#endif
...@@ -50,6 +50,7 @@ obj-y += setup.o
obj-y += signal.o
obj-y += syscall_table.o
obj-y += sys_riscv.o
obj-y += sys_hwprobe.o
obj-y += time.o
obj-y += traps.o
obj-y += riscv_ksyms.o
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* The hwprobe interface, for allowing userspace to probe to see which features
* are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
* more details.
*/
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
u64 id = -1ULL;
bool first = true;
int cpu;
for_each_cpu(cpu, cpus) {
u64 cpu_id;
switch (pair->key) {
case RISCV_HWPROBE_KEY_MVENDORID:
cpu_id = riscv_cached_mvendorid(cpu);
break;
case RISCV_HWPROBE_KEY_MIMPID:
cpu_id = riscv_cached_mimpid(cpu);
break;
case RISCV_HWPROBE_KEY_MARCHID:
cpu_id = riscv_cached_marchid(cpu);
break;
}
if (first) {
id = cpu_id;
first = false;
}
/*
* If there's a mismatch for the given set, return -1 in the
* value.
*/
if (id != cpu_id) {
id = -1ULL;
break;
}
}
pair->value = id;
}
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
int cpu;
u64 missing = 0;
pair->value = 0;
if (has_fpu())
pair->value |= RISCV_HWPROBE_IMA_FD;
if (riscv_isa_extension_available(NULL, c))
pair->value |= RISCV_HWPROBE_IMA_C;
if (has_vector())
pair->value |= RISCV_HWPROBE_IMA_V;
/*
* Loop through and record extensions that 1) anyone has, and 2) anyone
* doesn't have.
*/
for_each_cpu(cpu, cpus) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
#define EXT_KEY(ext) \
do { \
if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
pair->value |= RISCV_HWPROBE_EXT_##ext; \
else \
missing |= RISCV_HWPROBE_EXT_##ext; \
} while (false)
/*
* Only use EXT_KEY() for extensions which can be exposed to userspace,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
EXT_KEY(ZBA);
EXT_KEY(ZBB);
EXT_KEY(ZBS);
EXT_KEY(ZICBOZ);
EXT_KEY(ZBC);
EXT_KEY(ZBKB);
EXT_KEY(ZBKC);
EXT_KEY(ZBKX);
EXT_KEY(ZKND);
EXT_KEY(ZKNE);
EXT_KEY(ZKNH);
EXT_KEY(ZKSED);
EXT_KEY(ZKSH);
EXT_KEY(ZKT);
EXT_KEY(ZIHINTNTL);
if (has_vector()) {
EXT_KEY(ZVBB);
EXT_KEY(ZVBC);
EXT_KEY(ZVKB);
EXT_KEY(ZVKG);
EXT_KEY(ZVKNED);
EXT_KEY(ZVKNHA);
EXT_KEY(ZVKNHB);
EXT_KEY(ZVKSED);
EXT_KEY(ZVKSH);
EXT_KEY(ZVKT);
EXT_KEY(ZVFH);
EXT_KEY(ZVFHMIN);
}
if (has_fpu()) {
EXT_KEY(ZFH);
EXT_KEY(ZFHMIN);
EXT_KEY(ZFA);
}
#undef EXT_KEY
}
/* Now turn off reporting features if any CPU is missing it. */
pair->value &= ~missing;
}
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
struct riscv_hwprobe pair;
hwprobe_isa_ext0(&pair, cpus);
return (pair.value & ext);
}
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
int cpu;
u64 perf = -1ULL;
for_each_cpu(cpu, cpus) {
int this_perf = per_cpu(misaligned_access_speed, cpu);
if (perf == -1ULL)
perf = this_perf;
if (perf != this_perf) {
perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
break;
}
}
if (perf == -1ULL)
return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
return perf;
}
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
switch (pair->key) {
case RISCV_HWPROBE_KEY_MVENDORID:
case RISCV_HWPROBE_KEY_MARCHID:
case RISCV_HWPROBE_KEY_MIMPID:
hwprobe_arch_id(pair, cpus);
break;
/*
* The kernel already assumes that the base single-letter ISA
* extensions are supported on all harts, and only supports the
* IMA base, so just cheat a bit here and tell that to
* userspace.
*/
case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
break;
case RISCV_HWPROBE_KEY_IMA_EXT_0:
hwprobe_isa_ext0(pair, cpus);
break;
case RISCV_HWPROBE_KEY_CPUPERF_0:
pair->value = hwprobe_misaligned(cpus);
break;
case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
pair->value = 0;
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
pair->value = riscv_cboz_block_size;
break;
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
* indicating they're unrecognized.
*/
default:
pair->key = -1;
pair->value = 0;
break;
}
}
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
size_t out;
int ret;
cpumask_t cpus;
/* Check the reserved flags. */
if (flags != 0)
return -EINVAL;
/*
* The interface supports taking in a CPU mask, and returns values that
* are consistent across that mask. Allow userspace to specify NULL and
* 0 as a shortcut to all online CPUs.
*/
cpumask_clear(&cpus);
if (!cpusetsize && !cpus_user) {
cpumask_copy(&cpus, cpu_online_mask);
} else {
if (cpusetsize > cpumask_size())
cpusetsize = cpumask_size();
ret = copy_from_user(&cpus, cpus_user, cpusetsize);
if (ret)
return -EFAULT;
/*
* Userspace must provide at least one online CPU, without that
* there's no way to define what is supported.
*/
cpumask_and(&cpus, &cpus, cpu_online_mask);
if (cpumask_empty(&cpus))
return -EINVAL;
}
for (out = 0; out < pair_count; out++, pairs++) {
struct riscv_hwprobe pair;
if (get_user(pair.key, &pairs->key))
return -EFAULT;
pair.value = 0;
hwprobe_one_pair(&pair, &cpus);
ret = put_user(pair.key, &pairs->key);
if (ret == 0)
ret = put_user(pair.value, &pairs->value);
if (ret)
return -EFAULT;
}
return 0;
}
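/*
 * Handler for RISCV_HWPROBE_WHICH_CPUS: reduce the user-supplied CPU set (or
 * all online CPUs when the set is passed in empty) to those CPUs whose hwprobe
 * values satisfy every requested key-value pair. An unknown key clears the
 * whole set.
 */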
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
cpumask_t cpus, one_cpu;
bool clear_all = false;
size_t i;
int ret;
if (flags != RISCV_HWPROBE_WHICH_CPUS)
return -EINVAL;
if (!cpusetsize || !cpus_user)
return -EINVAL;
if (cpusetsize > cpumask_size())
cpusetsize = cpumask_size();
ret = copy_from_user(&cpus, cpus_user, cpusetsize);
if (ret)
return -EFAULT;
if (cpumask_empty(&cpus))
cpumask_copy(&cpus, cpu_online_mask);
cpumask_and(&cpus, &cpus, cpu_online_mask);
cpumask_clear(&one_cpu);
for (i = 0; i < pair_count; i++) {
struct riscv_hwprobe pair, tmp;
int cpu;
ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
if (ret)
return -EFAULT;
if (!riscv_hwprobe_key_is_valid(pair.key)) {
clear_all = true;
pair = (struct riscv_hwprobe){ .key = -1, };
ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
if (ret)
return -EFAULT;
}
if (clear_all)
continue;
tmp = (struct riscv_hwprobe){ .key = pair.key, };
for_each_cpu(cpu, &cpus) {
cpumask_set_cpu(cpu, &one_cpu);
hwprobe_one_pair(&tmp, &one_cpu);
if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
cpumask_clear_cpu(cpu, &cpus);
cpumask_clear_cpu(cpu, &one_cpu);
}
}
if (clear_all)
cpumask_clear(&cpus);
ret = copy_to_user(cpus_user, &cpus, cpusetsize);
if (ret)
return -EFAULT;
return 0;
}
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpusetsize,
unsigned long __user *cpus_user,
unsigned int flags)
{
if (flags & RISCV_HWPROBE_WHICH_CPUS)
return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
cpus_user, flags);
return hwprobe_get_values(pairs, pair_count, cpusetsize,
cpus_user, flags);
}
#ifdef CONFIG_MMU
static int __init init_hwprobe_vdso_data(void)
{
struct vdso_data *vd = __arch_get_k_vdso_data();
struct arch_vdso_data *avd = &vd->arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
*/
for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
pair.key = key;
hwprobe_one_pair(&pair, cpu_online_mask);
WARN_ON_ONCE(pair.key < 0);
avd->all_cpu_hwprobe_values[key] = pair.value;
/*
* Smash together the vendor, arch, and impl IDs to see if
* they're all 0 or any negative.
*/
if (key <= RISCV_HWPROBE_KEY_MIMPID)
id_bitsmash |= pair.value;
}
/*
* If the arch, vendor, and implementation ID are all the same across
* all harts, then assume all CPUs are the same, and allow the vDSO to
* answer queries for arbitrary masks. However if all values are 0 (not
* populated) or any value returns -1 (varies across CPUs), then the
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
#endif /* CONFIG_MMU */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
cpus, unsigned int, flags)
{
return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
cpus, flags);
}
...@@ -7,15 +7,7 @@
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
...@@ -77,316 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
	return 0;
}
/*
* The hwprobe interface, for allowing userspace to probe to see which features
* are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for more
* details.
*/
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
u64 id = -1ULL;
bool first = true;
int cpu;
for_each_cpu(cpu, cpus) {
u64 cpu_id;
switch (pair->key) {
case RISCV_HWPROBE_KEY_MVENDORID:
cpu_id = riscv_cached_mvendorid(cpu);
break;
case RISCV_HWPROBE_KEY_MIMPID:
cpu_id = riscv_cached_mimpid(cpu);
break;
case RISCV_HWPROBE_KEY_MARCHID:
cpu_id = riscv_cached_marchid(cpu);
break;
}
if (first) {
id = cpu_id;
first = false;
}
/*
* If there's a mismatch for the given set, return -1 in the
* value.
*/
if (id != cpu_id) {
id = -1ULL;
break;
}
}
pair->value = id;
}
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
int cpu;
u64 missing = 0;
pair->value = 0;
if (has_fpu())
pair->value |= RISCV_HWPROBE_IMA_FD;
if (riscv_isa_extension_available(NULL, c))
pair->value |= RISCV_HWPROBE_IMA_C;
if (has_vector())
pair->value |= RISCV_HWPROBE_IMA_V;
/*
* Loop through and record extensions that 1) anyone has, and 2) anyone
* doesn't have.
*/
for_each_cpu(cpu, cpus) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
#define EXT_KEY(ext) \
do { \
if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
pair->value |= RISCV_HWPROBE_EXT_##ext; \
else \
missing |= RISCV_HWPROBE_EXT_##ext; \
} while (false)
/*
* Only use EXT_KEY() for extensions which can be exposed to userspace,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
EXT_KEY(ZBA);
EXT_KEY(ZBB);
EXT_KEY(ZBS);
EXT_KEY(ZICBOZ);
EXT_KEY(ZBC);
EXT_KEY(ZBKB);
EXT_KEY(ZBKC);
EXT_KEY(ZBKX);
EXT_KEY(ZKND);
EXT_KEY(ZKNE);
EXT_KEY(ZKNH);
EXT_KEY(ZKSED);
EXT_KEY(ZKSH);
EXT_KEY(ZKT);
EXT_KEY(ZIHINTNTL);
if (has_vector()) {
EXT_KEY(ZVBB);
EXT_KEY(ZVBC);
EXT_KEY(ZVKB);
EXT_KEY(ZVKG);
EXT_KEY(ZVKNED);
EXT_KEY(ZVKNHA);
EXT_KEY(ZVKNHB);
EXT_KEY(ZVKSED);
EXT_KEY(ZVKSH);
EXT_KEY(ZVKT);
EXT_KEY(ZVFH);
EXT_KEY(ZVFHMIN);
}
if (has_fpu()) {
EXT_KEY(ZFH);
EXT_KEY(ZFHMIN);
EXT_KEY(ZFA);
}
#undef EXT_KEY
}
/* Now turn off reporting features if any CPU is missing it. */
pair->value &= ~missing;
}
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
struct riscv_hwprobe pair;
hwprobe_isa_ext0(&pair, cpus);
return (pair.value & ext);
}
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
int cpu;
u64 perf = -1ULL;
for_each_cpu(cpu, cpus) {
int this_perf = per_cpu(misaligned_access_speed, cpu);
if (perf == -1ULL)
perf = this_perf;
if (perf != this_perf) {
perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
break;
}
}
if (perf == -1ULL)
return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
return perf;
}
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
const struct cpumask *cpus)
{
switch (pair->key) {
case RISCV_HWPROBE_KEY_MVENDORID:
case RISCV_HWPROBE_KEY_MARCHID:
case RISCV_HWPROBE_KEY_MIMPID:
hwprobe_arch_id(pair, cpus);
break;
/*
* The kernel already assumes that the base single-letter ISA
* extensions are supported on all harts, and only supports the
* IMA base, so just cheat a bit here and tell that to
* userspace.
*/
case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
break;
case RISCV_HWPROBE_KEY_IMA_EXT_0:
hwprobe_isa_ext0(pair, cpus);
break;
case RISCV_HWPROBE_KEY_CPUPERF_0:
pair->value = hwprobe_misaligned(cpus);
break;
case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
pair->value = 0;
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
pair->value = riscv_cboz_block_size;
break;
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
* indicating they're unrecognized.
*/
default:
pair->key = -1;
pair->value = 0;
break;
}
}
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
size_t pair_count, size_t cpu_count,
unsigned long __user *cpus_user,
unsigned int flags)
{
size_t out;
int ret;
cpumask_t cpus;
/* Check the reserved flags. */
if (flags != 0)
return -EINVAL;
/*
* The interface supports taking in a CPU mask, and returns values that
* are consistent across that mask. Allow userspace to specify NULL and
* 0 as a shortcut to all online CPUs.
*/
cpumask_clear(&cpus);
if (!cpu_count && !cpus_user) {
cpumask_copy(&cpus, cpu_online_mask);
} else {
if (cpu_count > cpumask_size())
cpu_count = cpumask_size();
ret = copy_from_user(&cpus, cpus_user, cpu_count);
if (ret)
return -EFAULT;
/*
* Userspace must provide at least one online CPU, without that
* there's no way to define what is supported.
*/
cpumask_and(&cpus, &cpus, cpu_online_mask);
if (cpumask_empty(&cpus))
return -EINVAL;
}
for (out = 0; out < pair_count; out++, pairs++) {
struct riscv_hwprobe pair;
if (get_user(pair.key, &pairs->key))
return -EFAULT;
pair.value = 0;
hwprobe_one_pair(&pair, &cpus);
ret = put_user(pair.key, &pairs->key);
if (ret == 0)
ret = put_user(pair.value, &pairs->value);
if (ret)
return -EFAULT;
}
return 0;
}
#ifdef CONFIG_MMU
static int __init init_hwprobe_vdso_data(void)
{
struct vdso_data *vd = __arch_get_k_vdso_data();
struct arch_vdso_data *avd = &vd->arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
*/
for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
pair.key = key;
hwprobe_one_pair(&pair, cpu_online_mask);
WARN_ON_ONCE(pair.key < 0);
avd->all_cpu_hwprobe_values[key] = pair.value;
/*
* Smash together the vendor, arch, and impl IDs to see if
* they're all 0 or any negative.
*/
if (key <= RISCV_HWPROBE_KEY_MIMPID)
id_bitsmash |= pair.value;
}
/*
* If the arch, vendor, and implementation ID are all the same across
* all harts, then assume all CPUs are the same, and allow the vDSO to
* answer queries for arbitrary masks. However if all values are 0 (not
* populated) or any value returns -1 (varies across CPUs), then the
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
#endif /* CONFIG_MMU */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpu_count, unsigned long __user *,
cpus, unsigned int, flags)
{
return do_riscv_hwprobe(pairs, pair_count, cpu_count,
cpus, flags);
}
/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
{
......
...@@ -3,26 +3,22 @@
 * Copyright 2023 Rivos, Inc
 */
#include <linux/string.h>
#include <linux/types.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
			 size_t cpusetsize, unsigned long *cpus,
			 unsigned int flags);
static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
				 size_t cpusetsize, unsigned long *cpus,
				 unsigned int flags)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	const struct arch_vdso_data *avd = &vd->arch_data;
	bool all_cpus = !cpusetsize && !cpus;
	struct riscv_hwprobe *p = pairs;
	struct riscv_hwprobe *end = pairs + pair_count;
...@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
	 * masks.
	 */
	if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
		return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);

	/* This is something we can handle, fill out the pairs. */
	while (p < end) {
...@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
	return 0;
}
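/*
 * vDSO fast path for RISCV_HWPROBE_WHICH_CPUS: it is only answered locally
 * when a non-empty CPU set is supplied and the system is homogeneous, in which
 * case the cached all-CPU values decide whether the supplied set is kept or
 * cleared; everything else falls back to the syscall.
 */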
static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpusetsize, unsigned long *cpus,
unsigned int flags)
{
const struct vdso_data *vd = __arch_get_vdso_data();
const struct arch_vdso_data *avd = &vd->arch_data;
struct riscv_hwprobe *p = pairs;
struct riscv_hwprobe *end = pairs + pair_count;
unsigned char *c = (unsigned char *)cpus;
bool empty_cpus = true;
bool clear_all = false;
int i;
if (!cpusetsize || !cpus)
return -EINVAL;
for (i = 0; i < cpusetsize; i++) {
if (c[i]) {
empty_cpus = false;
break;
}
}
if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus)
return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
while (p < end) {
if (riscv_hwprobe_key_is_valid(p->key)) {
struct riscv_hwprobe t = {
.key = p->key,
.value = avd->all_cpu_hwprobe_values[p->key],
};
if (!riscv_hwprobe_pair_cmp(&t, p))
clear_all = true;
} else {
clear_all = true;
p->key = -1;
p->value = 0;
}
p++;
}
if (clear_all) {
for (i = 0; i < cpusetsize; i++)
c[i] = 0;
}
return 0;
}
/* Add a prototype to avoid -Wmissing-prototypes warning. */
int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpusetsize, unsigned long *cpus,
unsigned int flags);
int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpusetsize, unsigned long *cpus,
unsigned int flags)
{
if (flags & RISCV_HWPROBE_WHICH_CPUS)
return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize,
cpus, flags);
return riscv_vdso_get_values(pairs, pair_count, cpusetsize,
cpus, flags);
}
...@@ -4,7 +4,7 @@
CFLAGS += -I$(top_srcdir)/tools/include
TEST_GEN_PROGS := hwprobe cbo which-cpus
include ../../lib.mk
...@@ -13,3 +13,6 @@ $(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S
$(OUTPUT)/cbo: cbo.c sys_hwprobe.S
	$(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
$(OUTPUT)/which-cpus: which-cpus.c sys_hwprobe.S
$(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
...@@ -47,7 +47,7 @@ int main(int argc, char **argv)
	ksft_test_result(out != 0, "Bad CPU set\n");
	out = riscv_hwprobe(pairs, 8, 1, 0, 0);
	ksft_test_result(out != 0, "NULL CPU set with non-zero size\n");
	pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR;
	out = riscv_hwprobe(pairs, 1, 1, &cpus, 0);
......
...@@ -10,6 +10,6 @@
 * contain the call.
 */
long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
		   size_t cpusetsize, unsigned long *cpus, unsigned int flags);
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*
* Test the RISCV_HWPROBE_WHICH_CPUS flag of hwprobe. Also provides a command
* line interface to get the cpu list for arbitrary hwprobe pairs.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sched.h>
#include <unistd.h>
#include <assert.h>
#include "hwprobe.h"
#include "../../kselftest.h"
static void help(void)
{
printf("\n"
"which-cpus: [-h] [<key=value> [<key=value> ...]]\n\n"
" Without parameters, tests the RISCV_HWPROBE_WHICH_CPUS flag of hwprobe.\n"
" With parameters, where each parameter is a hwprobe pair written as\n"
" <key=value>, outputs the cpulist for cpus which all match the given set\n"
" of pairs. 'key' and 'value' should be in numeric form, e.g. 4=0x3b\n");
}
static void print_cpulist(cpu_set_t *cpus)
{
int start = 0, end = 0;
if (!CPU_COUNT(cpus)) {
printf("cpus: None\n");
return;
}
printf("cpus:");
for (int i = 0, c = 0; i < CPU_COUNT(cpus); i++, c++) {
if (start != end && !CPU_ISSET(c, cpus))
printf("-%d", end);
while (!CPU_ISSET(c, cpus))
++c;
if (i != 0 && c == end + 1) {
end = c;
continue;
}
printf("%c%d", i == 0 ? ' ' : ',', c);
start = end = c;
}
if (start != end)
printf("-%d", end);
printf("\n");
}
static void do_which_cpus(int argc, char **argv, cpu_set_t *cpus)
{
struct riscv_hwprobe *pairs;
int nr_pairs = argc - 1;
char *start, *end;
int rc;
pairs = malloc(nr_pairs * sizeof(struct riscv_hwprobe));
assert(pairs);
for (int i = 0; i < nr_pairs; i++) {
start = argv[i + 1];
pairs[i].key = strtol(start, &end, 0);
assert(end != start && *end == '=');
start = end + 1;
pairs[i].value = strtoul(start, &end, 0);
assert(end != start && *end == '\0');
}
rc = riscv_hwprobe(pairs, nr_pairs, sizeof(cpu_set_t), (unsigned long *)cpus, RISCV_HWPROBE_WHICH_CPUS);
assert(rc == 0);
print_cpulist(cpus);
free(pairs);
}
int main(int argc, char **argv)
{
struct riscv_hwprobe pairs[2];
cpu_set_t cpus_aff, cpus;
__u64 ext0_all;
long rc;
rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus_aff);
assert(rc == 0);
if (argc > 1) {
if (!strcmp(argv[1], "-h"))
help();
else
do_which_cpus(argc, argv, &cpus_aff);
return 0;
}
ksft_print_header();
ksft_set_plan(7);
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, };
rc = riscv_hwprobe(pairs, 1, 0, NULL, 0);
assert(rc == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR &&
pairs[0].value == RISCV_HWPROBE_BASE_BEHAVIOR_IMA);
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, };
rc = riscv_hwprobe(pairs, 1, 0, NULL, 0);
assert(rc == 0 && pairs[0].key == RISCV_HWPROBE_KEY_IMA_EXT_0);
ext0_all = pairs[0].value;
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
CPU_ZERO(&cpus);
rc = riscv_hwprobe(pairs, 1, 0, (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == -EINVAL, "no cpusetsize\n");
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
rc = riscv_hwprobe(pairs, 1, sizeof(cpu_set_t), NULL, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == -EINVAL, "NULL cpus\n");
pairs[0] = (struct riscv_hwprobe){ .key = 0xbadc0de, };
CPU_ZERO(&cpus);
rc = riscv_hwprobe(pairs, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == 0, "unknown key\n");
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
CPU_ZERO(&cpus);
rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == 0, "duplicate keys\n");
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ext0_all, };
CPU_ZERO(&cpus);
rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == sysconf(_SC_NPROCESSORS_ONLN), "set all cpus\n");
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ext0_all, };
memcpy(&cpus, &cpus_aff, sizeof(cpu_set_t));
rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == 0 && CPU_EQUAL(&cpus, &cpus_aff), "set all affinity cpus\n");
pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ~ext0_all, };
memcpy(&cpus, &cpus_aff, sizeof(cpu_set_t));
rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == 0, "clear all cpus\n");
ksft_finished();
}
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/prctl.h>
#include <unistd.h>
#include <asm/hwprobe.h>
#include <errno.h>
#include <sys/wait.h>
#include "../hwprobe/hwprobe.h"
#include "../../kselftest.h"
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
* contain the call.
*/
long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpu_count, unsigned long *cpus, unsigned int flags);
#define NEXT_PROGRAM "./vstate_exec_nolibc"
static int launch_test(int test_inherit)
{
......