Commit 88f60471 authored by David Vernet's avatar David Vernet Committed by Alexei Starovoitov

selftests/bpf: Add test for bpf_cpumask_weight() kfunc

The new bpf_cpumask_weight() kfunc can be used to count the number of
bits that are set in a struct cpumask* kptr. Let's add a selftest to
verify its behavior.
Signed-off-by: David Vernet <void@manifault.com>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20231207210843.168466-3-void@manifault.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent a6de18f3
@@ -18,6 +18,7 @@ static const char * const cpumask_success_testcases[] = {
 	"test_insert_leave",
 	"test_insert_remove_release",
 	"test_global_mask_rcu",
+	"test_cpumask_weight",
 };

 static void verify_success(const char *prog_name)
@@ -54,6 +54,7 @@ bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
 void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
 u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym;
 u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;
@@ -460,6 +460,49 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
 	return 0;
 }
SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	/* A freshly created mask must have no bits set. */
	if (bpf_cpumask_weight(cast(cpumask)) != 0) {
		err = 3;
		goto out;
	}

	/* Setting one CPU should yield a weight of exactly 1. */
	bpf_cpumask_set_cpu(0, cpumask);
	if (bpf_cpumask_weight(cast(cpumask)) != 1) {
		err = 4;
		goto out;
	}

	/*
	 * Setting a second CPU should bump the weight to 2. Only check the
	 * weight if CPU 1 was actually set, so the test still passes on UP
	 * machines where no CPU beyond 0 exists.
	 */
	bpf_cpumask_set_cpu(1, cpumask);
	if (bpf_cpumask_test_cpu(1, cast(cpumask)) && bpf_cpumask_weight(cast(cpumask)) != 2) {
		err = 5;
		goto out;
	}

	/* Clearing the mask must bring the weight back down to 0. */
	bpf_cpumask_clear(cpumask);
	if (bpf_cpumask_weight(cast(cpumask)) != 0) {
		err = 6;
		goto out;
	}

out:
	bpf_cpumask_release(cpumask);
	return 0;
}
 SEC("tp_btf/task_newtask")
 __success
 int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment