Commit 5861d1e8 authored by Dave Marchevsky, committed by Alexei Starovoitov

bpf: Allow bpf_spin_{lock,unlock} in sleepable progs

Commit 9e7a4d98 ("bpf: Allow LSM programs to use bpf spin locks")
disabled bpf_spin_lock usage in sleepable progs, stating:

 Sleepable LSM programs can be preempted which means that allowing spin
 locks will need more work (disabling preemption and the verifier
 ensuring that no sleepable helpers are called when a spin lock is
 held).

This patch disables preemption before grabbing bpf_spin_lock. The second
requirement above, "no sleepable helpers are called when a spin lock is
held", is already enforced implicitly by current verifier logic: helper
calls inside a spin_lock critical section are disallowed, aside from a few
exceptions, none of which can sleep.
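
For illustration only, a minimal sketch of the kind of sleepable program
this change permits. The map, value struct, and lsm.s hook names below are
assumptions made for the example, not part of this patch:

/* Hypothetical sleepable LSM program. With this patch the verifier accepts
 * the bpf_spin_lock()/bpf_spin_unlock() pair, and the kernel keeps
 * preemption disabled for the duration of the critical section. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct val_t {
	struct bpf_spin_lock lock;
	__u64 cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val_t);
} counters SEC(".maps");

SEC("lsm.s/file_open")	/* sleepable LSM hook */
int BPF_PROG(count_file_open, struct file *file)
{
	__u32 key = 0;
	struct val_t *val = bpf_map_lookup_elem(&counters, &key);

	if (!val)
		return 0;
	bpf_spin_lock(&val->lock);	/* previously rejected for sleepable progs */
	val->cnt++;
	bpf_spin_unlock(&val->lock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";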

Due to the above preemption changes, a bpf_spin_lock critical section can
also be considered an RCU critical section, so the verifier's in_rcu_cs
check is modified to account for this.
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230821193311.3290257-7-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 0816b8c6
@@ -286,6 +286,7 @@ static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
+	preempt_disable();
 	arch_spin_lock(l);
 }
@@ -294,6 +295,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
 	arch_spinlock_t *l = (void *)lock;
 	arch_spin_unlock(l);
+	preempt_enable();
 }
 #else
...
@@ -5064,7 +5064,9 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
  */
 static bool in_rcu_cs(struct bpf_verifier_env *env)
 {
-	return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable;
+	return env->cur_state->active_rcu_lock ||
+	       env->cur_state->active_lock.ptr ||
+	       !env->prog->aux->sleepable;
 }
 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
@@ -16975,11 +16977,6 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
 			return -EINVAL;
 		}
-		if (prog->aux->sleepable) {
-			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
-			return -EINVAL;
-		}
 	}
 	if (btf_record_has_field(map->record, BPF_TIMER)) {
...