Commit dba122fb authored by Jiri Olsa, committed by Alexei Starovoitov

bpf: Add bpf_ksym_add/del functions

Separating the /proc/kallsyms add/del code and adding bpf_ksym_add/del
functions for it.

Moving the bpf_prog_ksym_node_add/del functions to __bpf_ksym_add/del
and changing their argument to a 'struct bpf_ksym' object. This way
we can call them for other BPF object types such as trampoline and
dispatcher.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200312195610.346362-10-jolsa@kernel.org
parent cbd76f8d
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -584,6 +584,9 @@ struct bpf_image {
 #define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
 bool is_bpf_image_address(unsigned long address);
 void *bpf_image_alloc(void);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
 #else
 static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -607,20 +607,29 @@ static DEFINE_SPINLOCK(bpf_lock);
 static LIST_HEAD(bpf_kallsyms);
 static struct latch_tree_root bpf_tree __cacheline_aligned;
 
-static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
+void bpf_ksym_add(struct bpf_ksym *ksym)
 {
-	WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
-	list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
-	latch_tree_insert(&aux->ksym.tnode, &bpf_tree, &bpf_tree_ops);
+	spin_lock_bh(&bpf_lock);
+	WARN_ON_ONCE(!list_empty(&ksym->lnode));
+	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
+	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
+	spin_unlock_bh(&bpf_lock);
 }
 
-static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
+static void __bpf_ksym_del(struct bpf_ksym *ksym)
 {
-	if (list_empty(&aux->ksym.lnode))
+	if (list_empty(&ksym->lnode))
 		return;
 
-	latch_tree_erase(&aux->ksym.tnode, &bpf_tree, &bpf_tree_ops);
-	list_del_rcu(&aux->ksym.lnode);
+	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
+	list_del_rcu(&ksym->lnode);
+}
+
+void bpf_ksym_del(struct bpf_ksym *ksym)
+{
+	spin_lock_bh(&bpf_lock);
+	__bpf_ksym_del(ksym);
+	spin_unlock_bh(&bpf_lock);
 }
 
 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
@@ -644,9 +653,7 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 	bpf_prog_ksym_set_name(fp);
 	fp->aux->ksym.prog = true;
 
-	spin_lock_bh(&bpf_lock);
-	bpf_prog_ksym_node_add(fp->aux);
-	spin_unlock_bh(&bpf_lock);
+	bpf_ksym_add(&fp->aux->ksym);
 }
 
 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
@@ -654,9 +661,7 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 	if (!bpf_prog_kallsyms_candidate(fp))
 		return;
 
-	spin_lock_bh(&bpf_lock);
-	bpf_prog_ksym_node_del(fp->aux);
-	spin_unlock_bh(&bpf_lock);
+	bpf_ksym_del(&fp->aux->ksym);
 }
 
 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
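
To illustrate why the helpers now take a 'struct bpf_ksym' argument, here is a minimal sketch, not part of this commit, of how some other JIT-ed object could reuse them by embedding its own struct bpf_ksym. The bpf_foo type, its image field, the one-page image size, and the symbol name format are all assumptions for illustration; the real trampoline and dispatcher callers arrive in later patches of this series.

/*
 * Illustrative sketch only -- not part of this commit.  "bpf_foo" and its
 * image handling are made up; see the follow-up patches for the real
 * trampoline/dispatcher users.
 */
#include <linux/bpf.h>
#include <linux/kallsyms.h>
#include <linux/rculist.h>

struct bpf_foo {
	void *image;			/* start of the JIT-ed code page */
	struct bpf_ksym ksym;		/* embedded kallsyms entry */
};

static void bpf_foo_ksym_add(struct bpf_foo *foo)
{
	struct bpf_ksym *ksym = &foo->ksym;

	INIT_LIST_HEAD_RCU(&ksym->lnode);
	ksym->start = (unsigned long) foo->image;
	ksym->end   = ksym->start + PAGE_SIZE;	/* assuming a one-page image */
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_foo_%lx", ksym->start);

	/* Takes bpf_lock and inserts into bpf_kallsyms and the latch tree. */
	bpf_ksym_add(ksym);
}

static void bpf_foo_ksym_del(struct bpf_foo *foo)
{
	/* Removes the entry again under bpf_lock. */
	bpf_ksym_del(&foo->ksym);
}

Since bpf_ksym_add/del take bpf_lock themselves, such callers would not need any locking of their own around the kallsyms bookkeeping.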