Commit 468e2f64 authored by Alexei Starovoitov, committed by David S. Miller

bpf: introduce BPF_PROG_QUERY command

introduce the BPF_PROG_QUERY command to retrieve either the set of
programs attached to a given cgroup or the set of effective programs
that will execute for events within that cgroup
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Martin KaFai Lau <kafai@fb.com>
for cgroup bits
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 324bda9e
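
For context, a minimal userspace sketch of the new command (not part of the
commit; the sys_bpf() wrapper and the cgroup path are illustrative
assumptions, <linux/bpf.h> is assumed to already contain this patch, and
error handling is trimmed):

#include <linux/bpf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* thin wrapper; this patch predates any libbpf helper for BPF_PROG_QUERY */
static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;
	__u32 ids[64];
	int cg_fd;

	/* hypothetical cgroup v2 directory; adjust to your hierarchy */
	cg_fd = open("/sys/fs/cgroup/unified/foo", O_RDONLY);
	if (cg_fd < 0)
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cg_fd;
	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE; /* effective, not just attached */
	attr.query.prog_ids = (__u64)(unsigned long)ids;
	attr.query.prog_cnt = 64;

	if (sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr))) {
		perror("BPF_PROG_QUERY"); /* ENOSPC: more than 64 programs */
		return 1;
	}

	/* the kernel wrote back attach_flags and the program count */
	printf("attach_flags 0x%x, %u prog(s)\n",
	       attr.query.attach_flags, attr.query.prog_cnt);
	for (__u32 i = 0; i < attr.query.prog_cnt; i++)
		printf("id %u\n", ids[i]);
	return 0;
}

Passing prog_cnt == 0 (or a NULL prog_ids) turns the call into a count-only
probe, as the __cgroup_bpf_query() hunk below shows.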
@@ -44,12 +44,16 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 			enum bpf_attach_type type, u32 flags);
 int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 			enum bpf_attach_type type, u32 flags);
+int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		       union bpf_attr __user *uattr);
 
 /* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
 int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 		      enum bpf_attach_type type, u32 flags);
 int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 		      enum bpf_attach_type type, u32 flags);
+int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		     union bpf_attr __user *uattr);
 
 int __cgroup_bpf_run_filter_skb(struct sock *sk,
 				struct sk_buff *skb,
...
@@ -260,6 +260,9 @@ struct bpf_prog_array {
 struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
+int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+				__u32 __user *prog_ids, u32 cnt);
 
 #define BPF_PROG_RUN_ARRAY(array, ctx, func)	\
 	({					\
...
@@ -92,6 +92,7 @@ enum bpf_cmd {
 	BPF_PROG_GET_FD_BY_ID,
 	BPF_MAP_GET_FD_BY_ID,
 	BPF_OBJ_GET_INFO_BY_FD,
+	BPF_PROG_QUERY,
 };
 
 enum bpf_map_type {
@@ -211,6 +212,9 @@ enum bpf_attach_type {
 /* Specify numa node during map creation */
 #define BPF_F_NUMA_NODE		(1U << 2)
 
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE	(1U << 0)
+
 #define BPF_OBJ_NAME_LEN 16U
 
 union bpf_attr {
@@ -289,6 +293,15 @@ union bpf_attr {
 		__u32		info_len;
 		__aligned_u64	info;
 	} info;
+
+	struct { /* anonymous struct used by BPF_PROG_QUERY command */
+		__u32		target_fd;	/* container object to query */
+		__u32		attach_type;
+		__u32		query_flags;
+		__u32		attach_flags;
+		__aligned_u64	prog_ids;
+		__u32		prog_cnt;
+	} query;
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
...
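Of the fields above, target_fd, attach_type, query_flags, prog_ids and
prog_cnt are supplied by userspace; attach_flags and prog_cnt are written
back by the kernel, making prog_cnt in/out: the capacity of the prog_ids
array on entry, the number of attached (or effective) programs on return.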
@@ -384,6 +384,52 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	return err;
 }
 
+/* Must be called with cgroup_mutex held to avoid races. */
+int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		       union bpf_attr __user *uattr)
+{
+	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+	enum bpf_attach_type type = attr->query.attach_type;
+	struct list_head *progs = &cgrp->bpf.progs[type];
+	u32 flags = cgrp->bpf.flags[type];
+	int cnt, ret = 0, i;
+
+	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
+		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
+	else
+		cnt = prog_list_length(progs);
+
+	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+		return -EFAULT;
+	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
+		return -EFAULT;
+	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
+		/* return early if user requested only program count + flags */
+		return 0;
+	if (attr->query.prog_cnt < cnt) {
+		cnt = attr->query.prog_cnt;
+		ret = -ENOSPC;
+	}
+
+	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
+		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
+						   prog_ids, cnt);
+	} else {
+		struct bpf_prog_list *pl;
+		u32 id;
+
+		i = 0;
+		list_for_each_entry(pl, progs, node) {
+			id = pl->prog->aux->id;
+			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
+				return -EFAULT;
+			if (++i == cnt)
+				break;
+		}
+	}
+	return ret;
+}
+
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  * @sk: The socket sending or receiving traffic
...
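Note the contract implemented above: attach_flags and the full program count
are copied back to userspace before the buffer is touched, so even an
-ENOSPC return (buffer smaller than the count) tells the caller exactly how
many entries to allocate for a retry; prog_cnt == 0 or a NULL prog_ids
pointer makes the call a count-only probe.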
@@ -1412,6 +1412,44 @@ void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
 	kfree_rcu(progs, rcu);
 }
 
+int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+{
+	struct bpf_prog **prog;
+	u32 cnt = 0;
+
+	rcu_read_lock();
+	prog = rcu_dereference(progs)->progs;
+	for (; *prog; prog++)
+		cnt++;
+	rcu_read_unlock();
+	return cnt;
+}
+
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+				__u32 __user *prog_ids, u32 cnt)
+{
+	struct bpf_prog **prog;
+	u32 i = 0, id;
+
+	rcu_read_lock();
+	prog = rcu_dereference(progs)->progs;
+	for (; *prog; prog++) {
+		id = (*prog)->aux->id;
+		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
+			rcu_read_unlock();
+			return -EFAULT;
+		}
+		if (++i == cnt) {
+			prog++;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	if (*prog)
+		return -ENOSPC;
+	return 0;
+}
+
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
 	struct bpf_prog_aux *aux;
...
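The prog++ before the break above is deliberate: once the loop has filled
all cnt slots, *prog is non-NULL only if at least one more program follows,
which is what lets the trailing check report -ENOSPC for a truncated copy.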
@@ -1272,6 +1272,37 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 	return ret;
 }
 
+#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
+
+static int bpf_prog_query(const union bpf_attr *attr,
+			  union bpf_attr __user *uattr)
+{
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (CHECK_ATTR(BPF_PROG_QUERY))
+		return -EINVAL;
+	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
+		return -EINVAL;
+
+	switch (attr->query.attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_SOCK_OPS:
+		break;
+	default:
+		return -EINVAL;
+	}
+	cgrp = cgroup_get_from_fd(attr->query.target_fd);
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
+	ret = cgroup_bpf_query(cgrp, attr, uattr);
+	cgroup_put(cgrp);
+	return ret;
+}
 #endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

@@ -1568,6 +1599,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 	case BPF_PROG_DETACH:
 		err = bpf_prog_detach(&attr);
 		break;
+	case BPF_PROG_QUERY:
+		err = bpf_prog_query(&attr, uattr);
+		break;
 #endif
 	case BPF_PROG_TEST_RUN:
 		err = bpf_prog_test_run(&attr, uattr);
...
@@ -5761,4 +5761,14 @@ int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	mutex_unlock(&cgroup_mutex);
 	return ret;
 }
+
+int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+		     union bpf_attr __user *uattr)
+{
+	int ret;
+
+	mutex_lock(&cgroup_mutex);
+	ret = __cgroup_bpf_query(cgrp, attr, uattr);
+	mutex_unlock(&cgroup_mutex);
+	return ret;
+}
 #endif /* CONFIG_CGROUP_BPF */