Commit 68932f71 authored by David S. Miller

Merge branch 'ebpf_support_for_cls_bpf'

Daniel Borkmann says:

====================
eBPF support for cls_bpf

This is the non-RFC version of my patchset posted before the netdev01
conference [1]. It contains a couple of eBPF cleanups and preparation
patches to get eBPF support into cls_bpf; the last patch adds the
actual support. I'll post the iproute2 parts after the kernel bits
are merged; an initial preview link to the code is mentioned in the
last patch.

Patches 4 and 5 were originally one patch, but I've split them into
two upon request, since patch 4 alone is also needed for Alexei's
tracing patches that go in via the tip tree.

Tested with tc and all available in-kernel BPF test suites.

I have configured and built LLVM with --enable-experimental-targets=BPF,
but as Alexei put it, the plan is to drop the experimental status in
the future [2].
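
For illustration only, a minimal restricted-C classifier that such an
LLVM build can compile with "clang -O2 -target bpf" might look as
follows. This is a sketch: the "classifier" section name, the opaque
context argument and the return-value convention (0 = no match, -1 =
keep the classid configured via TCA_BPF_CLASSID, anything else
overrides it) are assumptions drawn from cls_bpf_classify(), not
requirements of this series.

  /* cls_example.c */
  #ifndef __section
  # define __section(NAME) __attribute__((section(NAME), used))
  #endif

  /* The skb context stays opaque here; the verifier does not yet allow
   * direct skb field access for this program type. */
  __section("classifier")
  int cls_main(void *skb)
  {
          /* -1: match, keep the classid configured from user space */
          return -1;
  }

  char __license[] __section("license") = "GPL";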

Thanks a lot!

v1 -> v2:
 - Removed arch patches from this series
  - x86 is already queued in the tip tree, under x86/mm
  - arm64 was reposted directly to the arm folks
 - Rest is unchanged

  [1] http://thread.gmane.org/gmane.linux.network/350191
  [2] http://article.gmane.org/gmane.linux.kernel/1874969
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b656cc64 e2e9b654
......@@ -32,13 +32,13 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
struct bpf_map_ops *ops;
const struct bpf_map_ops *ops;
struct work_struct work;
};
struct bpf_map_type_list {
struct list_head list_node;
struct bpf_map_ops *ops;
const struct bpf_map_ops *ops;
enum bpf_map_type type;
};
......@@ -109,37 +109,47 @@ struct bpf_verifier_ops {
struct bpf_prog_type_list {
struct list_head list_node;
struct bpf_verifier_ops *ops;
const struct bpf_verifier_ops *ops;
enum bpf_prog_type type;
};
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
struct bpf_prog;
struct bpf_prog_aux {
atomic_t refcnt;
bool is_gpl_compatible;
enum bpf_prog_type prog_type;
struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
u32 used_map_cnt;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
struct work_struct work;
};
#ifdef CONFIG_BPF_SYSCALL
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_prog_put(struct bpf_prog *prog);
struct bpf_prog *bpf_prog_get(u32 ufd);
#else
static inline void bpf_prog_put(struct bpf_prog *prog) {}
static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
}
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
#endif
struct bpf_prog *bpf_prog_get(u32 ufd);
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
/* verifier prototypes for helper functions called from eBPF programs */
extern struct bpf_func_proto bpf_map_lookup_elem_proto;
extern struct bpf_func_proto bpf_map_update_elem_proto;
extern struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
#endif /* _LINUX_BPF_H */
......@@ -145,8 +145,6 @@ struct bpf_prog_aux;
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
#define BPF_PSEUDO_MAP_FD 1
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
......@@ -310,9 +308,11 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
bool gpl_compatible; /* Is our filter GPL compatible? */
u32 len; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
/* Instructions for interpreter */
......
......@@ -118,8 +118,11 @@ enum bpf_map_type {
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
BPF_PROG_TYPE_SCHED_CLS,
};
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
......
......@@ -397,6 +397,8 @@ enum {
TCA_BPF_CLASSID,
TCA_BPF_OPS_LEN,
TCA_BPF_OPS,
TCA_BPF_FD,
TCA_BPF_NAME,
__TCA_BPF_MAX,
};
......
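
To sketch how these uapi additions fit together from user space (purely
illustrative; the helper below is hypothetical, only the bpf_attr fields,
BPF_PROG_LOAD and BPF_PROG_TYPE_SCHED_CLS come from the uapi): a program
is loaded via bpf(2), and the returned fd is what tc would later carry in
the new TCA_BPF_FD attribute, with TCA_BPF_NAME as an optional annotation.

  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/bpf.h>

  /* Hypothetical helper: load insns as a SCHED_CLS program, return the fd. */
  static int load_sched_cls_prog(const struct bpf_insn *insns, unsigned int cnt)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
          attr.insns     = (uint64_t)(unsigned long)insns;
          attr.insn_cnt  = cnt;
          attr.license   = (uint64_t)(unsigned long)"GPL";

          return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
  }

  int main(void)
  {
          /* Trivial program: r0 = -1 (match, keep configured classid), exit. */
          struct bpf_insn insns[] = {
                  { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = -1 },
                  { .code = BPF_JMP | BPF_EXIT },
          };
          int fd = load_sched_cls_prog(insns, 2);

          /* The fd (not the instructions) is what would cross the tc netlink
           * interface in TCA_BPF_FD once the iproute2 side lands. */
          return fd < 0 ? 1 : 0;
  }
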
obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o hashtab.o arraymap.o helpers.o
ifdef CONFIG_TEST_BPF
obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
endif
......@@ -134,7 +134,7 @@ static void array_map_free(struct bpf_map *map)
kvfree(array);
}
static struct bpf_map_ops array_ops = {
static const struct bpf_map_ops array_ops = {
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
......@@ -143,14 +143,14 @@ static struct bpf_map_ops array_ops = {
.map_delete_elem = array_map_delete_elem,
};
static struct bpf_map_type_list tl = {
static struct bpf_map_type_list array_type __read_mostly = {
.ops = &array_ops,
.type = BPF_MAP_TYPE_ARRAY,
};
static int __init register_array_map(void)
{
bpf_register_map_type(&tl);
bpf_register_map_type(&array_type);
return 0;
}
late_initcall(register_array_map);
......@@ -345,7 +345,7 @@ static void htab_map_free(struct bpf_map *map)
kfree(htab);
}
static struct bpf_map_ops htab_ops = {
static const struct bpf_map_ops htab_ops = {
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
......@@ -354,14 +354,14 @@ static struct bpf_map_ops htab_ops = {
.map_delete_elem = htab_map_delete_elem,
};
static struct bpf_map_type_list tl = {
static struct bpf_map_type_list htab_type __read_mostly = {
.ops = &htab_ops,
.type = BPF_MAP_TYPE_HASH,
};
static int __init register_htab_map(void)
{
bpf_register_map_type(&tl);
bpf_register_map_type(&htab_type);
return 0;
}
late_initcall(register_htab_map);
......@@ -41,7 +41,7 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return (unsigned long) value;
}
struct bpf_func_proto bpf_map_lookup_elem_proto = {
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
.func = bpf_map_lookup_elem,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
......@@ -60,7 +60,7 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return map->ops->map_update_elem(map, key, value, r4);
}
struct bpf_func_proto bpf_map_update_elem_proto = {
const struct bpf_func_proto bpf_map_update_elem_proto = {
.func = bpf_map_update_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
......@@ -80,7 +80,7 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
return map->ops->map_delete_elem(map, key);
}
struct bpf_func_proto bpf_map_delete_elem_proto = {
const struct bpf_func_proto bpf_map_delete_elem_proto = {
.func = bpf_map_delete_elem,
.gpl_only = false,
.ret_type = RET_INTEGER,
......
......@@ -354,10 +354,11 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
list_for_each_entry(tl, &bpf_prog_types, list_node) {
if (tl->type == type) {
prog->aux->ops = tl->ops;
prog->aux->prog_type = type;
prog->type = type;
return 0;
}
}
return -EINVAL;
}
......@@ -418,6 +419,7 @@ void bpf_prog_put(struct bpf_prog *prog)
bpf_prog_free(prog);
}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
......@@ -465,6 +467,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
fdput(f);
return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD log_buf
......@@ -508,7 +511,7 @@ static int bpf_prog_load(union bpf_attr *attr)
prog->jited = false;
atomic_set(&prog->aux->refcnt, 1);
prog->aux->is_gpl_compatible = is_gpl;
prog->gpl_compatible = is_gpl;
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
......@@ -517,7 +520,6 @@ static int bpf_prog_load(union bpf_attr *attr)
/* run eBPF verifier */
err = bpf_check(prog, attr);
if (err < 0)
goto free_used_maps;
......@@ -528,7 +530,6 @@ static int bpf_prog_load(union bpf_attr *attr)
bpf_prog_select_runtime(prog);
err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
if (err < 0)
/* failed to allocate fd */
goto free_used_maps;
......
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/bpf.h>
/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC
* to be used by user space verifier testsuite
*/
struct bpf_context {
u64 arg1;
u64 arg2;
};
static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
case BPF_FUNC_map_update_elem:
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
default:
return NULL;
}
}
static const struct bpf_context_access {
int size;
enum bpf_access_type type;
} test_ctx_access[] = {
[offsetof(struct bpf_context, arg1)] = {
FIELD_SIZEOF(struct bpf_context, arg1),
BPF_READ
},
[offsetof(struct bpf_context, arg2)] = {
FIELD_SIZEOF(struct bpf_context, arg2),
BPF_READ
},
};
static bool test_is_valid_access(int off, int size, enum bpf_access_type type)
{
const struct bpf_context_access *access;
if (off < 0 || off >= ARRAY_SIZE(test_ctx_access))
return false;
access = &test_ctx_access[off];
if (access->size == size && (access->type & type))
return true;
return false;
}
static struct bpf_verifier_ops test_ops = {
.get_func_proto = test_func_proto,
.is_valid_access = test_is_valid_access,
};
static struct bpf_prog_type_list tl_prog = {
.ops = &test_ops,
.type = BPF_PROG_TYPE_UNSPEC,
};
static int __init register_test_ops(void)
{
bpf_register_prog_type(&tl_prog);
return 0;
}
late_initcall(register_test_ops);
......@@ -852,7 +852,7 @@ static int check_call(struct verifier_env *env, int func_id)
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) {
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose("cannot call GPL only function from proprietary program\n");
return -EINVAL;
}
......@@ -1172,6 +1172,17 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
return true;
default:
return false;
}
}
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
......@@ -1194,8 +1205,8 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
struct reg_state *reg;
int i, err;
if (env->prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
verbose("BPF_LD_ABS|IND instructions are only allowed in socket filters\n");
if (!may_access_skb(env->prog->type)) {
verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
return -EINVAL;
}
......
......@@ -814,7 +814,7 @@ static void bpf_release_orig_filter(struct bpf_prog *fp)
static void __bpf_prog_release(struct bpf_prog *prog)
{
if (prog->aux->prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
bpf_prog_put(prog);
} else {
bpf_release_orig_filter(prog);
......@@ -1093,7 +1093,6 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
#ifdef CONFIG_BPF_SYSCALL
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
......@@ -1106,8 +1105,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
if (IS_ERR(prog))
return PTR_ERR(prog);
if (prog->aux->prog_type != BPF_PROG_TYPE_SOCKET_FILTER) {
/* valid fd, but invalid program type */
if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
bpf_prog_put(prog);
return -EINVAL;
}
......@@ -1117,8 +1115,8 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
bpf_prog_put(prog);
return -ENOMEM;
}
fp->prog = prog;
fp->prog = prog;
atomic_set(&fp->refcnt, 0);
if (!sk_filter_charge(sk, fp)) {
......@@ -1136,10 +1134,8 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
return 0;
}
/* allow socket filters to call
* bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem()
*/
static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func_id)
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
......@@ -1153,34 +1149,37 @@ static const struct bpf_func_proto *sock_filter_func_proto(enum bpf_func_id func
}
}
static bool sock_filter_is_valid_access(int off, int size, enum bpf_access_type type)
static bool sk_filter_is_valid_access(int off, int size,
enum bpf_access_type type)
{
/* skb fields cannot be accessed yet */
return false;
}
static struct bpf_verifier_ops sock_filter_ops = {
.get_func_proto = sock_filter_func_proto,
.is_valid_access = sock_filter_is_valid_access,
static const struct bpf_verifier_ops sk_filter_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
};
static struct bpf_prog_type_list tl = {
.ops = &sock_filter_ops,
static struct bpf_prog_type_list sk_filter_type __read_mostly = {
.ops = &sk_filter_ops,
.type = BPF_PROG_TYPE_SOCKET_FILTER,
};
static int __init register_sock_filter_ops(void)
static struct bpf_prog_type_list sched_cls_type __read_mostly = {
.ops = &sk_filter_ops,
.type = BPF_PROG_TYPE_SCHED_CLS,
};
static int __init register_sk_filter_ops(void)
{
bpf_register_prog_type(&tl);
bpf_register_prog_type(&sk_filter_type);
bpf_register_prog_type(&sched_cls_type);
return 0;
}
late_initcall(register_sock_filter_ops);
#else
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
return -EOPNOTSUPP;
}
#endif
late_initcall(register_sk_filter_ops);
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
......
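
For context, sk_attach_bpf() above is reached via setsockopt(); from user
space, attaching an already-loaded SOCKET_FILTER program by fd looks
roughly like this (a sketch, assuming SO_ATTACH_BPF from the 3.19 uapi and
a prog_fd obtained through BPF_PROG_LOAD):

  #include <stdio.h>
  #include <sys/socket.h>
  #include <arpa/inet.h>
  #include <linux/if_ether.h>

  #ifndef SO_ATTACH_BPF
  # define SO_ATTACH_BPF 50 /* asm-generic value; missing in older libc headers */
  #endif

  /* Attach an eBPF socket filter to a raw packet socket; prog_fd must be
   * of type BPF_PROG_TYPE_SOCKET_FILTER, as enforced by sk_attach_bpf(). */
  int attach_example(int prog_fd)
  {
          int sock = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

          if (sock < 0)
                  return -1;
          if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
                         &prog_fd, sizeof(prog_fd)) < 0) {
                  perror("SO_ATTACH_BPF");
                  return -1;
          }
          return sock;
  }
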
......@@ -16,6 +16,8 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
......@@ -24,6 +26,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
#define CLS_BPF_NAME_LEN 256
struct cls_bpf_head {
struct list_head plist;
u32 hgen;
......@@ -32,18 +36,24 @@ struct cls_bpf_head {
struct cls_bpf_prog {
struct bpf_prog *filter;
struct sock_filter *bpf_ops;
struct tcf_exts exts;
struct tcf_result res;
struct list_head link;
struct tcf_result res;
struct tcf_exts exts;
u32 handle;
u16 bpf_num_ops;
union {
u32 bpf_fd;
u16 bpf_num_ops;
};
struct sock_filter *bpf_ops;
const char *bpf_name;
struct tcf_proto *tp;
struct rcu_head rcu;
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
[TCA_BPF_CLASSID] = { .type = NLA_U32 },
[TCA_BPF_FD] = { .type = NLA_U32 },
[TCA_BPF_NAME] = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
[TCA_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
......@@ -76,6 +86,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
return -1;
}
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
return !prog->bpf_ops;
}
static int cls_bpf_init(struct tcf_proto *tp)
{
struct cls_bpf_head *head;
......@@ -94,8 +109,12 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
tcf_exts_destroy(&prog->exts);
bpf_prog_destroy(prog->filter);
if (cls_bpf_is_ebpf(prog))
bpf_prog_put(prog->filter);
else
bpf_prog_destroy(prog->filter);
kfree(prog->bpf_name);
kfree(prog->bpf_ops);
kfree(prog);
}
......@@ -114,6 +133,7 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
return 0;
}
......@@ -151,69 +171,121 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
return ret;
}
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
struct cls_bpf_prog *prog,
unsigned long base, struct nlattr **tb,
struct nlattr *est, bool ovr)
static int cls_bpf_prog_from_ops(struct nlattr **tb,
struct cls_bpf_prog *prog, u32 classid)
{
struct sock_filter *bpf_ops;
struct tcf_exts exts;
struct sock_fprog_kern tmp;
struct sock_fprog_kern fprog_tmp;
struct bpf_prog *fp;
u16 bpf_size, bpf_num_ops;
u32 classid;
int ret;
if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
return -EINVAL;
tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
if (ret < 0)
return ret;
classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) {
ret = -EINVAL;
goto errout;
}
if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
return -EINVAL;
bpf_size = bpf_num_ops * sizeof(*bpf_ops);
if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
ret = -EINVAL;
goto errout;
}
if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
return -EINVAL;
bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
if (bpf_ops == NULL) {
ret = -ENOMEM;
goto errout;
}
if (bpf_ops == NULL)
return -ENOMEM;
memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
tmp.len = bpf_num_ops;
tmp.filter = bpf_ops;
fprog_tmp.len = bpf_num_ops;
fprog_tmp.filter = bpf_ops;
ret = bpf_prog_create(&fp, &tmp);
if (ret)
goto errout_free;
ret = bpf_prog_create(&fp, &fprog_tmp);
if (ret < 0) {
kfree(bpf_ops);
return ret;
}
prog->bpf_num_ops = bpf_num_ops;
prog->bpf_ops = bpf_ops;
prog->bpf_num_ops = bpf_num_ops;
prog->bpf_name = NULL;
prog->filter = fp;
prog->res.classid = classid;
return 0;
}
static int cls_bpf_prog_from_efd(struct nlattr **tb,
struct cls_bpf_prog *prog, u32 classid)
{
struct bpf_prog *fp;
char *name = NULL;
u32 bpf_fd;
bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
fp = bpf_prog_get(bpf_fd);
if (IS_ERR(fp))
return PTR_ERR(fp);
if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
bpf_prog_put(fp);
return -EINVAL;
}
if (tb[TCA_BPF_NAME]) {
name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
nla_len(tb[TCA_BPF_NAME]),
GFP_KERNEL);
if (!name) {
bpf_prog_put(fp);
return -ENOMEM;
}
}
prog->bpf_ops = NULL;
prog->bpf_fd = bpf_fd;
prog->bpf_name = name;
prog->filter = fp;
prog->res.classid = classid;
return 0;
}
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
struct cls_bpf_prog *prog,
unsigned long base, struct nlattr **tb,
struct nlattr *est, bool ovr)
{
struct tcf_exts exts;
bool is_bpf, is_ebpf;
u32 classid;
int ret;
is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
is_ebpf = tb[TCA_BPF_FD];
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
!tb[TCA_BPF_CLASSID])
return -EINVAL;
tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
if (ret < 0)
return ret;
classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
cls_bpf_prog_from_efd(tb, prog, classid);
if (ret < 0) {
tcf_exts_destroy(&exts);
return ret;
}
tcf_bind_filter(tp, &prog->res, base);
tcf_exts_change(tp, &prog->exts, &exts);
return 0;
errout_free:
kfree(bpf_ops);
errout:
tcf_exts_destroy(&exts);
return ret;
}
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
......@@ -297,11 +369,43 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
return ret;
}
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
struct sk_buff *skb)
{
struct nlattr *nla;
if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
return -EMSGSIZE;
nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
sizeof(struct sock_filter));
if (nla == NULL)
return -EMSGSIZE;
memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
return 0;
}
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
struct sk_buff *skb)
{
if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
return -EMSGSIZE;
if (prog->bpf_name &&
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
return 0;
}
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *tm)
{
struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
struct nlattr *nest, *nla;
struct nlattr *nest;
int ret;
if (prog == NULL)
return skb->len;
......@@ -314,16 +418,14 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
goto nla_put_failure;
if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
goto nla_put_failure;
nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
sizeof(struct sock_filter));
if (nla == NULL)
if (cls_bpf_is_ebpf(prog))
ret = cls_bpf_dump_ebpf_info(prog, skb);
else
ret = cls_bpf_dump_bpf_info(prog, skb);
if (ret)
goto nla_put_failure;
memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
if (tcf_exts_dump(skb, &prog->exts) < 0)
goto nla_put_failure;
......
......@@ -92,7 +92,9 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
#define BPF_PSEUDO_MAP_FD 1
#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD 1
#endif
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
......
......@@ -288,7 +288,8 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
/* should be able to access R0 = *(R2 + 8) */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8),
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
......@@ -687,7 +688,7 @@ static int test(void)
}
printf("#%d %s ", i, tests[i].descr);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog,
prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
prog_len * sizeof(struct bpf_insn),
"GPL");
......