Commit f8e1100a authored by David S. Miller

Merge branch 'bpf-updates'

Daniel Borkmann says:

====================
BPF updates

Some minor updates to {cls,act}_bpf to retrieve routing realms
and to make skb->priority writable.

Thanks!

v1 -> v2:
 - Dropped preclassify patch for now from the series as the
   rest is pretty much independent of it
 - Rest unchanged, only rebased and already posted Acked-by's kept
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd8762be 754f1e6a
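
As a rough illustration of how the two pieces of this series fit together from a program's point of view, here is a minimal tc eBPF classifier sketch that reads the dst realm through the new BPF_FUNC_get_route_realm helper and mirrors it into the now-writable skb->priority. The helper-stub declaration style and the "classifier" section name follow common iproute2/samples/bpf loader conventions and are assumptions here; only the helper ID and the writable priority field come from this series.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

/* Helper stub resolved by the kernel at program load time; the local
 * declaration is an assumption following common loader conventions,
 * only BPF_FUNC_get_route_realm itself is defined by this series.
 */
static __u32 (*get_route_realm)(void *skb) =
	(void *) BPF_FUNC_get_route_realm;

__attribute__((section("classifier"), used))
int cls_realm(struct __sk_buff *skb)
{
	__u32 realm = get_route_realm(skb);

	/* skb->priority is writable from tc programs after this series,
	 * so the dst's tclassid (realm) can be reflected into the
	 * priority field for later queueing decisions.
	 */
	if (realm)
		skb->priority = realm;

	return TC_ACT_OK;
}

Roughly speaking, returning TC_ACT_OK assumes cls_bpf is attached in direct-action mode; in classic mode the classifier's return value is interpreted as a classid instead.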
@@ -1047,7 +1047,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
 	set_memory_ro((unsigned long)header, header->pages);
 	fp->bpf_func = (void *)ctx.target;
-	fp->jited = true;
+	fp->jited = 1;
 out:
 	kfree(ctx.offsets);
 	return;
......
@@ -744,7 +744,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
-	prog->jited = true;
+	prog->jited = 1;
 out:
 	kfree(ctx.offset);
 }
......
@@ -1251,7 +1251,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
 		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 	fp->bpf_func = (void *)ctx.target;
-	fp->jited = true;
+	fp->jited = 1;
 out:
 	kfree(ctx.offsets);
......
@@ -679,7 +679,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
 		((u64 *)image)[1] = local_paca->kernel_toc;
 #endif
 		fp->bpf_func = (void *)image;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
......
@@ -1310,7 +1310,7 @@ void bpf_int_jit_compile(struct bpf_prog *fp)
 	if (jit.prg_buf) {
 		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.prg_buf;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 free_addrs:
 	kfree(jit.addrs);
......
@@ -812,7 +812,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
 	if (image) {
 		bpf_flush_icache(image, image + proglen);
 		fp->bpf_func = (void *)image;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
......
@@ -1109,7 +1109,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
 		prog->bpf_func = (void *)image;
-		prog->jited = true;
+		prog->jited = 1;
 	}
 out:
 	kfree(addrs);
......
@@ -326,8 +326,11 @@ struct bpf_binary_header {
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
-	bool			jited;		/* Is our filter JIT'ed? */
-	bool			gpl_compatible;	/* Is our filter GPL compatible? */
+	kmemcheck_bitfield_begin(meta);
+	u16			jited:1,	/* Is our filter JIT'ed? */
+				gpl_compatible:1, /* Is filter GPL compatible? */
+				dst_needed:1;	/* Do we need dst entry? */
+	kmemcheck_bitfield_end(meta);
 	u32			len;		/* Number of filter blocks */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
......
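Since jited, gpl_compatible and dst_needed now share a single u16, the struct is bracketed with kmemcheck markers, and the allocation paths further down annotate the freshly allocated prog so kmemcheck does not flag the read-modify-write cycles of single-bit updates as uses of uninitialized memory. A minimal sketch of that pattern, with a made-up struct and allocator for illustration (the kmemcheck_* calls themselves are the same API used in the hunks below):

#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: struct foo and foo_alloc() are invented here,
 * mirroring what struct bpf_prog and bpf_prog_alloc() do in this series.
 */
struct foo {
	kmemcheck_bitfield_begin(flags);
	u16	jited:1,
		gpl_compatible:1,
		dst_needed:1;
	kmemcheck_bitfield_end(flags);
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (f) {
		/* Mark the bracketed bitfield region as initialized so
		 * kmemcheck does not warn when individual bits are
		 * updated later on.
		 */
		kmemcheck_annotate_bitfield(f, flags);
	}
	return f;
}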
@@ -280,6 +280,13 @@ enum bpf_func_id {
 	 * Return: TC_ACT_REDIRECT
 	 */
 	BPF_FUNC_redirect,
+	/**
+	 * bpf_get_route_realm(skb) - retrieve a dst's tclassid
+	 * @skb: pointer to skb
+	 * Return: realm if != 0
+	 */
+	BPF_FUNC_get_route_realm,
 	__BPF_FUNC_MAX_ID,
 };
......
@@ -82,6 +82,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	if (fp == NULL)
 		return NULL;
+	kmemcheck_annotate_bitfield(fp, meta);
 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
 	if (aux == NULL) {
 		vfree(fp);
@@ -110,6 +112,8 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 	if (fp != NULL) {
+		kmemcheck_annotate_bitfield(fp, meta);
 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 		fp->pages = size / PAGE_SIZE;
......
@@ -402,6 +402,8 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
 			 */
 			BUG_ON(!prog->aux->ops->get_func_proto);
+			if (insn->imm == BPF_FUNC_get_route_realm)
+				prog->dst_needed = 1;
 			if (insn->imm == BPF_FUNC_tail_call) {
 				/* mark bpf_tail_call as different opcode
 				 * to avoid conditional branch in
@@ -553,10 +555,10 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_prog;
 	prog->orig_prog = NULL;
-	prog->jited = false;
+	prog->jited = 0;
 	atomic_set(&prog->aux->refcnt, 1);
-	prog->gpl_compatible = is_gpl;
+	prog->gpl_compatible = is_gpl ? 1 : 0;
 	/* find program type: socket_filter vs tracing_filter */
 	err = find_prog_type(type, prog);
......
@@ -49,6 +49,7 @@
 #include <net/sch_generic.h>
 #include <net/cls_cgroup.h>
 #include <net/dst_metadata.h>
+#include <net/dst.h>
 /**
  * sk_filter - run a packet through a socket filter
@@ -1001,7 +1002,7 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
 	int err;
 	fp->bpf_func = NULL;
-	fp->jited = false;
+	fp->jited = 0;
 	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
@@ -1478,6 +1479,25 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
 	.arg1_type = ARG_PTR_TO_CTX,
 };
+static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	const struct dst_entry *dst;
+
+	dst = skb_dst((struct sk_buff *) (unsigned long) r1);
+	if (dst)
+		return dst->tclassid;
+#endif
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_get_route_realm_proto = {
+	.func = bpf_get_route_realm,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+};
 static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
@@ -1648,6 +1668,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return bpf_get_skb_set_tunnel_key_proto();
 	case BPF_FUNC_redirect:
 		return &bpf_redirect_proto;
+	case BPF_FUNC_get_route_realm:
+		return &bpf_get_route_realm_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -1699,6 +1721,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 		switch (off) {
 		case offsetof(struct __sk_buff, mark):
 		case offsetof(struct __sk_buff, tc_index):
+		case offsetof(struct __sk_buff, priority):
 		case offsetof(struct __sk_buff, cb[0]) ...
 		     offsetof(struct __sk_buff, cb[4]):
 			break;
@@ -1740,8 +1763,12 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 	case offsetof(struct __sk_buff, priority):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
-		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
-				      offsetof(struct sk_buff, priority));
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, priority));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, priority));
 		break;
 	case offsetof(struct __sk_buff, ingress_ifindex):
......
@@ -262,7 +262,8 @@ static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
 	return 0;
 }
-static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog)
+static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
+				 const struct tcf_proto *tp)
 {
 	struct bpf_prog *fp;
 	char *name = NULL;
@@ -294,6 +295,9 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog)
 	prog->bpf_name = name;
 	prog->filter = fp;
+	if (fp->dst_needed)
+		netif_keep_dst(qdisc_dev(tp->q));
 	return 0;
 }
@@ -330,7 +334,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	prog->exts_integrated = have_exts;
 	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
-		       cls_bpf_prog_from_efd(tb, prog);
+		       cls_bpf_prog_from_efd(tb, prog, tp);
 	if (ret < 0) {
 		tcf_exts_destroy(&exts);
 		return ret;
......