Commit 7ae457c1 authored by Alexei Starovoitov, committed by David S. Miller

net: filter: split 'struct sk_filter' into socket and bpf parts

clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into
struct sk_filter {
	atomic_t        refcnt;
	struct rcu_head rcu;
	struct bpf_prog *prog;
};
and
struct bpf_prog {
        u32                     jited:1,
                                len:31;
        struct sock_fprog_kern  *orig_prog;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct bpf_insn *filter);
        union {
                struct sock_filter      insns[0];
                struct bpf_insn         insnsi[0];
                struct work_struct      work;
        };
};
so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases

split SK_RUN_FILTER macro into:
    SK_RUN_FILTER to be used with 'struct sk_filter *' and
    BPF_PROG_RUN to be used with 'struct bpf_prog *'

__sk_filter_release(struct sk_filter *) gains
__bpf_prog_release(struct bpf_prog *) helper function

also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:

sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8fb575ca
...@@ -586,11 +586,11 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf ...@@ -586,11 +586,11 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf
extension, PTP dissector/classifier, and much more. They are all internally extension, PTP dissector/classifier, and much more. They are all internally
converted by the kernel into the new instruction set representation and run converted by the kernel into the new instruction set representation and run
in the eBPF interpreter. For in-kernel handlers, this all works transparently in the eBPF interpreter. For in-kernel handlers, this all works transparently
by using sk_unattached_filter_create() for setting up the filter, resp. by using bpf_prog_create() for setting up the filter, resp.
sk_unattached_filter_destroy() for destroying it. The macro bpf_prog_destroy() for destroying it. The macro
SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
code to run the filter. 'filter' is a pointer to struct sk_filter that we code to run the filter. 'filter' is a pointer to struct bpf_prog that we
got from sk_unattached_filter_create(), and 'ctx' the given context (e.g. got from bpf_prog_create(), and 'ctx' the given context (e.g.
skb pointer). All constraints and restrictions from bpf_check_classic() apply skb pointer). All constraints and restrictions from bpf_check_classic() apply
before a conversion to the new layout is being done behind the scenes! before a conversion to the new layout is being done behind the scenes!
......
...@@ -56,7 +56,7 @@ ...@@ -56,7 +56,7 @@
#define FLAG_NEED_X_RESET (1 << 0) #define FLAG_NEED_X_RESET (1 << 0)
struct jit_ctx { struct jit_ctx {
const struct sk_filter *skf; const struct bpf_prog *skf;
unsigned idx; unsigned idx;
unsigned prologue_bytes; unsigned prologue_bytes;
int ret0_fp_idx; int ret0_fp_idx;
...@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx) ...@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx)
static int build_body(struct jit_ctx *ctx) static int build_body(struct jit_ctx *ctx)
{ {
void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
const struct sk_filter *prog = ctx->skf; const struct bpf_prog *prog = ctx->skf;
const struct sock_filter *inst; const struct sock_filter *inst;
unsigned i, load_order, off, condt; unsigned i, load_order, off, condt;
int imm12; int imm12;
...@@ -857,7 +857,7 @@ static int build_body(struct jit_ctx *ctx) ...@@ -857,7 +857,7 @@ static int build_body(struct jit_ctx *ctx)
} }
void bpf_jit_compile(struct sk_filter *fp) void bpf_jit_compile(struct bpf_prog *fp)
{ {
struct jit_ctx ctx; struct jit_ctx ctx;
unsigned tmp_idx; unsigned tmp_idx;
...@@ -926,7 +926,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -926,7 +926,7 @@ void bpf_jit_compile(struct sk_filter *fp)
return; return;
} }
void bpf_jit_free(struct sk_filter *fp) void bpf_jit_free(struct bpf_prog *fp)
{ {
if (fp->jited) if (fp->jited)
module_free(NULL, fp->bpf_func); module_free(NULL, fp->bpf_func);
......
...@@ -131,7 +131,7 @@ ...@@ -131,7 +131,7 @@
* @target: Memory location for the compiled filter * @target: Memory location for the compiled filter
*/ */
struct jit_ctx { struct jit_ctx {
const struct sk_filter *skf; const struct bpf_prog *skf;
unsigned int prologue_bytes; unsigned int prologue_bytes;
u32 idx; u32 idx;
u32 flags; u32 flags;
...@@ -789,7 +789,7 @@ static int pkt_type_offset(void) ...@@ -789,7 +789,7 @@ static int pkt_type_offset(void)
static int build_body(struct jit_ctx *ctx) static int build_body(struct jit_ctx *ctx)
{ {
void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
const struct sk_filter *prog = ctx->skf; const struct bpf_prog *prog = ctx->skf;
const struct sock_filter *inst; const struct sock_filter *inst;
unsigned int i, off, load_order, condt; unsigned int i, off, load_order, condt;
u32 k, b_off __maybe_unused; u32 k, b_off __maybe_unused;
...@@ -1369,7 +1369,7 @@ static int build_body(struct jit_ctx *ctx) ...@@ -1369,7 +1369,7 @@ static int build_body(struct jit_ctx *ctx)
int bpf_jit_enable __read_mostly; int bpf_jit_enable __read_mostly;
void bpf_jit_compile(struct sk_filter *fp) void bpf_jit_compile(struct bpf_prog *fp)
{ {
struct jit_ctx ctx; struct jit_ctx ctx;
unsigned int alloc_size, tmp_idx; unsigned int alloc_size, tmp_idx;
...@@ -1423,7 +1423,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -1423,7 +1423,7 @@ void bpf_jit_compile(struct sk_filter *fp)
kfree(ctx.offsets); kfree(ctx.offsets);
} }
void bpf_jit_free(struct sk_filter *fp) void bpf_jit_free(struct bpf_prog *fp)
{ {
if (fp->jited) if (fp->jited)
module_free(NULL, fp->bpf_func); module_free(NULL, fp->bpf_func);
......
...@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end) ...@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end)
flush_icache_range((unsigned long)start, (unsigned long)end); flush_icache_range((unsigned long)start, (unsigned long)end);
} }
static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
struct codegen_context *ctx) struct codegen_context *ctx)
{ {
int i; int i;
...@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) ...@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
/* Assemble the body code between the prologue & epilogue. */ /* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
struct codegen_context *ctx, struct codegen_context *ctx,
unsigned int *addrs) unsigned int *addrs)
{ {
...@@ -569,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, ...@@ -569,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
return 0; return 0;
} }
void bpf_jit_compile(struct sk_filter *fp) void bpf_jit_compile(struct bpf_prog *fp)
{ {
unsigned int proglen; unsigned int proglen;
unsigned int alloclen; unsigned int alloclen;
...@@ -693,7 +693,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -693,7 +693,7 @@ void bpf_jit_compile(struct sk_filter *fp)
return; return;
} }
void bpf_jit_free(struct sk_filter *fp) void bpf_jit_free(struct bpf_prog *fp)
{ {
if (fp->jited) if (fp->jited)
module_free(NULL, fp->bpf_func); module_free(NULL, fp->bpf_func);
......
...@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize, ...@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
return header; return header;
} }
void bpf_jit_compile(struct sk_filter *fp) void bpf_jit_compile(struct bpf_prog *fp)
{ {
struct bpf_binary_header *header = NULL; struct bpf_binary_header *header = NULL;
unsigned long size, prg_len, lit_len; unsigned long size, prg_len, lit_len;
...@@ -875,7 +875,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -875,7 +875,7 @@ void bpf_jit_compile(struct sk_filter *fp)
kfree(addrs); kfree(addrs);
} }
void bpf_jit_free(struct sk_filter *fp) void bpf_jit_free(struct bpf_prog *fp)
{ {
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr; struct bpf_binary_header *header = (void *)addr;
......
...@@ -354,7 +354,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \ ...@@ -354,7 +354,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \
* emit_jump() calls with adjusted offsets. * emit_jump() calls with adjusted offsets.
*/ */
void bpf_jit_compile(struct sk_filter *fp) void bpf_jit_compile(struct bpf_prog *fp)
{ {
unsigned int cleanup_addr, proglen, oldproglen = 0; unsigned int cleanup_addr, proglen, oldproglen = 0;
u32 temp[8], *prog, *func, seen = 0, pass; u32 temp[8], *prog, *func, seen = 0, pass;
...@@ -808,7 +808,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; ...@@ -808,7 +808,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
return; return;
} }
void bpf_jit_free(struct sk_filter *fp) void bpf_jit_free(struct bpf_prog *fp)
{ {
if (fp->jited) if (fp->jited)
module_free(NULL, fp->bpf_func); module_free(NULL, fp->bpf_func);
......
...@@ -211,7 +211,7 @@ struct jit_context { ...@@ -211,7 +211,7 @@ struct jit_context {
bool seen_ld_abs; bool seen_ld_abs;
}; };
static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx) int oldproglen, struct jit_context *ctx)
{ {
struct bpf_insn *insn = bpf_prog->insnsi; struct bpf_insn *insn = bpf_prog->insnsi;
...@@ -841,7 +841,7 @@ common_load: ctx->seen_ld_abs = true; ...@@ -841,7 +841,7 @@ common_load: ctx->seen_ld_abs = true;
/* By design x64 JIT should support all BPF instructions /* By design x64 JIT should support all BPF instructions
* This error will be seen if new instruction was added * This error will be seen if new instruction was added
* to interpreter, but not to JIT * to interpreter, but not to JIT
* or if there is junk in sk_filter * or if there is junk in bpf_prog
*/ */
pr_err("bpf_jit: unknown opcode %02x\n", insn->code); pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
return -EINVAL; return -EINVAL;
...@@ -862,11 +862,11 @@ common_load: ctx->seen_ld_abs = true; ...@@ -862,11 +862,11 @@ common_load: ctx->seen_ld_abs = true;
return proglen; return proglen;
} }
void bpf_jit_compile(struct sk_filter *prog) void bpf_jit_compile(struct bpf_prog *prog)
{ {
} }
void bpf_int_jit_compile(struct sk_filter *prog) void bpf_int_jit_compile(struct bpf_prog *prog)
{ {
struct bpf_binary_header *header = NULL; struct bpf_binary_header *header = NULL;
int proglen, oldproglen = 0; int proglen, oldproglen = 0;
...@@ -932,7 +932,7 @@ void bpf_int_jit_compile(struct sk_filter *prog) ...@@ -932,7 +932,7 @@ void bpf_int_jit_compile(struct sk_filter *prog)
static void bpf_jit_free_deferred(struct work_struct *work) static void bpf_jit_free_deferred(struct work_struct *work)
{ {
struct sk_filter *fp = container_of(work, struct sk_filter, work); struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr; struct bpf_binary_header *header = (void *)addr;
...@@ -941,7 +941,7 @@ static void bpf_jit_free_deferred(struct work_struct *work) ...@@ -941,7 +941,7 @@ static void bpf_jit_free_deferred(struct work_struct *work)
kfree(fp); kfree(fp);
} }
void bpf_jit_free(struct sk_filter *fp) void bpf_jit_free(struct bpf_prog *fp)
{ {
if (fp->jited) { if (fp->jited) {
INIT_WORK(&fp->work, bpf_jit_free_deferred); INIT_WORK(&fp->work, bpf_jit_free_deferred);
......
...@@ -379,12 +379,12 @@ isdn_ppp_release(int min, struct file *file) ...@@ -379,12 +379,12 @@ isdn_ppp_release(int min, struct file *file)
#endif #endif
#ifdef CONFIG_IPPP_FILTER #ifdef CONFIG_IPPP_FILTER
if (is->pass_filter) { if (is->pass_filter) {
sk_unattached_filter_destroy(is->pass_filter); bpf_prog_destroy(is->pass_filter);
is->pass_filter = NULL; is->pass_filter = NULL;
} }
if (is->active_filter) { if (is->active_filter) {
sk_unattached_filter_destroy(is->active_filter); bpf_prog_destroy(is->active_filter);
is->active_filter = NULL; is->active_filter = NULL;
} }
#endif #endif
...@@ -639,12 +639,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) ...@@ -639,12 +639,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
fprog.filter = code; fprog.filter = code;
if (is->pass_filter) { if (is->pass_filter) {
sk_unattached_filter_destroy(is->pass_filter); bpf_prog_destroy(is->pass_filter);
is->pass_filter = NULL; is->pass_filter = NULL;
} }
if (fprog.filter != NULL) if (fprog.filter != NULL)
err = sk_unattached_filter_create(&is->pass_filter, err = bpf_prog_create(&is->pass_filter, &fprog);
&fprog);
else else
err = 0; err = 0;
kfree(code); kfree(code);
...@@ -664,12 +663,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) ...@@ -664,12 +663,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
fprog.filter = code; fprog.filter = code;
if (is->active_filter) { if (is->active_filter) {
sk_unattached_filter_destroy(is->active_filter); bpf_prog_destroy(is->active_filter);
is->active_filter = NULL; is->active_filter = NULL;
} }
if (fprog.filter != NULL) if (fprog.filter != NULL)
err = sk_unattached_filter_create(&is->active_filter, err = bpf_prog_create(&is->active_filter, &fprog);
&fprog);
else else
err = 0; err = 0;
kfree(code); kfree(code);
...@@ -1174,14 +1172,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff * ...@@ -1174,14 +1172,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
} }
if (is->pass_filter if (is->pass_filter
&& SK_RUN_FILTER(is->pass_filter, skb) == 0) { && BPF_PROG_RUN(is->pass_filter, skb) == 0) {
if (is->debug & 0x2) if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: inbound frame filtered.\n"); printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
kfree_skb(skb); kfree_skb(skb);
return; return;
} }
if (!(is->active_filter if (!(is->active_filter
&& SK_RUN_FILTER(is->active_filter, skb) == 0)) { && BPF_PROG_RUN(is->active_filter, skb) == 0)) {
if (is->debug & 0x2) if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0; lp->huptimer = 0;
...@@ -1320,14 +1318,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1320,14 +1318,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
} }
if (ipt->pass_filter if (ipt->pass_filter
&& SK_RUN_FILTER(ipt->pass_filter, skb) == 0) { && BPF_PROG_RUN(ipt->pass_filter, skb) == 0) {
if (ipt->debug & 0x4) if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: outbound frame filtered.\n"); printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
kfree_skb(skb); kfree_skb(skb);
goto unlock; goto unlock;
} }
if (!(ipt->active_filter if (!(ipt->active_filter
&& SK_RUN_FILTER(ipt->active_filter, skb) == 0)) { && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) {
if (ipt->debug & 0x4) if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n"); printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0; lp->huptimer = 0;
...@@ -1517,9 +1515,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp) ...@@ -1517,9 +1515,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
} }
drop |= is->pass_filter drop |= is->pass_filter
&& SK_RUN_FILTER(is->pass_filter, skb) == 0; && BPF_PROG_RUN(is->pass_filter, skb) == 0;
drop |= is->active_filter drop |= is->active_filter
&& SK_RUN_FILTER(is->active_filter, skb) == 0; && BPF_PROG_RUN(is->active_filter, skb) == 0;
skb_push(skb, IPPP_MAX_HEADER - 4); skb_push(skb, IPPP_MAX_HEADER - 4);
return drop; return drop;
......
...@@ -143,8 +143,8 @@ struct ppp { ...@@ -143,8 +143,8 @@ struct ppp {
struct sk_buff_head mrq; /* MP: receive reconstruction queue */ struct sk_buff_head mrq; /* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */ #endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER #ifdef CONFIG_PPP_FILTER
struct sk_filter *pass_filter; /* filter for packets to pass */ struct bpf_prog *pass_filter; /* filter for packets to pass */
struct sk_filter *active_filter;/* filter for pkts to reset idle */ struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */ #endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */ struct net *ppp_net; /* the net we belong to */
struct ppp_link_stats stats64; /* 64 bit network stats */ struct ppp_link_stats stats64; /* 64 bit network stats */
...@@ -762,12 +762,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ...@@ -762,12 +762,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ppp_lock(ppp); ppp_lock(ppp);
if (ppp->pass_filter) { if (ppp->pass_filter) {
sk_unattached_filter_destroy(ppp->pass_filter); bpf_prog_destroy(ppp->pass_filter);
ppp->pass_filter = NULL; ppp->pass_filter = NULL;
} }
if (fprog.filter != NULL) if (fprog.filter != NULL)
err = sk_unattached_filter_create(&ppp->pass_filter, err = bpf_prog_create(&ppp->pass_filter,
&fprog); &fprog);
else else
err = 0; err = 0;
kfree(code); kfree(code);
...@@ -788,12 +788,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ...@@ -788,12 +788,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ppp_lock(ppp); ppp_lock(ppp);
if (ppp->active_filter) { if (ppp->active_filter) {
sk_unattached_filter_destroy(ppp->active_filter); bpf_prog_destroy(ppp->active_filter);
ppp->active_filter = NULL; ppp->active_filter = NULL;
} }
if (fprog.filter != NULL) if (fprog.filter != NULL)
err = sk_unattached_filter_create(&ppp->active_filter, err = bpf_prog_create(&ppp->active_filter,
&fprog); &fprog);
else else
err = 0; err = 0;
kfree(code); kfree(code);
...@@ -1205,7 +1205,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) ...@@ -1205,7 +1205,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
a four-byte PPP header on each packet */ a four-byte PPP header on each packet */
*skb_push(skb, 2) = 1; *skb_push(skb, 2) = 1;
if (ppp->pass_filter && if (ppp->pass_filter &&
SK_RUN_FILTER(ppp->pass_filter, skb) == 0) { BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1) if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev, netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: outbound frame " "PPP: outbound frame "
...@@ -1215,7 +1215,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) ...@@ -1215,7 +1215,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
} }
/* if this packet passes the active filter, record the time */ /* if this packet passes the active filter, record the time */
if (!(ppp->active_filter && if (!(ppp->active_filter &&
SK_RUN_FILTER(ppp->active_filter, skb) == 0)) BPF_PROG_RUN(ppp->active_filter, skb) == 0))
ppp->last_xmit = jiffies; ppp->last_xmit = jiffies;
skb_pull(skb, 2); skb_pull(skb, 2);
#else #else
...@@ -1839,7 +1839,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) ...@@ -1839,7 +1839,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
*skb_push(skb, 2) = 0; *skb_push(skb, 2) = 0;
if (ppp->pass_filter && if (ppp->pass_filter &&
SK_RUN_FILTER(ppp->pass_filter, skb) == 0) { BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1) if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev, netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: inbound frame " "PPP: inbound frame "
...@@ -1848,7 +1848,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) ...@@ -1848,7 +1848,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
return; return;
} }
if (!(ppp->active_filter && if (!(ppp->active_filter &&
SK_RUN_FILTER(ppp->active_filter, skb) == 0)) BPF_PROG_RUN(ppp->active_filter, skb) == 0))
ppp->last_recv = jiffies; ppp->last_recv = jiffies;
__skb_pull(skb, 2); __skb_pull(skb, 2);
} else } else
...@@ -2829,12 +2829,12 @@ static void ppp_destroy_interface(struct ppp *ppp) ...@@ -2829,12 +2829,12 @@ static void ppp_destroy_interface(struct ppp *ppp)
#endif /* CONFIG_PPP_MULTILINK */ #endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER #ifdef CONFIG_PPP_FILTER
if (ppp->pass_filter) { if (ppp->pass_filter) {
sk_unattached_filter_destroy(ppp->pass_filter); bpf_prog_destroy(ppp->pass_filter);
ppp->pass_filter = NULL; ppp->pass_filter = NULL;
} }
if (ppp->active_filter) { if (ppp->active_filter) {
sk_unattached_filter_destroy(ppp->active_filter); bpf_prog_destroy(ppp->active_filter);
ppp->active_filter = NULL; ppp->active_filter = NULL;
} }
#endif /* CONFIG_PPP_FILTER */ #endif /* CONFIG_PPP_FILTER */
......
...@@ -58,7 +58,7 @@ struct lb_priv_ex { ...@@ -58,7 +58,7 @@ struct lb_priv_ex {
}; };
struct lb_priv { struct lb_priv {
struct sk_filter __rcu *fp; struct bpf_prog __rcu *fp;
lb_select_tx_port_func_t __rcu *select_tx_port_func; lb_select_tx_port_func_t __rcu *select_tx_port_func;
struct lb_pcpu_stats __percpu *pcpu_stats; struct lb_pcpu_stats __percpu *pcpu_stats;
struct lb_priv_ex *ex; /* priv extension */ struct lb_priv_ex *ex; /* priv extension */
...@@ -174,14 +174,14 @@ static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name) ...@@ -174,14 +174,14 @@ static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv, static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct sk_filter *fp; struct bpf_prog *fp;
uint32_t lhash; uint32_t lhash;
unsigned char *c; unsigned char *c;
fp = rcu_dereference_bh(lb_priv->fp); fp = rcu_dereference_bh(lb_priv->fp);
if (unlikely(!fp)) if (unlikely(!fp))
return 0; return 0;
lhash = SK_RUN_FILTER(fp, skb); lhash = BPF_PROG_RUN(fp, skb);
c = (char *) &lhash; c = (char *) &lhash;
return c[0] ^ c[1] ^ c[2] ^ c[3]; return c[0] ^ c[1] ^ c[2] ^ c[3];
} }
...@@ -271,8 +271,8 @@ static void __fprog_destroy(struct sock_fprog_kern *fprog) ...@@ -271,8 +271,8 @@ static void __fprog_destroy(struct sock_fprog_kern *fprog)
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{ {
struct lb_priv *lb_priv = get_lb_priv(team); struct lb_priv *lb_priv = get_lb_priv(team);
struct sk_filter *fp = NULL; struct bpf_prog *fp = NULL;
struct sk_filter *orig_fp = NULL; struct bpf_prog *orig_fp = NULL;
struct sock_fprog_kern *fprog = NULL; struct sock_fprog_kern *fprog = NULL;
int err; int err;
...@@ -281,7 +281,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) ...@@ -281,7 +281,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
ctx->data.bin_val.ptr); ctx->data.bin_val.ptr);
if (err) if (err)
return err; return err;
err = sk_unattached_filter_create(&fp, fprog); err = bpf_prog_create(&fp, fprog);
if (err) { if (err) {
__fprog_destroy(fprog); __fprog_destroy(fprog);
return err; return err;
...@@ -300,7 +300,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) ...@@ -300,7 +300,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
if (orig_fp) { if (orig_fp) {
synchronize_rcu(); synchronize_rcu();
sk_unattached_filter_destroy(orig_fp); bpf_prog_destroy(orig_fp);
} }
return 0; return 0;
} }
......
...@@ -296,7 +296,8 @@ enum { ...@@ -296,7 +296,8 @@ enum {
}) })
/* Macro to invoke filter function. */ /* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) #define SK_RUN_FILTER(filter, ctx) \
(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
struct bpf_insn { struct bpf_insn {
__u8 code; /* opcode */ __u8 code; /* opcode */
...@@ -323,12 +324,10 @@ struct sk_buff; ...@@ -323,12 +324,10 @@ struct sk_buff;
struct sock; struct sock;
struct seccomp_data; struct seccomp_data;
struct sk_filter { struct bpf_prog {
atomic_t refcnt;
u32 jited:1, /* Is our filter JIT'ed? */ u32 jited:1, /* Is our filter JIT'ed? */
len:31; /* Number of filter blocks */ len:31; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */ struct sock_fprog_kern *orig_prog; /* Original BPF program */
struct rcu_head rcu;
unsigned int (*bpf_func)(const struct sk_buff *skb, unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter); const struct bpf_insn *filter);
union { union {
...@@ -338,25 +337,32 @@ struct sk_filter { ...@@ -338,25 +337,32 @@ struct sk_filter {
}; };
}; };
static inline unsigned int sk_filter_size(unsigned int proglen) struct sk_filter {
atomic_t refcnt;
struct rcu_head rcu;
struct bpf_prog *prog;
};
#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
static inline unsigned int bpf_prog_size(unsigned int proglen)
{ {
return max(sizeof(struct sk_filter), return max(sizeof(struct bpf_prog),
offsetof(struct sk_filter, insns[proglen])); offsetof(struct bpf_prog, insns[proglen]));
} }
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
int sk_filter(struct sock *sk, struct sk_buff *skb); int sk_filter(struct sock *sk, struct sk_buff *skb);
void sk_filter_select_runtime(struct sk_filter *fp); void bpf_prog_select_runtime(struct bpf_prog *fp);
void sk_filter_free(struct sk_filter *fp); void bpf_prog_free(struct bpf_prog *fp);
int bpf_convert_filter(struct sock_filter *prog, int len, int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_insn *new_prog, int *new_len); struct bpf_insn *new_prog, int *new_len);
int sk_unattached_filter_create(struct sk_filter **pfp, int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp);
void sk_unattached_filter_destroy(struct sk_filter *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_detach_filter(struct sock *sk); int sk_detach_filter(struct sock *sk);
...@@ -369,7 +375,7 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); ...@@ -369,7 +375,7 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct sk_filter *fp); void bpf_int_jit_compile(struct bpf_prog *fp);
#define BPF_ANC BIT(15) #define BPF_ANC BIT(15)
...@@ -423,8 +429,8 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, ...@@ -423,8 +429,8 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/printk.h> #include <linux/printk.h>
void bpf_jit_compile(struct sk_filter *fp); void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct sk_filter *fp); void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image) u32 pass, void *image)
...@@ -438,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, ...@@ -438,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
#else #else
#include <linux/slab.h> #include <linux/slab.h>
static inline void bpf_jit_compile(struct sk_filter *fp) static inline void bpf_jit_compile(struct bpf_prog *fp)
{ {
} }
static inline void bpf_jit_free(struct sk_filter *fp) static inline void bpf_jit_free(struct bpf_prog *fp)
{ {
kfree(fp); kfree(fp);
} }
......
...@@ -180,8 +180,8 @@ struct ippp_struct { ...@@ -180,8 +180,8 @@ struct ippp_struct {
struct slcompress *slcomp; struct slcompress *slcomp;
#endif #endif
#ifdef CONFIG_IPPP_FILTER #ifdef CONFIG_IPPP_FILTER
struct sk_filter *pass_filter; /* filter for packets to pass */ struct bpf_prog *pass_filter; /* filter for packets to pass */
struct sk_filter *active_filter; /* filter for pkts to reset idle */ struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif #endif
unsigned long debug; unsigned long debug;
struct isdn_ppp_compressor *compressor,*decompressor; struct isdn_ppp_compressor *compressor,*decompressor;
......
...@@ -6,14 +6,14 @@ ...@@ -6,14 +6,14 @@
#define XT_BPF_MAX_NUM_INSTR 64 #define XT_BPF_MAX_NUM_INSTR 64
struct sk_filter; struct bpf_prog;
struct xt_bpf_info { struct xt_bpf_info {
__u16 bpf_program_num_elem; __u16 bpf_program_num_elem;
struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR]; struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
/* only used in the kernel */ /* only used in the kernel */
struct sk_filter *filter __attribute__((aligned(8))); struct bpf_prog *filter __attribute__((aligned(8)));
}; };
#endif /*_XT_BPF_H */ #endif /*_XT_BPF_H */
...@@ -73,15 +73,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ...@@ -73,15 +73,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
} }
/** /**
* __sk_run_filter - run a filter on a given context * __bpf_prog_run - run eBPF program on a given context
* @ctx: buffer to run the filter on * @ctx: is the data we are operating on
* @insn: filter to apply * @insn: is the array of eBPF instructions
* *
* Decode and apply filter instructions to the skb->data. Return length to * Decode and execute eBPF instructions.
* keep, 0 for none. @ctx is the data we are operating on, @insn is the
* array of filter instructions.
*/ */
static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn) static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{ {
u64 stack[MAX_BPF_STACK / sizeof(u64)]; u64 stack[MAX_BPF_STACK / sizeof(u64)];
u64 regs[MAX_BPF_REG], tmp; u64 regs[MAX_BPF_REG], tmp;
...@@ -508,29 +506,29 @@ static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn) ...@@ -508,29 +506,29 @@ static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
return 0; return 0;
} }
void __weak bpf_int_jit_compile(struct sk_filter *prog) void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{ {
} }
/** /**
* sk_filter_select_runtime - select execution runtime for BPF program * bpf_prog_select_runtime - select execution runtime for BPF program
* @fp: sk_filter populated with internal BPF program * @fp: bpf_prog populated with internal BPF program
* *
* try to JIT internal BPF program, if JIT is not available select interpreter * try to JIT internal BPF program, if JIT is not available select interpreter
* BPF program will be executed via SK_RUN_FILTER() macro * BPF program will be executed via BPF_PROG_RUN() macro
*/ */
void sk_filter_select_runtime(struct sk_filter *fp) void bpf_prog_select_runtime(struct bpf_prog *fp)
{ {
fp->bpf_func = (void *) __sk_run_filter; fp->bpf_func = (void *) __bpf_prog_run;
/* Probe if internal BPF can be JITed */ /* Probe if internal BPF can be JITed */
bpf_int_jit_compile(fp); bpf_int_jit_compile(fp);
} }
EXPORT_SYMBOL_GPL(sk_filter_select_runtime); EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
/* free internal BPF program */ /* free internal BPF program */
void sk_filter_free(struct sk_filter *fp) void bpf_prog_free(struct bpf_prog *fp)
{ {
bpf_jit_free(fp); bpf_jit_free(fp);
} }
EXPORT_SYMBOL_GPL(sk_filter_free); EXPORT_SYMBOL_GPL(bpf_prog_free);
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
struct seccomp_filter { struct seccomp_filter {
atomic_t usage; atomic_t usage;
struct seccomp_filter *prev; struct seccomp_filter *prev;
struct sk_filter *prog; struct bpf_prog *prog;
}; };
/* Limit any path through the tree to 256KB worth of instructions. */ /* Limit any path through the tree to 256KB worth of instructions. */
...@@ -187,7 +187,7 @@ static u32 seccomp_run_filters(int syscall) ...@@ -187,7 +187,7 @@ static u32 seccomp_run_filters(int syscall)
* value always takes priority (ignoring the DATA). * value always takes priority (ignoring the DATA).
*/ */
for (f = current->seccomp.filter; f; f = f->prev) { for (f = current->seccomp.filter; f; f = f->prev) {
u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd); u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret; ret = cur_ret;
...@@ -260,7 +260,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) ...@@ -260,7 +260,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
if (!filter) if (!filter)
goto free_prog; goto free_prog;
filter->prog = kzalloc(sk_filter_size(new_len), filter->prog = kzalloc(bpf_prog_size(new_len),
GFP_KERNEL|__GFP_NOWARN); GFP_KERNEL|__GFP_NOWARN);
if (!filter->prog) if (!filter->prog)
goto free_filter; goto free_filter;
...@@ -273,7 +273,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) ...@@ -273,7 +273,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
atomic_set(&filter->usage, 1); atomic_set(&filter->usage, 1);
filter->prog->len = new_len; filter->prog->len = new_len;
sk_filter_select_runtime(filter->prog); bpf_prog_select_runtime(filter->prog);
/* /*
* If there is an existing filter, make it the prev and don't drop its * If there is an existing filter, make it the prev and don't drop its
...@@ -337,7 +337,7 @@ void put_seccomp_filter(struct task_struct *tsk) ...@@ -337,7 +337,7 @@ void put_seccomp_filter(struct task_struct *tsk)
while (orig && atomic_dec_and_test(&orig->usage)) { while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig; struct seccomp_filter *freeme = orig;
orig = orig->prev; orig = orig->prev;
sk_filter_free(freeme->prog); bpf_prog_free(freeme->prog);
kfree(freeme); kfree(freeme);
} }
} }
......
...@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp) ...@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
return len + 1; return len + 1;
} }
static struct sk_filter *generate_filter(int which, int *err) static struct bpf_prog *generate_filter(int which, int *err)
{ {
struct sk_filter *fp; struct bpf_prog *fp;
struct sock_fprog_kern fprog; struct sock_fprog_kern fprog;
unsigned int flen = probe_filter_length(tests[which].u.insns); unsigned int flen = probe_filter_length(tests[which].u.insns);
__u8 test_type = tests[which].aux & TEST_TYPE_MASK; __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
...@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err) ...@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
fprog.filter = tests[which].u.insns; fprog.filter = tests[which].u.insns;
fprog.len = flen; fprog.len = flen;
*err = sk_unattached_filter_create(&fp, &fprog); *err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) { if (tests[which].aux & FLAG_EXPECTED_FAIL) {
if (*err == -EINVAL) { if (*err == -EINVAL) {
pr_cont("PASS\n"); pr_cont("PASS\n");
...@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err) ...@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
break; break;
case INTERNAL: case INTERNAL:
fp = kzalloc(sk_filter_size(flen), GFP_KERNEL); fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
if (fp == NULL) { if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n"); pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM; *err = -ENOMEM;
...@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err) ...@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err)
memcpy(fp->insnsi, tests[which].u.insns_int, memcpy(fp->insnsi, tests[which].u.insns_int,
fp->len * sizeof(struct bpf_insn)); fp->len * sizeof(struct bpf_insn));
sk_filter_select_runtime(fp); bpf_prog_select_runtime(fp);
break; break;
} }
...@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err) ...@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
return fp; return fp;
} }
static void release_filter(struct sk_filter *fp, int which) static void release_filter(struct bpf_prog *fp, int which)
{ {
__u8 test_type = tests[which].aux & TEST_TYPE_MASK; __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
switch (test_type) { switch (test_type) {
case CLASSIC: case CLASSIC:
sk_unattached_filter_destroy(fp); bpf_prog_destroy(fp);
break; break;
case INTERNAL: case INTERNAL:
sk_filter_free(fp); bpf_prog_free(fp);
break; break;
} }
} }
static int __run_one(const struct sk_filter *fp, const void *data, static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration) int runs, u64 *duration)
{ {
u64 start, finish; u64 start, finish;
...@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data, ...@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
start = ktime_to_us(ktime_get()); start = ktime_to_us(ktime_get());
for (i = 0; i < runs; i++) for (i = 0; i < runs; i++)
ret = SK_RUN_FILTER(fp, data); ret = BPF_PROG_RUN(fp, data);
finish = ktime_to_us(ktime_get()); finish = ktime_to_us(ktime_get());
...@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data, ...@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
return ret; return ret;
} }
static int run_one(const struct sk_filter *fp, struct bpf_test *test) static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{ {
int err_cnt = 0, i, runs = MAX_TESTRUNS; int err_cnt = 0, i, runs = MAX_TESTRUNS;
...@@ -1884,7 +1884,7 @@ static __init int test_bpf(void) ...@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
int i, err_cnt = 0, pass_cnt = 0; int i, err_cnt = 0, pass_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) { for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct sk_filter *fp; struct bpf_prog *fp;
int err; int err;
pr_info("#%d %s ", i, tests[i].descr); pr_info("#%d %s ", i, tests[i].descr);
......
...@@ -810,8 +810,8 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen) ...@@ -810,8 +810,8 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
} }
EXPORT_SYMBOL(bpf_check_classic); EXPORT_SYMBOL(bpf_check_classic);
static int sk_store_orig_filter(struct sk_filter *fp, static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
const struct sock_fprog *fprog) const struct sock_fprog *fprog)
{ {
unsigned int fsize = bpf_classic_proglen(fprog); unsigned int fsize = bpf_classic_proglen(fprog);
struct sock_fprog_kern *fkprog; struct sock_fprog_kern *fkprog;
...@@ -831,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp, ...@@ -831,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
return 0; return 0;
} }
static void sk_release_orig_filter(struct sk_filter *fp) static void bpf_release_orig_filter(struct bpf_prog *fp)
{ {
struct sock_fprog_kern *fprog = fp->orig_prog; struct sock_fprog_kern *fprog = fp->orig_prog;
...@@ -841,10 +841,16 @@ static void sk_release_orig_filter(struct sk_filter *fp) ...@@ -841,10 +841,16 @@ static void sk_release_orig_filter(struct sk_filter *fp)
} }
} }
static void __bpf_prog_release(struct bpf_prog *prog)
{
bpf_release_orig_filter(prog);
bpf_prog_free(prog);
}
static void __sk_filter_release(struct sk_filter *fp) static void __sk_filter_release(struct sk_filter *fp)
{ {
sk_release_orig_filter(fp); __bpf_prog_release(fp->prog);
sk_filter_free(fp); kfree(fp);
} }
/** /**
...@@ -872,7 +878,7 @@ static void sk_filter_release(struct sk_filter *fp) ...@@ -872,7 +878,7 @@ static void sk_filter_release(struct sk_filter *fp)
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{ {
u32 filter_size = sk_filter_size(fp->len); u32 filter_size = bpf_prog_size(fp->prog->len);
atomic_sub(filter_size, &sk->sk_omem_alloc); atomic_sub(filter_size, &sk->sk_omem_alloc);
sk_filter_release(fp); sk_filter_release(fp);
...@@ -883,7 +889,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) ...@@ -883,7 +889,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
*/ */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{ {
u32 filter_size = sk_filter_size(fp->len); u32 filter_size = bpf_prog_size(fp->prog->len);
/* same check as in sock_kmalloc() */ /* same check as in sock_kmalloc() */
if (filter_size <= sysctl_optmem_max && if (filter_size <= sysctl_optmem_max &&
...@@ -895,10 +901,10 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) ...@@ -895,10 +901,10 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
return false; return false;
} }
static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp) static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{ {
struct sock_filter *old_prog; struct sock_filter *old_prog;
struct sk_filter *old_fp; struct bpf_prog *old_fp;
int err, new_len, old_len = fp->len; int err, new_len, old_len = fp->len;
/* We are free to overwrite insns et al right here as it /* We are free to overwrite insns et al right here as it
...@@ -927,7 +933,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp) ...@@ -927,7 +933,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
/* Expand fp for appending the new filter representation. */ /* Expand fp for appending the new filter representation. */
old_fp = fp; old_fp = fp;
fp = krealloc(old_fp, sk_filter_size(new_len), GFP_KERNEL); fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
if (!fp) { if (!fp) {
/* The old_fp is still around in case we couldn't /* The old_fp is still around in case we couldn't
* allocate new memory, so uncharge on that one. * allocate new memory, so uncharge on that one.
...@@ -949,7 +955,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp) ...@@ -949,7 +955,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
*/ */
goto out_err_free; goto out_err_free;
sk_filter_select_runtime(fp); bpf_prog_select_runtime(fp);
kfree(old_prog); kfree(old_prog);
return fp; return fp;
...@@ -957,11 +963,11 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp) ...@@ -957,11 +963,11 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
out_err_free: out_err_free:
kfree(old_prog); kfree(old_prog);
out_err: out_err:
__sk_filter_release(fp); __bpf_prog_release(fp);
return ERR_PTR(err); return ERR_PTR(err);
} }
static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp) static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
{ {
int err; int err;
...@@ -970,7 +976,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp) ...@@ -970,7 +976,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
err = bpf_check_classic(fp->insns, fp->len); err = bpf_check_classic(fp->insns, fp->len);
if (err) { if (err) {
__sk_filter_release(fp); __bpf_prog_release(fp);
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -983,13 +989,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp) ...@@ -983,13 +989,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
* internal BPF translation for the optimized interpreter. * internal BPF translation for the optimized interpreter.
*/ */
if (!fp->jited) if (!fp->jited)
fp = __sk_migrate_filter(fp); fp = bpf_migrate_filter(fp);
return fp; return fp;
} }
/** /**
* sk_unattached_filter_create - create an unattached filter * bpf_prog_create - create an unattached filter
* @pfp: the unattached filter that is created * @pfp: the unattached filter that is created
* @fprog: the filter program * @fprog: the filter program
* *
...@@ -998,23 +1004,21 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp) ...@@ -998,23 +1004,21 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
* If an error occurs or there is insufficient memory for the filter * If an error occurs or there is insufficient memory for the filter
* a negative errno code is returned. On success the return is zero. * a negative errno code is returned. On success the return is zero.
*/ */
int sk_unattached_filter_create(struct sk_filter **pfp, int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
struct sock_fprog_kern *fprog)
{ {
unsigned int fsize = bpf_classic_proglen(fprog); unsigned int fsize = bpf_classic_proglen(fprog);
struct sk_filter *fp; struct bpf_prog *fp;
/* Make sure new filter is there and in the right amounts. */ /* Make sure new filter is there and in the right amounts. */
if (fprog->filter == NULL) if (fprog->filter == NULL)
return -EINVAL; return -EINVAL;
fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL); fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
if (!fp) if (!fp)
return -ENOMEM; return -ENOMEM;
memcpy(fp->insns, fprog->filter, fsize); memcpy(fp->insns, fprog->filter, fsize);
atomic_set(&fp->refcnt, 1);
fp->len = fprog->len; fp->len = fprog->len;
/* Since unattached filters are not copied back to user /* Since unattached filters are not copied back to user
* space through sk_get_filter(), we do not need to hold * space through sk_get_filter(), we do not need to hold
...@@ -1022,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp, ...@@ -1022,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
*/ */
fp->orig_prog = NULL; fp->orig_prog = NULL;
/* __sk_prepare_filter() already takes care of freeing /* bpf_prepare_filter() already takes care of freeing
* memory in case something goes wrong. * memory in case something goes wrong.
*/ */
fp = __sk_prepare_filter(fp); fp = bpf_prepare_filter(fp);
if (IS_ERR(fp)) if (IS_ERR(fp))
return PTR_ERR(fp); return PTR_ERR(fp);
*pfp = fp; *pfp = fp;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(sk_unattached_filter_create); EXPORT_SYMBOL_GPL(bpf_prog_create);
void sk_unattached_filter_destroy(struct sk_filter *fp) void bpf_prog_destroy(struct bpf_prog *fp)
{ {
__sk_filter_release(fp); __bpf_prog_release(fp);
} }
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy); EXPORT_SYMBOL_GPL(bpf_prog_destroy);
/** /**
* sk_attach_filter - attach a socket filter * sk_attach_filter - attach a socket filter
...@@ -1054,7 +1058,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) ...@@ -1054,7 +1058,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{ {
struct sk_filter *fp, *old_fp; struct sk_filter *fp, *old_fp;
unsigned int fsize = bpf_classic_proglen(fprog); unsigned int fsize = bpf_classic_proglen(fprog);
unsigned int sk_fsize = sk_filter_size(fprog->len); unsigned int bpf_fsize = bpf_prog_size(fprog->len);
struct bpf_prog *prog;
int err; int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED)) if (sock_flag(sk, SOCK_FILTER_LOCKED))
...@@ -1064,29 +1069,36 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) ...@@ -1064,29 +1069,36 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (fprog->filter == NULL) if (fprog->filter == NULL)
return -EINVAL; return -EINVAL;
fp = kmalloc(sk_fsize, GFP_KERNEL); prog = kmalloc(bpf_fsize, GFP_KERNEL);
if (!fp) if (!prog)
return -ENOMEM; return -ENOMEM;
if (copy_from_user(fp->insns, fprog->filter, fsize)) { if (copy_from_user(prog->insns, fprog->filter, fsize)) {
kfree(fp); kfree(prog);
return -EFAULT; return -EFAULT;
} }
fp->len = fprog->len; prog->len = fprog->len;
err = sk_store_orig_filter(fp, fprog); err = bpf_prog_store_orig_filter(prog, fprog);
if (err) { if (err) {
kfree(fp); kfree(prog);
return -ENOMEM; return -ENOMEM;
} }
/* __sk_prepare_filter() already takes care of freeing /* bpf_prepare_filter() already takes care of freeing
* memory in case something goes wrong. * memory in case something goes wrong.
*/ */
fp = __sk_prepare_filter(fp); prog = bpf_prepare_filter(prog);
if (IS_ERR(fp)) if (IS_ERR(prog))
return PTR_ERR(fp); return PTR_ERR(prog);
fp = kmalloc(sizeof(*fp), GFP_KERNEL);
if (!fp) {
__bpf_prog_release(prog);
return -ENOMEM;
}
fp->prog = prog;
atomic_set(&fp->refcnt, 0); atomic_set(&fp->refcnt, 0);
...@@ -1142,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, ...@@ -1142,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
/* We're copying the filter that has been originally attached, /* We're copying the filter that has been originally attached,
* so no conversion/decode needed anymore. * so no conversion/decode needed anymore.
*/ */
fprog = filter->orig_prog; fprog = filter->prog->orig_prog;
ret = fprog->len; ret = fprog->len;
if (!len) if (!len)
......
...@@ -107,11 +107,11 @@ ...@@ -107,11 +107,11 @@
#include <linux/filter.h> #include <linux/filter.h>
#include <linux/ptp_classify.h> #include <linux/ptp_classify.h>
static struct sk_filter *ptp_insns __read_mostly; static struct bpf_prog *ptp_insns __read_mostly;
unsigned int ptp_classify_raw(const struct sk_buff *skb) unsigned int ptp_classify_raw(const struct sk_buff *skb)
{ {
return SK_RUN_FILTER(ptp_insns, skb); return BPF_PROG_RUN(ptp_insns, skb);
} }
EXPORT_SYMBOL_GPL(ptp_classify_raw); EXPORT_SYMBOL_GPL(ptp_classify_raw);
...@@ -189,5 +189,5 @@ void __init ptp_classifier_init(void) ...@@ -189,5 +189,5 @@ void __init ptp_classifier_init(void)
.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
}; };
BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog)); BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
} }
...@@ -68,7 +68,7 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, ...@@ -68,7 +68,7 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
if (!filter) if (!filter)
goto out; goto out;
fprog = filter->orig_prog; fprog = filter->prog->orig_prog;
flen = bpf_classic_proglen(fprog); flen = bpf_classic_proglen(fprog);
attr = nla_reserve(skb, attrtype, flen); attr = nla_reserve(skb, attrtype, flen);
......
...@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par) ...@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
program.len = info->bpf_program_num_elem; program.len = info->bpf_program_num_elem;
program.filter = info->bpf_program; program.filter = info->bpf_program;
if (sk_unattached_filter_create(&info->filter, &program)) { if (bpf_prog_create(&info->filter, &program)) {
pr_info("bpf: check failed: parse error\n"); pr_info("bpf: check failed: parse error\n");
return -EINVAL; return -EINVAL;
} }
...@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par) ...@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{ {
const struct xt_bpf_info *info = par->matchinfo; const struct xt_bpf_info *info = par->matchinfo;
return SK_RUN_FILTER(info->filter, skb); return BPF_PROG_RUN(info->filter, skb);
} }
static void bpf_mt_destroy(const struct xt_mtdtor_param *par) static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
{ {
const struct xt_bpf_info *info = par->matchinfo; const struct xt_bpf_info *info = par->matchinfo;
sk_unattached_filter_destroy(info->filter); bpf_prog_destroy(info->filter);
} }
static struct xt_match bpf_mt_reg __read_mostly = { static struct xt_match bpf_mt_reg __read_mostly = {
......
...@@ -30,7 +30,7 @@ struct cls_bpf_head { ...@@ -30,7 +30,7 @@ struct cls_bpf_head {
}; };
struct cls_bpf_prog { struct cls_bpf_prog {
struct sk_filter *filter; struct bpf_prog *filter;
struct sock_filter *bpf_ops; struct sock_filter *bpf_ops;
struct tcf_exts exts; struct tcf_exts exts;
struct tcf_result res; struct tcf_result res;
...@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
int ret; int ret;
list_for_each_entry(prog, &head->plist, link) { list_for_each_entry(prog, &head->plist, link) {
int filter_res = SK_RUN_FILTER(prog->filter, skb); int filter_res = BPF_PROG_RUN(prog->filter, skb);
if (filter_res == 0) if (filter_res == 0)
continue; continue;
...@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) ...@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
tcf_unbind_filter(tp, &prog->res); tcf_unbind_filter(tp, &prog->res);
tcf_exts_destroy(tp, &prog->exts); tcf_exts_destroy(tp, &prog->exts);
sk_unattached_filter_destroy(prog->filter); bpf_prog_destroy(prog->filter);
kfree(prog->bpf_ops); kfree(prog->bpf_ops);
kfree(prog); kfree(prog);
...@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, ...@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
struct sock_filter *bpf_ops, *bpf_old; struct sock_filter *bpf_ops, *bpf_old;
struct tcf_exts exts; struct tcf_exts exts;
struct sock_fprog_kern tmp; struct sock_fprog_kern tmp;
struct sk_filter *fp, *fp_old; struct bpf_prog *fp, *fp_old;
u16 bpf_size, bpf_len; u16 bpf_size, bpf_len;
u32 classid; u32 classid;
int ret; int ret;
...@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, ...@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
tmp.len = bpf_len; tmp.len = bpf_len;
tmp.filter = bpf_ops; tmp.filter = bpf_ops;
ret = sk_unattached_filter_create(&fp, &tmp); ret = bpf_prog_create(&fp, &tmp);
if (ret) if (ret)
goto errout_free; goto errout_free;
...@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, ...@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
tcf_exts_change(tp, &prog->exts, &exts); tcf_exts_change(tp, &prog->exts, &exts);
if (fp_old) if (fp_old)
sk_unattached_filter_destroy(fp_old); bpf_prog_destroy(fp_old);
if (bpf_old) if (bpf_old)
kfree(bpf_old); kfree(bpf_old);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment