Commit 79741a38 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2018-04-27

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add extensive BPF helper descriptions into include/uapi/linux/bpf.h
   and a new script bpf_helpers_doc.py which allows for generating a
   man page out of them. Thus, every BPF helper now comes with a proper
   function signature, a detailed description and a return code
   explanation, from Quentin. (A sketch of the comment format follows
   this list.)

2) Migrate the BPF collect-metadata tunnel tests from BPF samples over
   to the BPF selftests and further extend them with v6 vxlan, geneve
   and ipip tests, simplify the ipip tests, improve documentation and
   convert to the bpf_ntoh*() / bpf_hton*() API, from William.

3) Currently, helpers that expect ARG_PTR_TO_MAP_{KEY,VALUE} can only
   access stack and packet memory. Extend this to allow such helpers
   to also use map values, which enables use cases where the value from
   a first lookup can be used directly as the key for a second lookup,
   from Paul.

4) Add a new helper bpf_skb_get_xfrm_state() for tc BPF programs in
   order to retrieve XFRM state information containing SPI, peer
   address and reqid values, from Eyal.

5) Various optimizations in nfp driver's BPF JIT in order to turn ADD
   and SUB instructions with a negative immediate into the opposite
   operation with a positive immediate, such that nfp can better fit
   small immediates into instructions. Savings in instruction count of
   up to 4% have been observed, from Jakub.

6) Add the BPF prog's gpl_compatible flag to struct bpf_prog_info
   and add support for dumping this through bpftool, from Jiri.

7) Move the BPF sockmap samples over into BPF selftests, since sockmap
   was really a series of tests rather than a sample anyway, and this
   way they can be run by automated bots, from John.

8) Follow-up fix for the bpf_xdp_adjust_tail() helper in order to make
   it work with generic XDP, from Nikita.

9) Some follow-up cleanups to BTF, namely, removing unused defines from
   the BTF uapi header and renaming the 'name' members of struct btf_*
   to name_off to make it clearer that they are offsets into the string
   section, from Martin.

10) Remove test_sock_addr from TEST_GEN_PROGS in BPF selftests since it
    is not run directly but is invoked from test_sock_addr.sh, from
    Yonghong.

11) Remove a redundant ret assignment in the sample BPF loader, from
    Wang.

12) Add a couple of missing files to the BPF selftests' .gitignore,
    from Anders.
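
For reference, the helper descriptions from point 1) are structured comments
kept directly in include/uapi/linux/bpf.h, which bpf_helpers_doc.py parses
into RST (a man page can then be generated from that output). A minimal
sketch of the layout; the helper text below is illustrative rather than a
verbatim quote from the header:

/* void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
 *     Description
 *             Perform a lookup in *map* for an entry associated to *key*.
 *     Return
 *             Map value associated to *key*, or **NULL** if no entry was
 *             found.
 */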

There are two trivial merge conflicts while pulling:

  1) Remove samples/sockmap/Makefile since all sockmap tests have been
     moved to selftests.
  2) Add both hunks from tools/testing/selftests/bpf/.gitignore to the
     file since git should ignore all of them.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cb586c63 c0885f61
......@@ -1214,45 +1214,83 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum br_mask br_mask, bool swap)
static const struct jmp_code_map {
enum br_mask br_mask;
bool swap;
} jmp_code_map[] = {
[BPF_JGT >> 4] = { BR_BLO, true },
[BPF_JGE >> 4] = { BR_BHS, false },
[BPF_JLT >> 4] = { BR_BLO, false },
[BPF_JLE >> 4] = { BR_BHS, true },
[BPF_JSGT >> 4] = { BR_BLT, true },
[BPF_JSGE >> 4] = { BR_BGE, false },
[BPF_JSLT >> 4] = { BR_BLT, false },
[BPF_JSLE >> 4] = { BR_BGE, true },
};
static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
unsigned int op;
op = BPF_OP(meta->insn.code) >> 4;
/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
!jmp_code_map[op].br_mask,
"no code found for jump instruction"))
return NULL;
return &jmp_code_map[op];
}
static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
const struct jmp_code_map *code;
enum alu_op alu_op, carry_op;
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
code = nfp_jmp_code_get(meta);
if (!code)
return -EINVAL;
alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
if (!code->swap)
emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
if (!swap)
if (!code->swap)
emit_alu(nfp_prog, reg_none(),
reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
reg_a(reg + 1), carry_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(),
tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
tmp_reg, carry_op, reg_a(reg + 1));
emit_br(nfp_prog, br_mask, insn->off, 0);
emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum br_mask br_mask, bool swap)
static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
const struct jmp_code_map *code;
u8 areg, breg;
code = nfp_jmp_code_get(meta);
if (!code)
return -EINVAL;
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
if (swap) {
if (code->swap) {
areg ^= breg;
breg ^= areg;
areg ^= breg;
......@@ -1261,7 +1299,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
emit_alu(nfp_prog, reg_none(),
reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
emit_br(nfp_prog, br_mask, insn->off, 0);
emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
......@@ -2283,46 +2321,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}
static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}
static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}
static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
}
static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
}
static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
}
static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
}
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
......@@ -2392,46 +2390,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}
static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}
static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}
static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
}
static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
}
static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
}
static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
}
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
......@@ -2520,25 +2478,25 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
[BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
[BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
[BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
[BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
[BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
[BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
[BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
[BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
[BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
[BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
[BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
[BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
......@@ -2777,6 +2735,54 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
/* abs(insn.imm) will fit better into unrestricted reg immediate -
* convert add/sub of a negative number into a sub/add of a positive one.
*/
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
list_for_each_entry(meta, &nfp_prog->insns, l) {
struct bpf_insn insn = meta->insn;
if (meta->skip)
continue;
if (BPF_CLASS(insn.code) != BPF_ALU &&
BPF_CLASS(insn.code) != BPF_ALU64 &&
BPF_CLASS(insn.code) != BPF_JMP)
continue;
if (BPF_SRC(insn.code) != BPF_K)
continue;
if (insn.imm >= 0)
continue;
if (BPF_CLASS(insn.code) == BPF_JMP) {
switch (BPF_OP(insn.code)) {
case BPF_JGE:
case BPF_JSGE:
case BPF_JLT:
case BPF_JSLT:
meta->jump_neg_op = true;
break;
default:
continue;
}
} else {
if (BPF_OP(insn.code) == BPF_ADD)
insn.code = BPF_CLASS(insn.code) | BPF_SUB;
else if (BPF_OP(insn.code) == BPF_SUB)
insn.code = BPF_CLASS(insn.code) | BPF_ADD;
else
continue;
meta->insn.code = insn.code | BPF_K;
}
meta->insn.imm = -insn.imm;
}
}
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
......@@ -3212,6 +3218,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
nfp_bpf_opt_neg_add_sub(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
nfp_bpf_opt_ldst_gather(nfp_prog);
......
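
To make the effect of the nfp_bpf_opt_neg_add_sub() pass above concrete,
here is a small worked example; the register and immediate values are
invented for illustration and are not taken from the patch:

/* Before the pass:  r0 += -16   (BPF_ALU64 | BPF_ADD | BPF_K, imm = -16)
 * After the pass:   r0 -= 16    (BPF_ALU64 | BPF_SUB | BPF_K, imm = 16)
 *
 * The positive immediate is more likely to fit the NFP's small immediate
 * encodings via ur_load_imm_any(), avoiding an extra immediate load.
 *
 * For BPF_JGE/JSGE/JLT/JSLT with a negative immediate the opcode is left
 * alone: only the immediate is negated and meta->jump_neg_op is set, so
 * cmp_imm() emits ALU_OP_ADD/ALU_OP_ADD_C instead of SUB/SUB_C and the
 * comparison outcome is unchanged.
 */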
......@@ -236,6 +236,7 @@ struct nfp_bpf_reg_state {
* @xadd_over_16bit: 16bit immediate is not guaranteed
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
* @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
......@@ -264,7 +265,10 @@ struct nfp_insn_meta {
bool xadd_maybe_16bit;
};
/* jump */
struct {
struct nfp_insn_meta *jmp_dst;
bool jump_neg_op;
};
/* function calls */
struct {
u32 func_id;
......
......@@ -765,7 +765,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
/* recalculate len if xdp.data or xdp.data_end were
* adjusted
*/
len = xdp.data_end - xdp.data;
len = xdp.data_end - xdp.data + vi->hdr_len;
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
......
......@@ -6,9 +6,7 @@
#include <linux/types.h>
#define BTF_MAGIC 0xeB9F
#define BTF_MAGIC_SWAP 0x9FeB
#define BTF_VERSION 1
#define BTF_FLAGS_COMPR 0x01
struct btf_header {
__u16 magic;
......@@ -43,7 +41,7 @@ struct btf_header {
#define BTF_STR_OFFSET(ref) ((ref) & BTF_MAX_NAME_OFFSET)
struct btf_type {
__u32 name;
__u32 name_off;
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
......@@ -105,7 +103,7 @@ struct btf_type {
* info in "struct btf_type").
*/
struct btf_enum {
__u32 name;
__u32 name_off;
__s32 val;
};
......@@ -122,7 +120,7 @@ struct btf_array {
* "struct btf_type").
*/
struct btf_member {
__u32 name;
__u32 name_off;
__u32 type;
__u32 offset; /* offset in bits */
};
......
......@@ -473,7 +473,7 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
__btf_verifier_log(log, "[%u] %s %s%s",
env->log_type_id,
btf_kind_str[kind],
btf_name_by_offset(btf, t->name),
btf_name_by_offset(btf, t->name_off),
log_details ? " " : "");
if (log_details)
......@@ -517,7 +517,7 @@ static void btf_verifier_log_member(struct btf_verifier_env *env,
btf_verifier_log_type(env, struct_type, NULL);
__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
btf_name_by_offset(btf, member->name),
btf_name_by_offset(btf, member->name_off),
member->type, member->offset);
if (fmt && *fmt) {
......@@ -1419,10 +1419,10 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
btf_verifier_log_type(env, t, NULL);
for_each_member(i, t, member) {
if (!btf_name_offset_valid(btf, member->name)) {
if (!btf_name_offset_valid(btf, member->name_off)) {
btf_verifier_log_member(env, t, member,
"Invalid member name_offset:%u",
member->name);
member->name_off);
return -EINVAL;
}
......@@ -1605,14 +1605,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
btf_verifier_log_type(env, t, NULL);
for (i = 0; i < nr_enums; i++) {
if (!btf_name_offset_valid(btf, enums[i].name)) {
if (!btf_name_offset_valid(btf, enums[i].name_off)) {
btf_verifier_log(env, "\tInvalid name_offset:%u",
enums[i].name);
enums[i].name_off);
return -EINVAL;
}
btf_verifier_log(env, "\t%s val=%d\n",
btf_name_by_offset(btf, enums[i].name),
btf_name_by_offset(btf, enums[i].name_off),
enums[i].val);
}
......@@ -1636,7 +1636,7 @@ static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
for (i = 0; i < nr_enums; i++) {
if (v == enums[i].val) {
seq_printf(m, "%s",
btf_name_by_offset(btf, enums[i].name));
btf_name_by_offset(btf, enums[i].name_off));
return;
}
}
......@@ -1687,9 +1687,9 @@ static s32 btf_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
if (!btf_name_offset_valid(env->btf, t->name)) {
if (!btf_name_offset_valid(env->btf, t->name_off)) {
btf_verifier_log(env, "[%u] Invalid name_offset:%u",
env->log_type_id, t->name);
env->log_type_id, t->name_off);
return -EINVAL;
}
......
......@@ -1914,6 +1914,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info.load_time = prog->aux->load_time;
info.created_by_uid = from_kuid_munged(current_user_ns(),
prog->aux->user->uid);
info.gpl_compatible = prog->gpl_compatible;
memcpy(info.tag, prog->tag, sizeof(prog->tag));
memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
......
......@@ -1914,7 +1914,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) &&
if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
......@@ -1966,14 +1966,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
if (type_is_pkt_pointer(type))
err = check_packet_access(env, regno, reg->off,
meta->map_ptr->key_size,
false);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->key_size,
false, NULL);
err = check_helper_mem_access(env, regno,
meta->map_ptr->key_size, false,
NULL);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
......@@ -1983,14 +1978,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "invalid map_ptr to access map->value\n");
return -EACCES;
}
if (type_is_pkt_pointer(type))
err = check_packet_access(env, regno, reg->off,
meta->map_ptr->value_size,
false);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
false, NULL);
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
NULL);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
......
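
The relaxation above (accepting PTR_TO_MAP_VALUE in addition to stack and
packet pointers for ARG_PTR_TO_MAP_KEY / ARG_PTR_TO_MAP_VALUE arguments)
is what enables the lookup-chaining pattern from the pull request
description. A minimal sketch of such a program; the map names, sizes and
the classifier section are invented for illustration:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") first_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),	/* value doubles as key of second_map */
	.max_entries = 64,
};

struct bpf_map_def SEC("maps") second_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u64),
	.max_entries = 64,
};

SEC("classifier")
int chain_lookup(struct __sk_buff *skb)
{
	__u32 first_key = skb->mark;
	__u32 *second_key;
	__u64 *val;

	second_key = bpf_map_lookup_elem(&first_map, &first_key);
	if (!second_key)
		return TC_ACT_OK;

	/* Previously the verifier required copying *second_key to the
	 * stack first; now the map value pointer can be passed directly.
	 */
	val = bpf_map_lookup_elem(&second_map, second_key);
	if (val)
		__sync_fetch_and_add(val, 1);

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";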
......@@ -4057,8 +4057,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
* pckt.
*/
off = orig_data_end - xdp.data_end;
if (off != 0)
if (off != 0) {
skb_set_tail_pointer(skb, xdp.data_end - xdp.data);
skb->len -= off;
}
switch (act) {
case XDP_REDIRECT:
......
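
The netif_receive_generic_xdp() change above keeps skb->len and the tail
pointer in sync when a program shrinks the frame. A minimal sketch of a
program exercising that path; the 64-byte trim policy is invented for
illustration:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("xdp")
int xdp_trim(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	int len = data_end - data;

	/* At the time of this series bpf_xdp_adjust_tail() only shrinks
	 * the frame, so the delta must be negative.
	 */
	if (len > 64)
		bpf_xdp_adjust_tail(ctx, -(len - 64));

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";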
......@@ -57,6 +57,7 @@
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <linux/bpf_trace.h>
/**
......@@ -3744,6 +3745,49 @@ static const struct bpf_func_proto bpf_bind_proto = {
.arg3_type = ARG_CONST_SIZE,
};
#ifdef CONFIG_XFRM
BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
struct bpf_xfrm_state *, to, u32, size, u64, flags)
{
const struct sec_path *sp = skb_sec_path(skb);
const struct xfrm_state *x;
if (!sp || unlikely(index >= sp->len || flags))
goto err_clear;
x = sp->xvec[index];
if (unlikely(size != sizeof(struct bpf_xfrm_state)))
goto err_clear;
to->reqid = x->props.reqid;
to->spi = x->id.spi;
to->family = x->props.family;
if (to->family == AF_INET6) {
memcpy(to->remote_ipv6, x->props.saddr.a6,
sizeof(to->remote_ipv6));
} else {
to->remote_ipv4 = x->props.saddr.a4;
}
return 0;
err_clear:
memset(to, 0, size);
return -EINVAL;
}
static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
.func = bpf_skb_get_xfrm_state,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};
#endif
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
......@@ -3885,6 +3929,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_cookie_proto;
case BPF_FUNC_get_socket_uid:
return &bpf_get_socket_uid_proto;
#ifdef CONFIG_XFRM
case BPF_FUNC_skb_get_xfrm_state:
return &bpf_skb_get_xfrm_state_proto;
#endif
default:
return bpf_base_func_proto(func_id);
}
......
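
As a usage reference for the new helper wired up above, a hedged sketch of
a tc/cls_act program reading the skb's XFRM state; the reqid policy is
invented for illustration:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int xfrm_filter(struct __sk_buff *skb)
{
	struct bpf_xfrm_state state = {};

	/* index selects an entry in the skb's sec_path; flags must be 0
	 * and size must be sizeof(struct bpf_xfrm_state), otherwise the
	 * helper clears the buffer and returns -EINVAL.
	 */
	if (bpf_skb_get_xfrm_state(skb, 0, &state, sizeof(state), 0) < 0)
		return TC_ACT_OK;

	/* Example policy only: drop traffic from an unexpected reqid. */
	if (state.reqid != 1)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";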
......@@ -114,7 +114,6 @@ always += sock_flags_kern.o
always += test_probe_write_user_kern.o
always += trace_output_kern.o
always += tcbpf1_kern.o
always += tcbpf2_kern.o
always += tc_l2_redirect_kern.o
always += lathist_kern.o
always += offwaketime_kern.o
......
......@@ -549,7 +549,6 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
if (nr_maps < 0) {
printf("Error: Failed loading ELF maps (errno:%d):%s\n",
nr_maps, strerror(-nr_maps));
ret = 1;
goto done;
}
if (load_maps(map_data, nr_maps, fixup_map))
......@@ -615,7 +614,6 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
}
}
ret = 0;
done:
close(fd);
return ret;
......
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# In Namespace 0 (at_ns0) using native tunnel
# Overlay IP: 10.1.1.100
# local 192.16.1.100 remote 192.16.1.200
# veth0 IP: 172.16.1.100, tunnel dev <type>00
# Out of Namespace using BPF set/get on lwtunnel
# Overlay IP: 10.1.1.200
# local 172.16.1.200 remote 172.16.1.100
# veth1 IP: 172.16.1.200, tunnel dev <type>11
function config_device {
ip netns add at_ns0
ip link add veth0 type veth peer name veth1
ip link set veth0 netns at_ns0
ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
ip netns exec at_ns0 ip link set dev veth0 up
ip link set dev veth1 up mtu 1500
ip addr add dev veth1 172.16.1.200/24
}
function add_gre_tunnel {
# in namespace
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE seq key 2 \
local 172.16.1.100 remote 172.16.1.200
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
# out of namespace
ip link add dev $DEV type $TYPE key 2 external
ip link set dev $DEV up
ip addr add dev $DEV 10.1.1.200/24
}
function add_ip6gretap_tunnel {
# assign ipv6 address
ip netns exec at_ns0 ip addr add ::11/96 dev veth0
ip netns exec at_ns0 ip link set dev veth0 up
ip addr add dev veth1 ::22/96
ip link set dev veth1 up
# in namespace
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE seq flowlabel 0xbcdef key 2 \
local ::11 remote ::22
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns0 ip addr add dev $DEV_NS fc80::100/96
ip netns exec at_ns0 ip link set dev $DEV_NS up
# out of namespace
ip link add dev $DEV type $TYPE external
ip addr add dev $DEV 10.1.1.200/24
ip addr add dev $DEV fc80::200/24
ip link set dev $DEV up
}
function add_erspan_tunnel {
# in namespace
if [ "$1" == "v1" ]; then
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE seq key 2 \
local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 123
else
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE seq key 2 \
local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir egress erspan_hwid 3
fi
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
# out of namespace
ip link add dev $DEV type $TYPE external
ip link set dev $DEV up
ip addr add dev $DEV 10.1.1.200/24
}
function add_ip6erspan_tunnel {
# assign ipv6 address
ip netns exec at_ns0 ip addr add ::11/96 dev veth0
ip netns exec at_ns0 ip link set dev veth0 up
ip addr add dev veth1 ::22/96
ip link set dev veth1 up
# in namespace
if [ "$1" == "v1" ]; then
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE seq key 2 \
local ::11 remote ::22 \
erspan_ver 1 erspan 123
else
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE seq key 2 \
local ::11 remote ::22 \
erspan_ver 2 erspan_dir egress erspan_hwid 7
fi
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns0 ip link set dev $DEV_NS up
# out of namespace
ip link add dev $DEV type $TYPE external
ip addr add dev $DEV 10.1.1.200/24
ip link set dev $DEV up
}
function add_vxlan_tunnel {
# Set static ARP entry here because iptables set-mark works
# on L3 packet, as a result not applying to ARP packets,
# causing errors at get_tunnel_{key/opt}.
# in namespace
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE id 2 dstport 4789 gbp remote 172.16.1.200
ip netns exec at_ns0 ip link set dev $DEV_NS address 52:54:00:d9:01:00 up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns0 arp -s 10.1.1.200 52:54:00:d9:02:00
ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF
# out of namespace
ip link add dev $DEV type $TYPE external gbp dstport 4789
ip link set dev $DEV address 52:54:00:d9:02:00 up
ip addr add dev $DEV 10.1.1.200/24
arp -s 10.1.1.100 52:54:00:d9:01:00
}
function add_geneve_tunnel {
# in namespace
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE id 2 dstport 6081 remote 172.16.1.200
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
# out of namespace
ip link add dev $DEV type $TYPE dstport 6081 external
ip link set dev $DEV up
ip addr add dev $DEV 10.1.1.200/24
}
function add_ipip_tunnel {
# in namespace
ip netns exec at_ns0 \
ip link add dev $DEV_NS type $TYPE local 172.16.1.100 remote 172.16.1.200
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
# out of namespace
ip link add dev $DEV type $TYPE external
ip link set dev $DEV up
ip addr add dev $DEV 10.1.1.200/24
}
function attach_bpf {
DEV=$1
SET_TUNNEL=$2
GET_TUNNEL=$3
tc qdisc add dev $DEV clsact
tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
}
function test_gre {
TYPE=gretap
DEV_NS=gretap00
DEV=gretap11
config_device
add_gre_tunnel
attach_bpf $DEV gre_set_tunnel gre_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
cleanup
}
function test_ip6gre {
TYPE=ip6gre
DEV_NS=ip6gre00
DEV=ip6gre11
config_device
# reuse the ip6gretap function
add_ip6gretap_tunnel
attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
# underlay
ping6 -c 4 ::11
# overlay: ipv4 over ipv6
ip netns exec at_ns0 ping -c 1 10.1.1.200
ping -c 1 10.1.1.100
# overlay: ipv6 over ipv6
ip netns exec at_ns0 ping6 -c 1 fc80::200
cleanup
}
function test_ip6gretap {
TYPE=ip6gretap
DEV_NS=ip6gretap00
DEV=ip6gretap11
config_device
add_ip6gretap_tunnel
attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
# underlay
ping6 -c 4 ::11
# overlay: ipv4 over ipv6
ip netns exec at_ns0 ping -i .2 -c 1 10.1.1.200
ping -c 1 10.1.1.100
# overlay: ipv6 over ipv6
ip netns exec at_ns0 ping6 -c 1 fc80::200
cleanup
}
function test_erspan {
TYPE=erspan
DEV_NS=erspan00
DEV=erspan11
config_device
add_erspan_tunnel $1
attach_bpf $DEV erspan_set_tunnel erspan_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
cleanup
}
function test_ip6erspan {
TYPE=ip6erspan
DEV_NS=ip6erspan00
DEV=ip6erspan11
config_device
add_ip6erspan_tunnel $1
attach_bpf $DEV ip4ip6erspan_set_tunnel ip4ip6erspan_get_tunnel
ping6 -c 3 ::11
ip netns exec at_ns0 ping -c 1 10.1.1.200
cleanup
}
function test_vxlan {
TYPE=vxlan
DEV_NS=vxlan00
DEV=vxlan11
config_device
add_vxlan_tunnel
attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
cleanup
}
function test_geneve {
TYPE=geneve
DEV_NS=geneve00
DEV=geneve11
config_device
add_geneve_tunnel
attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
cleanup
}
function test_ipip {
TYPE=ipip
DEV_NS=ipip00
DEV=ipip11
config_device
tcpdump -nei veth1 &
cat /sys/kernel/debug/tracing/trace_pipe &
add_ipip_tunnel
ethtool -K veth1 gso off gro off rx off tx off
ip link set dev veth1 mtu 1500
attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
sleep 0.2
iperf -c 10.1.1.100 -n 5k -p 5200
cleanup
}
function cleanup {
set +ex
pkill iperf
ip netns delete at_ns0
ip link del veth1
ip link del ipip11
ip link del gretap11
ip link del ip6gre11
ip link del ip6gretap11
ip link del vxlan11
ip link del geneve11
ip link del erspan11
ip link del ip6erspan11
pkill tcpdump
pkill cat
set -ex
}
trap cleanup 0 2 3 6 9
cleanup
echo "Testing GRE tunnel..."
test_gre
echo "Testing IP6GRE tunnel..."
test_ip6gre
echo "Testing IP6GRETAP tunnel..."
test_ip6gretap
echo "Testing ERSPAN tunnel..."
test_erspan v1
test_erspan v2
echo "Testing IP6ERSPAN tunnel..."
test_ip6erspan v1
test_ip6erspan v2
echo "Testing VXLAN tunnel..."
test_vxlan
echo "Testing GENEVE tunnel..."
test_geneve
echo "Testing IPIP tunnel..."
test_ipip
echo "*** PASS ***"
# List of programs to build
hostprogs-y := sockmap
# Libbpf dependencies
LIBBPF := ../../tools/lib/bpf/bpf.o ../../tools/lib/bpf/nlattr.o
HOSTCFLAGS += -I$(objtree)/usr/include
HOSTCFLAGS += -I$(srctree)/tools/lib/
HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
HOSTCFLAGS += -I$(srctree)/tools/perf
sockmap-objs := ../bpf/bpf_load.o $(LIBBPF) sockmap_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
always += sockmap_kern.o
HOSTLOADLIBES_sockmap += -lelf -lpthread
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
# Trick to allow make to be run from this directory
all:
$(MAKE) -C ../../ $(CURDIR)/
clean:
$(MAKE) -C ../../ M=$(CURDIR) clean
@rm -f *~
$(obj)/syscall_nrs.s: $(src)/syscall_nrs.c
$(call if_changed_dep,cc_s_c)
$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
$(call filechk,offsets,__SYSCALL_NRS_H__)
clean-files += syscall_nrs.h
FORCE:
# Verify LLVM compiler tools are available and bpf target is supported by llc
.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
verify_cmds: $(CLANG) $(LLC)
@for TOOL in $^ ; do \
if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \
echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\
exit 1; \
else true; fi; \
done
verify_target_bpf: verify_cmds
@if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \
echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\
echo " NOTICE: LLVM version >= 3.7.1 required" ;\
exit 2; \
else true; fi
$(src)/*.c: verify_target_bpf
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
#
# -target bpf option required with SK_MSG programs, this is to ensure
# reading 'void *' data types for data and data_end are __u64 reads.
$(obj)/%.o: $(src)/%.c
$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option -O2 -target bpf \
-emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
......@@ -95,7 +95,7 @@ EXAMPLES
**# bpftool prog show**
::
10: xdp name some_prog tag 005a3d2123620c8b
10: xdp name some_prog tag 005a3d2123620c8b gpl
loaded_at Sep 29/20:11 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
......@@ -108,6 +108,7 @@ EXAMPLES
"id": 10,
"type": "xdp",
"tag": "005a3d2123620c8b",
"gpl_compatible": true,
"loaded_at": "Sep 29/20:11",
"uid": 0,
"bytes_xlated": 528,
......
......@@ -235,6 +235,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
if (info->load_time) {
......@@ -295,6 +297,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("%s", info->gpl_compatible ? " gpl" : "");
printf("\n");
if (info->load_time) {
......
......@@ -6,9 +6,7 @@
#include <linux/types.h>
#define BTF_MAGIC 0xeB9F
#define BTF_MAGIC_SWAP 0x9FeB
#define BTF_VERSION 1
#define BTF_FLAGS_COMPR 0x01
struct btf_header {
__u16 magic;
......@@ -43,7 +41,7 @@ struct btf_header {
#define BTF_STR_OFFSET(ref) ((ref) & BTF_MAX_NAME_OFFSET)
struct btf_type {
__u32 name;
__u32 name_off;
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
......@@ -105,7 +103,7 @@ struct btf_type {
* info in "struct btf_type").
*/
struct btf_enum {
__u32 name;
__u32 name_off;
__s32 val;
};
......@@ -122,7 +120,7 @@ struct btf_array {
* "struct btf_type").
*/
struct btf_member {
__u32 name;
__u32 name_off;
__u32 type;
__u32 offset; /* offset in bits */
};
......
......@@ -281,7 +281,7 @@ int32_t btf__find_by_name(const struct btf *btf, const char *type_name)
for (i = 1; i <= btf->nr_types; i++) {
const struct btf_type *t = btf->types[i];
const char *name = btf_name_by_offset(btf, t->name);
const char *name = btf_name_by_offset(btf, t->name_off);
if (name && !strcmp(type_name, name))
return i;
......
......@@ -1961,7 +1961,7 @@ BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
static void bpf_program__set_expected_attach_type(struct bpf_program *prog,
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
prog->expected_attach_type = type;
......
......@@ -193,6 +193,8 @@ int bpf_program__set_sched_act(struct bpf_program *prog);
int bpf_program__set_xdp(struct bpf_program *prog);
int bpf_program__set_perf_event(struct bpf_program *prog);
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type);
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
bool bpf_program__is_socket_filter(struct bpf_program *prog);
bool bpf_program__is_tracepoint(struct bpf_program *prog);
......
......@@ -15,3 +15,4 @@ test_libbpf_open
test_sock
test_sock_addr
urandom_read
test_btf
......@@ -24,7 +24,7 @@ urandom_read: urandom_read.c
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_sock_addr test_btf
test_sock test_btf test_sockmap
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
......@@ -32,7 +32,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
test_btf_haskv.o test_btf_nokv.o
test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
......@@ -40,10 +40,11 @@ TEST_PROGS := test_kmod.sh \
test_xdp_redirect.sh \
test_xdp_meta.sh \
test_offload.py \
test_sock_addr.sh
test_sock_addr.sh \
test_tunnel.sh
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr
include ../lib.mk
......@@ -56,6 +57,7 @@ $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
.PHONY: force
......
......@@ -98,7 +98,9 @@ static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
(void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
int size, int flags) =
(void *) BPF_FUNC_skb_get_xfrm_state;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
......
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include "../../tools/testing/selftests/bpf/bpf_helpers.h"
#include "../../tools/testing/selftests/bpf/bpf_endian.h"
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018 Covalent IO, Inc. http://covalent.io
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
/* Sockmap sample program connects a client and a backend together
* using cgroups.
......@@ -337,5 +336,5 @@ int bpf_prog10(struct sk_msg_md *msg)
return SK_DROP;
}
int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";