Commit 437a4db6 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-02-09

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Two fixes for BPF sockmap in order to break up circular map references
   from programs attached to a sockmap, and to detach related sockets on a
   socket close() event. For the latter we get rid of smap_state_change()
   and plug into the ULP infrastructure, which will later also be used for
   additional features anyway such as TX hooks. For the former, the
   dependency chain is broken up via a map release callback that frees the
   parse/verdict programs, all from John.

2) Fix a libbpf relocation issue that was found while implementing XDP
   support for the Suricata project. The issue was that when clang was
   invoked with the default target instead of the bpf target, various
   additional sections (e.g. debugging-related ones) were added to the
   ELF file, containing relocation entries that point to non-BPF sections,
   which libbpf then trips over instead of skipping. Test cases for libbpf
   are added as well, from Jesper.

3) Various misc fixes for bpftool and one for libbpf: a small addition
   to libbpf to make sure it recognizes all standard section prefixes.
   Then, the Makefile in bpftool/Documentation is improved to explicitly
   check that rst2man is installed on the system, as we otherwise risk
   installing empty man pages; the man page for bpftool-map is corrected,
   and a set of missing bash completions is added in order to avoid
   shipping bpftool with completions that only partially work, from Quentin.

4) Fix the application of relocations to immediate load instructions in
   the nfp JIT, which were missing a shift, from Jakub.

5) Two fixes for the BPF kernel selftests: handle CONFIG_BPF_JIT_ALWAYS_ON=y
   gracefully in the test_bpf.ko module and mark the affected tests as
   FLAG_EXPECTED_FAIL in that case; and explicitly delete the veth devices
   in the two tests test_xdp_{meta,redirect}.sh before dismantling the
   netnses, since when selftests are run in batch mode the workqueue
   handling destruction might not have finished yet, and veth creation
   under the same dev name in the next test would then fail, from Yonghong.

6) Fix test_kmod.sh to check the test_bpf.ko module path before performing
   an insmod, and fall back to modprobe. The latter is especially useful
   when the device under test has the modules installed instead, from Naresh.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 08f51385 d977ae59
......@@ -107,7 +107,7 @@ u16 immed_get_value(u64 instr)
if (!unreg_is_imm(reg))
reg = FIELD_GET(OP_IMMED_B_SRC, instr);
-return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
+return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8;
}
void immed_set_value(u64 *instr, u16 immed)
......
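The hunk above is fix (4): the nfp immed instruction keeps a 16-bit immediate split across two fields, the low byte in the B-source operand and the high byte in the IMM field, so decoding has to shift the IMM byte back up before OR-ing it in. A minimal stand-alone C sketch of why the missing shift lost the high byte (illustrative names, not the driver code):

#include <assert.h>
#include <stdint.h>

/* b_src carries the low byte, imm the high byte of the immediate */
static uint16_t immed_value(uint8_t b_src, uint8_t imm)
{
	/* without the << 8, the high byte would be OR-ed into the low byte */
	return (uint16_t)b_src | ((uint16_t)imm << 8);
}

int main(void)
{
	assert(immed_value(0x34, 0x12) == 0x1234);
	return 0;
}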
......@@ -1983,6 +1983,11 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
#define TCP_ULP_MAX 128
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
enum {
TCP_ULP_TLS,
TCP_ULP_BPF,
};
struct tcp_ulp_ops {
struct list_head list;
......@@ -1991,12 +1996,15 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
int uid;
char name[TCP_ULP_NAME_MAX];
bool user_visible;
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
......
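One way to read the new interface split (a sketch of the intent, not part of the diff): userspace can only select user_visible ULPs by name through the TCP_ULP socket option, while kernel users such as sockmap attach by id via tcp_set_ulp_id(), which skips the visibility check. The userspace side for the "tls" ULP looks roughly like this:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* from linux/tcp.h, for older userspace headers */
#endif

static int enable_tls_ulp(int fd)
{
	/* kernel-only ULPs such as "bpf_tcp" fail here with ENOENT */
	return setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
}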
......@@ -86,9 +86,10 @@ struct smap_psock {
struct work_struct tx_work;
struct work_struct gc_work;
struct proto *sk_proto;
+void (*save_close)(struct sock *sk, long timeout);
void (*save_data_ready)(struct sock *sk);
void (*save_write_space)(struct sock *sk);
-void (*save_state_change)(struct sock *sk);
};
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
......@@ -96,12 +97,102 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
static struct proto tcp_bpf_proto;
static int bpf_tcp_init(struct sock *sk)
{
struct smap_psock *psock;
rcu_read_lock();
psock = smap_psock_sk(sk);
if (unlikely(!psock)) {
rcu_read_unlock();
return -EINVAL;
}
if (unlikely(psock->sk_proto)) {
rcu_read_unlock();
return -EBUSY;
}
psock->save_close = sk->sk_prot->close;
psock->sk_proto = sk->sk_prot;
sk->sk_prot = &tcp_bpf_proto;
rcu_read_unlock();
return 0;
}
static void bpf_tcp_release(struct sock *sk)
{
struct smap_psock *psock;
rcu_read_lock();
psock = smap_psock_sk(sk);
if (likely(psock)) {
sk->sk_prot = psock->sk_proto;
psock->sk_proto = NULL;
}
rcu_read_unlock();
}
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static void bpf_tcp_close(struct sock *sk, long timeout)
{
void (*close_fun)(struct sock *sk, long timeout);
struct smap_psock_map_entry *e, *tmp;
struct smap_psock *psock;
struct sock *osk;
rcu_read_lock();
psock = smap_psock_sk(sk);
if (unlikely(!psock)) {
rcu_read_unlock();
return sk->sk_prot->close(sk, timeout);
}
/* The psock may be destroyed anytime after exiting the RCU critical
* section so by the time we use close_fun the psock may no longer
* be valid. However, bpf_tcp_close is called with the sock lock
* held so the close hook and sk are still valid.
*/
close_fun = psock->save_close;
write_lock_bh(&sk->sk_callback_lock);
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
osk = cmpxchg(e->entry, sk, NULL);
if (osk == sk) {
list_del(&e->list);
smap_release_sock(psock, sk);
}
}
write_unlock_bh(&sk->sk_callback_lock);
rcu_read_unlock();
close_fun(sk, timeout);
}
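The cmpxchg(e->entry, sk, NULL) above claims each map slot only if it still points at the closing socket, so a racing map update or delete cannot trigger a double release. A user-space C11 sketch of the same idiom (illustrative types, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct sock { int id; };

/* NULL the slot and return 1 only if it still holds sk */
static int claim_entry(_Atomic(struct sock *) *entry, struct sock *sk)
{
	struct sock *expected = sk;

	return atomic_compare_exchange_strong(entry, &expected, NULL);
}

int main(void)
{
	struct sock sk = { 1 };
	_Atomic(struct sock *) slot = &sk;

	printf("first claim: %d\n", claim_entry(&slot, &sk));	/* 1 */
	printf("second claim: %d\n", claim_entry(&slot, &sk));	/* 0 */
	return 0;
}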
enum __sk_action {
__SK_DROP = 0,
__SK_PASS,
__SK_REDIRECT,
};
static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
.name = "bpf_tcp",
.uid = TCP_ULP_BPF,
.user_visible = false,
.owner = NULL,
.init = bpf_tcp_init,
.release = bpf_tcp_release,
};
static int bpf_tcp_ulp_register(void)
{
tcp_bpf_proto = tcp_prot;
tcp_bpf_proto.close = bpf_tcp_close;
return tcp_register_ulp(&bpf_tcp_ulp_ops);
}
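bpf_tcp_ulp_register() and bpf_tcp_init() together perform a proto swap: copy the original ops table once, override only .close, and save the original pointer so bpf_tcp_close() can restore the socket and chain to the real close at the end. A stand-alone sketch of the pattern (simplified types, not the kernel code):

#include <stdio.h>

struct proto { void (*close)(void); };

static void tcp_close(void) { puts("original tcp close"); }

static struct proto tcp_prot = { .close = tcp_close };
static struct proto tcp_bpf_proto;	/* tcp_prot with .close overridden */
static void (*save_close)(void);	/* per-socket in the real code */

static void bpf_close(void)
{
	puts("detach sock from maps");	/* what bpf_tcp_close() does */
	save_close();			/* then chain to the original */
}

int main(void)
{
	tcp_bpf_proto = tcp_prot;	/* bpf_tcp_ulp_register() */
	tcp_bpf_proto.close = bpf_close;

	save_close = tcp_prot.close;	/* bpf_tcp_init() */
	struct proto *sk_prot = &tcp_bpf_proto;

	sk_prot->close();		/* close() now hits the BPF hook */
	return 0;
}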
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
......@@ -166,68 +257,6 @@ static void smap_report_sk_error(struct smap_psock *psock, int err)
sk->sk_error_report(sk);
}
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-/* Called with lock_sock(sk) held */
-static void smap_state_change(struct sock *sk)
-{
-struct smap_psock_map_entry *e, *tmp;
-struct smap_psock *psock;
-struct socket_wq *wq;
-struct sock *osk;
-rcu_read_lock();
-/* Allowing transitions into the established and syn_recv states allows
- * for early binding of sockets to a smap object before the connection
- * is established.
- */
-switch (sk->sk_state) {
-case TCP_SYN_SENT:
-case TCP_SYN_RECV:
-case TCP_ESTABLISHED:
-break;
-case TCP_CLOSE_WAIT:
-case TCP_CLOSING:
-case TCP_LAST_ACK:
-case TCP_FIN_WAIT1:
-case TCP_FIN_WAIT2:
-case TCP_LISTEN:
-break;
-case TCP_CLOSE:
-/* Only release if the map entry is in fact the sock in
- * question. There is a case where the operator deletes
- * the sock from the map, but the TCP sock is closed before
- * the psock is detached. Use cmpxchg to verify the correct
- * sock is removed.
- */
-psock = smap_psock_sk(sk);
-if (unlikely(!psock))
-break;
-write_lock_bh(&sk->sk_callback_lock);
-list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-osk = cmpxchg(e->entry, sk, NULL);
-if (osk == sk) {
-list_del(&e->list);
-smap_release_sock(psock, sk);
-}
-}
-write_unlock_bh(&sk->sk_callback_lock);
-break;
-default:
-psock = smap_psock_sk(sk);
-if (unlikely(!psock))
-break;
-smap_report_sk_error(psock, EPIPE);
-break;
-}
-wq = rcu_dereference(sk->sk_wq);
-if (skwq_has_sleeper(wq))
-wake_up_interruptible_all(&wq->wait);
-rcu_read_unlock();
-}
static void smap_read_sock_strparser(struct strparser *strp,
struct sk_buff *skb)
{
......@@ -322,10 +351,8 @@ static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
return;
sk->sk_data_ready = psock->save_data_ready;
sk->sk_write_space = psock->save_write_space;
-sk->sk_state_change = psock->save_state_change;
psock->save_data_ready = NULL;
psock->save_write_space = NULL;
-psock->save_state_change = NULL;
strp_stop(&psock->strp);
psock->strp_enabled = false;
}
......@@ -350,6 +377,7 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
if (psock->refcnt)
return;
tcp_cleanup_ulp(sock);
smap_stop_sock(psock, sock);
clear_bit(SMAP_TX_RUNNING, &psock->state);
rcu_assign_sk_user_data(sock, NULL);
......@@ -427,10 +455,8 @@ static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
return;
psock->save_data_ready = sk->sk_data_ready;
psock->save_write_space = sk->sk_write_space;
-psock->save_state_change = sk->sk_state_change;
sk->sk_data_ready = smap_data_ready;
sk->sk_write_space = smap_write_space;
-sk->sk_state_change = smap_state_change;
psock->strp_enabled = true;
}
......@@ -509,6 +535,10 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (attr->value_size > KMALLOC_MAX_SIZE)
return ERR_PTR(-E2BIG);
err = bpf_tcp_ulp_register();
if (err && err != -EEXIST)
return ERR_PTR(err);
stab = kzalloc(sizeof(*stab), GFP_USER);
if (!stab)
return ERR_PTR(-ENOMEM);
......@@ -590,11 +620,6 @@ static void sock_map_free(struct bpf_map *map)
}
rcu_read_unlock();
-if (stab->bpf_verdict)
-bpf_prog_put(stab->bpf_verdict);
-if (stab->bpf_parse)
-bpf_prog_put(stab->bpf_parse);
sock_map_remove_complete(stab);
}
......@@ -754,6 +779,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
goto out_progs;
}
err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
if (err)
goto out_progs;
set_bit(SMAP_TX_RUNNING, &psock->state);
}
......@@ -866,6 +895,19 @@ static int sock_map_update_elem(struct bpf_map *map,
return err;
}
static void sock_map_release(struct bpf_map *map, struct file *map_file)
{
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
struct bpf_prog *orig;
orig = xchg(&stab->bpf_parse, NULL);
if (orig)
bpf_prog_put(orig);
orig = xchg(&stab->bpf_verdict, NULL);
if (orig)
bpf_prog_put(orig);
}
const struct bpf_map_ops sock_map_ops = {
.map_alloc = sock_map_alloc,
.map_free = sock_map_free,
......@@ -873,6 +915,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_get_next_key = sock_map_get_next_key,
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
.map_release = sock_map_release,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
......
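sock_map_release() above runs via the new map_release callback when a file reference to the map is dropped, and uses xchg() so exactly one closer releases the parse/verdict program references; this is what breaks the prog->map->prog reference cycle from the changelog. A user-space C11 sketch of the release idiom (illustrative, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bpf_prog { int refcnt; };

static void prog_put(struct bpf_prog *prog)
{
	if (--prog->refcnt == 0) {
		puts("prog freed");
		free(prog);
	}
}

static void map_release(_Atomic(struct bpf_prog *) *slot)
{
	struct bpf_prog *orig = atomic_exchange(slot, NULL);

	if (orig)	/* non-NULL means this caller owned the reference */
		prog_put(orig);
}

int main(void)
{
	struct bpf_prog *verdict = calloc(1, sizeof(*verdict));
	_Atomic(struct bpf_prog *) bpf_verdict;

	verdict->refcnt = 1;
	atomic_store(&bpf_verdict, verdict);
	map_release(&bpf_verdict);	/* drops the reference */
	map_release(&bpf_verdict);	/* safe no-op on a second close */
	return 0;
}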
......@@ -83,6 +83,7 @@ struct bpf_test {
__u32 result;
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
__u8 frag_data[MAX_DATA];
int stack_depth; /* for eBPF only, since tests don't call verifier */
};
......@@ -2026,7 +2027,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
-{ }
+{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: div_k_0",
......@@ -2036,7 +2039,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
-{ }
+{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: unknown insn",
......@@ -2047,7 +2052,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_EXPECTED_FAIL,
{ },
-{ }
+{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: out of range spill/fill",
......@@ -2057,7 +2064,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
-{ }
+{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"JUMPS + HOLES",
......@@ -2149,6 +2158,8 @@ static struct bpf_test tests[] = {
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: LDX + RET X",
......@@ -2159,6 +2170,8 @@ static struct bpf_test tests[] = {
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{ /* Mainly checking JIT here. */
"M[]: alt STX + LDX",
......@@ -2333,6 +2346,8 @@ static struct bpf_test tests[] = {
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{ /* Passes checker but fails during runtime. */
"LD [SKF_AD_OFF-1]",
......@@ -5395,6 +5410,7 @@ static struct bpf_test tests[] = {
{ },
{ },
.fill_helper = bpf_fill_maxinsns4,
.expected_errcode = -EINVAL,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Very long jump",
......@@ -5450,10 +5466,15 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
#else
CLASSIC | FLAG_NO_DATA,
#endif
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
.expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
......@@ -6344,7 +6365,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
*err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
-if (*err == -EINVAL) {
+if (*err == tests[which].expected_errcode) {
pr_cont("PASS\n");
/* Verifier rejected filter as expected. */
*err = 0;
......
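The generate_filter() change above lets every expected failure carry its own error code instead of the hard-coded -EINVAL: with CONFIG_BPF_JIT_ALWAYS_ON=y a classic filter that cannot be JITed is rejected with -ENOTSUPP instead of falling back to the interpreter. A small stand-alone sketch of the comparison (illustrative, not the module code):

#include <errno.h>
#include <stdio.h>

#define ENOTSUPP 524	/* kernel-internal value, not in userspace errno.h */

struct bpf_case { const char *name; int expected_errcode; };

static void report(const struct bpf_case *c, int err)
{
	if (err == c->expected_errcode)
		printf("%s: PASS\n", c->name);	/* rejected as expected */
	else
		printf("%s: FAIL (got %d, want %d)\n",
		       c->name, err, c->expected_errcode);
}

int main(void)
{
	struct bpf_case verifier = { "check: div_k_0", -EINVAL };
	struct bpf_case jit = { "BPF_MAXINSNS: jump gaps", -ENOTSUPP };

	report(&verifier, -EINVAL);	/* checker rejection */
	report(&jit, -ENOTSUPP);	/* JIT-only rejection */
	return 0;
}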
......@@ -29,6 +29,18 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
return NULL;
}
static struct tcp_ulp_ops *tcp_ulp_find_id(const int ulp)
{
struct tcp_ulp_ops *e;
list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
if (e->uid == ulp)
return e;
}
return NULL;
}
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
const struct tcp_ulp_ops *ulp = NULL;
......@@ -51,6 +63,18 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
return ulp;
}
static const struct tcp_ulp_ops *__tcp_ulp_lookup(const int uid)
{
const struct tcp_ulp_ops *ulp;
rcu_read_lock();
ulp = tcp_ulp_find_id(uid);
if (!ulp || !try_module_get(ulp->owner))
ulp = NULL;
rcu_read_unlock();
return ulp;
}
/* Attach new upper layer protocol to the list
* of available protocols.
*/
......@@ -59,13 +83,10 @@ int tcp_register_ulp(struct tcp_ulp_ops *ulp)
int ret = 0;
spin_lock(&tcp_ulp_list_lock);
-if (tcp_ulp_find(ulp->name)) {
-pr_notice("%s already registered or non-unique name\n",
-ulp->name);
+if (tcp_ulp_find(ulp->name))
ret = -EEXIST;
-} else {
+else
list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
-}
spin_unlock(&tcp_ulp_list_lock);
return ret;
......@@ -124,6 +145,34 @@ int tcp_set_ulp(struct sock *sk, const char *name)
if (!ulp_ops)
return -ENOENT;
if (!ulp_ops->user_visible) {
module_put(ulp_ops->owner);
return -ENOENT;
}
err = ulp_ops->init(sk);
if (err) {
module_put(ulp_ops->owner);
return err;
}
icsk->icsk_ulp_ops = ulp_ops;
return 0;
}
int tcp_set_ulp_id(struct sock *sk, int ulp)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_ulp_ops *ulp_ops;
int err;
if (icsk->icsk_ulp_ops)
return -EEXIST;
ulp_ops = __tcp_ulp_lookup(ulp);
if (!ulp_ops)
return -ENOENT;
err = ulp_ops->init(sk);
if (err) {
module_put(ulp_ops->owner);
......
......@@ -484,6 +484,8 @@ static int tls_init(struct sock *sk)
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
.uid = TCP_ULP_TLS,
.user_visible = true,
.owner = THIS_MODULE,
.init = tls_init,
};
......
......@@ -23,7 +23,12 @@ DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
man: man8
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
$(OUTPUT)%.8: %.rst
ifndef RST2MAN_DEP
$(error "rst2man not found, but required to generate man pages")
endif
$(QUIET_GEN)rst2man $< > $@
clean:
......
......@@ -26,8 +26,8 @@ MAP COMMANDS
| **bpftool** **cgroup help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
-| *ATTACH_TYPE* := { *ingress* | *egress* | *sock_create* | *sock_ops* | *device* }
-| *ATTACH_FLAGS* := { *multi* | *override* }
+| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** }
+| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
===========
......
......@@ -31,7 +31,8 @@ MAP COMMANDS
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
-| *VALUE* := { *BYTES* | *MAP* | *PROGRAM* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *VALUE* := { *BYTES* | *MAP* | *PROG* }
| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** }
DESCRIPTION
......
......@@ -52,16 +52,24 @@ _bpftool_once_attr()
done
}
-# Takes a list of words in argument; adds them all to COMPREPLY if none of them
-# is already present on the command line. Returns no value.
-_bpftool_one_of_list()
+# Takes a list of words as argument; if any of those words is present on the
+# command line, return 0. Otherwise, return 1.
+_bpftool_search_list()
{
local w idx
for w in $*; do
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
-[[ $w == ${words[idx]} ]] && return 1
+[[ $w == ${words[idx]} ]] && return 0
done
done
return 1
}
# Takes a list of words as argument; adds them all to COMPREPLY if none of them
# is already present on the command line. Returns no value.
_bpftool_one_of_list()
{
_bpftool_search_list $* && return 1
COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
}
......@@ -230,10 +238,14 @@ _bpftool()
fi
return 0
;;
load)
_filedir
return 0
;;
*)
[[ $prev == $object ]] && \
-COMPREPLY=( $( compgen -W 'dump help pin show list' -- \
-"$cur" ) )
+COMPREPLY=( $( compgen -W 'dump help pin load \
+show list' -- "$cur" ) )
;;
esac
;;
......@@ -347,6 +359,54 @@ _bpftool()
;;
esac
;;
cgroup)
case $command in
show|list)
_filedir
return 0
;;
attach|detach)
local ATTACH_TYPES='ingress egress sock_create sock_ops \
device'
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag'
case $prev in
$command)
_filedir
return 0
;;
ingress|egress|sock_create|sock_ops|device)
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
"$cur" ) )
return 0
;;
id)
_bpftool_get_prog_ids
return 0
;;
*)
if ! _bpftool_search_list "$ATTACH_TYPES"; then
COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- \
"$cur" ) )
elif [[ "$command" == "attach" ]]; then
# We have an attach type on the command line,
# but it is not the previous word, or
# "id|pinned|tag" (we already checked for
# that). This should only leave the case when
# we need attach flags for "attach" command.
_bpftool_one_of_list "$ATTACH_FLAGS"
fi
return 0
;;
esac
;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'help attach detach \
show list' -- "$cur" ) )
;;
esac
;;
esac
} &&
complete -F _bpftool bpftool
......
......@@ -15,9 +15,10 @@
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
......
......@@ -319,8 +319,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
prog->section_name = strdup(section_name);
if (!prog->section_name) {
pr_warning("failed to alloc name for prog under section %s\n",
section_name);
pr_warning("failed to alloc name for prog under section(%d) %s\n",
idx, section_name);
goto errout;
}
......@@ -742,6 +742,24 @@ bpf_object__init_maps(struct bpf_object *obj)
return 0;
}
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
Elf_Scn *scn;
GElf_Shdr sh;
scn = elf_getscn(obj->efile.elf, idx);
if (!scn)
return false;
if (gelf_getshdr(scn, &sh) != &sh)
return false;
if (sh.sh_flags & SHF_EXECINSTR)
return true;
return false;
}
static int bpf_object__elf_collect(struct bpf_object *obj)
{
Elf *elf = obj->efile.elf;
......@@ -763,29 +781,29 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
pr_warning("failed to get section header from %s\n",
obj->path);
pr_warning("failed to get section(%d) header from %s\n",
idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!name) {
pr_warning("failed to get section name from %s\n",
obj->path);
pr_warning("failed to get section(%d) name from %s\n",
idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
data = elf_getdata(scn, 0);
if (!data) {
pr_warning("failed to get section data from %s(%s)\n",
name, obj->path);
pr_warning("failed to get section(%d) data from %s(%s)\n",
idx, name, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
name, (unsigned long)data->d_size,
pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size,
(int)sh.sh_link, (unsigned long)sh.sh_flags,
(int)sh.sh_type);
......@@ -825,6 +843,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (sh.sh_type == SHT_REL) {
void *reloc = obj->efile.reloc;
int nr_reloc = obj->efile.nr_reloc + 1;
int sec = sh.sh_info; /* points to other section */
/* Only do relo for section with exec instructions */
if (!section_have_execinstr(obj, sec)) {
pr_debug("skip relo %s(%d) for section(%d)\n",
name, idx, sec);
continue;
}
reloc = realloc(reloc,
sizeof(*obj->efile.reloc) * nr_reloc);
......@@ -840,6 +866,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
obj->efile.reloc[n].shdr = sh;
obj->efile.reloc[n].data = data;
}
} else {
pr_debug("skip section(%d) %s\n", idx, name);
}
if (err)
goto out;
......@@ -1119,8 +1147,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
prog = bpf_object__find_prog_by_idx(obj, idx);
if (!prog) {
pr_warning("relocation failed: no %d section\n",
idx);
pr_warning("relocation failed: no section(%d)\n", idx);
return -LIBBPF_ERRNO__RELOC;
}
......@@ -1816,12 +1843,17 @@ static const struct {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
};
......
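section_have_execinstr() above is the core of fix (2): a relocation section is only processed when the section it patches carries executable instructions, so relocations against .debug_* and similar sections emitted by a default-target clang build are now skipped. A stand-alone user-space sketch of the same test (libelf, link with -lelf; error handling trimmed):

#include <fcntl.h>
#include <gelf.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static bool section_is_executable(Elf *elf, size_t idx)
{
	GElf_Shdr sh;
	Elf_Scn *scn = elf_getscn(elf, idx);

	return scn && gelf_getshdr(scn, &sh) == &sh &&
	       (sh.sh_flags & SHF_EXECINSTR);
}

int main(int argc, char **argv)
{
	if (argc < 3)
		return 1;
	elf_version(EV_CURRENT);
	int fd = open(argv[1], O_RDONLY);
	Elf *elf = elf_begin(fd, ELF_C_READ, NULL);

	printf("section %s executable: %d\n", argv[2],
	       section_is_executable(elf, (size_t)atoi(argv[2])));
	elf_end(elf);
	close(fd);
	return 0;
}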
......@@ -13,6 +13,7 @@ endif
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
# Order corresponds to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user
......@@ -22,15 +23,24 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
sample_map_ret0.o test_tcpbpf_kern.o
-TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
# Order corresponds to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
test_libbpf.sh \
test_xdp_redirect.sh \
test_xdp_meta.sh \
test_offload.py
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open
include ../lib.mk
BPFOBJ := $(OUTPUT)/libbpf.a cgroup_helpers.c
$(TEST_GEN_PROGS): $(BPFOBJ)
$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a
.PHONY: force
# force a rebuild of BPFOBJ when its dependencies are updated
......
......@@ -10,9 +10,21 @@ test_run()
echo "[ JIT enabled:$1 hardened:$2 ]"
dmesg -C
-insmod $SRC_TREE/lib/test_bpf.ko 2> /dev/null
-if [ $? -ne 0 ]; then
-rc=1
if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then
insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null
if [ $? -ne 0 ]; then
rc=1
fi
else
# Use modprobe dry run to check for missing test_bpf module
if ! /sbin/modprobe -q -n test_bpf; then
echo "test_bpf: [SKIP]"
elif /sbin/modprobe -q test_bpf; then
echo "test_bpf: ok"
else
echo "test_bpf: [FAIL]"
rc=1
fi
fi
rmmod test_bpf 2> /dev/null
dmesg | grep FAIL
......
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
export TESTNAME=test_libbpf
# Determine selftest success via shell exit code
exit_handler()
{
if (( $? == 0 )); then
echo "selftests: $TESTNAME [PASS]";
else
echo "$TESTNAME: failed at file $LAST_LOADED" 1>&2
echo "selftests: $TESTNAME [FAILED]";
fi
}
libbpf_open_file()
{
LAST_LOADED=$1
if [ -n "$VERBOSE" ]; then
./test_libbpf_open $1
else
./test_libbpf_open --quiet $1
fi
}
# Exit script immediately (well caught by the trap handler) if any
# program/thing exits with a non-zero status.
set -e
# (Use 'trap -l' to list meaning of numbers)
trap exit_handler 0 2 3 6 9
libbpf_open_file test_l4lb.o
# TODO: fix libbpf to load noinline functions
# [warning] libbpf: incorrect bpf_call opcode
#libbpf_open_file test_l4lb_noinline.o
# TODO: fix test_xdp_meta.c to load with libbpf
# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
#libbpf_open_file test_xdp_meta.o
# TODO: fix libbpf to handle .eh_frame
# [warning] libbpf: relocation failed: no section(10)
#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
# Success
exit 0
/* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
*/
static const char *__doc__ =
"Libbpf test program for loading BPF ELF object files";
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <bpf/libbpf.h>
#include <getopt.h>
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
{"debug", no_argument, NULL, 'D' },
{"quiet", no_argument, NULL, 'q' },
{0, 0, NULL, 0 }
};
static void usage(char *argv[])
{
int i;
printf("\nDOCUMENTATION:\n%s\n\n", __doc__);
printf(" Usage: %s (options-see-below) BPF_FILE\n", argv[0]);
printf(" Listing options:\n");
for (i = 0; long_options[i].name != 0; i++) {
printf(" --%-12s", long_options[i].name);
printf(" short-option: -%c",
long_options[i].val);
printf("\n");
}
printf("\n");
}
#define DEFINE_PRINT_FN(name, enabled) \
static int libbpf_##name(const char *fmt, ...) \
{ \
va_list args; \
int ret = 0; \
\
va_start(args, fmt); \
if (enabled) { \
fprintf(stderr, "[" #name "] "); \
ret = vfprintf(stderr, fmt, args); \
} \
va_end(args); \
return ret; \
}
DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)
#define EXIT_FAIL_LIBBPF EXIT_FAILURE
#define EXIT_FAIL_OPTION 2
int test_walk_progs(struct bpf_object *obj, bool verbose)
{
struct bpf_program *prog;
int cnt = 0;
bpf_object__for_each_program(prog, obj) {
cnt++;
if (verbose)
printf("Prog (count:%d) section_name: %s\n", cnt,
bpf_program__title(prog, false));
}
return 0;
}
int test_walk_maps(struct bpf_object *obj, bool verbose)
{
struct bpf_map *map;
int cnt = 0;
bpf_map__for_each(map, obj) {
cnt++;
if (verbose)
printf("Map (count:%d) name: %s\n", cnt,
bpf_map__name(map));
}
return 0;
}
int test_open_file(char *filename, bool verbose)
{
struct bpf_object *bpfobj = NULL;
long err;
if (verbose)
printf("Open BPF ELF-file with libbpf: %s\n", filename);
/* Load BPF ELF object file and check for errors */
bpfobj = bpf_object__open(filename);
err = libbpf_get_error(bpfobj);
if (err) {
char err_buf[128];
libbpf_strerror(err, err_buf, sizeof(err_buf));
if (verbose)
printf("Unable to load eBPF objects in file '%s': %s\n",
filename, err_buf);
return EXIT_FAIL_LIBBPF;
}
test_walk_progs(bpfobj, verbose);
test_walk_maps(bpfobj, verbose);
if (verbose)
printf("Close BPF ELF-file with libbpf: %s\n",
bpf_object__name(bpfobj));
bpf_object__close(bpfobj);
return 0;
}
int main(int argc, char **argv)
{
char filename[1024] = { 0 };
bool verbose = true;
int longindex = 0;
int opt;
libbpf_set_print(libbpf_warning, libbpf_info, NULL);
/* Parse command line args */
while ((opt = getopt_long(argc, argv, "hDq",
long_options, &longindex)) != -1) {
switch (opt) {
case 'D':
libbpf_set_print(libbpf_warning, libbpf_info,
libbpf_debug);
break;
case 'q': /* Use in scripting mode */
verbose = 0;
break;
case 'h':
default:
usage(argv);
return EXIT_FAIL_OPTION;
}
}
if (optind >= argc) {
usage(argv);
printf("ERROR: Expected BPF_FILE argument after options\n");
return EXIT_FAIL_OPTION;
}
snprintf(filename, sizeof(filename), "%s", argv[optind]);
return test_open_file(filename, verbose);
}
......@@ -9,6 +9,7 @@ cleanup()
fi
set +e
ip link del veth1 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
......
......@@ -19,6 +19,8 @@ cleanup()
fi
set +e
ip link del veth1 2> /dev/null
ip link del veth2 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
......