Commit 59929cd1 authored by Daniel T. Lee, committed by Daniel Borkmann

samples, bpf: Refactor kprobe, tail call kern progs map definition

Because the previous two commits replaced the bpf_load implementation of
the user programs with libbpf, the corresponding kernel programs' map
definitions can be replaced with the new BTF-defined map syntax.

This commit only updates the samples that use the libbpf API for loading
bpf programs, not those that use bpf_load.
Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200516040608.1377876-6-danieltimlee@gmail.com
parent 14846dda
...@@ -13,12 +13,12 @@ ...@@ -13,12 +13,12 @@
#define MAX_IPS 8192 #define MAX_IPS 8192
struct bpf_map_def SEC("maps") ip_map = { struct {
.type = BPF_MAP_TYPE_HASH, __uint(type, BPF_MAP_TYPE_HASH);
.key_size = sizeof(u64), __type(key, u64);
.value_size = sizeof(u32), __type(value, u32);
.max_entries = MAX_IPS, __uint(max_entries, MAX_IPS);
}; } ip_map SEC(".maps");
SEC("perf_event") SEC("perf_event")
int do_sample(struct bpf_perf_event_data *ctx) int do_sample(struct bpf_perf_event_data *ctx)
......
...@@ -19,12 +19,12 @@ ...@@ -19,12 +19,12 @@
#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F #define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
struct bpf_map_def SEC("maps") jmp_table = { struct {
.type = BPF_MAP_TYPE_PROG_ARRAY, __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
.key_size = sizeof(u32), __uint(key_size, sizeof(u32));
.value_size = sizeof(u32), __uint(value_size, sizeof(u32));
.max_entries = 8, __uint(max_entries, 8);
}; } jmp_table SEC(".maps");
#define PARSE_VLAN 1 #define PARSE_VLAN 1
#define PARSE_MPLS 2 #define PARSE_MPLS 2
...@@ -92,12 +92,12 @@ struct globals { ...@@ -92,12 +92,12 @@ struct globals {
struct flow_key_record flow; struct flow_key_record flow;
}; };
struct bpf_map_def SEC("maps") percpu_map = { struct {
.type = BPF_MAP_TYPE_ARRAY, __uint(type, BPF_MAP_TYPE_ARRAY);
.key_size = sizeof(__u32), __type(key, __u32);
.value_size = sizeof(struct globals), __type(value, struct globals);
.max_entries = 32, __uint(max_entries, 32);
}; } percpu_map SEC(".maps");
/* user poor man's per_cpu until native support is ready */ /* user poor man's per_cpu until native support is ready */
static struct globals *this_cpu_globals(void) static struct globals *this_cpu_globals(void)
...@@ -113,12 +113,12 @@ struct pair { ...@@ -113,12 +113,12 @@ struct pair {
__u64 bytes; __u64 bytes;
}; };
struct bpf_map_def SEC("maps") hash_map = { struct {
.type = BPF_MAP_TYPE_HASH, __uint(type, BPF_MAP_TYPE_HASH);
.key_size = sizeof(struct flow_key_record), __type(key, struct flow_key_record);
.value_size = sizeof(struct pair), __type(value, struct pair);
.max_entries = 1024, __uint(max_entries, 1024);
}; } hash_map SEC(".maps");
static void update_stats(struct __sk_buff *skb, struct globals *g) static void update_stats(struct __sk_buff *skb, struct globals *g)
{ {
......
...@@ -18,19 +18,19 @@ struct key_t { ...@@ -18,19 +18,19 @@ struct key_t {
u32 userstack; u32 userstack;
}; };
struct bpf_map_def SEC("maps") counts = { struct {
.type = BPF_MAP_TYPE_HASH, __uint(type, BPF_MAP_TYPE_HASH);
.key_size = sizeof(struct key_t), __type(key, struct key_t);
.value_size = sizeof(u64), __type(value, u64);
.max_entries = 10000, __uint(max_entries, 10000);
}; } counts SEC(".maps");
struct bpf_map_def SEC("maps") stackmap = { struct {
.type = BPF_MAP_TYPE_STACK_TRACE, __uint(type, BPF_MAP_TYPE_STACK_TRACE);
.key_size = sizeof(u32), __uint(key_size, sizeof(u32));
.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64), __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
.max_entries = 10000, __uint(max_entries, 10000);
}; } stackmap SEC(".maps");
#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP) #define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK) #define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
......
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
#include <bpf/bpf_tracing.h> #include <bpf/bpf_tracing.h>
#include "trace_common.h" #include "trace_common.h"
struct bpf_map_def SEC("maps") my_map = { struct {
.type = BPF_MAP_TYPE_HASH, __uint(type, BPF_MAP_TYPE_HASH);
.key_size = sizeof(long), __type(key, long);
.value_size = sizeof(long), __type(value, long);
.max_entries = 1024, __uint(max_entries, 1024);
}; } my_map SEC(".maps");
/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
* example will no longer be meaningful * example will no longer be meaningful
...@@ -71,12 +71,12 @@ struct hist_key { ...@@ -71,12 +71,12 @@ struct hist_key {
u64 index; u64 index;
}; };
struct bpf_map_def SEC("maps") my_hist_map = { struct {
.type = BPF_MAP_TYPE_PERCPU_HASH, __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
.key_size = sizeof(struct hist_key), __uint(key_size, sizeof(struct hist_key));
.value_size = sizeof(long), __uint(value_size, sizeof(long));
.max_entries = 1024, __uint(max_entries, 1024);
}; } my_hist_map SEC(".maps");
SEC("kprobe/" SYSCALL(sys_write)) SEC("kprobe/" SYSCALL(sys_write))
int bpf_prog3(struct pt_regs *ctx) int bpf_prog3(struct pt_regs *ctx)
......
...@@ -11,12 +11,12 @@ ...@@ -11,12 +11,12 @@
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h> #include <bpf/bpf_tracing.h>
struct bpf_map_def SEC("maps") my_map = { struct {
.type = BPF_MAP_TYPE_HASH, __uint(type, BPF_MAP_TYPE_HASH);
.key_size = sizeof(long), __type(key, long);
.value_size = sizeof(u64), __type(value, u64);
.max_entries = 4096, __uint(max_entries, 4096);
}; } my_map SEC(".maps");
/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
* example will no longer be meaningful * example will no longer be meaningful
...@@ -42,12 +42,12 @@ static unsigned int log2l(unsigned long long n) ...@@ -42,12 +42,12 @@ static unsigned int log2l(unsigned long long n)
#define SLOTS 100 #define SLOTS 100
struct bpf_map_def SEC("maps") lat_map = { struct {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
.key_size = sizeof(u32), __uint(key_size, sizeof(u32));
.value_size = sizeof(u64), __uint(value_size, sizeof(u64));
.max_entries = SLOTS, __uint(max_entries, SLOTS);
}; } lat_map SEC(".maps");
SEC("kprobe/blk_account_io_completion") SEC("kprobe/blk_account_io_completion")
int bpf_prog2(struct pt_regs *ctx) int bpf_prog2(struct pt_regs *ctx)
......
...@@ -15,12 +15,12 @@ struct pair { ...@@ -15,12 +15,12 @@ struct pair {
u64 ip; u64 ip;
}; };
struct bpf_map_def SEC("maps") my_map = { struct {
.type = BPF_MAP_TYPE_HASH, __uint(type, BPF_MAP_TYPE_HASH);
.key_size = sizeof(long), __type(key, long);
.value_size = sizeof(struct pair), __type(value, struct pair);
.max_entries = 1000000, __uint(max_entries, 1000000);
}; } my_map SEC(".maps");
/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
* example will no longer be meaningful * example will no longer be meaningful
......
...@@ -15,16 +15,16 @@ ...@@ -15,16 +15,16 @@
#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
struct bpf_map_def SEC("maps") progs = { struct {
.type = BPF_MAP_TYPE_PROG_ARRAY, __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
.key_size = sizeof(u32), __uint(key_size, sizeof(u32));
.value_size = sizeof(u32), __uint(value_size, sizeof(u32));
#ifdef __mips__ #ifdef __mips__
.max_entries = 6000, /* MIPS n64 syscalls start at 5000 */ __uint(max_entries, 6000); /* MIPS n64 syscalls start at 5000 */
#else #else
.max_entries = 1024, __uint(max_entries, 1024);
#endif #endif
}; } progs SEC(".maps");
SEC("kprobe/__seccomp_filter") SEC("kprobe/__seccomp_filter")
int bpf_prog1(struct pt_regs *ctx) int bpf_prog1(struct pt_regs *ctx)
......
...@@ -3,24 +3,26 @@ ...@@ -3,24 +3,26 @@
#include <uapi/linux/bpf.h> #include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
struct bpf_map_def SEC("maps") counters = { struct {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
.key_size = sizeof(int), __uint(key_size, sizeof(int));
.value_size = sizeof(u32), __uint(value_size, sizeof(u32));
.max_entries = 64, __uint(max_entries, 64);
}; } counters SEC(".maps");
struct bpf_map_def SEC("maps") values = {
.type = BPF_MAP_TYPE_HASH, struct {
.key_size = sizeof(int), __uint(type, BPF_MAP_TYPE_HASH);
.value_size = sizeof(u64), __type(key, int);
.max_entries = 64, __type(value, u64);
}; __uint(max_entries, 64);
struct bpf_map_def SEC("maps") values2 = { } values SEC(".maps");
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(int), struct {
.value_size = sizeof(struct bpf_perf_event_value), __uint(type, BPF_MAP_TYPE_HASH);
.max_entries = 64, __type(key, int);
}; __type(value, struct bpf_perf_event_value);
__uint(max_entries, 64);
} values2 SEC(".maps");
SEC("kprobe/htab_map_get_next_key") SEC("kprobe/htab_map_get_next_key")
int bpf_prog1(struct pt_regs *ctx) int bpf_prog1(struct pt_regs *ctx)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment