Commit 454bfe97 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2018-03-21

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add a BPF hook for sendmsg and sendfile by reusing the ULP infrastructure
   and sockmap. Three helpers are added along with this: bpf_msg_apply_bytes(),
   bpf_msg_cork_bytes(), and bpf_msg_pull_data(). The first tells for how many
   bytes the verdict should be applied, the second tells that x bytes need to
   be queued up first before the BPF program is run again for a verdict, and
   the third helper is mainly for the sendfile case, pulling in data to make
   it private for reading and/or writing, from John.
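
   For illustration only, a minimal SK_MSG program using these helpers could
   look roughly as follows (hypothetical sketch: section name and byte counts
   are made up, helper wrappers as added to the selftests in this series):

     SEC("sk_msg1")
     int msg_prog(struct sk_msg_md *msg)
     {
             /* apply the verdict only to the first 512 bytes */
             bpf_msg_apply_bytes(msg, 512);
             /* queue up 512 bytes before the program is run again */
             bpf_msg_cork_bytes(msg, 512);
             /* make bytes [0, 512) private, visible via data/data_end */
             bpf_msg_pull_data(msg, 0, 512, 0);
             return SK_PASS;
     }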

2) Improve address to symbol resolution of user stack traces in BPF stackmap.
   Currently, the latter stores the address for each entry in the call trace;
   however, to map these addresses to user space files, it is necessary to
   maintain the mapping from these virtual addresses to symbols in the binary,
   which is not practical for system-wide profiling. Instead, with this new
   option the stackmap stores the ELF build id and offset for the call trace
   entries, from Song.
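
   As an illustrative sketch only (field names per the new uapi; the depth of
   127 is assumed to match the default perf_event_max_stack), such a stackmap
   could be declared in a BPF object like this:

     struct bpf_map_def SEC("maps") stackmap = {
             .type        = BPF_MAP_TYPE_STACK_TRACE,
             .key_size    = sizeof(__u32),
             /* value_size must be a multiple of sizeof(struct bpf_stack_build_id) */
             .value_size  = sizeof(struct bpf_stack_build_id) * 127,
             .max_entries = 128,
             .map_flags   = BPF_F_STACK_BUILD_ID,
     };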

3) Add support that allows BPF programs attached to perf events to read the
   address values recorded with the perf events. They are requested through
   PERF_SAMPLE_ADDR via perf_event_open(). The main motivation behind it is
   to support building memory or lock access profiling and tracing tools
   with the help of BPF, from Teng.
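
   Rough usage sketch (event config mirrors the updated trace_event sample;
   format string and names are illustrative only): user space opens the event
   with PERF_SAMPLE_ADDR set, and the attached program reads ctx->addr:

     /* user space side */
     struct perf_event_attr attr = {
             .type        = PERF_TYPE_RAW,
             .config      = 0x21d0,           /* e.g. MEM_UOPS_RETIRED.LOCK_LOADS */
             .sample_type = PERF_SAMPLE_ADDR, /* request the address value */
             .precise_ip  = 2,                /* addr needs a precise (PEBS) event */
             .sample_freq = 4000,
             .freq        = 1,
     };

     /* BPF side */
     SEC("perf_event")
     int on_sample(struct bpf_perf_event_data *ctx)
     {
             char fmt[] = "sampled addr: %llx\n";

             if (ctx->addr != 0)
                     bpf_trace_printk(fmt, sizeof(fmt), ctx->addr);
             return 0;
     }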

4) Several improvements to the tools/bpf/ Makefiles. 'make bpf' in the tools
   directory does not provide the standard quiet output except for bpftool,
   and it also does not respect a specified build output directory. The
   'make bpf_install' command respects neither the specified destination nor
   the prefix, all from Jiri. In addition, Jakub fixes several other minor
   issues in the Makefiles on top of that, e.g. fixing dependency paths,
   phony targets and more.

5) Various doc updates, e.g. add a comment for BPF fs about reserved names
   to make the dentry lookup from there a bit more obvious, and a comment
   to the bpf_devel_QA file in order to explain the difference between
   native and bpf target clang usage with regard to pointer size, from
   Quentin and Daniel.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0466080c 78262f45
@@ -539,6 +539,18 @@ A: Although LLVM IR generation and optimization try to stay architecture
The clang option "-fno-jump-tables" can be used to disable
switch table generation.
- For clang -target bpf, it is guaranteed that pointer or long /
unsigned long types will always have a width of 64 bit, no matter
whether underlying clang binary or default target (or kernel) is
32 bit. However, when native clang target is used, then it will
compile these types based on the underlying architecture's conventions,
meaning in case of 32 bit architecture, pointer or long / unsigned
long types e.g. in BPF context structure will have width of 32 bit
while the BPF LLVM back end still operates in 64 bit. The native
target is mostly needed in tracing for the case of walking pt_regs
or other kernel structures where CPU's register width matters.
Otherwise, clang -target bpf is generally recommended.
You should use default target when:
- Your program includes a header file, e.g., ptrace.h, which eventually
......
@@ -21,6 +21,7 @@ struct bpf_verifier_env;
struct perf_event;
struct bpf_prog;
struct bpf_map;
struct sock;
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
......
@@ -13,6 +13,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
......
@@ -507,6 +507,22 @@ struct xdp_buff {
struct xdp_rxq_info *rxq;
};
struct sk_msg_buff {
void *data;
void *data_end;
__u32 apply_bytes;
__u32 cork_bytes;
int sg_copybreak;
int sg_start;
int sg_curr;
int sg_end;
struct scatterlist sg_data[MAX_SKB_FRAGS];
bool sg_copy[MAX_SKB_FRAGS];
__u32 key;
__u32 flags;
struct bpf_map *map;
};
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
* lwt, ...). Subsystems allowing direct data access must (!)
@@ -771,6 +787,7 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
......
@@ -287,6 +287,7 @@ struct ucred {
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
......
@@ -2141,6 +2141,10 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
int sg_start, int *sg_curr, unsigned int *sg_size,
int first_coalesce);
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
......
@@ -133,6 +133,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SOCK_OPS,
BPF_PROG_TYPE_SK_SKB,
BPF_PROG_TYPE_CGROUP_DEVICE,
BPF_PROG_TYPE_SK_MSG,
};
enum bpf_attach_type {
@@ -143,6 +144,7 @@ enum bpf_attach_type {
BPF_SK_SKB_STREAM_PARSER,
BPF_SK_SKB_STREAM_VERDICT,
BPF_CGROUP_DEVICE,
BPF_SK_MSG_VERDICT,
__MAX_BPF_ATTACH_TYPE
};
@@ -231,6 +233,28 @@ enum bpf_attach_type {
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID (1U << 5)
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
/* with valid build_id and offset */
BPF_STACK_BUILD_ID_VALID = 1,
/* couldn't get build_id, fallback to ip */
BPF_STACK_BUILD_ID_IP = 2,
};
#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
__s32 status;
unsigned char build_id[BPF_BUILD_ID_SIZE];
union {
__u64 offset;
__u64 ip;
};
};
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -696,6 +720,15 @@ union bpf_attr {
* int bpf_override_return(pt_regs, rc)
* @pt_regs: pointer to struct pt_regs
* @rc: the return value to set
*
* int bpf_msg_redirect_map(map, key, flags)
* Redirect msg to a sock in map using key as a lookup key for the
* sock in map.
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
* Return: SK_PASS
*
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -757,7 +790,11 @@ union bpf_attr {
FN(perf_prog_read_value), \
FN(getsockopt), \
FN(override_return), \
FN(sock_ops_cb_flags_set), FN(sock_ops_cb_flags_set), \
FN(msg_redirect_map), \
FN(msg_apply_bytes), \
FN(msg_cork_bytes), \
FN(msg_pull_data),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -920,6 +957,14 @@ enum sk_action {
SK_PASS,
};
/* user accessible metadata for SK_MSG packet hook, new fields must
* be added to the end of this structure
*/
struct sk_msg_md {
void *data;
void *data_end;
};
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
......
@@ -13,6 +13,7 @@
struct bpf_perf_event_data {
bpf_user_pt_regs_t regs;
__u64 sample_period;
__u64 addr;
};
#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
@@ -178,6 +178,9 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
* extensions.
*/
if (strchr(dentry->d_name.name, '.'))
return ERR_PTR(-EPERM);
......
@@ -9,16 +9,19 @@
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include "percpu_freelist.h" #include "percpu_freelist.h"
#define STACK_CREATE_FLAG_MASK \ #define STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
BPF_F_STACK_BUILD_ID)
struct stack_map_bucket {
struct pcpu_freelist_node fnode;
u32 hash;
u32 nr;
u64 ip[]; u64 data[];
};
struct bpf_stack_map {
@@ -29,6 +32,17 @@ struct bpf_stack_map {
struct stack_map_bucket *buckets[];
};
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
return (map->map_flags & BPF_F_STACK_BUILD_ID);
}
static inline int stack_map_data_size(struct bpf_map *map)
{
return stack_map_use_build_id(map) ?
sizeof(struct bpf_stack_build_id) : sizeof(u64);
}
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
@@ -68,8 +82,16 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
value_size < 8 || value_size % 8 || value_size < 8 || value_size % 8)
value_size / 8 > sysctl_perf_event_max_stack) return ERR_PTR(-EINVAL);
BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
if (value_size % sizeof(struct bpf_stack_build_id) ||
value_size / sizeof(struct bpf_stack_build_id)
> sysctl_perf_event_max_stack)
return ERR_PTR(-EINVAL);
} else if (value_size / 8 > sysctl_perf_event_max_stack)
return ERR_PTR(-EINVAL);
/* hash table size must be power of 2 */
@@ -114,13 +136,184 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
return ERR_PTR(err);
}
#define BPF_BUILD_ID 3
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
static inline int stack_map_parse_build_id(void *page_addr,
unsigned char *build_id,
void *note_start,
Elf32_Word note_size)
{
Elf32_Word note_offs = 0, new_offs;
/* check for overflow */
if (note_start < page_addr || note_start + note_size < note_start)
return -EINVAL;
/* only supports note that fits in the first page */
if (note_start + note_size > page_addr + PAGE_SIZE)
return -EINVAL;
while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
if (nhdr->n_type == BPF_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
memcpy(build_id,
note_start + note_offs +
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
BPF_BUILD_ID_SIZE);
return 0;
}
new_offs = note_offs + sizeof(Elf32_Nhdr) +
ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
if (new_offs <= note_offs) /* overflow */
break;
note_offs = new_offs;
}
return -EINVAL;
}
/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
unsigned char *build_id)
{
Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
Elf32_Phdr *phdr;
int i;
/* only supports phdr that fits in one page */
if (ehdr->e_phnum >
(PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
return -EINVAL;
phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
for (i = 0; i < ehdr->e_phnum; ++i)
if (phdr[i].p_type == PT_NOTE)
return stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz);
return -EINVAL;
}
/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
unsigned char *build_id)
{
Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
Elf64_Phdr *phdr;
int i;
/* only supports phdr that fits in one page */
if (ehdr->e_phnum >
(PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
return -EINVAL;
phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
for (i = 0; i < ehdr->e_phnum; ++i)
if (phdr[i].p_type == PT_NOTE)
return stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz);
return -EINVAL;
}
/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
unsigned char *build_id)
{
Elf32_Ehdr *ehdr;
struct page *page;
void *page_addr;
int ret;
/* only works for page backed storage */
if (!vma->vm_file)
return -EINVAL;
page = find_get_page(vma->vm_file->f_mapping, 0);
if (!page)
return -EFAULT; /* page not mapped */
ret = -EINVAL;
page_addr = page_address(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
goto out;
/* only support executable file and shared object file */
if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
goto out;
if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
ret = stack_map_get_build_id_32(page_addr, build_id);
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = stack_map_get_build_id_64(page_addr, build_id);
out:
put_page(page);
return ret;
}
static void stack_map_get_build_id_offset(struct bpf_map *map,
struct stack_map_bucket *bucket,
u64 *ips, u32 trace_nr, bool user)
{
int i;
struct vm_area_struct *vma;
struct bpf_stack_build_id *id_offs;
bucket->nr = trace_nr;
id_offs = (struct bpf_stack_build_id *)bucket->data;
/*
* We cannot do up_read() in nmi context, so build_id lookup is
* only supported for non-nmi events. If at some point, it is
* possible to run find_vma() without taking the semaphore, we
* would like to allow build_id lookup in nmi context.
*
* Same fallback is used for kernel stack (!user) on a stackmap
* with build_id.
*/
if (!user || !current || !current->mm || in_nmi() ||
down_read_trylock(&current->mm->mmap_sem) == 0) {
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
}
return;
}
for (i = 0; i < trace_nr; i++) {
vma = find_vma(current->mm, ips[i]);
if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
continue;
}
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
- vma->vm_start;
id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
}
up_read(&current->mm->mmap_sem);
}
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags)
{
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct perf_callchain_entry *trace;
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
u32 max_depth = map->value_size / 8; u32 max_depth = map->value_size / stack_map_data_size(map);
/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
u32 init_nr = sysctl_perf_event_max_stack - max_depth;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
@@ -128,6 +321,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
bool user = flags & BPF_F_USER_STACK;
bool kernel = !user;
u64 *ips;
bool hash_matches;
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -156,15 +350,33 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
id = hash & (smap->n_buckets - 1);
bucket = READ_ONCE(smap->buckets[id]);
if (bucket && bucket->hash == hash) { hash_matches = bucket && bucket->hash == hash;
if (flags & BPF_F_FAST_STACK_CMP) /* fast cmp */
if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
return id; return id;
if (bucket->nr == trace_nr &&
memcmp(bucket->ip, ips, trace_len) == 0) if (stack_map_use_build_id(map)) {
/* for build_id+offset, pop a bucket before slow cmp */
new_bucket = (struct stack_map_bucket *)
pcpu_freelist_pop(&smap->freelist);
if (unlikely(!new_bucket))
return -ENOMEM;
stack_map_get_build_id_offset(map, new_bucket, ips,
trace_nr, user);
trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
if (hash_matches && bucket->nr == trace_nr &&
memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
return id; return id;
} }
if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
/* this call stack is not in the map, try to add it */ pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
return -EEXIST;
}
} else {
if (hash_matches && bucket->nr == trace_nr &&
memcmp(bucket->data, ips, trace_len) == 0)
return id;
if (bucket && !(flags & BPF_F_REUSE_STACKID))
return -EEXIST;
@@ -172,8 +384,9 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
pcpu_freelist_pop(&smap->freelist);
if (unlikely(!new_bucket))
return -ENOMEM;
memcpy(new_bucket->data, ips, trace_len);
}
memcpy(new_bucket->ip, ips, trace_len);
new_bucket->hash = hash;
new_bucket->nr = trace_nr;
@@ -212,8 +425,8 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
if (!bucket)
return -ENOENT;
trace_len = bucket->nr * sizeof(u64); trace_len = bucket->nr * stack_map_data_size(map);
memcpy(value, bucket->ip, trace_len); memcpy(value, bucket->data, trace_len);
memset(value + trace_len, 0, map->value_size - trace_len);
old_bucket = xchg(&smap->buckets[id], bucket);
......
@@ -1315,7 +1315,8 @@ static int bpf_obj_get(const union bpf_attr *attr)
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach) static int sockmap_get_from_fd(const union bpf_attr *attr,
int type, bool attach)
{
struct bpf_prog *prog = NULL;
int ufd = attr->target_fd;
@@ -1329,8 +1330,7 @@ static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
return PTR_ERR(map);
if (attach) {
prog = bpf_prog_get_type(attr->attach_bpf_fd, prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
BPF_PROG_TYPE_SK_SKB);
if (IS_ERR(prog)) {
fdput(f);
return PTR_ERR(prog);
@@ -1382,9 +1382,11 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_CGROUP_DEVICE:
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
case BPF_SK_MSG_VERDICT:
return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
return sockmap_get_from_fd(attr, true); return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
default:
return -EINVAL;
}
@@ -1437,9 +1439,11 @@ static int bpf_prog_detach(const union bpf_attr *attr)
case BPF_CGROUP_DEVICE:
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
case BPF_SK_MSG_VERDICT:
return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
return sockmap_get_from_fd(attr, false); return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
default:
return -EINVAL;
}
......
@@ -1248,6 +1248,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
if (meta)
return meta->pkt_access;
@@ -2071,7 +2072,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem) func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map)
goto error;
break;
default:
@@ -2109,6 +2111,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error;
break;
case BPF_FUNC_sk_redirect_map:
case BPF_FUNC_msg_redirect_map:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
......
@@ -726,8 +726,7 @@ const struct bpf_prog_ops tracepoint_prog_ops = {
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data, const int size_u64 = sizeof(u64);
sample_period);
if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
return false;
@@ -738,8 +737,13 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
switch (off) {
case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
bpf_ctx_record_field_size(info, size_sp); bpf_ctx_record_field_size(info, size_u64);
if (!bpf_ctx_narrow_access_ok(off, size, size_sp)) if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
return false;
break;
case bpf_ctx_range(struct bpf_perf_event_data, addr):
bpf_ctx_record_field_size(info, size_u64);
if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
return false;
break;
default:
@@ -766,6 +770,14 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct perf_sample_data, period, 8,
target_size));
break;
case offsetof(struct bpf_perf_event_data, addr):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
data), si->dst_reg, si->src_reg,
offsetof(struct bpf_perf_event_data_kern, data));
*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
bpf_target_off(struct perf_sample_data, addr, 8,
target_size));
break;
default:
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
regs), si->dst_reg, si->src_reg,
......
@@ -1890,6 +1890,202 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
struct bpf_map *, map, u32, key, u64, flags)
{
/* If user passes invalid input drop the packet. */
if (unlikely(flags))
return SK_DROP;
msg->key = key;
msg->flags = flags;
msg->map = map;
return SK_PASS;
}
struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
{
struct sock *sk = NULL;
if (msg->map) {
sk = __sock_map_lookup_elem(msg->map, msg->key);
msg->key = 0;
msg->map = NULL;
}
return sk;
}
static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
.func = bpf_msg_redirect_map,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
{
msg->apply_bytes = bytes;
return 0;
}
static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
.func = bpf_msg_apply_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
{
msg->cork_bytes = bytes;
return 0;
}
static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.func = bpf_msg_cork_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_msg_pull_data,
struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
{
unsigned int len = 0, offset = 0, copy = 0;
struct scatterlist *sg = msg->sg_data;
int first_sg, last_sg, i, shift;
unsigned char *p, *to, *from;
int bytes = end - start;
struct page *page;
if (unlikely(flags || end <= start))
return -EINVAL;
/* First find the starting scatterlist element */
i = msg->sg_start;
do {
len = sg[i].length;
offset += len;
if (start < offset + len)
break;
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
} while (i != msg->sg_end);
if (unlikely(start >= offset + len))
return -EINVAL;
if (!msg->sg_copy[i] && bytes <= len)
goto out;
first_sg = i;
/* At this point we need to linearize multiple scatterlist
* elements or a single shared page. Either way we need to
* copy into a linear buffer exclusively owned by BPF. Then
* place the buffer in the scatterlist and fixup the original
* entries by removing the entries now in the linear buffer
* and shifting the remaining entries. For now we do not try
* to copy partial entries to avoid complexity of running out
* of sg_entry slots. The downside is reading a single byte
* will copy the entire sg entry.
*/
do {
copy += sg[i].length;
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
if (bytes < copy)
break;
} while (i != msg->sg_end);
last_sg = i;
if (unlikely(copy < end - start))
return -EINVAL;
page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
if (unlikely(!page))
return -ENOMEM;
p = page_address(page);
offset = 0;
i = first_sg;
do {
from = sg_virt(&sg[i]);
len = sg[i].length;
to = p + offset;
memcpy(to, from, len);
offset += len;
sg[i].length = 0;
put_page(sg_page(&sg[i]));
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
} while (i != last_sg);
sg[first_sg].length = copy;
sg_set_page(&sg[first_sg], page, copy, 0);
/* To repair sg ring we need to shift entries. If we only
* had a single entry though we can just replace it and
* be done. Otherwise walk the ring and shift the entries.
*/
shift = last_sg - first_sg - 1;
if (!shift)
goto out;
i = first_sg + 1;
do {
int move_from;
if (i + shift >= MAX_SKB_FRAGS)
move_from = i + shift - MAX_SKB_FRAGS;
else
move_from = i + shift;
if (move_from == msg->sg_end)
break;
sg[i] = sg[move_from];
sg[move_from].length = 0;
sg[move_from].page_link = 0;
sg[move_from].offset = 0;
i++;
if (i == MAX_SKB_FRAGS)
i = 0;
} while (1);
msg->sg_end -= shift;
if (msg->sg_end < 0)
msg->sg_end += MAX_SKB_FRAGS;
out:
msg->data = sg_virt(&sg[i]) + start - offset;
msg->data_end = msg->data + bytes;
return 0;
}
static const struct bpf_func_proto bpf_msg_pull_data_proto = {
.func = bpf_msg_pull_data,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
@@ -2831,7 +3027,8 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_l3_csum_replace ||
func == bpf_l4_csum_replace ||
func == bpf_xdp_adjust_head ||
func == bpf_xdp_adjust_meta) func == bpf_xdp_adjust_meta ||
func == bpf_msg_pull_data)
return true;
return false;
@@ -3591,6 +3788,22 @@ static const struct bpf_func_proto *
}
}
static const struct bpf_func_proto *sk_msg_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_msg_redirect_map:
return &bpf_msg_redirect_map_proto;
case BPF_FUNC_msg_apply_bytes:
return &bpf_msg_apply_bytes_proto;
case BPF_FUNC_msg_cork_bytes:
return &bpf_msg_cork_bytes_proto;
case BPF_FUNC_msg_pull_data:
return &bpf_msg_pull_data_proto;
default:
return bpf_base_func_proto(func_id);
}
}
static const struct bpf_func_proto *sk_skb_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -3980,6 +4193,32 @@ static bool sk_skb_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, info);
}
static bool sk_msg_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
if (type == BPF_WRITE)
return false;
switch (off) {
case offsetof(struct sk_msg_md, data):
info->reg_type = PTR_TO_PACKET;
break;
case offsetof(struct sk_msg_md, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
}
if (off < 0 || off >= sizeof(struct sk_msg_md))
return false;
if (off % size != 0)
return false;
if (size != sizeof(__u64))
return false;
return true;
}
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
@@ -4778,6 +5017,29 @@ static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
}
static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
switch (si->off) {
case offsetof(struct sk_msg_md, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
si->dst_reg, si->src_reg,
offsetof(struct sk_msg_buff, data));
break;
case offsetof(struct sk_msg_md, data_end):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
si->dst_reg, si->src_reg,
offsetof(struct sk_msg_buff, data_end));
break;
}
return insn - insn_buf;
}
const struct bpf_verifier_ops sk_filter_verifier_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
@@ -4868,6 +5130,15 @@ const struct bpf_verifier_ops sk_skb_verifier_ops = {
const struct bpf_prog_ops sk_skb_prog_ops = {
};
const struct bpf_verifier_ops sk_msg_verifier_ops = {
.get_func_proto = sk_msg_func_proto,
.is_valid_access = sk_msg_is_valid_access,
.convert_ctx_access = sk_msg_convert_ctx_access,
};
const struct bpf_prog_ops sk_msg_prog_ops = {
};
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
......
@@ -2237,6 +2237,67 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
}
EXPORT_SYMBOL(sk_page_frag_refill);
int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
int first_coalesce)
{
int sg_curr = *sg_curr_index, use = 0, rc = 0;
unsigned int size = *sg_curr_size;
struct page_frag *pfrag;
struct scatterlist *sge;
len -= size;
pfrag = sk_page_frag(sk);
while (len > 0) {
unsigned int orig_offset;
if (!sk_page_frag_refill(sk, pfrag)) {
rc = -ENOMEM;
goto out;
}
use = min_t(int, len, pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, use)) {
rc = -ENOMEM;
goto out;
}
sk_mem_charge(sk, use);
size += use;
orig_offset = pfrag->offset;
pfrag->offset += use;
sge = sg + sg_curr - 1;
if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
sg->offset + sg->length == orig_offset) {
sg->length += use;
} else {
sge = sg + sg_curr;
sg_unmark_end(sge);
sg_set_page(sge, pfrag->page, use, orig_offset);
get_page(pfrag->page);
sg_curr++;
if (sg_curr == MAX_SKB_FRAGS)
sg_curr = 0;
if (sg_curr == sg_start) {
rc = -ENOSPC;
break;
}
}
len -= use;
}
out:
*sg_curr_size = size;
*sg_curr_index = sg_curr;
return rc;
}
EXPORT_SYMBOL(sk_alloc_sg);
static void __lock_sock(struct sock *sk)
__releases(&sk->sk_lock.slock)
__acquires(&sk->sk_lock.slock)
......
@@ -994,6 +994,8 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
get_page(page);
skb_fill_page_desc(skb, i, page, offset, copy);
}
if (!(flags & MSG_NO_SHARED_FRAGS))
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
skb->len += copy;
......
@@ -87,71 +87,16 @@ static void trim_both_sgl(struct sock *sk, int target_size)
target_size);
}
static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
int *sg_num_elem, unsigned int *sg_size,
int first_coalesce)
{
struct page_frag *pfrag;
unsigned int size = *sg_size;
int num_elem = *sg_num_elem, use = 0, rc = 0;
struct scatterlist *sge;
unsigned int orig_offset;
len -= size;
pfrag = sk_page_frag(sk);
while (len > 0) {
if (!sk_page_frag_refill(sk, pfrag)) {
rc = -ENOMEM;
goto out;
}
use = min_t(int, len, pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, use)) {
rc = -ENOMEM;
goto out;
}
sk_mem_charge(sk, use);
size += use;
orig_offset = pfrag->offset;
pfrag->offset += use;
sge = sg + num_elem - 1;
if (num_elem > first_coalesce && sg_page(sg) == pfrag->page &&
sg->offset + sg->length == orig_offset) {
sg->length += use;
} else {
sge++;
sg_unmark_end(sge);
sg_set_page(sge, pfrag->page, use, orig_offset);
get_page(pfrag->page);
++num_elem;
if (num_elem == MAX_SKB_FRAGS) {
rc = -ENOSPC;
break;
}
}
len -= use;
}
goto out;
out:
*sg_size = size;
*sg_num_elem = num_elem;
return rc;
}
static int alloc_encrypted_sg(struct sock *sk, int len)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
int rc = 0;
rc = alloc_sg(sk, len, ctx->sg_encrypted_data, rc = sk_alloc_sg(sk, len,
&ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0); ctx->sg_encrypted_data, 0,
&ctx->sg_encrypted_num_elem,
&ctx->sg_encrypted_size, 0);
return rc;
}
@@ -162,7 +107,7 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
int rc = 0;
rc = alloc_sg(sk, len, ctx->sg_plaintext_data, rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
&ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
tls_ctx->pending_open_record_frags);
......
@@ -67,6 +67,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
bool is_sockops = strncmp(event, "sockops", 7) == 0;
bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0;
size_t insns_cnt = size / sizeof(struct bpf_insn);
enum bpf_prog_type prog_type;
char buf[256];
@@ -96,6 +97,8 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_type = BPF_PROG_TYPE_SOCK_OPS;
} else if (is_sk_skb) {
prog_type = BPF_PROG_TYPE_SK_SKB;
} else if (is_sk_msg) {
prog_type = BPF_PROG_TYPE_SK_MSG;
} else {
printf("Unknown event '%s'\n", event);
return -1;
@@ -113,7 +116,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
return 0;
if (is_socket || is_sockops || is_sk_skb) { if (is_socket || is_sockops || is_sk_skb || is_sk_msg) {
if (is_socket)
event += 6;
else
@@ -589,7 +592,8 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
memcmp(shname, "socket", 6) == 0 ||
memcmp(shname, "cgroup/", 7) == 0 ||
memcmp(shname, "sockops", 7) == 0 ||
memcmp(shname, "sk_skb", 6) == 0) { memcmp(shname, "sk_skb", 6) == 0 ||
memcmp(shname, "sk_msg", 6) == 0) {
ret = load_and_attach(shname, data->d_buf,
data->d_size);
if (ret != 0)
......
@@ -39,6 +39,7 @@ int bpf_prog1(struct bpf_perf_event_data *ctx)
{
char time_fmt1[] = "Time Enabled: %llu, Time Running: %llu";
char time_fmt2[] = "Get Time Failed, ErrCode: %d";
char addr_fmt[] = "Address recorded on event: %llx";
char fmt[] = "CPU-%d period %lld ip %llx";
u32 cpu = bpf_get_smp_processor_id();
struct bpf_perf_event_value value_buf;
@@ -64,6 +65,9 @@ int bpf_prog1(struct bpf_perf_event_data *ctx)
else
bpf_trace_printk(time_fmt2, sizeof(time_fmt2), ret);
if (ctx->addr != 0)
bpf_trace_printk(addr_fmt, sizeof(addr_fmt), ctx->addr);
val = bpf_map_lookup_elem(&counts, &key);
if (val)
(*val)++;
......
@@ -215,6 +215,17 @@ static void test_bpf_perf_event(void)
/* Intel Instruction Retired */
.config = 0xc0,
};
struct perf_event_attr attr_type_raw_lock_load = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_RAW,
/* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
.config = 0x21d0,
/* Request to record lock address from PEBS */
.sample_type = PERF_SAMPLE_ADDR,
/* Record address value requires precise event */
.precise_ip = 2,
};
printf("Test HW_CPU_CYCLES\n"); printf("Test HW_CPU_CYCLES\n");
test_perf_event_all_cpu(&attr_type_hw); test_perf_event_all_cpu(&attr_type_hw);
...@@ -236,6 +247,10 @@ static void test_bpf_perf_event(void) ...@@ -236,6 +247,10 @@ static void test_bpf_perf_event(void)
test_perf_event_all_cpu(&attr_type_raw); test_perf_event_all_cpu(&attr_type_raw);
test_perf_event_task(&attr_type_raw); test_perf_event_task(&attr_type_raw);
printf("Test Lock Load\n");
test_perf_event_all_cpu(&attr_type_raw_lock_load);
test_perf_event_task(&attr_type_raw_lock_load);
printf("*** PASS ***\n"); printf("*** PASS ***\n");
} }
......
@@ -43,6 +43,42 @@ struct bpf_map_def SEC("maps") sock_map = {
.max_entries = 20,
};
struct bpf_map_def SEC("maps") sock_map_txmsg = {
.type = BPF_MAP_TYPE_SOCKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 20,
};
struct bpf_map_def SEC("maps") sock_map_redir = {
.type = BPF_MAP_TYPE_SOCKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") sock_apply_bytes = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 1
};
struct bpf_map_def SEC("maps") sock_cork_bytes = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 1
};
struct bpf_map_def SEC("maps") sock_pull_bytes = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 2
};
SEC("sk_skb1") SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb) int bpf_prog1(struct __sk_buff *skb)
{ {
...@@ -105,4 +141,165 @@ int bpf_sockmap(struct bpf_sock_ops *skops) ...@@ -105,4 +141,165 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
return 0; return 0;
} }
SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1;
int *start, *end;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
bpf_msg_apply_bytes(msg, *bytes);
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
bpf_msg_cork_bytes(msg, *bytes);
start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
if (start && end)
bpf_msg_pull_data(msg, *start, *end, 0);
return SK_PASS;
}
SEC("sk_msg2")
int bpf_prog5(struct sk_msg_md *msg)
{
int err1 = -1, err2 = -1, zero = 0, one = 1;
int *bytes, *start, *end, len1, len2;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
err1 = bpf_msg_apply_bytes(msg, *bytes);
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
err2 = bpf_msg_cork_bytes(msg, *bytes);
len1 = (__u64)msg->data_end - (__u64)msg->data;
start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
if (start && end) {
int err;
bpf_printk("sk_msg2: pull(%i:%i)\n",
start ? *start : 0, end ? *end : 0);
err = bpf_msg_pull_data(msg, *start, *end, 0);
if (err)
bpf_printk("sk_msg2: pull_data err %i\n",
err);
len2 = (__u64)msg->data_end - (__u64)msg->data;
bpf_printk("sk_msg2: length update %i->%i\n",
len1, len2);
}
bpf_printk("sk_msg2: data length %i err1 %i err2 %i\n",
len1, err1, err2);
return SK_PASS;
}
SEC("sk_msg3")
int bpf_prog6(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1;
int *start, *end;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
bpf_msg_apply_bytes(msg, *bytes);
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
bpf_msg_cork_bytes(msg, *bytes);
start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
if (start && end)
bpf_msg_pull_data(msg, *start, *end, 0);
return bpf_msg_redirect_map(msg, &sock_map_redir, zero, 0);
}
SEC("sk_msg4")
int bpf_prog7(struct sk_msg_md *msg)
{
int err1 = 0, err2 = 0, zero = 0, one = 1;
int *bytes, *start, *end, len1, len2;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
err1 = bpf_msg_apply_bytes(msg, *bytes);
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
err2 = bpf_msg_cork_bytes(msg, *bytes);
len1 = (__u64)msg->data_end - (__u64)msg->data;
start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
if (start && end) {
int err;
bpf_printk("sk_msg2: pull(%i:%i)\n",
start ? *start : 0, end ? *end : 0);
err = bpf_msg_pull_data(msg, *start, *end, 0);
if (err)
bpf_printk("sk_msg2: pull_data err %i\n",
err);
len2 = (__u64)msg->data_end - (__u64)msg->data;
bpf_printk("sk_msg2: length update %i->%i\n",
len1, len2);
}
bpf_printk("sk_msg3: redirect(%iB) err1=%i err2=%i\n",
len1, err1, err2);
return bpf_msg_redirect_map(msg, &sock_map_redir, zero, 0);
}
SEC("sk_msg5")
int bpf_prog8(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
void *data = (void *)(long) msg->data;
int ret = 0, *bytes, zero = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes) {
ret = bpf_msg_apply_bytes(msg, *bytes);
if (ret)
return SK_DROP;
} else {
return SK_DROP;
}
return SK_PASS;
}
SEC("sk_msg6")
int bpf_prog9(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
void *data = (void *)(long) msg->data;
int ret = 0, *bytes, zero = 0;
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes) {
if (((__u64)data_end - (__u64)data) >= *bytes)
return SK_PASS;
ret = bpf_msg_cork_bytes(msg, *bytes);
if (ret)
return SK_DROP;
}
return SK_PASS;
}
SEC("sk_msg7")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1;
int *start, *end;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
bpf_msg_apply_bytes(msg, *bytes);
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
bpf_msg_cork_bytes(msg, *bytes);
start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
if (start && end)
bpf_msg_pull_data(msg, *start, *end, 0);
return SK_DROP;
}
char _license[] SEC("license") = "GPL";
#Test a bunch of positive cases to verify basic functionality
for prog in "--txmsg" "--txmsg_redir" "--txmsg_drop"; do
for t in "sendmsg" "sendpage"; do
for r in 1 10 100; do
for i in 1 10 100; do
for l in 1 10 100; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
done
done
done
done
#Test max iov
t="sendmsg"
r=1
i=1024
l=1
prog="--txmsg"
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
prog="--txmsg_redir"
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
# Test max iov with 1k send
t="sendmsg"
r=1
i=1024
l=1024
prog="--txmsg"
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
prog="--txmsg_redir"
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
# Test apply with 1B
r=1
i=1024
l=1024
prog="--txmsg_apply 1"
for t in "sendmsg" "sendpage"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply with larger value than send
r=1
i=8
l=1024
prog="--txmsg_apply 2048"
for t in "sendmsg" "sendpage"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply with apply that never reaches limit
r=1024
i=1
l=1
prog="--txmsg_apply 2048"
for t in "sendmsg" "sendpage"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply and redirect with 1B
r=1
i=1024
l=1024
prog="--txmsg_redir --txmsg_apply 1"
for t in "sendmsg" "sendpage"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply and redirect with larger value than send
r=1
i=8
l=1024
prog="--txmsg_redir --txmsg_apply 2048"
for t in "sendmsg" "sendpage"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply and redirect with apply that never reaches limit
r=1024
i=1
l=1
prog="--txmsg_apply 2048"
for t in "sendmsg" "sendpage"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with 1B not really useful but test it anyways
r=1
i=1024
l=1024
prog="--txmsg_cork 1"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with a more reasonable 100B
r=1
i=1000
l=1000
prog="--txmsg_cork 100"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with larger value than send
r=1
i=8
l=1024
prog="--txmsg_cork 2048"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with cork that never reaches limit
r=1024
i=1
l=1
prog="--txmsg_cork 2048"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
r=1
i=1024
l=1024
prog="--txmsg_redir --txmsg_cork 1"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with a more reasonable 100B
r=1
i=1000
l=1000
prog="--txmsg_redir --txmsg_cork 100"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with larger value than send
r=1
i=8
l=1024
prog="--txmsg_redir --txmsg_cork 2048"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test cork with cork that never reaches limit
r=1024
i=1
l=1
prog="--txmsg_cork 2048"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# mix and match cork and apply not really useful but valid programs
# Test apply < cork
r=100
i=1
l=5
prog="--txmsg_apply 10 --txmsg_cork 100"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Try again with larger sizes so we hit overflow case
r=100
i=1000
l=2048
prog="--txmsg_apply 4096 --txmsg_cork 8096"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply > cork
r=100
i=1
l=5
prog="--txmsg_apply 100 --txmsg_cork 10"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Again with larger sizes so we hit overflow cases
r=100
i=1000
l=2048
prog="--txmsg_apply 8096 --txmsg_cork 4096"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply = cork
r=100
i=1
l=5
prog="--txmsg_apply 10 --txmsg_cork 10"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
r=100
i=1000
l=2048
prog="--txmsg_apply 4096 --txmsg_cork 4096"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply < cork
r=100
i=1
l=5
prog="--txmsg_redir --txmsg_apply 10 --txmsg_cork 100"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Try again with larger sizes so we hit overflow case
r=100
i=1000
l=2048
prog="--txmsg_redir --txmsg_apply 4096 --txmsg_cork 8096"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply > cork
r=100
i=1
l=5
prog="--txmsg_redir --txmsg_apply 100 --txmsg_cork 10"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Again with larger sizes so we hit overflow cases
r=100
i=1000
l=2048
prog="--txmsg_redir --txmsg_apply 8096 --txmsg_cork 4096"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Test apply = cork
r=100
i=1
l=5
prog="--txmsg_redir --txmsg_apply 10 --txmsg_cork 10"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
r=100
i=1000
l=2048
prog="--txmsg_redir --txmsg_apply 4096 --txmsg_cork 4096"
for t in "sendpage" "sendmsg"; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog"
echo $TEST
$TEST
sleep 2
done
# Tests for bpf_msg_pull_data()
for i in `seq 99 100 1600`; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 0 --txmsg_end $i --txmsg_cork 1600"
echo $TEST
$TEST
sleep 2
done
for i in `seq 199 100 1600`; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 100 --txmsg_end $i --txmsg_cork 1600"
echo $TEST
$TEST
sleep 2
done
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 1500 --txmsg_end 1600 --txmsg_cork 1600"
echo $TEST
$TEST
sleep 2
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 1111 --txmsg_end 1112 --txmsg_cork 1600"
echo $TEST
$TEST
sleep 2
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 1111 --txmsg_end 0 --txmsg_cork 1600"
echo $TEST
$TEST
sleep 2
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 0 --txmsg_end 1601 --txmsg_cork 1600"
echo $TEST
$TEST
sleep 2
TEST="./sockmap --cgroup /mnt/cgroup2/ -t sendpage -r 16 -i 1 -l 100 \
--txmsg --txmsg_start 0 --txmsg_end 1601 --txmsg_cork 1602"
echo $TEST
$TEST
sleep 2
# Run through gamut again with start and end
for prog in "--txmsg" "--txmsg_redir" "--txmsg_drop"; do
for t in "sendmsg" "sendpage"; do
for r in 1 10 100; do
for i in 1 10 100; do
for l in 1 10 100; do
TEST="./sockmap --cgroup /mnt/cgroup2/ -t $t -r $r -i $i -l $l $prog --txmsg_start 1 --txmsg_end 2"
echo $TEST
$TEST
sleep 2
done
done
done
done
done
# Some specific tests to cover specific code paths
./sockmap --cgroup /mnt/cgroup2/ -t sendpage \
-r 5 -i 1 -l 1 --txmsg_redir --txmsg_cork 5 --txmsg_apply 3
./sockmap --cgroup /mnt/cgroup2/ -t sendmsg \
-r 5 -i 1 -l 1 --txmsg_redir --txmsg_cork 5 --txmsg_apply 3
./sockmap --cgroup /mnt/cgroup2/ -t sendpage \
-r 5 -i 1 -l 1 --txmsg_redir --txmsg_cork 5 --txmsg_apply 5
./sockmap --cgroup /mnt/cgroup2/ -t sendmsg \
-r 5 -i 1 -l 1 --txmsg_redir --txmsg_cork 5 --txmsg_apply 5
# SPDX-License-Identifier: GPL-2.0
-prefix = /usr
+include ../scripts/Makefile.include
+
+prefix ?= /usr/local
CC = gcc
LEX = flex
YACC = bison
MAKE = make
+INSTALL ?= install
CFLAGS += -Wall -O2
-CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
+ifeq ($(V),1)
+  Q =
+else
+  Q = @
+endif
FEATURE_USER = .bpf
FEATURE_TESTS = libbfd disassembler-four-args
FEATURE_DISPLAY = libbfd disassembler-four-args
@@ -38,40 +47,59 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
-%.yacc.c: %.y
-$(YACC) -o $@ -d $<
-%.lex.c: %.l
-$(LEX) -o $@ $<
-all: bpf_jit_disasm bpf_dbg bpf_asm bpftool
-bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
-bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
-bpf_jit_disasm : bpf_jit_disasm.o
-bpf_dbg : LDLIBS = -lreadline
-bpf_dbg : bpf_dbg.o
-bpf_asm : LDLIBS =
-bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
-bpf_exp.lex.o : bpf_exp.yacc.c
+$(OUTPUT)%.yacc.c: $(srctree)/tools/bpf/%.y
+$(QUIET_BISON)$(YACC) -o $@ -d $<
+$(OUTPUT)%.lex.c: $(srctree)/tools/bpf/%.l
+$(QUIET_FLEX)$(LEX) -o $@ $<
+$(OUTPUT)%.o: $(srctree)/tools/bpf/%.c
+$(QUIET_CC)$(COMPILE.c) -o $@ $<
+$(OUTPUT)%.yacc.o: $(OUTPUT)%.yacc.c
+$(QUIET_CC)$(COMPILE.c) -o $@ $<
+$(OUTPUT)%.lex.o: $(OUTPUT)%.lex.c
+$(QUIET_CC)$(COMPILE.c) -o $@ $<
+PROGS = $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg $(OUTPUT)bpf_asm
+all: $(PROGS) bpftool
+$(OUTPUT)bpf_jit_disasm: CFLAGS += -DPACKAGE='bpf_jit_disasm'
+$(OUTPUT)bpf_jit_disasm: $(OUTPUT)bpf_jit_disasm.o
+$(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ -lopcodes -lbfd -ldl
+$(OUTPUT)bpf_dbg: $(OUTPUT)bpf_dbg.o
+$(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ -lreadline
+$(OUTPUT)bpf_asm: $(OUTPUT)bpf_asm.o $(OUTPUT)bpf_exp.yacc.o $(OUTPUT)bpf_exp.lex.o
+$(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^
+$(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
clean: bpftool_clean
-rm -rf *.o bpf_jit_disasm bpf_dbg bpf_asm bpf_exp.yacc.* bpf_exp.lex.*
+$(call QUIET_CLEAN, bpf-progs)
+$(Q)rm -rf $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \
+$(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.*
+$(call QUIET_CLEAN, core-gen)
+$(Q)rm -f $(OUTPUT)FEATURE-DUMP.bpf
-install: bpftool_install
-install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm
-install bpf_dbg $(prefix)/bin/bpf_dbg
-install bpf_asm $(prefix)/bin/bpf_asm
+install: $(PROGS) bpftool_install
+$(call QUIET_INSTALL, bpf_jit_disasm)
+$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
+$(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
+$(call QUIET_INSTALL, bpf_dbg)
+$(Q)$(INSTALL) $(OUTPUT)bpf_dbg $(DESTDIR)$(prefix)/bin/bpf_dbg
+$(call QUIET_INSTALL, bpf_asm)
+$(Q)$(INSTALL) $(OUTPUT)bpf_asm $(DESTDIR)$(prefix)/bin/bpf_asm
bpftool:
-$(MAKE) -C bpftool
+$(call descend,bpftool)
bpftool_install:
-$(MAKE) -C bpftool install
+$(call descend,bpftool,install)
bpftool_clean:
-$(MAKE) -C bpftool clean
+$(call descend,bpftool,clean)
-.PHONY: bpftool FORCE
+.PHONY: all install clean bpftool bpftool_install bpftool_clean
@@ -38,7 +38,7 @@ bash_compdir ?= /usr/share/bash-completion/completions
CC = gcc
CFLAGS += -O2
-CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
+CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow -Wno-missing-field-initializers
CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
@@ -70,7 +70,7 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
-include $(wildcard *.d)
+include $(wildcard $(OUTPUT)*.d)
all: $(OUTPUT)bpftool
@@ -89,6 +89,8 @@ $(OUTPUT)%.o: %.c
clean: $(LIBBPF)-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
$(call QUIET_CLEAN, core-gen)
$(Q)$(RM) $(OUTPUT)FEATURE-DUMP.bpftool
install: $(OUTPUT)bpftool
$(call QUIET_INSTALL, bpftool)
...
@@ -49,7 +49,7 @@ struct dump_data {
unsigned long address_call_base;
struct kernel_sym *sym_mapping;
__u32 sym_count;
-char scratch_buff[SYM_MAX_NAME];
+char scratch_buff[SYM_MAX_NAME + 8];
};
void kernel_syms_load(struct dump_data *dd);
...
@@ -133,6 +133,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SOCK_OPS,
BPF_PROG_TYPE_SK_SKB,
BPF_PROG_TYPE_CGROUP_DEVICE,
BPF_PROG_TYPE_SK_MSG,
};
enum bpf_attach_type {
@@ -143,6 +144,7 @@ enum bpf_attach_type {
BPF_SK_SKB_STREAM_PARSER,
BPF_SK_SKB_STREAM_VERDICT,
BPF_CGROUP_DEVICE,
BPF_SK_MSG_VERDICT,
__MAX_BPF_ATTACH_TYPE
};
@@ -231,6 +233,28 @@ enum bpf_attach_type {
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID (1U << 5)
enum bpf_stack_build_id_status {
/* user space needs an empty entry to identify the end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
/* with valid build_id and offset */
BPF_STACK_BUILD_ID_VALID = 1,
/* couldn't get build_id, fallback to ip */
BPF_STACK_BUILD_ID_IP = 2,
};
#define BPF_BUILD_ID_SIZE 20
struct bpf_stack_build_id {
__s32 status;
unsigned char build_id[BPF_BUILD_ID_SIZE];
union {
__u64 offset;
__u64 ip;
};
};
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -696,6 +720,15 @@ union bpf_attr {
* int bpf_override_return(pt_regs, rc)
* @pt_regs: pointer to struct pt_regs
* @rc: the return value to set
*
* int bpf_msg_redirect_map(map, key, flags)
* Redirect msg to a sock in map using key as a lookup key for the
* sock in map.
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
* Return: SK_PASS
*
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -757,7 +790,11 @@ union bpf_attr {
FN(perf_prog_read_value), \
FN(getsockopt), \
FN(override_return), \
-FN(sock_ops_cb_flags_set),
+FN(sock_ops_cb_flags_set), \
FN(msg_redirect_map), \
FN(msg_apply_bytes), \
FN(msg_cork_bytes), \
FN(msg_pull_data),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -919,6 +956,14 @@ enum sk_action {
SK_PASS,
};
/* user accessible metadata for SK_MSG packet hook, new fields must
* be added to the end of this structure
*/
struct sk_msg_md {
void *data;
void *data_end;
};
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
...
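For orientation only (this snippet is not part of the series): user space could create a build-id flavored stack map through libbpf's bpf_create_map(), sizing each value for PERF_MAX_STACK_DEPTH entries of struct bpf_stack_build_id and passing the new BPF_F_STACK_BUILD_ID flag; the function name and the max_entries value below are illustrative assumptions.

#include <linux/bpf.h>
#include <linux/perf_event.h>	/* PERF_MAX_STACK_DEPTH */
#include <bpf/bpf.h>		/* bpf_create_map(), from tools/lib/bpf */

static int create_build_id_stackmap(void)
{
	/* Key is the stack id; the value holds one struct bpf_stack_build_id
	 * per frame instead of a raw instruction pointer.
	 */
	return bpf_create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(__u32),
			      sizeof(struct bpf_stack_build_id) *
						PERF_MAX_STACK_DEPTH,
			      128 /* max_entries, illustrative */,
			      BPF_F_STACK_BUILD_ID);
}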
@@ -1857,6 +1857,7 @@ static const struct {
BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
};
#undef BPF_PROG_SEC
...
@@ -13,6 +13,14 @@ endif
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
all: $(TEST_CUSTOM_PROGS)
$(TEST_CUSTOM_PROGS): urandom_read
urandom_read: urandom_read.c
$(CC) -o $(TEST_CUSTOM_PROGS) -static $<
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user
@@ -21,7 +29,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
-sample_map_ret0.o test_tcpbpf_kern.o
+sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
sockmap_tcp_msg_prog.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -74,3 +83,5 @@ $(OUTPUT)/%.o: %.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
@@ -86,6 +86,14 @@ static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
(void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_override_return)(void *ctx, unsigned long rc) =
(void *) BPF_FUNC_override_return;
static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_pull_data;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -123,6 +131,8 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
(void *) BPF_FUNC_skb_under_cgroup;
static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
(void *) BPF_FUNC_skb_pull_data;
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
...
@@ -20,14 +20,25 @@ int bpf_prog1(struct __sk_buff *skb)
__u32 lport = skb->local_port;
__u32 rport = skb->remote_port;
__u8 *d = data;
__u32 len = (__u32) data_end - (__u32) data;
int err;
if (data + 10 > data_end) {
err = bpf_skb_pull_data(skb, 10);
if (err)
return SK_DROP;
data_end = (void *)(long)skb->data_end;
data = (void *)(long)skb->data;
if (data + 10 > data_end)
-return skb->len;
+return SK_DROP;
}
/* This write/read is a bit pointless but tests the verifier and
* strparser handler for read/write pkt data and access into sk
* fields.
*/
d = data;
d[7] = 1;
return skb->len;
}
...
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_util.h"
#include "bpf_endian.h"
int _version SEC("version") = 1;
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
SEC("sk_msg1")
int bpf_prog1(struct sk_msg_md *msg)
{
void *data_end = (void *)(long) msg->data_end;
void *data = (void *)(long) msg->data;
char *d;
if (data + 8 > data_end)
return SK_DROP;
bpf_printk("data length %i\n", (__u64)msg->data_end - (__u64)msg->data);
d = (char *)data;
bpf_printk("hello sendmsg hook %i %i\n", d[0], d[1]);
return SK_PASS;
}
char _license[] SEC("license") = "GPL";
...@@ -26,6 +26,13 @@ struct bpf_map_def SEC("maps") sock_map_tx = { ...@@ -26,6 +26,13 @@ struct bpf_map_def SEC("maps") sock_map_tx = {
.max_entries = 20, .max_entries = 20,
}; };
struct bpf_map_def SEC("maps") sock_map_msg = {
.type = BPF_MAP_TYPE_SOCKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 20,
};
struct bpf_map_def SEC("maps") sock_map_break = { struct bpf_map_def SEC("maps") sock_map_break = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int), .key_size = sizeof(int),
......
@@ -464,15 +464,17 @@ static void test_devmap(int task, void *data)
#include <linux/err.h>
#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
static void test_sockmap(int tasks, void *data)
{
-int one = 1, map_fd_rx = 0, map_fd_tx = 0, map_fd_break, s, sc, rc;
-struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
+struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
+int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
int ports[] = {50200, 50201, 50202, 50204};
int err, i, fd, udp, sfd[6] = {0xdeadbeef};
u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
-int parse_prog, verdict_prog;
+int parse_prog, verdict_prog, msg_prog;
struct sockaddr_in addr;
int one = 1, s, sc, rc;
struct bpf_object *obj;
struct timeval to;
__u32 key, value;
@@ -584,6 +586,12 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
err = bpf_prog_attach(-1, fd, BPF_SK_MSG_VERDICT, 0);
if (!err) {
printf("Failed invalid msg verdict prog attach\n");
goto out_sockmap;
}
err = bpf_prog_attach(-1, fd, __MAX_BPF_ATTACH_TYPE, 0);
if (!err) {
printf("Failed unknown prog attach\n");
@@ -602,6 +610,12 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
if (err) {
printf("Failed empty msg verdict prog detach\n");
goto out_sockmap;
}
err = bpf_prog_detach(fd, __MAX_BPF_ATTACH_TYPE);
if (!err) {
printf("Detach invalid prog successful\n");
@@ -616,6 +630,13 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
err = bpf_prog_load(SOCKMAP_TCP_MSG_PROG,
BPF_PROG_TYPE_SK_MSG, &obj, &msg_prog);
if (err) {
printf("Failed to load SK_SKB msg prog\n");
goto out_sockmap;
}
err = bpf_prog_load(SOCKMAP_VERDICT_PROG,
BPF_PROG_TYPE_SK_SKB, &obj, &verdict_prog);
if (err) {
@@ -631,7 +652,7 @@ static void test_sockmap(int tasks, void *data)
map_fd_rx = bpf_map__fd(bpf_map_rx);
if (map_fd_rx < 0) {
-printf("Failed to get map fd\n");
+printf("Failed to get map rx fd\n");
goto out_sockmap;
}
@@ -647,6 +668,18 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
bpf_map_msg = bpf_object__find_map_by_name(obj, "sock_map_msg");
if (IS_ERR(bpf_map_msg)) {
printf("Failed to load map msg from msg_verdict prog\n");
goto out_sockmap;
}
map_fd_msg = bpf_map__fd(bpf_map_msg);
if (map_fd_msg < 0) {
printf("Failed to get map msg fd\n");
goto out_sockmap;
}
bpf_map_break = bpf_object__find_map_by_name(obj, "sock_map_break");
if (IS_ERR(bpf_map_break)) {
printf("Failed to load map tx from verdict prog\n");
@@ -680,6 +713,12 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
err = bpf_prog_attach(msg_prog, map_fd_msg, BPF_SK_MSG_VERDICT, 0);
if (err) {
printf("Failed msg verdict bpf prog attach\n");
goto out_sockmap;
}
err = bpf_prog_attach(verdict_prog, map_fd_rx,
__MAX_BPF_ATTACH_TYPE, 0);
if (!err) {
@@ -719,6 +758,14 @@ static void test_sockmap(int tasks, void *data)
}
}
/* Put sfd[2] (sending fd below) into msg map to test sendmsg bpf */
i = 0;
err = bpf_map_update_elem(map_fd_msg, &i, &sfd[2], BPF_ANY);
if (err) {
printf("Failed map_fd_msg update sockmap %i\n", err);
goto out_sockmap;
}
/* Test map send/recv */
for (i = 0; i < 2; i++) {
buf[0] = i;
...
@@ -841,7 +841,8 @@ static void test_tp_attach_query(void)
static int compare_map_keys(int map1_fd, int map2_fd)
{
__u32 key, next_key;
-char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+char val_buf[PERF_MAX_STACK_DEPTH *
+sizeof(struct bpf_stack_build_id)];
int err;
err = bpf_map_get_next_key(map1_fd, NULL, &key);
@@ -964,6 +965,166 @@ static void test_stacktrace_map()
return;
}
static int extract_build_id(char *build_id, size_t size)
{
FILE *fp;
char *line = NULL;
size_t len = 0;
fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
if (fp == NULL)
return -1;
if (getline(&line, &len, fp) == -1)
goto err;
pclose(fp);
if (len > size)
len = size;
memcpy(build_id, line, len);
build_id[len] = '\0';
return 0;
err:
pclose(fp);
return -1;
}
static void test_stacktrace_build_id(void)
{
int control_map_fd, stackid_hmap_fd, stackmap_fd;
const char *file = "./test_stacktrace_build_id.o";
int bytes, efd, err, pmu_fd, prog_fd;
struct perf_event_attr attr = {};
__u32 key, previous_key, val, duration = 0;
struct bpf_object *obj;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto out;
/* Get the ID for the random/urandom_read tracepoint */
snprintf(buf, sizeof(buf),
"/sys/kernel/debug/tracing/events/random/urandom_read/id");
efd = open(buf, O_RDONLY, 0);
if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
goto close_prog;
bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
"read", "bytes %d errno %d\n", bytes, errno))
goto close_prog;
/* Open the perf event and attach the BPF program */
attr.config = strtol(buf, NULL, 0);
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
attr.sample_period = 1;
attr.wakeup_events = 1;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto close_prog;
err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
err, errno))
goto close_pmu;
err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
err, errno))
goto disable_pmu;
/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
err, errno))
goto disable_pmu;
assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
== 0);
assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0);
/* disable stack trace collection */
key = 0;
val = 1;
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
* in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu;
err = extract_build_id(buf, 256);
if (CHECK(err, "get build_id with readelf",
"err %d errno %d\n", err, errno))
goto disable_pmu;
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto disable_pmu;
do {
char build_id[64];
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto disable_pmu;
for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
id_offs[i].offset != 0) {
for (j = 0; j < 20; ++j)
sprintf(build_id + 2 * j, "%02x",
id_offs[i].build_id[j] & 0xff);
if (strstr(buf, build_id) != NULL)
build_id_matches = 1;
}
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map");
disable_pmu:
ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
close_pmu:
close(pmu_fd);
close_prog:
bpf_object__close(obj);
out:
return;
}
int main(void)
{
test_pkt_access();
@@ -976,6 +1137,7 @@ int main(void)
test_obj_name();
test_tp_attach_query();
test_stacktrace_map();
test_stacktrace_build_id();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
......
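The test above only verifies that a stored build ID matches the readelf output. As a rough sketch of how a consumer might go further (not part of this series, and assuming the common debuginfo layout under /usr/lib/debug/.build-id/), a BPF_STACK_BUILD_ID_VALID entry can be turned into a file path whose stored offset tools like addr2line understand; build_id_debug_path() below is a hypothetical helper.

#include <stdio.h>
#include <linux/bpf.h>	/* struct bpf_stack_build_id, BPF_BUILD_ID_SIZE */

/* Format the conventional debuginfo path for one stack entry, e.g.
 * /usr/lib/debug/.build-id/ab/cdef...debug; the entry's offset can then be
 * passed to addr2line -e <path> <offset>.
 */
static void build_id_debug_path(const struct bpf_stack_build_id *e,
				char *out, size_t len)
{
	char hex[BPF_BUILD_ID_SIZE * 2 + 1];
	int i;

	for (i = 0; i < BPF_BUILD_ID_SIZE; i++)
		sprintf(hex + 2 * i, "%02x", e->build_id[i]);
	snprintf(out, len, "/usr/lib/debug/.build-id/%.2s/%s.debug",
		 hex, hex + 2);
}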
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <linux/bpf.h>
#include "bpf_helpers.h"
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
struct bpf_map_def SEC("maps") control_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") stackid_hmap = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 10000,
};
struct bpf_map_def SEC("maps") stackmap = {
.type = BPF_MAP_TYPE_STACK_TRACE,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH,
.max_entries = 128,
.map_flags = BPF_F_STACK_BUILD_ID,
};
/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
struct random_urandom_args {
unsigned long long pad;
int got_bits;
int pool_left;
int input_left;
};
SEC("tracepoint/random/urandom_read")
int oncpu(struct random_urandom_args *args)
{
__u32 key = 0, val = 0, *value_p;
value_p = bpf_map_lookup_elem(&control_map, &key);
if (value_p && *value_p)
return 0; /* skip if non-zero *value_p */
/* The size of stackmap and stackid_hmap should be the same */
key = bpf_get_stackid(args, &stackmap, BPF_F_USER_STACK);
if ((int)key >= 0)
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
@@ -1596,6 +1596,60 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"direct packet read for SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, data)),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
offsetof(struct sk_msg_md, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"direct packet write for SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, data)),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
offsetof(struct sk_msg_md, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"overlapping checks for direct packet access SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, data)),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
offsetof(struct sk_msg_md, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"check skb->mark is not writeable by sockets",
.insns = {
...
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#define BUF_SIZE 256
int main(void)
{
int fd = open("/dev/urandom", O_RDONLY);
int i;
char buf[BUF_SIZE];
if (fd < 0)
return 1;
for (i = 0; i < 4; ++i)
read(fd, buf, BUF_SIZE);
close(fd);
return 0;
}