Commit 8c8f67a4 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2017-12-13

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Addition of explicit scheduling points to map alloc/free
   in order to avoid having to hold the CPU for too long,
   from Eric.

2) Fixing of a corruption in overlapping perf_event_output
   calls from different BPF prog types on the same CPU out
   of different contexts, from Daniel.

3) Fallout fixes for the recent correction of the broken uapi for
   BPF_PROG_TYPE_PERF_EVENT: um had a missing asm header that needed
   to be pulled in from asm-generic, and for the BPF selftests the
   asm-generic include did not work, so a similar asm include scheme
   was adopted for that problematic header as perf already uses for
   other header files under tools, from Daniel.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f6e168b4 9147efcb
 generic-y += barrier.h
+generic-y += bpf_perf_event.h
 generic-y += bug.h
 generic-y += clkdev.h
 generic-y += current.h
......
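For reference, the um side of fix 3) is the single "+" line above: "generic-y += bpf_perf_event.h" tells Kbuild to satisfy the missing asm header by emitting a generated wrapper that redirects to the asm-generic copy. A minimal sketch of what such a generated header contains (the exact generated path and banner are Kbuild details assumed here, not part of this diff):

/* sketch of a Kbuild-generated wrapper for "generic-y += bpf_perf_event.h" */
#include <asm-generic/bpf_perf_event.h>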
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab)
 		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 					 htab->map.key_size);
 		free_percpu(pptr);
+		cond_resched();
 	}
 free_elems:
 	bpf_map_area_free(htab->elems);
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab)
 			goto free_elems;
 		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 				  pptr);
+		cond_resched();
 	}
 
 skip_percpu_elems:
......
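The two hunks above are the whole of fix 1): preallocation and teardown of hash table maps iterate over up to max_entries elements, and for large maps such a loop can monopolize a CPU long enough to trigger soft-lockup or RCU stall warnings, so cond_resched() adds a voluntary scheduling point per iteration. A minimal, self-contained sketch of the pattern; free_many() and its parameters are illustrative, not taken from the kernel tree:

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Illustrative only: walk a large array of per-CPU allocations and
 * offer to reschedule on every iteration so the loop cannot hold the
 * CPU for an unbounded amount of time. */
static void free_many(void __percpu **ptrs, u32 n)
{
	u32 i;

	for (i = 0; i < n; i++) {
		free_percpu(ptrs[i]);
		cond_resched();	/* explicit scheduling point */
	}
}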
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 	.arg4_type	= ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
 
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
-			u64 flags, struct perf_raw_record *raw)
+			u64 flags, struct perf_sample_data *sd)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
-	perf_sample_data_init(sd, 0, 0);
-	sd->raw = raw;
 	perf_event_output(event, sd, regs);
 	return 0;
 }
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags, void *, data, u64, size)
 {
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
 	struct perf_raw_record raw = {
 		.frag = {
 			.size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
 
-	return __bpf_perf_event_output(regs, map, flags, &raw);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = &raw;
+
+	return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
 	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
 	struct perf_raw_frag frag = {
 		.copy		= ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 	};
 
 	perf_fetch_caller_regs(regs);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = &raw;
 
-	return __bpf_perf_event_output(regs, map, flags, &raw);
+	return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 BPF_CALL_0(bpf_get_current_task)
......
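The diff above is fix 2). Previously both helpers shared the single per-CPU bpf_sd scratch buffer and __bpf_perf_event_output() initialized it on entry, so a program of one type could be interrupted on the same CPU by a program of another type out of a different context (say, a tracing program in process context preempted by a networking program's event output in softirq), and the inner call would reinitialize sd and overwrite sd->raw underneath the outer one. The fix gives each entry point its own per-CPU buffer (bpf_trace_sd vs. bpf_misc_sd) and moves initialization into the caller. A reduced sketch of the resulting shape; all demo_* names are illustrative, not kernel APIs:

#include <linux/percpu.h>
#include <linux/perf_event.h>

/* One scratch buffer per entry point that may preempt the other on the
 * same CPU; the caller fully initializes it before handing it down. */
static DEFINE_PER_CPU(struct perf_sample_data, demo_trace_sd);
static DEFINE_PER_CPU(struct perf_sample_data, demo_misc_sd);

/* Consumes an already-initialized sd and writes no shared state. */
static u64 demo_output(struct pt_regs *regs, struct perf_sample_data *sd)
{
	return 0;
}

/* Callers of this kind run with preemption disabled, so this_cpu_ptr()
 * plus caller-side init is safe against same-context reentry. */
static u64 demo_trace_entry(struct pt_regs *regs, struct perf_raw_record *raw)
{
	struct perf_sample_data *sd = this_cpu_ptr(&demo_trace_sd);

	perf_sample_data_init(sd, 0, 0);
	sd->raw = raw;
	return demo_output(regs, sd);
}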
#if defined(__aarch64__)
#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
#elif defined(__s390__)
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
#else
#include <uapi/asm-generic/bpf_perf_event.h>
#endif
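This new wrapper is the tools-side counterpart of fix 3), following the same scheme perf already uses for headers such as tools/include/asm/barrier.h: arm64 and s390 define their own uapi bpf_perf_event.h (their bpf_user_pt_regs_t is not struct pt_regs), so the wrapper dispatches to the matching mirrored header at preprocessing time and falls back to the asm-generic variant everywhere else. A hypothetical consumer, assuming it is built with the tools include paths so that <linux/bpf_perf_event.h> and this wrapper resolve from tools/include rather than the system headers:

#include <linux/bpf_perf_event.h>

/* ctx->regs is bpf_user_pt_regs_t (arch-specific via the wrapper);
 * ctx->sample_period is arch-neutral. */
static int demo_sampled_hot(struct bpf_perf_event_data *ctx)
{
	return ctx->sample_period > 100000;
}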
@@ -1,18 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-endif
-include $(srctree)/tools/scripts/Makefile.arch
-
-$(call detected_var,SRCARCH)
-
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
 APIDIR := ../../../include/uapi
-ASMDIR:= ../../../arch/$(ARCH)/include/uapi
 GENDIR := ../../../../include/generated
 GENHDR := $(GENDIR)/autoconf.h
@@ -21,7 +10,7 @@ ifneq ($(wildcard $(GENHDR)),)
 GENFLAGS := -DHAVE_GENHDR
 endif
 
-CFLAGS += -Wall -O2 -I$(APIDIR) -I$(ASMDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
 LDLIBS += -lcap -lelf
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
......