Commit 27602e2c authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2019-03-24

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) libbpf version fix up from Daniel.

2) fix liveness propagation from Jakub.

3) fix verbose print of refcounted regs from Martin.

4) fix for large map allocations from Martynas.

5) fix use after free in sanitize_ptr_alu from Xu.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 526949e8 a7d6ac37
...@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) ...@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
void *bpf_map_area_alloc(size_t size, int numa_node) void *bpf_map_area_alloc(size_t size, int numa_node)
{ {
/* We definitely need __GFP_NORETRY, so OOM killer doesn't /* We really just want to fail instead of triggering OOM killer
* trigger under memory pressure as we really just want to * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
* fail instead. * which is used for lower order allocation requests.
*
* It has been observed that higher order allocation requests done by
* vmalloc with __GFP_NORETRY being set might fail due to not trying
* to reclaim memory from the page cache, thus we set
* __GFP_RETRY_MAYFAIL to avoid such situations.
*/ */
const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
void *area; void *area;
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
area = kmalloc_node(size, GFP_USER | flags, numa_node); area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
numa_node);
if (area != NULL) if (area != NULL)
return area; return area;
} }
return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags, return __vmalloc_node_flags_caller(size, numa_node,
__builtin_return_address(0)); GFP_KERNEL | __GFP_RETRY_MAYFAIL |
flags, __builtin_return_address(0));
} }
void bpf_map_area_free(void *area) void bpf_map_area_free(void *area)
......
...@@ -352,6 +352,14 @@ static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) ...@@ -352,6 +352,14 @@ static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
map_value_has_spin_lock(reg->map_ptr); map_value_has_spin_lock(reg->map_ptr);
} }
static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCKET_OR_NULL ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_TCP_SOCK_OR_NULL;
}
static bool arg_type_may_be_refcounted(enum bpf_arg_type type) static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{ {
return type == ARG_PTR_TO_SOCK_COMMON; return type == ARG_PTR_TO_SOCK_COMMON;
...@@ -451,8 +459,9 @@ static void print_verifier_state(struct bpf_verifier_env *env, ...@@ -451,8 +459,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
if (t == PTR_TO_STACK) if (t == PTR_TO_STACK)
verbose(env, ",call_%d", func(env, reg)->callsite); verbose(env, ",call_%d", func(env, reg)->callsite);
} else { } else {
verbose(env, "(id=%d ref_obj_id=%d", reg->id, verbose(env, "(id=%d", reg->id);
reg->ref_obj_id); if (reg_type_may_be_refcounted_or_null(t))
verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
if (t != SCALAR_VALUE) if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off); verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t)) if (type_is_pkt_pointer(t))
...@@ -3372,7 +3381,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, ...@@ -3372,7 +3381,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
*dst_reg = *ptr_reg; *dst_reg = *ptr_reg;
} }
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg) if (!ptr_is_dst_reg && ret)
*dst_reg = tmp; *dst_reg = tmp;
return !ret ? -EFAULT : 0; return !ret ? -EFAULT : 0;
} }
...@@ -6069,17 +6078,19 @@ static int propagate_liveness(struct bpf_verifier_env *env, ...@@ -6069,17 +6078,19 @@ static int propagate_liveness(struct bpf_verifier_env *env,
} }
/* Propagate read liveness of registers... */ /* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */ for (frame = 0; frame <= vstate->curframe; frame++) {
for (i = 0; i < BPF_REG_FP; i++) { /* We don't need to worry about FP liveness, it's read-only */
if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
continue; continue;
if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
&vparent->frame[vstate->curframe]->regs[i]); &vparent->frame[frame]->regs[i]);
if (err) if (err)
return err; return err;
} }
} }
}
/* ... and stack slots */ /* ... and stack slots */
for (frame = 0; frame <= vstate->curframe; frame++) { for (frame = 0; frame <= vstate->curframe; frame++) {
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
BPF_VERSION = 0 BPF_VERSION = 0
BPF_PATCHLEVEL = 0 BPF_PATCHLEVEL = 0
BPF_EXTRAVERSION = 1 BPF_EXTRAVERSION = 2
MAKEFLAGS += --no-print-directory MAKEFLAGS += --no-print-directory
...@@ -79,8 +79,6 @@ export prefix libdir src obj ...@@ -79,8 +79,6 @@ export prefix libdir src obj
libdir_SQ = $(subst ','\'',$(libdir)) libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
LIB_FILE = libbpf.a libbpf.so
VERSION = $(BPF_VERSION) VERSION = $(BPF_VERSION)
PATCHLEVEL = $(BPF_PATCHLEVEL) PATCHLEVEL = $(BPF_PATCHLEVEL)
EXTRAVERSION = $(BPF_EXTRAVERSION) EXTRAVERSION = $(BPF_EXTRAVERSION)
...@@ -90,6 +88,9 @@ N = ...@@ -90,6 +88,9 @@ N =
LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION) LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION)
LIB_FILE = libbpf.a libbpf.so*
# Set compile option CFLAGS # Set compile option CFLAGS
ifdef EXTRA_CFLAGS ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS) CFLAGS := $(EXTRA_CFLAGS)
...@@ -129,15 +130,17 @@ export srctree OUTPUT CC LD CFLAGS V ...@@ -129,15 +130,17 @@ export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include include $(srctree)/tools/build/Makefile.include
BPF_IN := $(OUTPUT)libbpf-in.o BPF_IN := $(OUTPUT)libbpf-in.o
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
VERSION_SCRIPT := libbpf.map VERSION_SCRIPT := libbpf.map
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}') awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_FILE) CMD_TARGETS = $(LIB_TARGET)
CXX_TEST_TARGET = $(OUTPUT)test_libbpf CXX_TEST_TARGET = $(OUTPUT)test_libbpf
...@@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep ...@@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf $(Q)$(MAKE) $(build)=libbpf
$(OUTPUT)libbpf.so: $(BPF_IN) $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
$(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
$^ -o $@ $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN) $(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
...@@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so ...@@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so
exit 1; \ exit 1; \
fi fi
define do_install_mkdir
if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
fi
endef
define do_install define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
...@@ -200,8 +213,9 @@ define do_install ...@@ -200,8 +213,9 @@ define do_install
endef endef
install_lib: all_cmd install_lib: all_cmd
$(call QUIET_INSTALL, $(LIB_FILE)) \ $(call QUIET_INSTALL, $(LIB_TARGET)) \
$(call do_install,$(LIB_FILE),$(libdir_SQ)) $(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
install_headers: install_headers:
$(call QUIET_INSTALL, headers) \ $(call QUIET_INSTALL, headers) \
...@@ -219,7 +233,7 @@ config-clean: ...@@ -219,7 +233,7 @@ config-clean:
clean: clean:
$(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \ $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
*.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
......
...@@ -111,6 +111,7 @@ starting from ``0.0.1``. ...@@ -111,6 +111,7 @@ starting from ``0.0.1``.
Every time ABI is being changed, e.g. because a new symbol is added or Every time ABI is being changed, e.g. because a new symbol is added or
semantic of existing symbol is changed, ABI version should be bumped. semantic of existing symbol is changed, ABI version should be bumped.
This bump in ABI version is at most once per kernel development cycle.
For example, if current state of ``libbpf.map`` is: For example, if current state of ``libbpf.map`` is:
......
...@@ -1940,3 +1940,28 @@ ...@@ -1940,3 +1940,28 @@
.errstr = "!read_ok", .errstr = "!read_ok",
.result = REJECT, .result = REJECT,
}, },
{
"calls: cross frame pruning - liveness propagation",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_8, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_8, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_IMM(BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.errstr = "!read_ok",
.result = REJECT,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment