Commit ede95a63 authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf: add bpf_jit_limit knob to restrict unpriv allocations

Rick reported that the BPF JIT could potentially fill the entire module
space with BPF programs from unprivileged users, which would prevent
later attempts to load normal kernel modules or privileged BPF programs,
for example. Before commit 290af866 ("bpf: introduce BPF_JIT_ALWAYS_ON
config"), if the JIT was enabled but failed to generate the image, we
would always fall back to the BPF interpreter. Nowadays, if
CONFIG_BPF_JIT_ALWAYS_ON is set, the load aborts with a failure since
the BPF interpreter is compiled out.

Add a global limit and enforce it for unprivileged users: if the BPF
interpreter is compiled out, loading fails once the limit has been
reached; if the interpreter is compiled in, we fall back to it early
without consuming module memory. As a next step, fair sharing among
unprivileged users can be addressed, in particular for the case where
we would fail hard once the limit is reached.

Fixes: 290af866 ("bpf: introduce BPF_JIT_ALWAYS_ON config")
Fixes: 0a14842f ("net: filter: Just In Time compiler for x86-64")
Co-Developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: LKML <linux-kernel@vger.kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 4d716e10
@@ -92,6 +92,14 @@ Values :
 	0 - disable JIT kallsyms export (default value)
 	1 - enable JIT kallsyms export for privileged users only
 
+bpf_jit_limit
+-------------
+
+This enforces a global limit for memory allocations to the BPF JIT
+compiler in order to reject unprivileged JIT requests once it has
+been surpassed. bpf_jit_limit contains the value of the global limit
+in bytes.
+
 dev_weight
 --------------
......
@@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
+extern int bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
......
@@ -365,10 +365,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
+# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
+
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
+int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -577,27 +580,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 	return ret;
 }
 
+static atomic_long_t bpf_jit_current;
+
+#if defined(MODULES_VADDR)
+static int __init bpf_jit_charge_init(void)
+{
+	/* Only used as heuristic here to derive limit. */
+	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
+					    PAGE_SIZE), INT_MAX);
+	return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+#endif
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+	if (atomic_long_add_return(pages, &bpf_jit_current) >
+	    (bpf_jit_limit >> PAGE_SHIFT)) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			atomic_long_sub(pages, &bpf_jit_current);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+	atomic_long_sub(pages, &bpf_jit_current);
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	struct bpf_binary_header *hdr;
-	unsigned int size, hole, start;
+	u32 size, hole, start, pages;
 
 	/* Most of BPF filters are really small, but if some of them
 	 * fill a page, allow at least 128 extra bytes to insert a
 	 * random section of illegal instructions.
 	 */
 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+	pages = size / PAGE_SIZE;
+
+	if (bpf_jit_charge_modmem(pages))
+		return NULL;
 	hdr = module_alloc(size);
-	if (hdr == NULL)
+	if (!hdr) {
+		bpf_jit_uncharge_modmem(pages);
 		return NULL;
+	}
 
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
-	hdr->pages = size / PAGE_SIZE;
+	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -610,7 +650,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
+	u32 pages = hdr->pages;
+
 	module_memfree(hdr);
+	bpf_jit_uncharge_modmem(pages);
 }
 
 /* This symbol is only overridden by archs that have different
......
@@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 	return ret;
 }
 
-# ifdef CONFIG_HAVE_EBPF_JIT
 static int
 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
@@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
 	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
-# endif
 #endif
 
 static struct ctl_table net_core_table[] = {
@@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = {
 		.extra2		= &one,
 	},
 # endif
+	{
+		.procname	= "bpf_jit_limit",
+		.data		= &bpf_jit_limit,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
+		.extra1		= &one,
+	},
 #endif
 	{
 		.procname	= "netdev_tstamp_prequeue",
......