Commit 9666a702 authored by Alexei Starovoitov

Merge branch 'enforce W^X for trampoline and dispatcher'

Song Liu says:

====================

Changes v1 => v2:
1. Update arch_prepare_bpf_dispatcher to use a RO image and a RW buffer.
   (Alexei) Note: I haven't found an existing test to cover this part, so
   this part was tested manually (comparing the generated dispatcher is
   the same).

Jeff Layton reported a CPA W^X warning in linux-next [1]. It turns out to be
W^X issue with bpf trampoline and bpf dispatcher. Fix these by:

1. Use bpf_prog_pack for bpf_dispatcher;
2. Set memory permission properly with bpf trampoline.

[1] https://lore.kernel.org/lkml/c84cc27c1a5031a003039748c3c099732a718aec.camel@kernel.org/
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 30b8fdbb 5b0d1c7b
...@@ -2242,7 +2242,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -2242,7 +2242,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
return ret; return ret;
} }
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
{ {
u8 *jg_reloc, *prog = *pprog; u8 *jg_reloc, *prog = *pprog;
int pivot, err, jg_bytes = 1; int pivot, err, jg_bytes = 1;
...@@ -2258,12 +2258,12 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) ...@@ -2258,12 +2258,12 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
progs[a]); progs[a]);
err = emit_cond_near_jump(&prog, /* je func */ err = emit_cond_near_jump(&prog, /* je func */
(void *)progs[a], prog, (void *)progs[a], image + (prog - buf),
X86_JE); X86_JE);
if (err) if (err)
return err; return err;
emit_indirect_jump(&prog, 2 /* rdx */, prog); emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
*pprog = prog; *pprog = prog;
return 0; return 0;
...@@ -2288,7 +2288,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) ...@@ -2288,7 +2288,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
jg_reloc = prog; jg_reloc = prog;
err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
progs); progs, image, buf);
if (err) if (err)
return err; return err;
...@@ -2302,7 +2302,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) ...@@ -2302,7 +2302,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
b, progs); b, progs, image, buf);
if (err) if (err)
return err; return err;
...@@ -2322,12 +2322,12 @@ static int cmp_ips(const void *a, const void *b) ...@@ -2322,12 +2322,12 @@ static int cmp_ips(const void *a, const void *b)
return 0; return 0;
} }
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{ {
u8 *prog = image; u8 *prog = buf;
sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs); return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
} }
struct x64_jit_data { struct x64_jit_data {
......
...@@ -946,6 +946,7 @@ struct bpf_dispatcher { ...@@ -946,6 +946,7 @@ struct bpf_dispatcher {
struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
int num_progs; int num_progs;
void *image; void *image;
void *rw_image;
u32 image_off; u32 image_off;
struct bpf_ksym ksym; struct bpf_ksym ksym;
}; };
...@@ -964,7 +965,7 @@ int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampolin ...@@ -964,7 +965,7 @@ int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampolin
struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info); struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr); void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) { \ #define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \ .func = &_name##_func, \
...@@ -1007,7 +1008,6 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); ...@@ -1007,7 +1008,6 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to); struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */ /* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym);
......
...@@ -1023,6 +1023,8 @@ extern long bpf_jit_limit_max; ...@@ -1023,6 +1023,8 @@ extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
struct bpf_binary_header * struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment, unsigned int alignment,
...@@ -1035,6 +1037,9 @@ void bpf_jit_free(struct bpf_prog *fp); ...@@ -1035,6 +1037,9 @@ void bpf_jit_free(struct bpf_prog *fp);
struct bpf_binary_header * struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp); bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_prog_pack_free(struct bpf_binary_header *hdr);
static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{ {
return list_empty(&fp->aux->ksym.lnode) || return list_empty(&fp->aux->ksym.lnode) ||
......
...@@ -825,6 +825,11 @@ struct bpf_prog_pack { ...@@ -825,6 +825,11 @@ struct bpf_prog_pack {
unsigned long bitmap[]; unsigned long bitmap[];
}; };
void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
memset(area, 0, size);
}
#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
static DEFINE_MUTEX(pack_mutex); static DEFINE_MUTEX(pack_mutex);
...@@ -864,7 +869,7 @@ static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_ins ...@@ -864,7 +869,7 @@ static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_ins
return pack; return pack;
} }
static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{ {
unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size); unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
struct bpf_prog_pack *pack; struct bpf_prog_pack *pack;
...@@ -905,7 +910,7 @@ static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insn ...@@ -905,7 +910,7 @@ static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insn
return ptr; return ptr;
} }
static void bpf_prog_pack_free(struct bpf_binary_header *hdr) void bpf_prog_pack_free(struct bpf_binary_header *hdr)
{ {
struct bpf_prog_pack *pack = NULL, *tmp; struct bpf_prog_pack *pack = NULL, *tmp;
unsigned int nbits; unsigned int nbits;
......
...@@ -85,12 +85,12 @@ static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d, ...@@ -85,12 +85,12 @@ static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,
return false; return false;
} }
int __weak arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs) int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{ {
return -ENOTSUPP; return -ENOTSUPP;
} }
static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image) static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
{ {
s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0]; s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
int i; int i;
...@@ -99,12 +99,12 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image) ...@@ -99,12 +99,12 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image)
if (d->progs[i].prog) if (d->progs[i].prog)
*ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func; *ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func;
} }
return arch_prepare_bpf_dispatcher(image, &ips[0], d->num_progs); return arch_prepare_bpf_dispatcher(image, buf, &ips[0], d->num_progs);
} }
static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs) static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
{ {
void *old, *new; void *old, *new, *tmp;
u32 noff; u32 noff;
int err; int err;
...@@ -117,8 +117,14 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs) ...@@ -117,8 +117,14 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
} }
new = d->num_progs ? d->image + noff : NULL; new = d->num_progs ? d->image + noff : NULL;
tmp = d->num_progs ? d->rw_image + noff : NULL;
if (new) { if (new) {
if (bpf_dispatcher_prepare(d, new)) /* Prepare the dispatcher in d->rw_image. Then use
* bpf_arch_text_copy to update d->image, which is RO+X.
*/
if (bpf_dispatcher_prepare(d, new, tmp))
return;
if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))
return; return;
} }
...@@ -140,9 +146,18 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, ...@@ -140,9 +146,18 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
mutex_lock(&d->mutex); mutex_lock(&d->mutex);
if (!d->image) { if (!d->image) {
d->image = bpf_jit_alloc_exec_page(); d->image = bpf_prog_pack_alloc(PAGE_SIZE, bpf_jit_fill_hole_with_zero);
if (!d->image) if (!d->image)
goto out; goto out;
d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!d->rw_image) {
u32 size = PAGE_SIZE;
bpf_arch_text_copy(d->image, &size, sizeof(size));
bpf_prog_pack_free((struct bpf_binary_header *)d->image);
d->image = NULL;
goto out;
}
bpf_image_ksym_add(d->image, &d->ksym); bpf_image_ksym_add(d->image, &d->ksym);
} }
......
...@@ -116,22 +116,6 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog) ...@@ -116,22 +116,6 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC); (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
} }
void *bpf_jit_alloc_exec_page(void)
{
void *image;
image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!image)
return NULL;
set_vm_flush_reset_perms(image);
/* Keep image as writeable. The alternative is to keep flipping ro/rw
* every time new program is attached or detached.
*/
set_memory_x((long)image, 1);
return image;
}
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym) void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{ {
ksym->start = (unsigned long) data; ksym->start = (unsigned long) data;
...@@ -404,9 +388,10 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx) ...@@ -404,9 +388,10 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
goto out_free_im; goto out_free_im;
err = -ENOMEM; err = -ENOMEM;
im->image = image = bpf_jit_alloc_exec_page(); im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!image) if (!image)
goto out_uncharge; goto out_uncharge;
set_vm_flush_reset_perms(image);
err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL); err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
if (err) if (err)
...@@ -483,6 +468,9 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut ...@@ -483,6 +468,9 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
if (err < 0) if (err < 0)
goto out; goto out;
set_memory_ro((long)im->image, 1);
set_memory_x((long)im->image, 1);
WARN_ON(tr->cur_image && tr->selector == 0); WARN_ON(tr->cur_image && tr->selector == 0);
WARN_ON(!tr->cur_image && tr->selector); WARN_ON(!tr->cur_image && tr->selector);
if (tr->cur_image) if (tr->cur_image)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment