Commit 180486b4 authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf, selftests: Add test case for atomic fetch on spilled pointer

Test whether an unprivileged user would be able to leak the spilled pointer,
either by exporting the value returned from the atomic{32,64} operation or by
reading and exporting the value from the stack after the atomic operation took
place.
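
For orientation, the two leak vectors correspond roughly to the restricted C
sketch below (illustrative only, not part of the patch; with clang's BPF
target, __sync_val_compare_and_swap() is lowered to the BPF cmpxchg
instruction when atomic instructions are enabled, e.g. -mcpu=v3):

  /* illustrative sketch only -- variable names are made up */
  long slot = (long)&slot;    /* spill a pointer into a stack slot   */
  /* vector 1: the old slot contents come back as the return value   */
  long old = __sync_val_compare_and_swap(&slot, 0, 0);
  /* vector 2: read the slot back from the stack after the atomic op */
  long readback = slot;
  /* exporting 'old' or 'readback' would leak the spilled pointer    */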

Note that for unprivileged, the atomic cmpxchg test case below, named "Dest
pointer in r0 - succeed", fails: the dst memory location (r10 -8) holds the
spilled register r10:

  0: R1=ctx(id=0,off=0,imm=0) R10=fp0
  0: (bf) r0 = r10
  1: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0
  1: (7b) *(u64 *)(r10 -8) = r0
  2: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0 fp-8_w=fp
  2: (b7) r1 = 0
  3: R0_w=fp0 R1_w=invP0 R10=fp0 fp-8_w=fp
  3: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r1)
  4: R0_w=fp0 R1_w=invP0 R10=fp0 fp-8_w=mmmmmmmm
  4: (79) r1 = *(u64 *)(r0 -8)
  5: R0_w=fp0 R1_w=invP(id=0) R10=fp0 fp-8_w=mmmmmmmm
  5: (b7) r0 = 0
  6: R0_w=invP0 R1_w=invP(id=0) R10=fp0 fp-8_w=mmmmmmmm
  6: (95) exit

However, allowing this case for unprivileged would be of little use, given
that an update with a new pointer will fail anyway:

  0: R1=ctx(id=0,off=0,imm=0) R10=fp0
  0: (bf) r0 = r10
  1: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0
  1: (7b) *(u64 *)(r10 -8) = r0
  2: R0_w=fp0 R1=ctx(id=0,off=0,imm=0) R10=fp0 fp-8_w=fp
  2: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r10)
  R10 leaks addr into mem
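
In restricted C, the rejected update corresponds roughly to this sketch
(again illustrative only, not part of the patch):

  /* illustrative sketch only */
  long slot = (long)&slot;
  /* swapping another pointer into the slot is rejected for unprivileged:
   * the store half of the cmpxchg would write a pointer value to memory,
   * hence "R10 leaks addr into mem" above
   */
  long old = __sync_val_compare_and_swap(&slot, slot, (long)&slot);
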
Acked-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7d3baf0a
@@ -156,4 +156,27 @@
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "leaking pointer from stack off -8",
},
{
	"Dest pointer in r0 - succeed, check 2",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R5 leaks addr into mem",
},
{
	"atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
	.insns = {
		/* r1 = -1 (all-ones mask for the atomic and) */
		BPF_LD_IMM64(BPF_REG_1, -1),
		/* r8, r9 = map pointer (fds patched via fixup_map_array_48b) */
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		/* r2 = fp - 8; spill the map pointer into the stack slot */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		/* slot &= -1, i.e. the slot still holds the map pointer */
		BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		/* leak vector: read (map ptr & -1) back from the stack slot */
		BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		/* export r9 into the map value, observable by user space */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "leaking pointer from stack off -8",
},
{
	"atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		/* leak vector: the fetched old value (map ptr & -1) lands in r1 */
		BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "leaking pointer from stack off -8",
},
{
	"atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		/* 32-bit atomic on the 64-bit spilled pointer slot is
		 * rejected as an invalid size of register fill
		 */
		BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = REJECT,
	.errstr = "invalid size of register fill",
},
{
	"atomic w/fetch and address leakage of (map ptr & -1) via returned value",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		/* 32-bit fetch of the 64-bit spilled pointer slot is
		 * rejected as an invalid size of register fill
		 */
		BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = REJECT,
	.errstr = "invalid size of register fill",
},
#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
	{ \
		"atomic fetch " #op ", src=" #src_reg " dst=" #dst_reg, \
......
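
The macro body is truncated in this view. For context, such a macro would
typically be instantiated once per src/dst register combination, along these
lines (hypothetical usage, not shown in this excerpt; operands chosen so that
operand1 op operand2 yields expect):

  /* hypothetical instantiations -- the actual list is elided above */
  __ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 1, BPF_ADD | BPF_FETCH, 2, 3),
  __ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 1, BPF_ADD | BPF_FETCH, 2, 3),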