1. 12 Jul, 2024 3 commits
    • Shung-Hsi Yu's avatar
      bpf: use check_add_overflow() to check for addition overflows · 28a44110
      Shung-Hsi Yu authored
      signed_add*_overflows() was added back when there was no overflow-check
      helper. With the introduction of such helpers in commit f0907827
      ("compiler.h: enable builtin overflow checkers and add fallback code"), we
      can drop signed_add*_overflows() in kernel/bpf/verifier.c and use the
      generic check_add_overflow() instead.
      
      This will make future refactoring easier, and takes advantage of
      compiler-emitted hardware instructions that efficiently implement these
      checks.
      
      After the change GCC 13.3.0 generates cleaner assembly on x86_64:
      
      	err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
         13625:	mov    0x28(%rbx),%r9  /*  r9 = src_reg->smin_value */
         13629:	mov    0x30(%rbx),%rcx /* rcx = src_reg->smax_value */
         ...
      	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
         141c1:	mov    %r9,%rax
         141c4:	add    0x28(%r12),%rax
         141c9:	mov    %rax,0x28(%r12)
         141ce:	jo     146e4 <adjust_reg_min_max_vals+0x1294>
      	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
         141d4:	add    0x30(%r12),%rcx
         141d9:	mov    %rcx,0x30(%r12)
      	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
         141de:	jo     146e4 <adjust_reg_min_max_vals+0x1294>
         ...
      		*dst_smin = S64_MIN;
         146e4:	movabs $0x8000000000000000,%rax
         146ee:	mov    %rax,0x28(%r12)
      		*dst_smax = S64_MAX;
         146f3:	sub    $0x1,%rax
         146f7:	mov    %rax,0x30(%r12)
      
      Before the change it gives:
      
      	s64 smin_val = src_reg->smin_value;
           675:	mov    0x28(%rsi),%r8
      	s64 smax_val = src_reg->smax_value;
      	u64 umin_val = src_reg->umin_value;
      	u64 umax_val = src_reg->umax_value;
           679:	mov    %rdi,%rax /* rax = dst_reg */
      	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
           67c:	mov    0x28(%rdi),%rdi /* rdi = dst_reg->smin_value */
      	u64 umin_val = src_reg->umin_value;
           680:	mov    0x38(%rsi),%rdx
      	u64 umax_val = src_reg->umax_value;
           684:	mov    0x40(%rsi),%rcx
      	s64 res = (s64)((u64)a + (u64)b);
           688:	lea    (%r8,%rdi,1),%r9 /* r9 = dst_reg->smin_value + src_reg->smin_value */
      	return res < a;
           68c:	cmp    %r9,%rdi
           68f:	setg   %r10b /* r10b = (dst_reg->smin_value + src_reg->smin_value) > dst_reg->smin_value */
      	if (b < 0)
           693:	test   %r8,%r8
           696:	js     72b <scalar_min_max_add+0xbb>
      	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
      		dst_reg->smin_value = S64_MIN;
      		dst_reg->smax_value = S64_MAX;
           69c:	movabs $0x7fffffffffffffff,%rdi
      	s64 smax_val = src_reg->smax_value;
           6a6:	mov    0x30(%rsi),%r8
      		dst_reg->smin_value = S64_MIN;
     6aa:	movabs $0x8000000000000000,%rsi
      	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
           6b4:	test   %r10b,%r10b /* (dst_reg->smin_value + src_reg->smin_value) > dst_reg->smin_value ? goto 6cb */
           6b7:	jne    6cb <scalar_min_max_add+0x5b>
      	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
           6b9:	mov    0x30(%rax),%r10   /* r10 = dst_reg->smax_value */
      	s64 res = (s64)((u64)a + (u64)b);
           6bd:	lea    (%r10,%r8,1),%r11 /* r11 = dst_reg->smax_value + src_reg->smax_value */
      	if (b < 0)
           6c1:	test   %r8,%r8
           6c4:	js     71e <scalar_min_max_add+0xae>
      	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
           6c6:	cmp    %r11,%r10 /* (dst_reg->smax_value + src_reg->smax_value) <= dst_reg->smax_value ? goto 723 */
           6c9:	jle    723 <scalar_min_max_add+0xb3>
      	} else {
      		dst_reg->smin_value += smin_val;
      		dst_reg->smax_value += smax_val;
      	}
           6cb:	mov    %rsi,0x28(%rax)
           ...
           6d5:	mov    %rdi,0x30(%rax)
           ...
      	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
           71e:	cmp    %r11,%r10
           721:	jl     6cb <scalar_min_max_add+0x5b>
      		dst_reg->smin_value += smin_val;
           723:	mov    %r9,%rsi
      		dst_reg->smax_value += smax_val;
           726:	mov    %r11,%rdi
           729:	jmp    6cb <scalar_min_max_add+0x5b>
      		return res > a;
           72b:	cmp    %r9,%rdi
           72e:	setl   %r10b
           732:	jmp    69c <scalar_min_max_add+0x2c>
           737:	nopw   0x0(%rax,%rax,1)
      
      Note: unlike adjust_ptr_min_max_vals() and scalar*_min_max_add(), it is
      necessary to introduce intermediate variable in adjust_jmp_off() to keep
      the functional behavior unchanged. Without an intermediate variable
      imm/off will be altered even on overflow.
      Suggested-by: Jiri Olsa <jolsa@kernel.org>
      Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
      Link: https://lore.kernel.org/r/20240712080127.136608-3-shung-hsi.yu@suse.com
      Signed-off-by: Alexei Starovoitov <ast@kernel.org>
      28a44110
    • Shung-Hsi Yu's avatar
      bpf: fix overflow check in adjust_jmp_off() · 4a04b4f0
      Shung-Hsi Yu authored
      adjust_jmp_off() incorrectly used the insn->imm field for all overflow checks,
      which is incorrect as that should only be done for the BPF_JMP32 | BPF_JA case,
      not the general jump instruction case. Fix it by using insn->off for overflow
      check in the general case.
      
      Fixes: 5337ac4c ("bpf: Fix the corner case with may_goto and jump to the 1st insn.")
      Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
      Link: https://lore.kernel.org/r/20240712080127.136608-2-shung-hsi.yu@suse.com
      Signed-off-by: Alexei Starovoitov <ast@kernel.org>
      4a04b4f0
    • Alan Maguire's avatar
      bpf: Eliminate remaining "make W=1" warnings in kernel/bpf/btf.o · 2454075f
      Alan Maguire authored
      As reported by Mirsad [1] we still see format warnings in kernel/bpf/btf.o
      at W=1 warning level:
      
        CC      kernel/bpf/btf.o
      ./kernel/bpf/btf.c: In function ‘btf_type_seq_show_flags’:
      ./kernel/bpf/btf.c:7553:21: warning: assignment left-hand side might be a candidate for a format attribute [-Wsuggest-attribute=format]
       7553 |         sseq.showfn = btf_seq_show;
            |                     ^
      ./kernel/bpf/btf.c: In function ‘btf_type_snprintf_show’:
      ./kernel/bpf/btf.c:7604:31: warning: assignment left-hand side might be a candidate for a format attribute [-Wsuggest-attribute=format]
       7604 |         ssnprintf.show.showfn = btf_snprintf_show;
            |                               ^
      
      Combined with CONFIG_WERROR=y these can halt the build.
      
      The fix (annotating the structure field with __printf())
      suggested by Mirsad resolves these. Apologies I missed this last time.
      No other W=1 warnings were observed in kernel/bpf after this fix.
      
      [1] https://lore.kernel.org/bpf/92c9d047-f058-400c-9c7d-81d4dc1ef71b@gmail.com/
      
      Fixes: b3470da3 ("bpf: annotate BTF show functions with __printf")
      Reported-by: Mirsad Todorovac <mtodorovac69@gmail.com>
      Suggested-by: Mirsad Todorovac <mtodorovac69@gmail.com>
      Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Link: https://lore.kernel.org/bpf/20240712092859.1390960-1-alan.maguire@oracle.com
      2454075f
  2. 11 Jul, 2024 2 commits
  3. 10 Jul, 2024 17 commits
  4. 09 Jul, 2024 18 commits