Commit b4560055 authored by Andrii Nakryiko

Merge branch 'bpf-volatile-compare'

Alexei Starovoitov says:

====================
bpf: volatile compare

From: Alexei Starovoitov <ast@kernel.org>

v2->v3:
Debugged profiler.c regression. It was caused by basic block layout.
Introduce bpf_cmp_likely() and bpf_cmp_unlikely() macros.
Debugged redundant <<=32, >>=32 with u32 variables. Added cast workaround.

v1->v2:
Fixed issues pointed out by Daniel, added more tests, attempted to convert profiler.c,
but barrier_var() wins vs bpf_cmp(). To be investigated.
Patches 1-4 are good to go, but 5 needs more work.
====================

Link: https://lore.kernel.org/r/20231226191148.48536-1-alexei.starovoitov@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents a640de4c 7e3811cb
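
The series replaces the bpf_assert_<op>() macro family with bpf_cmp_likely()/bpf_cmp_unlikely(), which emit the comparison as literal BPF assembly so clang cannot fold, reorder, or sign-mangle a check the verifier relies on. A minimal usage sketch, not taken from the series (the hook point and program name are hypothetical; it assumes the updated bpf_experimental.h and an asm-goto-capable clang):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char comm[16];

SEC("tp/syscalls/sys_enter_nanosleep")
int cmp_sketch(void *ctx)
{
	long len = bpf_get_current_comm(comm, sizeof(comm));

	/* Expands to a single "if len s< 0 goto ..." BPF instruction;
	 * the signed jump is chosen automatically because len is signed. */
	if (bpf_cmp_unlikely(len, <, 0))
		return 0;
	/* The verifier now knows len >= 0 on this path. */
	return 0;
}

char _license[] SEC("license") = "GPL";

bpf_cmp_likely() computes the same predicate but inverts the underlying jump so the likely path falls through, which is what resolved the profiler.c basic-block-layout regression mentioned above.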
@@ -383,6 +383,7 @@ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
 BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
 	     -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
 	     -I$(abspath $(OUTPUT)/../usr/include)
+# TODO: enable me -Wsign-compare
 CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
 	       -Wno-compare-distinct-pointer-types
...
@@ -254,173 +254,97 @@ extern void bpf_throw(u64 cookie) __ksym;
 	}										\
 })
-/* Description
- *	Assert that a conditional expression is true.
- * Returns
- *	Void.
- * Throws
- *	An exception with the value zero when the assertion fails.
- */
-#define bpf_assert(cond) if (!(cond)) bpf_throw(0);
-/* Description
- *	Assert that a conditional expression is true.
- * Returns
- *	Void.
- * Throws
- *	An exception with the specified value when the assertion fails.
- */
-#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);
-/* Description
- *	Assert that LHS is equal to RHS. This statement updates the known value
- *	of LHS during verification. Note that RHS must be a constant value, and
- *	must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the value zero when the assertion fails.
- */
-#define bpf_assert_eq(LHS, RHS)						\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, ==, RHS, 0, true);			\
-	})
-/* Description
- *	Assert that LHS is equal to RHS. This statement updates the known value
- *	of LHS during verification. Note that RHS must be a constant value, and
- *	must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the specified value when the assertion fails.
- */
-#define bpf_assert_eq_with(LHS, RHS, value)				\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, ==, RHS, value, true);		\
-	})
-/* Description
- *	Assert that LHS is less than RHS. This statement updates the known
- *	bounds of LHS during verification. Note that RHS must be a constant
- *	value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the value zero when the assertion fails.
- */
-#define bpf_assert_lt(LHS, RHS)						\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, <, RHS, 0, false);			\
-	})
-/* Description
- *	Assert that LHS is less than RHS. This statement updates the known
- *	bounds of LHS during verification. Note that RHS must be a constant
- *	value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the specified value when the assertion fails.
- */
-#define bpf_assert_lt_with(LHS, RHS, value)				\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, <, RHS, value, false);		\
-	})
-/* Description
- *	Assert that LHS is greater than RHS. This statement updates the known
- *	bounds of LHS during verification. Note that RHS must be a constant
- *	value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the value zero when the assertion fails.
- */
-#define bpf_assert_gt(LHS, RHS)						\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, >, RHS, 0, false);			\
-	})
-/* Description
- *	Assert that LHS is greater than RHS. This statement updates the known
- *	bounds of LHS during verification. Note that RHS must be a constant
- *	value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the specified value when the assertion fails.
- */
-#define bpf_assert_gt_with(LHS, RHS, value)				\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, >, RHS, value, false);		\
-	})
-/* Description
- *	Assert that LHS is less than or equal to RHS. This statement updates the
- *	known bounds of LHS during verification. Note that RHS must be a
- *	constant value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the value zero when the assertion fails.
- */
-#define bpf_assert_le(LHS, RHS)						\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, <=, RHS, 0, false);		\
-	})
-/* Description
- *	Assert that LHS is less than or equal to RHS. This statement updates the
- *	known bounds of LHS during verification. Note that RHS must be a
- *	constant value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the specified value when the assertion fails.
- */
-#define bpf_assert_le_with(LHS, RHS, value)				\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, <=, RHS, value, false);		\
-	})
-/* Description
- *	Assert that LHS is greater than or equal to RHS. This statement updates
- *	the known bounds of LHS during verification. Note that RHS must be a
- *	constant value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the value zero when the assertion fails.
- */
-#define bpf_assert_ge(LHS, RHS)						\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, >=, RHS, 0, false);		\
-	})
-/* Description
- *	Assert that LHS is greater than or equal to RHS. This statement updates
- *	the known bounds of LHS during verification. Note that RHS must be a
- *	constant value, and must fit within the data type of LHS.
- * Returns
- *	Void.
- * Throws
- *	An exception with the specified value when the assertion fails.
- */
-#define bpf_assert_ge_with(LHS, RHS, value)				\
-	({								\
-		barrier_var(LHS);					\
-		__bpf_assert_op(LHS, >=, RHS, value, false);		\
-	})
+#define __cmp_cannot_be_signed(x) \
+	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
+	__builtin_strcmp(#x, "&") == 0
+#define __is_signed_type(type) (((type)(-1)) < (type)1)
+#define __bpf_cmp(LHS, OP, SIGN, PRED, RHS, DEFAULT)						\
+	({											\
+		__label__ l_true;								\
+		bool ret = DEFAULT;								\
+		asm volatile goto("if %[lhs] " SIGN #OP " %[rhs] goto %l[l_true]"		\
+				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true);	\
+		ret = !DEFAULT;									\
+l_true:												\
+		ret;										\
+	})
+/* C type conversions coupled with comparison operator are tricky.
+ * Make sure BPF program is compiled with -Wsign-compare then
+ * __lhs OP __rhs below will catch the mistake.
+ * Be aware that we check only __lhs to figure out the sign of compare.
+ */
+#define _bpf_cmp(LHS, OP, RHS, NOFLIP)								\
+	({											\
+		typeof(LHS) __lhs = (LHS);							\
+		typeof(RHS) __rhs = (RHS);							\
+		bool ret;									\
+		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression");	\
+		(void)(__lhs OP __rhs);								\
+		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) {		\
+			if (sizeof(__rhs) == 8)							\
+				ret = __bpf_cmp(__lhs, OP, "", "r", __rhs, NOFLIP);		\
+			else									\
+				ret = __bpf_cmp(__lhs, OP, "", "i", __rhs, NOFLIP);		\
+		} else {									\
+			if (sizeof(__rhs) == 8)							\
+				ret = __bpf_cmp(__lhs, OP, "s", "r", __rhs, NOFLIP);		\
+			else									\
+				ret = __bpf_cmp(__lhs, OP, "s", "i", __rhs, NOFLIP);		\
+		}										\
+		ret;										\
+	})
+#ifndef bpf_cmp_unlikely
+#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
+#endif
+#ifndef bpf_cmp_likely
+#define bpf_cmp_likely(LHS, OP, RHS)								\
+	({											\
+		bool ret;									\
+		if (__builtin_strcmp(#OP, "==") == 0)						\
+			ret = _bpf_cmp(LHS, !=, RHS, false);					\
+		else if (__builtin_strcmp(#OP, "!=") == 0)					\
+			ret = _bpf_cmp(LHS, ==, RHS, false);					\
+		else if (__builtin_strcmp(#OP, "<=") == 0)					\
+			ret = _bpf_cmp(LHS, >, RHS, false);					\
+		else if (__builtin_strcmp(#OP, "<") == 0)					\
+			ret = _bpf_cmp(LHS, >=, RHS, false);					\
+		else if (__builtin_strcmp(#OP, ">") == 0)					\
+			ret = _bpf_cmp(LHS, <=, RHS, false);					\
+		else if (__builtin_strcmp(#OP, ">=") == 0)					\
+			ret = _bpf_cmp(LHS, <, RHS, false);					\
+		else										\
+			(void) "bug";								\
+		ret;										\
+	})
+#endif
+#ifndef bpf_nop_mov
+#define bpf_nop_mov(var) \
+	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
+#endif
+/* Description
+ *	Assert that a conditional expression is true.
+ * Returns
+ *	Void.
+ * Throws
+ *	An exception with the value zero when the assertion fails.
+ */
+#define bpf_assert(cond) if (!(cond)) bpf_throw(0);
+/* Description
+ *	Assert that a conditional expression is true.
+ * Returns
+ *	Void.
+ * Throws
+ *	An exception with the specified value when the assertion fails.
+ */
+#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);
 /* Description
  *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
...
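
To make the mechanics above easier to follow, here is the asm-goto pattern that __bpf_cmp() builds, stripped down to one hard-coded comparison (an illustrative sketch, not part of the header): the jump is written as literal BPF assembly, so it reaches the verifier exactly as written, and the (short) cast in the real macro is the v2->v3 workaround that stops clang from emitting the redundant <<=32, >>=32 zero-extension shifts around 32-bit operands.

/* Unsigned ">" in the "unlikely" flavor: DEFAULT == true, so the taken
 * branch returns true and the fall-through returns false, matching
 * __bpf_cmp(lhs, >, "", "r", rhs, true). */
static __always_inline bool sketch_gt_unlikely(unsigned long lhs,
					       unsigned long rhs)
{
	__label__ l_true;
	bool ret = true;

	/* One conditional-jump insn that clang treats as an opaque block. */
	asm volatile goto("if %[l] > %[r] goto %l[l_true]"
			  :: [l] "r"(lhs), [r] "r"(rhs) :: l_true);
	ret = false;	/* fall-through: comparison was false */
l_true:
	return ret;
}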
@@ -20,7 +20,7 @@ struct {
 } hashmap1 SEC(".maps");
 /* will set before prog run */
-volatile const __u32 num_cpus = 0;
+volatile const __s32 num_cpus = 0;
 /* will collect results during prog run */
 __u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
...
@@ -35,7 +35,7 @@ SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
 		return 0;
 	file = vma->vm_file;
-	if (task->tgid != pid) {
+	if (task->tgid != (pid_t)pid) {
 		if (one_task)
 			one_task_error = 1;
 		return 0;
...
@@ -22,7 +22,7 @@ int dump_task(struct bpf_iter__task *ctx)
 		return 0;
 	}
-	if (task->pid != tid)
+	if (task->pid != (pid_t)tid)
 		num_unknown_tid++;
 	else
 		num_known_tid++;
...
@@ -45,7 +45,7 @@ int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
 	}
 	/* fill seq_file buffer */
-	for (i = 0; i < print_len; i++)
+	for (i = 0; i < (int)print_len; i++)
 		bpf_seq_write(seq, &seq_num, sizeof(seq_num));
 	return ret;
...
@@ -11,7 +11,7 @@
 __u32 invocations = 0;
 __u32 assertion_error = 0;
 __u32 retval_value = 0;
-__u32 page_size = 0;
+__s32 page_size = 0;
 SEC("cgroup/setsockopt")
 int get_retval(struct bpf_sockopt *ctx)
...
@@ -15,7 +15,7 @@ struct {
 	__type(value, long);
 } map_a SEC(".maps");
-__u32 target_pid;
+__s32 target_pid;
 __u64 cgroup_id;
 int target_hid;
 bool is_cgroup1;
...
@@ -332,7 +332,7 @@ SEC("tp_btf/task_newtask")
 int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
 {
 	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
-	u32 cpu;
+	int cpu;
 	if (!is_test_task())
 		return 0;
...
@@ -210,7 +210,7 @@ __noinline int assert_zero_gfunc(u64 c)
 {
 	volatile u64 cookie = c;
-	bpf_assert_eq(cookie, 0);
+	bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
 	return 0;
 }
@@ -218,7 +218,7 @@ __noinline int assert_neg_gfunc(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_lt(cookie, 0);
+	bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
 	return 0;
 }
@@ -226,7 +226,7 @@ __noinline int assert_pos_gfunc(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_gt(cookie, 0);
+	bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
 	return 0;
 }
@@ -234,7 +234,7 @@ __noinline int assert_negeq_gfunc(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_le(cookie, -1);
+	bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
 	return 0;
 }
@@ -242,7 +242,7 @@ __noinline int assert_poseq_gfunc(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_ge(cookie, 1);
+	bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
 	return 0;
 }
@@ -258,7 +258,7 @@ __noinline int assert_zero_gfunc_with(u64 c)
 {
 	volatile u64 cookie = c;
-	bpf_assert_eq_with(cookie, 0, cookie + 100);
+	bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
 	return 0;
 }
@@ -266,7 +266,7 @@ __noinline int assert_neg_gfunc_with(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_lt_with(cookie, 0, cookie + 100);
+	bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
 	return 0;
 }
@@ -274,7 +274,7 @@ __noinline int assert_pos_gfunc_with(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_gt_with(cookie, 0, cookie + 100);
+	bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
 	return 0;
 }
@@ -282,7 +282,7 @@ __noinline int assert_negeq_gfunc_with(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_le_with(cookie, -1, cookie + 100);
+	bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
 	return 0;
 }
@@ -290,7 +290,7 @@ __noinline int assert_poseq_gfunc_with(s64 c)
 {
 	volatile s64 cookie = c;
-	bpf_assert_ge_with(cookie, 1, cookie + 100);
+	bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
 	return 0;
 }
...
@@ -11,51 +11,51 @@
 #define check_assert(type, op, name, value)				\
 	SEC("?tc")							\
 	__log_level(2) __failure					\
-	int check_assert_##op##_##name(void *ctx)			\
+	int check_assert_##name(void *ctx)				\
 	{								\
 		type num = bpf_ktime_get_ns();				\
-		bpf_assert_##op(num, value);				\
+		bpf_assert(bpf_cmp_unlikely(num, op, value));		\
 		return *(u64 *)num;					\
 	}
-__msg(": R0_w=0xffffffff80000000 R10=fp0")
-check_assert(s64, eq, int_min, INT_MIN);
+__msg(": R0_w=0xffffffff80000000")
+check_assert(s64, ==, eq_int_min, INT_MIN);
-__msg(": R0_w=0x7fffffff R10=fp0")
-check_assert(s64, eq, int_max, INT_MAX);
+__msg(": R0_w=0x7fffffff")
+check_assert(s64, ==, eq_int_max, INT_MAX);
-__msg(": R0_w=0 R10=fp0")
-check_assert(s64, eq, zero, 0);
+__msg(": R0_w=0")
+check_assert(s64, ==, eq_zero, 0);
-__msg(": R0_w=0x8000000000000000 R1_w=0x8000000000000000 R10=fp0")
-check_assert(s64, eq, llong_min, LLONG_MIN);
+__msg(": R0_w=0x8000000000000000 R1_w=0x8000000000000000")
+check_assert(s64, ==, eq_llong_min, LLONG_MIN);
-__msg(": R0_w=0x7fffffffffffffff R1_w=0x7fffffffffffffff R10=fp0")
-check_assert(s64, eq, llong_max, LLONG_MAX);
+__msg(": R0_w=0x7fffffffffffffff R1_w=0x7fffffffffffffff")
+check_assert(s64, ==, eq_llong_max, LLONG_MAX);
-__msg(": R0_w=scalar(smax=0x7ffffffe) R10=fp0")
-check_assert(s64, lt, pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smax=0x7ffffffe)")
+check_assert(s64, <, lt_pos, INT_MAX);
-__msg(": R0_w=scalar(smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
-check_assert(s64, lt, zero, 0);
+__msg(": R0_w=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+check_assert(s64, <, lt_zero, 0);
-__msg(": R0_w=scalar(smax=0xffffffff7fffffff,umin=0x8000000000000000,umax=0xffffffff7fffffff,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
-check_assert(s64, lt, neg, INT_MIN);
+__msg(": R0_w=scalar(id=1,smax=0xffffffff7fffffff")
+check_assert(s64, <, lt_neg, INT_MIN);
-__msg(": R0_w=scalar(smax=0x7fffffff) R10=fp0")
-check_assert(s64, le, pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smax=0x7fffffff)")
+check_assert(s64, <=, le_pos, INT_MAX);
-__msg(": R0_w=scalar(smax=0) R10=fp0")
-check_assert(s64, le, zero, 0);
+__msg(": R0_w=scalar(id=1,smax=0)")
+check_assert(s64, <=, le_zero, 0);
-__msg(": R0_w=scalar(smax=0xffffffff80000000,umin=0x8000000000000000,umax=0xffffffff80000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
-check_assert(s64, le, neg, INT_MIN);
+__msg(": R0_w=scalar(id=1,smax=0xffffffff80000000")
+check_assert(s64, <=, le_neg, INT_MIN);
-__msg(": R0_w=scalar(smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
-check_assert(s64, gt, pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >, gt_pos, INT_MAX);
-__msg(": R0_w=scalar(smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
-check_assert(s64, gt, zero, 0);
+__msg(": R0_w=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >, gt_zero, 0);
-__msg(": R0_w=scalar(smin=0xffffffff80000001) R10=fp0")
-check_assert(s64, gt, neg, INT_MIN);
+__msg(": R0_w=scalar(id=1,smin=0xffffffff80000001")
+check_assert(s64, >, gt_neg, INT_MIN);
-__msg(": R0_w=scalar(smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
-check_assert(s64, ge, pos, INT_MAX);
+__msg(": R0_w=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >=, ge_pos, INT_MAX);
-__msg(": R0_w=scalar(smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
-check_assert(s64, ge, zero, 0);
+__msg(": R0_w=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+check_assert(s64, >=, ge_zero, 0);
-__msg(": R0_w=scalar(smin=0xffffffff80000000) R10=fp0")
-check_assert(s64, ge, neg, INT_MIN);
+__msg(": R0_w=scalar(id=1,smin=0xffffffff80000000")
+check_assert(s64, >=, ge_neg, INT_MIN);
 SEC("?tc")
 __log_level(2) __failure
...
@@ -6,7 +6,7 @@
 #include <bpf/bpf_helpers.h>
 #include "bpf_misc.h"
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
 static volatile int zero = 0;
@@ -676,7 +676,7 @@ static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
 	while ((t = bpf_iter_num_next(it))) {
 		i = *t;
-		if (i >= n)
+		if ((__u32)i >= n)
 			break;
 		sum += arr[i];
 	}
...
@@ -28,9 +28,8 @@ int iter_task_vma_for_each(const void *ctx)
 		return 0;
 	bpf_for_each(task_vma, vma, task, 0) {
-		if (seen >= 1000)
+		if (bpf_cmp_unlikely(seen, >=, 1000))
 			break;
-		barrier_var(seen);
 		vm_ranges[seen].vm_start = vma->vm_start;
 		vm_ranges[seen].vm_end = vma->vm_end;
...
@@ -8,7 +8,7 @@
 #include "bpf_misc.h"
 /* weak and shared between two files */
-const volatile int my_tid __weak;
+const volatile __u32 my_tid __weak;
 long syscall_id __weak;
 int output_val1;
@@ -68,7 +68,7 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id)
 {
 	static volatile int whatever;
-	if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+	if (my_tid != (s32)bpf_get_current_pid_tgid() || id != syscall_id)
 		return 0;
 	/* make sure we have CO-RE relocations in main program */
...
@@ -6,7 +6,7 @@
 #include "bpf_experimental.h"
 #ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
 #endif
 #include "linked_list.h"
...
@@ -13,7 +13,7 @@ char _license[] SEC("license") = "GPL";
 #define DUMMY_STORAGE_VALUE 0xdeadbeef
-int monitored_pid = 0;
+__u32 monitored_pid = 0;
 int inode_storage_result = -1;
 int sk_storage_result = -1;
 int task_storage_result = -1;
...
@@ -92,7 +92,7 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
 	if (ret != 0)
 		return ret;
-	__u32 pid = bpf_get_current_pid_tgid() >> 32;
+	__s32 pid = bpf_get_current_pid_tgid() >> 32;
 	int is_stack = 0;
 	is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
...
@@ -36,7 +36,7 @@ int add_to_list_in_array(void *ctx)
 	struct node_data *new;
 	int zero = 0;
-	if (done || (u32)bpf_get_current_pid_tgid() != pid)
+	if (done || (int)bpf_get_current_pid_tgid() != pid)
 		return 0;
 	value = bpf_map_lookup_elem(&array, &zero);
...
@@ -7,6 +7,7 @@
 #include "profiler.h"
 #include "err.h"
+#include "bpf_experimental.h"
 #ifndef NULL
 #define NULL 0
@@ -132,7 +133,7 @@ struct {
 } disallowed_exec_inodes SEC(".maps");
 #ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0]))
 #endif
 static INLINE bool IS_ERR(const void* ptr)
@@ -221,8 +222,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
 			return payload;
 		if (cgroup_node == cgroup_root_node)
 			*root_pos = payload - payload_start;
-		if (filepart_length <= MAX_PATH) {
-			barrier_var(filepart_length);
+		if (bpf_cmp_likely(filepart_length, <=, MAX_PATH)) {
 			payload += filepart_length;
 		}
 		cgroup_node = BPF_CORE_READ(cgroup_node, parent);
@@ -305,9 +305,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
 	size_t cgroup_root_length =
 		bpf_probe_read_kernel_str(payload, MAX_PATH,
 					  BPF_CORE_READ(root_kernfs, name));
-	barrier_var(cgroup_root_length);
-	if (cgroup_root_length <= MAX_PATH) {
-		barrier_var(cgroup_root_length);
+	if (bpf_cmp_likely(cgroup_root_length, <=, MAX_PATH)) {
 		cgroup_data->cgroup_root_length = cgroup_root_length;
 		payload += cgroup_root_length;
 	}
@@ -315,9 +313,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
 	size_t cgroup_proc_length =
 		bpf_probe_read_kernel_str(payload, MAX_PATH,
 					  BPF_CORE_READ(proc_kernfs, name));
-	barrier_var(cgroup_proc_length);
-	if (cgroup_proc_length <= MAX_PATH) {
-		barrier_var(cgroup_proc_length);
+	if (bpf_cmp_likely(cgroup_proc_length, <=, MAX_PATH)) {
 		cgroup_data->cgroup_proc_length = cgroup_proc_length;
 		payload += cgroup_proc_length;
 	}
@@ -347,9 +343,7 @@ static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
 	metadata->comm_length = 0;
 	size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
-	barrier_var(comm_length);
-	if (comm_length <= TASK_COMM_LEN) {
-		barrier_var(comm_length);
+	if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
 		metadata->comm_length = comm_length;
 		payload += comm_length;
 	}
@@ -494,10 +488,9 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
 		filepart_length =
 			bpf_probe_read_kernel_str(payload, MAX_PATH,
 						  BPF_CORE_READ(filp_dentry, d_name.name));
-		barrier_var(filepart_length);
-		if (filepart_length > MAX_PATH)
+		bpf_nop_mov(filepart_length);
+		if (bpf_cmp_unlikely(filepart_length, >, MAX_PATH))
 			break;
-		barrier_var(filepart_length);
 		payload += filepart_length;
 		length += filepart_length;
@@ -579,9 +572,7 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
 		size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
 								     CTL_MAXNAME, buf);
-		barrier_var(sysctl_val_length);
-		if (sysctl_val_length <= CTL_MAXNAME) {
-			barrier_var(sysctl_val_length);
+		if (bpf_cmp_likely(sysctl_val_length, <=, CTL_MAXNAME)) {
 			sysctl_data->sysctl_val_length = sysctl_val_length;
 			payload += sysctl_val_length;
 		}
@@ -590,9 +581,7 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
 			bpf_probe_read_kernel_str(payload, MAX_PATH,
 						  BPF_CORE_READ(filp, f_path.dentry,
 								d_name.name));
-		barrier_var(sysctl_path_length);
-		if (sysctl_path_length <= MAX_PATH) {
-			barrier_var(sysctl_path_length);
+		if (bpf_cmp_likely(sysctl_path_length, <=, MAX_PATH)) {
 			sysctl_data->sysctl_path_length = sysctl_path_length;
 			payload += sysctl_path_length;
 		}
@@ -645,7 +634,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
 	for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
 		struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
-		if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
+		if (past_kill_data != NULL && past_kill_data->kill_target_pid == (pid_t)tpid) {
 			bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
 					      past_kill_data);
 			void* payload = kill_data->payload;
@@ -658,9 +647,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
 			kill_data->kill_target_cgroup_proc_length = 0;
 			size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
-			barrier_var(comm_length);
-			if (comm_length <= TASK_COMM_LEN) {
-				barrier_var(comm_length);
+			if (bpf_cmp_likely(comm_length, <=, TASK_COMM_LEN)) {
 				kill_data->kill_target_name_length = comm_length;
 				payload += comm_length;
 			}
@@ -669,9 +656,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
 				bpf_probe_read_kernel_str(payload,
 							  KILL_TARGET_LEN,
 							  BPF_CORE_READ(proc_kernfs, name));
-			barrier_var(cgroup_proc_length);
-			if (cgroup_proc_length <= KILL_TARGET_LEN) {
-				barrier_var(cgroup_proc_length);
+			if (bpf_cmp_likely(cgroup_proc_length, <=, KILL_TARGET_LEN)) {
 				kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
 				payload += cgroup_proc_length;
 			}
@@ -731,9 +716,7 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
 	const char* filename = BPF_CORE_READ(bprm, filename);
 	size_t bin_path_length =
 		bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
-	barrier_var(bin_path_length);
-	if (bin_path_length <= MAX_FILENAME_LEN) {
-		barrier_var(bin_path_length);
+	if (bpf_cmp_likely(bin_path_length, <=, MAX_FILENAME_LEN)) {
 		proc_exec_data->bin_path_length = bin_path_length;
 		payload += bin_path_length;
 	}
@@ -743,8 +726,7 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
 		unsigned int cmdline_length = probe_read_lim(payload, arg_start,
 							     arg_end - arg_start, MAX_ARGS_LEN);
-		if (cmdline_length <= MAX_ARGS_LEN) {
-			barrier_var(cmdline_length);
+		if (bpf_cmp_likely(cmdline_length, <=, MAX_ARGS_LEN)) {
 			proc_exec_data->cmdline_length = cmdline_length;
 			payload += cmdline_length;
 		}
@@ -821,9 +803,7 @@ int kprobe_ret__do_filp_open(struct pt_regs* ctx)
 	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
 	size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
-	barrier_var(len);
-	if (len <= MAX_FILEPATH_LENGTH) {
-		barrier_var(len);
+	if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
 		payload += len;
 		filemod_data->dst_filepath_length = len;
 	}
@@ -876,17 +856,13 @@ int BPF_KPROBE(kprobe__vfs_link,
 	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
 	size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
-	barrier_var(len);
-	if (len <= MAX_FILEPATH_LENGTH) {
-		barrier_var(len);
+	if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
 		payload += len;
 		filemod_data->src_filepath_length = len;
 	}
 	len = read_absolute_file_path_from_dentry(new_dentry, payload);
-	barrier_var(len);
-	if (len <= MAX_FILEPATH_LENGTH) {
-		barrier_var(len);
+	if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
 		payload += len;
 		filemod_data->dst_filepath_length = len;
 	}
@@ -936,16 +912,12 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
 	size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
 					       oldname);
-	barrier_var(len);
-	if (len <= MAX_FILEPATH_LENGTH) {
-		barrier_var(len);
+	if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
 		payload += len;
 		filemod_data->src_filepath_length = len;
 	}
 	len = read_absolute_file_path_from_dentry(dentry, payload);
-	barrier_var(len);
-	if (len <= MAX_FILEPATH_LENGTH) {
-		barrier_var(len);
+	if (bpf_cmp_likely(len, <=, MAX_FILEPATH_LENGTH)) {
 		payload += len;
 		filemod_data->dst_filepath_length = len;
 	}
...
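
Every profiler.inc.h hunk above is the same mechanical rewrite: the double barrier_var() dance that kept clang from spilling the bounds check into a register the verifier does not track collapses into one bpf_cmp_likely() whose in-bounds path falls through. Schematically (a condensed paraphrase with a hypothetical helper name; MAX_PATH and the string read are stand-ins for the fields used above):

static INLINE void* append_str(void* payload, const char* name)
{
	size_t len = bpf_probe_read_kernel_str(payload, MAX_PATH, name);

	/* old shape:
	 *	barrier_var(len);
	 *	if (len <= MAX_PATH) {
	 *		barrier_var(len);
	 *		payload += len;
	 *	}
	 */
	if (bpf_cmp_likely(len, <=, MAX_PATH))
		payload += len;
	return payload;
}

The one place where a register barrier is still needed, read_absolute_file_path_from_dentry(), keeps it as the new no-op bpf_nop_mov() instead of barrier_var().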
@@ -9,7 +9,7 @@ char _license[] SEC("license") = "GPL";
 #define CUSTOM_INHERIT2 1
 #define CUSTOM_LISTENER 2
-__u32 page_size = 0;
+__s32 page_size = 0;
 struct sockopt_inherit {
 	__u8 val;
...
@@ -5,7 +5,7 @@
 char _license[] SEC("license") = "GPL";
-__u32 page_size = 0;
+__s32 page_size = 0;
 SEC("cgroup/getsockopt")
 int _getsockopt_child(struct bpf_sockopt *ctx)
...
@@ -9,7 +9,7 @@
 char _license[] SEC("license") = "GPL";
-__u32 page_size = 0;
+__s32 page_size = 0;
 SEC("cgroup/setsockopt")
 int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
...
@@ -21,7 +21,7 @@ const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 204
 const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
 int err = 0;
-int pid = 0;
+u32 pid = 0;
 #define DEFINE_ARRAY_WITH_KPTR(_size) \
 	struct bin_data_##_size { \
...
@@ -53,7 +53,7 @@ int test_core_kernel(void *ctx)
 	struct task_struct *task = (void *)bpf_get_current_task();
 	struct core_reloc_kernel_output *out = (void *)&data.out;
 	uint64_t pid_tgid = bpf_get_current_pid_tgid();
-	uint32_t real_tgid = (uint32_t)pid_tgid;
+	int32_t real_tgid = (int32_t)pid_tgid;
 	int pid, tgid;
 	if (data.my_pid_tgid != pid_tgid)
...
@@ -43,8 +43,8 @@ int BPF_PROG(test_core_module_probed,
 #if __has_builtin(__builtin_preserve_enum_value)
 	struct core_reloc_module_output *out = (void *)&data.out;
 	__u64 pid_tgid = bpf_get_current_pid_tgid();
-	__u32 real_tgid = (__u32)(pid_tgid >> 32);
-	__u32 real_pid = (__u32)pid_tgid;
+	__s32 real_tgid = (__s32)(pid_tgid >> 32);
+	__s32 real_pid = (__s32)pid_tgid;
 	if (data.my_pid_tgid != pid_tgid)
 		return 0;
@@ -77,8 +77,8 @@ int BPF_PROG(test_core_module_direct,
 #if __has_builtin(__builtin_preserve_enum_value)
 	struct core_reloc_module_output *out = (void *)&data.out;
 	__u64 pid_tgid = bpf_get_current_pid_tgid();
-	__u32 real_tgid = (__u32)(pid_tgid >> 32);
-	__u32 real_pid = (__u32)pid_tgid;
+	__s32 real_tgid = (__s32)(pid_tgid >> 32);
+	__s32 real_pid = (__s32)pid_tgid;
 	if (data.my_pid_tgid != pid_tgid)
 		return 0;
...
@@ -38,7 +38,7 @@ int BPF_PROG(test_file_open, struct file *f)
 		return 0;
 	got_fsverity = 1;
-	for (i = 0; i < sizeof(digest); i++) {
+	for (i = 0; i < (int)sizeof(digest); i++) {
 		if (digest[i] != expected_digest[i])
 			return 0;
 	}
...
@@ -29,7 +29,7 @@ int BPF_PROG(unix_listen, struct socket *sock, int backlog)
 	len = unix_sk->addr->len - sizeof(short);
 	path[0] = '@';
 	for (i = 1; i < len; i++) {
-		if (i >= sizeof(struct sockaddr_un))
+		if (i >= (int)sizeof(struct sockaddr_un))
 			break;
 		path[i] = unix_sk->addr->name->sun_path[i];
...
@@ -38,7 +38,7 @@ int xdp_redirect(struct xdp_md *xdp)
 	if (payload + 1 > data_end)
 		return XDP_ABORTED;
-	if (xdp->ingress_ifindex != ifindex_in)
+	if (xdp->ingress_ifindex != (__u32)ifindex_in)
 		return XDP_ABORTED;
 	if (metadata + 1 > data)
...