Commit 36b0fb13 authored by Daniel Borkmann

Merge branch 'kfunc-annotation'

David Vernet says:

====================
This is v3 of the patchset [0]. v2 can be found at [1].

[0]: https://lore.kernel.org/bpf/Y7kCsjBZ%2FFrsWW%2Fe@maniforge.lan/T/
[1]: https://lore.kernel.org/lkml/20230123171506.71995-1-void@manifault.com/

Changelog:
----------
v2 -> v3:
- Go back to the __bpf_kfunc approach from v1. The BPF_KFUNC macro
  received pushback as it didn't match the more typical EXPORT_SYMBOL*
  APIs used elsewhere in the kernel. The macro approach remains the
  longer-term plan, but for now we're proposing something less
  controversial to fix kfuncs and their BTF encoding.
- Add __bpf_kfunc macro to newly added cpumask kfuncs.
- Add __bpf_kfunc macro to newly added XDP metadata kfuncs, which were
  reported in the thread in [2] as failing to be BTF encoded.
- Update patch description(s) to reference the discussions in [2].
- Add a selftest that validates that a static kfunc with unused args is
  properly BTF encoded and can be invoked.

[2]: https://lore.kernel.org/all/fe5d42d1-faad-d05e-99ad-1c2c04776950@oracle.com/

v1 -> v2:
- Wrap entire function signature in BPF_KFUNC macro instead of using
  __bpf_kfunc tag (Kumar)
- Update all kfunc definitions to use this macro.
- Update kfuncs.rst documentation to describe and illustrate the macro.
- Also clean up a few small parts of kfuncs.rst, e.g. fix some grammar,
  and in general make it a bit tighter.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 10d1b0e4 6aed15e3
......@@ -41,7 +41,7 @@ An example is given below::
__diag_ignore_all("-Wmissing-prototypes",
"Global kfuncs as their definitions will be in BTF");
struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
__bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
{
return find_get_task_by_vpid(nr);
}
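On the BPF program side, the kfunc is declared with the ``__ksym`` attribute before it can be called. A minimal, hedged sketch of a caller (assuming the kfunc above has been registered for the program type being used)::

extern struct task_struct *bpf_find_get_task_by_vpid(pid_t nr) __ksym;

SEC("tc")
int find_task_example(struct __sk_buff *skb)
{
        struct task_struct *task;

        task = bpf_find_get_task_by_vpid(1);
        if (!task)
                return 0;
        /* ... use task, then release the acquired reference ... */
        return 0;
}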
......@@ -66,7 +66,7 @@ kfunc with a __tag, where tag may be one of the supported annotations.
This annotation is used to indicate a memory and size pair in the argument list.
An example is given below::
void bpf_memzero(void *mem, int mem__sz)
__bpf_kfunc void bpf_memzero(void *mem, int mem__sz)
{
...
}
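On the calling side, the verifier uses this pair to check that the buffer passed as ``mem`` is at least ``mem__sz`` bytes. A hedged sketch of a BPF program calling the illustrative kfunc above::

extern void bpf_memzero(void *mem, int mem__sz) __ksym;

SEC("tc")
int zero_example(struct __sk_buff *skb)
{
        char buf[16];

        /* buf and sizeof(buf) form the mem/mem__sz pair checked by the verifier. */
        bpf_memzero(buf, sizeof(buf));
        return 0;
}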
......@@ -86,7 +86,7 @@ safety of the program.
An example is given below::
void *bpf_obj_new(u32 local_type_id__k, ...)
__bpf_kfunc void *bpf_obj_new(u32 local_type_id__k, ...)
{
...
}
......@@ -125,6 +125,20 @@ flags on a set of kfuncs as follows::
This set encodes the BTF ID of each kfunc listed above, and encodes the flags
along with it. Of course, it is also allowed to specify no flags.
kfunc definitions should also always be annotated with the ``__bpf_kfunc``
macro. This prevents issues such as the compiler inlining the kfunc if it's a
static kernel function, or the function being elided in an LTO build as it's
not used in the rest of the kernel. Developers should not manually add
annotations to their kfunc to prevent these issues. If an annotation is
required to prevent such an issue with your kfunc, it is a bug and should be
added to the definition of the macro so that other kfuncs are similarly
protected. An example is given below::
__bpf_kfunc struct task_struct *bpf_get_task_pid(s32 pid)
{
...
}
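For reference, ``__bpf_kfunc`` is currently defined as ``__used noinline`` (see the definition added to ``include/linux/btf.h`` in this series), so the example above is roughly equivalent to writing::

__used noinline struct task_struct *bpf_get_task_pid(s32 pid)
{
...
}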
2.4.1 KF_ACQUIRE flag
---------------------
......
......@@ -116,6 +116,9 @@ if major >= 3:
# include/linux/linkage.h:
"asmlinkage",
# include/linux/btf.h
"__bpf_kfunc",
]
else:
......
......@@ -72,6 +72,14 @@
#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
#define KF_RCU (1 << 7) /* kfunc only takes rcu pointer arguments */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
* amount of copy-paste that kfunc authors have to include for correctness so
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
#define __bpf_kfunc __used noinline
/*
* Return the name of the passed struct, if exists, or halt the build if for
* example the structure gets renamed. In this way, developers have to revisit
......
......@@ -48,7 +48,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* bpf_cpumask_create() allocates memory using the BPF memory allocator, and
* will not block. It may return NULL if no memory is available.
*/
struct bpf_cpumask *bpf_cpumask_create(void)
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
{
struct bpf_cpumask *cpumask;
......@@ -74,7 +74,7 @@ struct bpf_cpumask *bpf_cpumask_create(void)
* must either be embedded in a map as a kptr, or freed with
* bpf_cpumask_release().
*/
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
refcount_inc(&cpumask->usage);
return cpumask;
......@@ -90,7 +90,7 @@ struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
* kptr, or freed with bpf_cpumask_release(). This function may return NULL if
* no BPF cpumask was found in the specified map value.
*/
struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
{
struct bpf_cpumask *cpumask;
......@@ -116,7 +116,7 @@ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
* reference of the BPF cpumask has been released, it is subsequently freed in
* an RCU callback in the BPF memory allocator.
*/
void bpf_cpumask_release(struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
if (!cpumask)
return;
......@@ -135,7 +135,7 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask)
* Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
* pointer may be safely passed to this function.
*/
u32 bpf_cpumask_first(const struct cpumask *cpumask)
__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
return cpumask_first(cpumask);
}
......@@ -148,7 +148,7 @@ u32 bpf_cpumask_first(const struct cpumask *cpumask)
* Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
* pointer may be safely passed to this function.
*/
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
return cpumask_first_zero(cpumask);
}
......@@ -158,7 +158,7 @@ u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
* @cpu: The CPU to be set in the cpumask.
* @cpumask: The BPF cpumask in which a bit is being set.
*/
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return;
......@@ -171,7 +171,7 @@ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* @cpu: The CPU to be cleared from the cpumask.
* @cpumask: The BPF cpumask in which a bit is being cleared.
*/
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return;
......@@ -188,7 +188,7 @@ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* * true - @cpu is set in the cpumask
* * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
*/
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
if (!cpu_valid(cpu))
return false;
......@@ -205,7 +205,7 @@ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
* * true - @cpu is set in the cpumask
* * false - @cpu was not set in the cpumask, or @cpu is invalid.
*/
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return false;
......@@ -223,7 +223,7 @@ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* * true - @cpu is set in the cpumask
* * false - @cpu was not set in the cpumask, or @cpu is invalid.
*/
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
if (!cpu_valid(cpu))
return false;
......@@ -235,7 +235,7 @@ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
* bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
* @cpumask: The BPF cpumask having all of its bits set.
*/
void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
cpumask_setall((struct cpumask *)cpumask);
}
......@@ -244,7 +244,7 @@ void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
* bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
* @cpumask: The BPF cpumask being cleared.
*/
void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
cpumask_clear((struct cpumask *)cpumask);
}
......@@ -261,9 +261,9 @@ void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
bool bpf_cpumask_and(struct bpf_cpumask *dst,
const struct cpumask *src1,
const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
const struct cpumask *src1,
const struct cpumask *src2)
{
return cpumask_and((struct cpumask *)dst, src1, src2);
}
......@@ -276,9 +276,9 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst,
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
void bpf_cpumask_or(struct bpf_cpumask *dst,
const struct cpumask *src1,
const struct cpumask *src2)
__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
const struct cpumask *src1,
const struct cpumask *src2)
{
cpumask_or((struct cpumask *)dst, src1, src2);
}
......@@ -291,9 +291,9 @@ void bpf_cpumask_or(struct bpf_cpumask *dst,
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
void bpf_cpumask_xor(struct bpf_cpumask *dst,
const struct cpumask *src1,
const struct cpumask *src2)
__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
const struct cpumask *src1,
const struct cpumask *src2)
{
cpumask_xor((struct cpumask *)dst, src1, src2);
}
......@@ -309,7 +309,7 @@ void bpf_cpumask_xor(struct bpf_cpumask *dst,
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_equal(src1, src2);
}
......@@ -325,7 +325,7 @@ bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_intersects(src1, src2);
}
......@@ -341,7 +341,7 @@ bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *sr
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_subset(src1, src2);
}
......@@ -356,7 +356,7 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
*
* A struct bpf_cpumask pointer may be safely passed to @cpumask.
*/
bool bpf_cpumask_empty(const struct cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
return cpumask_empty(cpumask);
}
......@@ -371,7 +371,7 @@ bool bpf_cpumask_empty(const struct cpumask *cpumask)
*
* A struct bpf_cpumask pointer may be safely passed to @cpumask.
*/
bool bpf_cpumask_full(const struct cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
{
return cpumask_full(cpumask);
}
......@@ -383,7 +383,7 @@ bool bpf_cpumask_full(const struct cpumask *cpumask)
*
* A struct bpf_cpumask pointer may be safely passed to @src.
*/
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
cpumask_copy((struct cpumask *)dst, src);
}
......@@ -398,7 +398,7 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
*
* A struct bpf_cpumask pointer may be safely passed to @src.
*/
u32 bpf_cpumask_any(const struct cpumask *cpumask)
__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
{
return cpumask_any(cpumask);
}
......@@ -415,7 +415,7 @@ u32 bpf_cpumask_any(const struct cpumask *cpumask)
*
* struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
*/
u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
{
return cpumask_any_and(src1, src2);
}
......
......@@ -1776,7 +1776,7 @@ __diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
u64 size = local_type_id__k;
......@@ -1790,7 +1790,7 @@ void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
return p;
}
void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
void *p = p__alloc;
......@@ -1811,12 +1811,12 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *hea
tail ? list_add_tail(n, h) : list_add(n, h);
}
void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
{
return __bpf_list_add(node, head, false);
}
void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
{
return __bpf_list_add(node, head, true);
}
......@@ -1834,12 +1834,12 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
return (struct bpf_list_node *)n;
}
struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
return __bpf_list_del(head, false);
}
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
return __bpf_list_del(head, true);
}
......@@ -1850,7 +1850,7 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
* bpf_task_release().
* @p: The task on which a reference is being acquired.
*/
struct task_struct *bpf_task_acquire(struct task_struct *p)
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
return get_task_struct(p);
}
......@@ -1861,7 +1861,7 @@ struct task_struct *bpf_task_acquire(struct task_struct *p)
* released by calling bpf_task_release().
* @p: The task on which a reference is being acquired.
*/
struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
{
/* For the time being this function returns NULL, as it's not currently
* possible to safely acquire a reference to a task with RCU protection
......@@ -1913,7 +1913,7 @@ struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
* be released by calling bpf_task_release().
* @pp: A pointer to a task kptr on which a reference is being acquired.
*/
struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
{
/* We must return NULL here until we have clarity on how to properly
* leverage RCU for ensuring a task's lifetime. See the comment above
......@@ -1926,7 +1926,7 @@ struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
* bpf_task_release - Release the reference acquired on a task.
* @p: The task on which a reference is being released.
*/
void bpf_task_release(struct task_struct *p)
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
if (!p)
return;
......@@ -1941,7 +1941,7 @@ void bpf_task_release(struct task_struct *p)
* calling bpf_cgroup_release().
* @cgrp: The cgroup on which a reference is being acquired.
*/
struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
cgroup_get(cgrp);
return cgrp;
......@@ -1953,7 +1953,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
* be released by calling bpf_cgroup_release().
* @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
*/
struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
{
struct cgroup *cgrp;
......@@ -1985,7 +1985,7 @@ struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
* drops to 0.
* @cgrp: The cgroup on which a reference is being released.
*/
void bpf_cgroup_release(struct cgroup *cgrp)
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
if (!cgrp)
return;
......@@ -2000,7 +2000,7 @@ void bpf_cgroup_release(struct cgroup *cgrp)
* @cgrp: The cgroup for which we're performing a lookup.
* @level: The level of ancestor to look up.
*/
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
struct cgroup *ancestor;
......@@ -2019,7 +2019,7 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
* stored in a map, or released with bpf_task_release().
* @pid: The pid of the task being looked up.
*/
struct task_struct *bpf_task_from_pid(s32 pid)
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
struct task_struct *p;
......@@ -2032,22 +2032,22 @@ struct task_struct *bpf_task_from_pid(s32 pid)
return p;
}
void *bpf_cast_to_kern_ctx(void *obj)
__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
return obj;
}
void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
{
return obj__ign;
}
void bpf_rcu_read_lock(void)
__bpf_kfunc void bpf_rcu_read_lock(void)
{
rcu_read_lock();
}
void bpf_rcu_read_unlock(void)
__bpf_kfunc void bpf_rcu_read_unlock(void)
{
rcu_read_unlock();
}
......
......@@ -26,7 +26,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
* rstat_cpu->updated_children list. See the comment on top of
* cgroup_rstat_cpu definition for details.
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;
......@@ -231,7 +231,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
*
* This function may block.
*/
void cgroup_rstat_flush(struct cgroup *cgrp)
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
might_sleep();
......
......@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
......@@ -975,7 +976,7 @@ void __noclone __crash_kexec(struct pt_regs *regs)
}
STACK_FRAME_NON_STANDARD(__crash_kexec);
void crash_kexec(struct pt_regs *regs)
__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
int old_cpu, this_cpu;
......
......@@ -1236,7 +1236,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* Return: a bpf_key pointer with a valid key pointer if the key is found, a
* NULL pointer otherwise.
*/
struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
key_ref_t key_ref;
struct bpf_key *bkey;
......@@ -1285,7 +1285,7 @@ struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
* Return: a bpf_key pointer with an invalid key pointer set from the
* pre-determined ID on success, a NULL pointer otherwise
*/
struct bpf_key *bpf_lookup_system_key(u64 id)
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
struct bpf_key *bkey;
......@@ -1309,7 +1309,7 @@ struct bpf_key *bpf_lookup_system_key(u64 id)
* Decrement the reference count of the key inside *bkey*, if the pointer
* is valid, and free *bkey*.
*/
void bpf_key_put(struct bpf_key *bkey)
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
if (bkey->has_ref)
key_put(bkey->key);
......@@ -1329,7 +1329,7 @@ void bpf_key_put(struct bpf_key *bkey)
*
* Return: 0 on success, a negative value on error.
*/
int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
struct bpf_dynptr_kern *sig_ptr,
struct bpf_key *trusted_keyring)
{
......
......@@ -484,7 +484,7 @@ static int bpf_test_finish(const union bpf_attr *kattr,
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
__bpf_kfunc int bpf_fentry_test1(int a)
{
return a + 1;
}
......@@ -529,23 +529,23 @@ int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
return (long)arg->a;
}
int noinline bpf_modify_return_test(int a, int *b)
__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
*b += 1;
return a + *b;
}
u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
return a + b + c + d;
}
int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
return a + b;
}
struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
return sk;
}
......@@ -582,21 +582,21 @@ static struct prog_test_ref_kfunc prog_test_struct = {
.cnt = REFCOUNT_INIT(1),
};
noinline struct prog_test_ref_kfunc *
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
refcount_inc(&prog_test_struct.cnt);
return &prog_test_struct;
}
noinline struct prog_test_member *
__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
WARN_ON_ONCE(1);
return NULL;
}
noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
if (!p)
return;
......@@ -604,11 +604,11 @@ noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
refcount_dec(&p->cnt);
}
noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}
noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
WARN_ON_ONCE(1);
}
......@@ -621,12 +621,14 @@ static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const i
return (int *)p;
}
noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
const int rdwr_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}
noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
......@@ -636,16 +638,17 @@ noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
* Acquire functions must return struct pointers, so these ones are
* failing.
*/
noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
const int rdonly_buf_size)
{
return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
noinline void bpf_kfunc_call_int_mem_release(int *p)
__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
noinline struct prog_test_ref_kfunc *
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
......@@ -694,50 +697,55 @@ struct prog_test_fail3 {
char arr2[];
};
noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}
noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}
noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}
noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}
noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}
noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}
noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}
noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}
noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}
noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
}
noinline void bpf_kfunc_call_test_destructive(void)
__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}
__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
return arg;
}
__diag_pop();
BTF_SET8_START(bpf_test_modify_return_ids)
......@@ -776,6 +784,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_SET8_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
......
......@@ -4,6 +4,7 @@
* Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
*/
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
......@@ -722,7 +723,7 @@ __diag_ignore_all("-Wmissing-prototypes",
*
* Returns 0 on success or ``-errno`` on error.
*/
int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
return -EOPNOTSUPP;
}
......@@ -734,7 +735,7 @@ int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
*
* Returns 0 on success or ``-errno`` on error.
*/
int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
return -EOPNOTSUPP;
}
......
......@@ -295,7 +295,7 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
}
/* override sysctl_tcp_min_tso_segs */
static u32 bbr_min_tso_segs(struct sock *sk)
__bpf_kfunc static u32 bbr_min_tso_segs(struct sock *sk)
{
return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}
......@@ -328,7 +328,7 @@ static void bbr_save_cwnd(struct sock *sk)
bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
}
static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
__bpf_kfunc static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
......@@ -1023,7 +1023,7 @@ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
bbr_update_gains(sk);
}
static void bbr_main(struct sock *sk, const struct rate_sample *rs)
__bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
struct bbr *bbr = inet_csk_ca(sk);
u32 bw;
......@@ -1035,7 +1035,7 @@ static void bbr_main(struct sock *sk, const struct rate_sample *rs)
bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}
static void bbr_init(struct sock *sk)
__bpf_kfunc static void bbr_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
......@@ -1077,7 +1077,7 @@ static void bbr_init(struct sock *sk)
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
static u32 bbr_sndbuf_expand(struct sock *sk)
__bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk)
{
/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
return 3;
......@@ -1086,7 +1086,7 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
/* In theory BBR does not need to undo the cwnd since it does not
* always reduce cwnd on losses (see bbr_main()). Keep it for now.
*/
static u32 bbr_undo_cwnd(struct sock *sk)
__bpf_kfunc static u32 bbr_undo_cwnd(struct sock *sk)
{
struct bbr *bbr = inet_csk_ca(sk);
......@@ -1097,7 +1097,7 @@ static u32 bbr_undo_cwnd(struct sock *sk)
}
/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
__bpf_kfunc static u32 bbr_ssthresh(struct sock *sk)
{
bbr_save_cwnd(sk);
return tcp_sk(sk)->snd_ssthresh;
......@@ -1125,7 +1125,7 @@ static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
static void bbr_set_state(struct sock *sk, u8 new_state)
__bpf_kfunc static void bbr_set_state(struct sock *sk, u8 new_state)
{
struct bbr *bbr = inet_csk_ca(sk);
......
......@@ -403,7 +403,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
* ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
* returns the leftover acks to adjust cwnd in congestion avoidance mode.
*/
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
......@@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
* for every packet that was ACKed.
*/
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
/* If credits accumulated at a higher w, apply them gently now. */
if (tp->snd_cwnd_cnt >= w) {
......@@ -443,7 +443,7 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
......@@ -462,7 +462,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
......@@ -470,7 +470,7 @@ u32 tcp_reno_ssthresh(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
u32 tcp_reno_undo_cwnd(struct sock *sk)
__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
......
......@@ -126,7 +126,7 @@ static inline void bictcp_hystart_reset(struct sock *sk)
ca->sample_cnt = 0;
}
static void cubictcp_init(struct sock *sk)
__bpf_kfunc static void cubictcp_init(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
......@@ -139,7 +139,7 @@ static void cubictcp_init(struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
__bpf_kfunc static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
......@@ -321,7 +321,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
ca->cnt = max(ca->cnt, 2U);
}
static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
__bpf_kfunc static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
......@@ -338,7 +338,7 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
static u32 cubictcp_recalc_ssthresh(struct sock *sk)
__bpf_kfunc static u32 cubictcp_recalc_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
......@@ -355,7 +355,7 @@ static u32 cubictcp_recalc_ssthresh(struct sock *sk)
return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}
static void cubictcp_state(struct sock *sk, u8 new_state)
__bpf_kfunc static void cubictcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
......@@ -445,7 +445,7 @@ static void hystart_update(struct sock *sk, u32 delay)
}
}
static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
__bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
......
......@@ -75,7 +75,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
ca->old_delivered_ce = tp->delivered_ce;
}
static void dctcp_init(struct sock *sk)
__bpf_kfunc static void dctcp_init(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
......@@ -104,7 +104,7 @@ static void dctcp_init(struct sock *sk)
INET_ECN_dontxmit(sk);
}
static u32 dctcp_ssthresh(struct sock *sk)
__bpf_kfunc static u32 dctcp_ssthresh(struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
......@@ -113,7 +113,7 @@ static u32 dctcp_ssthresh(struct sock *sk)
return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);
}
static void dctcp_update_alpha(struct sock *sk, u32 flags)
__bpf_kfunc static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
......@@ -169,7 +169,7 @@ static void dctcp_react_to_loss(struct sock *sk)
tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
static void dctcp_state(struct sock *sk, u8 new_state)
__bpf_kfunc static void dctcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Recovery &&
new_state != inet_csk(sk)->icsk_ca_state)
......@@ -179,7 +179,7 @@ static void dctcp_state(struct sock *sk, u8 new_state)
*/
}
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
__bpf_kfunc static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
struct dctcp *ca = inet_csk_ca(sk);
......@@ -229,7 +229,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
return 0;
}
static u32 dctcp_cwnd_undo(struct sock *sk)
__bpf_kfunc static u32 dctcp_cwnd_undo(struct sock *sk)
{
const struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
......
......@@ -249,7 +249,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
struct nf_conn___init *
__bpf_kfunc struct nf_conn___init *
bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
......@@ -283,7 +283,7 @@ bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
struct nf_conn *
__bpf_kfunc struct nf_conn *
bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
......@@ -316,7 +316,7 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
struct nf_conn___init *
__bpf_kfunc struct nf_conn___init *
bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
......@@ -351,7 +351,7 @@ bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @opts__sz - Length of the bpf_ct_opts structure
* Must be NF_BPF_CT_OPTS_SZ (12)
*/
struct nf_conn *
__bpf_kfunc struct nf_conn *
bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
u32 tuple__sz, struct bpf_ct_opts *opts, u32 opts__sz)
{
......@@ -376,7 +376,7 @@ bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple,
* @nfct - Pointer to referenced nf_conn___init object, obtained
* using bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
*/
struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
__bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
{
struct nf_conn *nfct = (struct nf_conn *)nfct_i;
int err;
......@@ -400,7 +400,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
* @nf_conn - Pointer to referenced nf_conn object, obtained using
* bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
*/
void bpf_ct_release(struct nf_conn *nfct)
__bpf_kfunc void bpf_ct_release(struct nf_conn *nfct)
{
if (!nfct)
return;
......@@ -417,7 +417,7 @@ void bpf_ct_release(struct nf_conn *nfct)
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @timeout - Timeout in msecs.
*/
void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
__bpf_kfunc void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
{
__nf_ct_set_timeout((struct nf_conn *)nfct, msecs_to_jiffies(timeout));
}
......@@ -432,7 +432,7 @@ void bpf_ct_set_timeout(struct nf_conn___init *nfct, u32 timeout)
* bpf_ct_insert_entry, bpf_xdp_ct_lookup, or bpf_skb_ct_lookup.
* @timeout - New timeout in msecs.
*/
int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
__bpf_kfunc int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
{
return __nf_ct_change_timeout(nfct, msecs_to_jiffies(timeout));
}
......@@ -447,7 +447,7 @@ int bpf_ct_change_timeout(struct nf_conn *nfct, u32 timeout)
* bpf_xdp_ct_alloc or bpf_skb_ct_alloc.
* @status - New status value.
*/
int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
__bpf_kfunc int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
{
return nf_ct_change_status_common((struct nf_conn *)nfct, status);
}
......@@ -462,7 +462,7 @@ int bpf_ct_set_status(const struct nf_conn___init *nfct, u32 status)
* bpf_ct_insert_entry, bpf_xdp_ct_lookup or bpf_skb_ct_lookup.
* @status - New status value.
*/
int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
__bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
{
return nf_ct_change_status_common(nfct, status);
}
......
......@@ -30,9 +30,9 @@ __diag_ignore_all("-Wmissing-prototypes",
* interpreted as selecting a random port.
* @manip - NF_NAT_MANIP_SRC or NF_NAT_MANIP_DST
*/
int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
union nf_inet_addr *addr, int port,
enum nf_nat_manip_type manip)
__bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
union nf_inet_addr *addr, int port,
enum nf_nat_manip_type manip)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
u16 proto = nf_ct_l3num(ct);
......
......@@ -39,8 +39,7 @@ __diag_ignore_all("-Wmissing-prototypes",
* @to - Pointer to memory to which the metadata will be copied
* Cannot be NULL
*/
__used noinline
int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
__bpf_kfunc int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct xfrm_md_info *info;
......@@ -62,9 +61,7 @@ int bpf_skb_get_xfrm_info(struct __sk_buff *skb_ctx, struct bpf_xfrm_info *to)
* @from - Pointer to memory from which the metadata will be copied
* Cannot be NULL
*/
__used noinline
int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx,
const struct bpf_xfrm_info *from)
__bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bpf_xfrm_info *from)
{
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
struct metadata_dst *md_dst;
......
......@@ -59,7 +59,7 @@ bpf_testmod_test_struct_arg_5(void) {
return bpf_testmod_test_struct_arg_result;
}
noinline void
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
......
......@@ -77,6 +77,7 @@ static struct kfunc_test_params kfunc_tests[] = {
TC_TEST(kfunc_call_test_get_mem, 42),
SYSCALL_TEST(kfunc_syscall_test, 0),
SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
TC_TEST(kfunc_call_test_static_unused_arg, 0),
};
struct syscall_test_args {
......
......@@ -17,6 +17,7 @@ extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym;
extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym;
extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
extern u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym;
SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)
......@@ -181,4 +182,14 @@ int kfunc_call_test_get_mem(struct __sk_buff *skb)
return ret;
}
SEC("tc")
int kfunc_call_test_static_unused_arg(struct __sk_buff *skb)
{
u32 expected = 5, actual;
actual = bpf_kfunc_call_test_static_unused_arg(expected, 0xdeadbeef);
return actual != expected ? -1 : 0;
}
char _license[] SEC("license") = "GPL";