Commit 05ae6865 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Refactor alloc_bulk().

Factor out inner body of alloc_bulk into separate helper.
No functional changes.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-5-alexei.starovoitov@gmail.com
parent 9de3e815
@@ -154,11 +154,35 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		/* In RT irq_work runs in per-cpu kthread, so disable
+		 * interrupts to avoid preemption and interrupts and
+		 * reduce the chance of bpf prog executing on this cpu
+		 * when active counter is busy.
+		 */
+		local_irq_save(flags);
+	/* alloc_bulk runs from irq_work which will not preempt a bpf
+	 * program that does unit_alloc/unit_free since IRQs are
+	 * disabled there. There is no race to increment 'active'
+	 * counter. It protects free_llist from corruption in case NMI
+	 * bpf prog preempted this loop.
+	 */
+	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	local_dec(&c->active);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
-	unsigned long flags;
 	void *obj;
 	int i;
 
@@ -188,25 +212,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 			if (!obj)
 				break;
 		}
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			/* In RT irq_work runs in per-cpu kthread, so disable
-			 * interrupts to avoid preemption and interrupts and
-			 * reduce the chance of bpf prog executing on this cpu
-			 * when active counter is busy.
-			 */
-			local_irq_save(flags);
-		/* alloc_bulk runs from irq_work which will not preempt a bpf
-		 * program that does unit_alloc/unit_free since IRQs are
-		 * disabled there. There is no race to increment 'active'
-		 * counter. It protects free_llist from corruption in case NMI
-		 * bpf prog preempted this loop.
-		 */
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-		__llist_add(obj, &c->free_llist);
-		c->free_cnt++;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		add_obj_to_free_list(c, obj);
 	}
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
...
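For illustration only, below is a minimal userspace sketch of the same extract-a-helper shape: the bulk loop stays a plain "allocate then hand off" loop, and all per-object free-list bookkeeping moves into one helper. It assumes plain malloc and a singly linked list in place of the kernel's per-cpu bpf_mem_cache, local_t counter, llist, and IRQ handling; the names fake_cache, push_to_free_list, and bulk_fill are hypothetical and do not exist in the kernel sources.

/* Userspace sketch of the refactor; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

struct fake_cache {
	struct node *free_list;	/* stand-in for c->free_llist */
	int free_cnt;		/* mirrors c->free_cnt */
};

/* Rough counterpart of add_obj_to_free_list(): all bookkeeping for one
 * object lives here, without the kernel's 'active' counter or IRQ logic.
 */
static void push_to_free_list(struct fake_cache *c, struct node *obj)
{
	obj->next = c->free_list;
	c->free_list = obj;
	c->free_cnt++;
}

/* Rough counterpart of alloc_bulk() after the refactor. */
static void bulk_fill(struct fake_cache *c, int cnt)
{
	for (int i = 0; i < cnt; i++) {
		struct node *obj = malloc(sizeof(*obj));

		if (!obj)
			break;
		push_to_free_list(c, obj);
	}
}

int main(void)
{
	struct fake_cache c = { 0 };

	bulk_fill(&c, 8);
	printf("free_cnt = %d\n", c.free_cnt);

	/* Drain the list so the example stays leak-free. */
	while (c.free_list) {
		struct node *n = c.free_list;

		c.free_list = n->next;
		free(n);
	}
	return 0;
}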