Commit 92411764 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'for-6.8-bpf' of...

Merge branch 'for-6.8-bpf' of https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup into bpf-next

Merge cgroup prerequisite patches.

Link: https://lore.kernel.org/bpf/20231029061438.4215-1-laoar.shao@gmail.com/
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 689b097a aecd408b
...@@ -563,6 +563,7 @@ struct cgroup_root { ...@@ -563,6 +563,7 @@ struct cgroup_root {
/* A list running through the active hierarchies */ /* A list running through the active hierarchies */
struct list_head root_list; struct list_head root_list;
struct rcu_head rcu;
/* Hierarchy-specific flags */ /* Hierarchy-specific flags */
unsigned int flags; unsigned int flags;
......
...@@ -69,6 +69,7 @@ struct css_task_iter { ...@@ -69,6 +69,7 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type; extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root; extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set; extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h> #include <linux/cgroup_subsys.h>
...@@ -386,7 +387,6 @@ static inline void cgroup_unlock(void) ...@@ -386,7 +387,6 @@ static inline void cgroup_unlock(void)
* as locks used during the cgroup_subsys::attach() methods. * as locks used during the cgroup_subsys::attach() methods.
*/ */
#ifdef CONFIG_PROVE_RCU #ifdef CONFIG_PROVE_RCU
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \ #define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \ rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \ rcu_read_lock_sched_held() || \
...@@ -853,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} ...@@ -853,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */ #endif /* CONFIG_CGROUP_BPF */
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
#endif /* _LINUX_CGROUP_H */ #endif /* _LINUX_CGROUP_H */
...@@ -164,13 +164,13 @@ struct cgroup_mgctx { ...@@ -164,13 +164,13 @@ struct cgroup_mgctx {
#define DEFINE_CGROUP_MGCTX(name) \ #define DEFINE_CGROUP_MGCTX(name) \
struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name) struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[]; extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots; extern struct list_head cgroup_roots;
/* iterate across the hierarchies */ /* iterate across the hierarchies */
#define for_each_root(root) \ #define for_each_root(root) \
list_for_each_entry((root), &cgroup_roots, root_list) list_for_each_entry_rcu((root), &cgroup_roots, root_list, \
lockdep_is_held(&cgroup_mutex))
/** /**
* for_each_subsys - iterate all enabled cgroup subsystems * for_each_subsys - iterate all enabled cgroup subsystems
......
...@@ -1262,6 +1262,40 @@ int cgroup1_get_tree(struct fs_context *fc) ...@@ -1262,6 +1262,40 @@ int cgroup1_get_tree(struct fs_context *fc)
return ret; return ret;
} }
/**
 * task_get_cgroup1 - Acquires the associated cgroup of a task within a
 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
 * hierarchy ID.
 * @tsk: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * On success, the cgroup is returned with a reference held (acquired via
 * cgroup_tryget()); the caller is responsible for dropping it. On failure,
 * ERR_PTR(-ENOENT) is returned. We limit it to cgroup1 only.
 */
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id)
{
	struct cgroup *cgrp = ERR_PTR(-ENOENT);
	struct cgroup_root *root;
	unsigned long flags;

	/*
	 * RCU protects the walk of cgroup_roots: roots are unlinked with
	 * list_del_rcu() and freed via kfree_rcu(), so they stay valid for
	 * the duration of this read-side critical section.
	 */
	rcu_read_lock();
	for_each_root(root) {
		/* cgroup1 only */
		if (root == &cgrp_dfl_root)
			continue;
		if (root->hierarchy_id != hierarchy_id)
			continue;
		/*
		 * css_set_lock keeps the task's css_set stable while it is
		 * mapped to a cgroup in this root; _irqsave because this
		 * helper may be called from contexts with IRQs disabled.
		 */
		spin_lock_irqsave(&css_set_lock, flags);
		cgrp = task_cgroup_from_root(tsk, root);
		/*
		 * Without cgroup_mutex the root may be mid-destruction, so
		 * task_cgroup_from_root() can return NULL; cgroup_tryget()
		 * additionally fails if the cgroup is already being
		 * released. Either way, report -ENOENT.
		 */
		if (!cgrp || !cgroup_tryget(cgrp))
			cgrp = ERR_PTR(-ENOENT);
		spin_unlock_irqrestore(&css_set_lock, flags);
		break;
	}
	rcu_read_unlock();

	return cgrp;
}
static int __init cgroup1_wq_init(void) static int __init cgroup1_wq_init(void)
{ {
/* /*
......
...@@ -1315,7 +1315,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root) ...@@ -1315,7 +1315,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)
void cgroup_free_root(struct cgroup_root *root) void cgroup_free_root(struct cgroup_root *root)
{ {
kfree(root); kfree_rcu(root, rcu);
} }
static void cgroup_destroy_root(struct cgroup_root *root) static void cgroup_destroy_root(struct cgroup_root *root)
...@@ -1347,10 +1347,9 @@ static void cgroup_destroy_root(struct cgroup_root *root) ...@@ -1347,10 +1347,9 @@ static void cgroup_destroy_root(struct cgroup_root *root)
spin_unlock_irq(&css_set_lock); spin_unlock_irq(&css_set_lock);
if (!list_empty(&root->root_list)) { WARN_ON_ONCE(list_empty(&root->root_list));
list_del(&root->root_list); list_del_rcu(&root->root_list);
cgroup_root_count--; cgroup_root_count--;
}
if (!have_favordynmods) if (!have_favordynmods)
cgroup_favor_dynmods(root, false); cgroup_favor_dynmods(root, false);
...@@ -1390,7 +1389,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, ...@@ -1390,7 +1389,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
} }
} }
BUG_ON(!res_cgroup); /*
* If cgroup_mutex is not held, the cgrp_cset_link will be freed
* before we remove the cgroup root from the root_list. Consequently,
* when accessing a cgroup root, the cset_link may have already been
* freed, resulting in a NULL res_cgroup. However, by holding the
* cgroup_mutex, we ensure that res_cgroup can't be NULL.
* If we don't hold cgroup_mutex in the caller, we must do the NULL
* check.
*/
return res_cgroup; return res_cgroup;
} }
...@@ -1413,6 +1420,11 @@ current_cgns_cgroup_from_root(struct cgroup_root *root) ...@@ -1413,6 +1420,11 @@ current_cgns_cgroup_from_root(struct cgroup_root *root)
rcu_read_unlock(); rcu_read_unlock();
/*
* The namespace_sem is held by current, so the root cgroup can't
* be umounted. Therefore, we can ensure that the res is non-NULL.
*/
WARN_ON_ONCE(!res);
return res; return res;
} }
...@@ -1449,7 +1461,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void) ...@@ -1449,7 +1461,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
static struct cgroup *cset_cgroup_from_root(struct css_set *cset, static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root) struct cgroup_root *root)
{ {
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock); lockdep_assert_held(&css_set_lock);
return __cset_cgroup_from_root(cset, root); return __cset_cgroup_from_root(cset, root);
...@@ -1457,7 +1468,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, ...@@ -1457,7 +1468,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
/* /*
* Return the cgroup for "task" from the given hierarchy. Must be * Return the cgroup for "task" from the given hierarchy. Must be
* called with cgroup_mutex and css_set_lock held. * called with css_set_lock held to prevent task's groups from being modified.
* Must be called with either cgroup_mutex or rcu read lock to prevent the
* cgroup root from being destroyed.
*/ */
struct cgroup *task_cgroup_from_root(struct task_struct *task, struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root) struct cgroup_root *root)
...@@ -2032,7 +2045,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx) ...@@ -2032,7 +2045,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
struct cgroup_root *root = ctx->root; struct cgroup_root *root = ctx->root;
struct cgroup *cgrp = &root->cgrp; struct cgroup *cgrp = &root->cgrp;
INIT_LIST_HEAD(&root->root_list); INIT_LIST_HEAD_RCU(&root->root_list);
atomic_set(&root->nr_cgrps, 1); atomic_set(&root->nr_cgrps, 1);
cgrp->root = root; cgrp->root = root;
init_cgroup_housekeeping(cgrp); init_cgroup_housekeeping(cgrp);
...@@ -2115,7 +2128,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) ...@@ -2115,7 +2128,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
* care of subsystems' refcounts, which are explicitly dropped in * care of subsystems' refcounts, which are explicitly dropped in
* the failure exit path. * the failure exit path.
*/ */
list_add(&root->root_list, &cgroup_roots); list_add_rcu(&root->root_list, &cgroup_roots);
cgroup_root_count++; cgroup_root_count++;
/* /*
...@@ -6277,7 +6290,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, ...@@ -6277,7 +6290,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
if (!buf) if (!buf)
goto out; goto out;
cgroup_lock(); rcu_read_lock();
spin_lock_irq(&css_set_lock); spin_lock_irq(&css_set_lock);
for_each_root(root) { for_each_root(root) {
...@@ -6288,6 +6301,11 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, ...@@ -6288,6 +6301,11 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible)) if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible))
continue; continue;
cgrp = task_cgroup_from_root(tsk, root);
/* The root has already been unmounted. */
if (!cgrp)
continue;
seq_printf(m, "%d:", root->hierarchy_id); seq_printf(m, "%d:", root->hierarchy_id);
if (root != &cgrp_dfl_root) if (root != &cgrp_dfl_root)
for_each_subsys(ss, ssid) for_each_subsys(ss, ssid)
...@@ -6298,9 +6316,6 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, ...@@ -6298,9 +6316,6 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m, "%sname=%s", count ? "," : "", seq_printf(m, "%sname=%s", count ? "," : "",
root->name); root->name);
seq_putc(m, ':'); seq_putc(m, ':');
cgrp = task_cgroup_from_root(tsk, root);
/* /*
* On traditional hierarchies, all zombie tasks show up as * On traditional hierarchies, all zombie tasks show up as
* belonging to the root cgroup. On the default hierarchy, * belonging to the root cgroup. On the default hierarchy,
...@@ -6332,7 +6347,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, ...@@ -6332,7 +6347,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
retval = 0; retval = 0;
out_unlock: out_unlock:
spin_unlock_irq(&css_set_lock); spin_unlock_irq(&css_set_lock);
cgroup_unlock(); rcu_read_unlock();
kfree(buf); kfree(buf);
out: out:
return retval; return retval;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment