// SPDX-License-Identifier: GPL-2.0
/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"

static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @css: the css of the cgroup in question
 * @cft: the control file being read (unused)
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

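/*
 * current_css_set_read - dump the css_set of the task reading this file.
 *
 * Shows the css_set pointer and reference count (flagging any excess over
 * the task count as "+N"), followed by one line per css it holds.
 */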
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = task_css_set(current);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,
			  css, css->id);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}

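/* Return the reference count of the reading task's css_set. */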
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = refcount_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

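/*
 * For each hierarchy the reading task's css_set is linked to, print the
 * hierarchy id and the name of the cgroup the task belongs to there.
 */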
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = task_css_set(current);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
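/*
 * Dump every css_set linked to this css's cgroup: domain and threaded
 * relationships, reference counts, and up to MAX_TASKS_SHOWN_PER_CSS member
 * tasks each, followed by totals for threaded, extra-reference and dead
 * css_sets.
 */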
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;
	int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;

	spin_lock_irq(&css_set_lock);

	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		int refcnt = refcount_read(&cset->refcount);

		/*
		 * Print out the proc_cset and threaded_cset relationship
		 * and highlight difference between refcount and task_count.
		 */
		seq_printf(seq, "css_set %pK", cset);
		if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
			threaded_csets++;
			seq_printf(seq, "=>%pK", cset->dom_cset);
		}
		if (!list_empty(&cset->threaded_csets)) {
			struct css_set *tcset;
			int idx = 0;

			list_for_each_entry(tcset, &cset->threaded_csets,
					    threaded_csets_node) {
				seq_puts(seq, idx ? "," : "<=");
				seq_printf(seq, "%pK", tcset);
				idx++;
			}
		} else {
			seq_printf(seq, " %d", refcnt);
			if (refcnt - cset->nr_tasks > 0) {
				int extra = refcnt - cset->nr_tasks;

				seq_printf(seq, " +%d", extra);
				/*
				 * Take out the one additional reference in
				 * init_css_set.
				 */
				if (cset == &init_css_set)
					extra--;
				extra_refs += extra;
			}
		}
		seq_puts(seq, "\n");

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
		}
		/* show # of overflowed tasks */
		if (count > MAX_TASKS_SHOWN_PER_CSS)
			seq_printf(seq, "  ... (%d)\n",
				   count - MAX_TASKS_SHOWN_PER_CSS);

		if (cset->dead) {
			seq_puts(seq, "    [dead]\n");
			dead_cnt++;
		}

		WARN_ON(count != cset->nr_tasks);
	}
	spin_unlock_irq(&css_set_lock);

	if (!dead_cnt && !extra_refs && !threaded_csets)
		return 0;

	seq_puts(seq, "\n");
	if (threaded_csets)
		seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
	if (extra_refs)
		seq_printf(seq, "extra references = %d\n", extra_refs);
	if (dead_cnt)
		seq_printf(seq, "dead css_sets = %d\n", dead_cnt);

	return 0;
}

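/*
 * List each css attached to this cgroup: subsystem id and name, css pointer
 * and id, online count, and the parent css id if one exists.
 */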
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	char pbuf[16];
	int i;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, i) {
		css = rcu_dereference_check(cgrp->subsys[ss->id], true);
		if (!css)
			continue;

		pbuf[0] = '\0';

		/* Show the parent CSS if applicable */
		if (css->parent)
			snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
				 css->parent->id);
		seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name,
			  css, css->id,
			  atomic_read(&css->online_cnt), pbuf);
	}

	cgroup_kn_unlock(of->kn);
	return 0;
}

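/* Print @name followed by the names of the subsystems set in @mask. */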
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
				  u16 mask)
{
	struct cgroup_subsys *ss;
	int ssid;
	bool first = true;

	seq_printf(seq, "%-17s: ", name);
	for_each_subsys(ss, ssid) {
		if (!(mask & (1 << ssid)))
			continue;
		if (!first)
			seq_puts(seq, ", ");
		seq_puts(seq, ss->name);
		first = false;
	}
	seq_putc(seq, '\n');
}

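/* Show this cgroup's subtree_control and subtree_ss_mask by subsystem name. */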
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
	cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);

	cgroup_kn_unlock(of->kn);
	return 0;
}

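/*
 * A cgroup is considered releasable when it is not populated with tasks and
 * has no online child csses.
 */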
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

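/* Files exposed on v1 (legacy) hierarchies. */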
static struct cftype debug_legacy_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

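/* Files exposed on the v2 (default) hierarchy. */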
static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "csses",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "masks",
		.seq_show = cgroup_masks_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc	= debug_css_alloc,
	.css_free	= debug_css_free,
	.legacy_cftypes	= debug_legacy_files,
};

/*
 * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
 * parameter.
 */
void __init enable_debug_cgroup(void)
{
	debug_cgrp_subsys.dfl_cftypes = debug_files;
	debug_cgrp_subsys.implicit_on_dfl = true;
	debug_cgrp_subsys.threaded = true;
}