Commit 07e06b01 authored by Yong Zhang, committed by Ingo Molnar

sched: Consolidate the name of root_task_group and init_task_group

root_task_group is a leftover from USER_SCHED; it is now always
the same as init_task_group.
But as Mike suggested, root_task_group may be the more suitable
name to keep for a tree.
So in this patch:
  init_task_group      --> root_task_group
  init_task_group_load --> root_task_group_load
  INIT_TASK_GROUP_LOAD --> ROOT_TASK_GROUP_LOAD
Suggested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20110107071736.GA32635@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cb600d2f
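
For readers tracking the rename from out-of-tree or backported code, the sketch below (ordinary userspace C, not kernel code; everything around the renamed identifiers is hypothetical scaffolding) illustrates the naming this patch establishes: root_task_group stops being a #define alias and becomes the real symbol, with the load macro and variable renamed to match.

/*
 * Minimal standalone sketch of the naming this patch establishes.
 * Only the identifier names mirror the patch; the struct contents,
 * the NICE_0_LOAD value and main() are illustrative scaffolding.
 */
#include <stdio.h>

struct task_group {
	unsigned long shares;
};

#define NICE_0_LOAD		1024UL
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD	/* was INIT_TASK_GROUP_LOAD */

static unsigned long root_task_group_load = ROOT_TASK_GROUP_LOAD; /* was init_task_group_load */

struct task_group root_task_group;		/* was init_task_group */

int main(void)
{
	/* Before this patch, callers used init_task_group (or the
	 * "#define root_task_group init_task_group" alias removed below). */
	root_task_group.shares = root_task_group_load;
	printf("root_task_group.shares = %lu\n", root_task_group.shares);
	return 0;
}
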
@@ -2511,7 +2511,7 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_CGROUP_SCHED
-extern struct task_group init_task_group;
+extern struct task_group root_task_group;
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
......
@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
-#define root_task_group init_task_group
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES 2
 #define MAX_SHARES (1UL << 18)
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 /* Default task group.
  * Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 #endif /* CONFIG_CGROUP_SCHED */
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 cfs_rq->tg = tg;
 tg->se[cpu] = se;
-/* se could be NULL for init_task_group */
+/* se could be NULL for root_task_group */
 if (!se)
 return;
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-init_task_group.se = (struct sched_entity **)ptr;
+root_task_group.se = (struct sched_entity **)ptr;
 ptr += nr_cpu_ids * sizeof(void **);
-init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 ptr += nr_cpu_ids * sizeof(void **);
-init_task_group.rt_rq = (struct rt_rq **)ptr;
+root_task_group.rt_rq = (struct rt_rq **)ptr;
 ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
 global_rt_period(), global_rt_runtime());
 #ifdef CONFIG_RT_GROUP_SCHED
-init_rt_bandwidth(&init_task_group.rt_bandwidth,
+init_rt_bandwidth(&root_task_group.rt_bandwidth,
 global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 #ifdef CONFIG_CGROUP_SCHED
-list_add(&init_task_group.list, &task_groups);
-INIT_LIST_HEAD(&init_task_group.children);
+list_add(&root_task_group.list, &task_groups);
+INIT_LIST_HEAD(&root_task_group.children);
 autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
 init_cfs_rq(&rq->cfs, rq);
 init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-init_task_group.shares = init_task_group_load;
+root_task_group.shares = root_task_group_load;
 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 /*
- * How much cpu bandwidth does init_task_group get?
+ * How much cpu bandwidth does root_task_group get?
  *
  * In case of task-groups formed thr' the cgroup filesystem, it
  * gets 100% of the cpu resources in the system. This overall
  * system cpu resource is divided among the tasks of
- * init_task_group and its child task-groups in a fair manner,
+ * root_task_group and its child task-groups in a fair manner,
  * based on each entity's (task or task-group's) weight
  * (se->load.weight).
  *
- * In other words, if init_task_group has 10 tasks of weight
+ * In other words, if root_task_group has 10 tasks of weight
  * 1024) and two child groups A0 and A1 (of weight 1024 each),
  * then A0's share of the cpu resource is:
  *
  * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
  *
- * We achieve this by letting init_task_group's tasks sit
- * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+ * We achieve this by letting root_task_group's tasks sit
+ * directly in rq->cfs (i.e root_task_group->se[] = NULL).
  */
-init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8812,7 +8810,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 if (!cgrp->parent) {
 /* This is early initialization for the top cgroup */
-return &init_task_group.css;
+return &root_task_group.css;
 }
 parent = cgroup_tg(cgrp->parent);
......
@@ -11,8 +11,8 @@ static atomic_t autogroup_seq_nr;
 static void autogroup_init(struct task_struct *init_task)
 {
-autogroup_default.tg = &init_task_group;
-init_task_group.autogroup = &autogroup_default;
+autogroup_default.tg = &root_task_group;
+root_task_group.autogroup = &autogroup_default;
 kref_init(&autogroup_default.kref);
 init_rwsem(&autogroup_default.lock);
 init_task->signal->autogroup = &autogroup_default;
@@ -63,7 +63,7 @@ static inline struct autogroup *autogroup_create(void)
 if (!ag)
 goto out_fail;
-tg = sched_create_group(&init_task_group);
+tg = sched_create_group(&root_task_group);
 if (IS_ERR(tg))
 goto out_free;
......
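
The comment updated in the sched_init() hunk above works through one concrete bandwidth split. As a sanity check, the standalone snippet below simply redoes that arithmetic for the same illustrative setup (10 tasks directly in root_task_group plus child groups A0 and A1, every entity at the default weight of 1024); it is not kernel code, and the numbers come from the comment's example, not from measurement.

/* Recomputes the A0 bandwidth example quoted in the comment above. */
#include <stdio.h>

int main(void)
{
	const double nice_0_load = 1024.0;	/* default entity weight */
	const int root_tasks = 10;		/* tasks sitting directly in rq->cfs */
	const int child_groups = 2;		/* A0 and A1, weight 1024 each */

	double total_weight = root_tasks * nice_0_load + child_groups * nice_0_load;
	double a0_share = nice_0_load / total_weight;

	printf("A0's bandwidth = %.2f%%\n", a0_share * 100.0);	/* prints 8.33% */
	return 0;
}
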