Commit 29f59db3 authored by Srivatsa Vaddagiri, committed by Ingo Molnar

sched: group-scheduler core

Add interface to control cpu bandwidth allocation to task-groups.

(not yet configurable, due to missing CONFIG_CONTAINERS)
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
parent 119fe5e0
init/Kconfig
@@ -281,6 +281,15 @@ config CPUSETS
 
 	  Say N if unsure.
 
+config FAIR_GROUP_SCHED
+	bool "Fair group scheduler"
+	depends on EXPERIMENTAL && CONTAINERS
+	help
+	  This option enables you to group tasks and control CPU resource
+	  allocation to such groups.
+
+	  Say N if unsure.
+
 config SYSFS_DEPRECATED
 	bool "Create deprecated sysfs files"
 	default y
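Once the container patches land and CONTAINERS becomes selectable, enabling the feature would be an ordinary Kconfig choice. A hypothetical .config fragment (symbol names taken from the depends line above; CONTAINERS does not exist yet, per the commit message):

	CONFIG_EXPERIMENTAL=y
	CONFIG_CONTAINERS=y
	CONFIG_FAIR_GROUP_SCHED=y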
[Large diff collapsed in the original view: presumably kernel/sched.c, which carries the group-scheduler core.]
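The hunk below dereferences cfs_rq->tg->cfs_rq[this_cpu], which hints at the bookkeeping the collapsed diff introduces: each task group owns one runqueue, and one schedulable entity, per CPU. A minimal illustrative sketch (field layout assumed, not the exact definitions from the collapsed file):

	/* Illustrative sketch only; the real definitions live in the collapsed diff. */
	struct task_grp {
		struct sched_entity **se;	/* per-CPU entity representing the group */
		struct cfs_rq **cfs_rq;		/* per-CPU runqueue holding the group's tasks */
	};

	struct cfs_rq {
		/* ... existing fields ... */
		struct task_grp *tg;		/* group that owns this runqueue */
	};

Each group's per-CPU entity is enqueued on that CPU's parent runqueue, so the fair scheduler can arbitrate between groups the same way it arbitrates between individual tasks.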
kernel/sched_fair.c
@@ -610,8 +610,7 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
  */
 static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 {
-	/* A later patch will take group into account */
-	return &cpu_rq(this_cpu)->cfs;
+	return cfs_rq->tg->cfs_rq[this_cpu];
 }
 
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
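cpu_cfs_rq() answers "given a group's runqueue on one CPU, what is the same group's runqueue on this CPU?", which is exactly what per-group load balancing needs. A simplified sketch of the caller pattern in the fair class's load balancer (details elided):

	struct cfs_rq *busy_cfs_rq, *this_cfs_rq;

	/* Walk each group runqueue on the busiest CPU and pair it
	 * with the matching group's runqueue on this CPU. */
	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
		/* ... move weighted load from busy_cfs_rq to this_cfs_rq ... */
	}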
kernel/sched_idletask.c
@@ -50,6 +50,10 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
 
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -66,6 +70,7 @@ static struct sched_class idle_sched_class __read_mostly = {
 	.load_balance		= load_balance_idle,
 
+	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 
 	/* no .task_new for idle tasks */
 };
kernel/sched_rt.c
@@ -218,6 +218,10 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	}
 }
 
+static void set_curr_task_rt(struct rq *rq)
+{
+}
+
 static struct sched_class rt_sched_class __read_mostly = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -230,5 +234,6 @@ static struct sched_class rt_sched_class __read_mostly = {
 	.load_balance		= load_balance_rt,
 
+	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 };
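The idle and RT classes only need empty set_curr_task stubs, since their tasks are not group-scheduled. The fair class is the one that does real work in this hook: when the currently running task changes groups, its entity chain must be re-marked as current in the new per-group runqueues. A sketch of the fair-class counterpart, simplified from this commit's (not shown) sched_fair.c changes:

	static void set_curr_task_fair(struct rq *rq)
	{
		struct sched_entity *se = &rq->curr->se;

		/* Re-establish 'current' at every level of the group
		 * hierarchy so runtime accounting restarts from now. */
		for_each_sched_entity(se)
			set_next_entity(cfs_rq_of(se), se);
	}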