Commit f66ffded authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix SCHED_MC regression caused by change in sched cpu_power
  sched: Don't use possibly stale sched_class
  kthread, sched: Remove reference to kthread_create_on_cpu
  sched: cpuacct: Use bigger percpu counter batch values for stats counters
  percpu_counter: Make __percpu_counter_add an inline function on UP
  sched: Remove member rt_se from struct rt_rq
  sched: Change usage of rt_rq->rt_se to rt_rq->tg->rt_se[cpu]
  sched: Remove unused update_shares_locked()
  sched: Use for_each_bit
  sched: Queue a deboosted task to the head of the RT prio queue
  sched: Implement head queueing for sched_rt
  sched: Extend enqueue_task to allow head queueing
  sched: Remove USER_SCHED
  sched: Fix the place where group powers are updated
  sched: Assume *balance is valid
  sched: Remove load_balance_newidle()
  sched: Unify load_balance{,_newidle}()
  sched: Add a lock break for PREEMPT=y
  sched: Remove from fwd decls
  sched: Remove rq_iterator from move_one_task
  ...

Fix up trivial conflicts in kernel/sched.c
parents 2531216f dd5feea1
@@ -6,21 +6,6 @@ be removed from this file.
 ---------------------------

-What:	USER_SCHED
-When:	2.6.34
-
-Why:	USER_SCHED was implemented as a proof of concept for group scheduling.
-	The effect of USER_SCHED can already be achieved from userspace with
-	the help of libcgroup. The removal of USER_SCHED will also simplify
-	the scheduler code with the removal of one major ifdef. There are also
-	issues USER_SCHED has with USER_NS. A decision was taken not to fix
-	those and instead remove USER_SCHED. Also new group scheduling
-	features will not be implemented for USER_SCHED.
-
-Who:	Dhaval Giani <dhaval@linux.vnet.ibm.com>
-
----------------------------
-
 What:	PRISM54
 When:	2.6.34
...
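The removed entry's rationale is that per-user CPU grouping can be reproduced from userspace with cgroups (libcgroup is a wrapper around the same filesystem interface). A minimal, hedged sketch of that replacement follows; the /cgroup/cpu mount point, the uid_<N> naming and the helper itself are illustrative assumptions, not part of this merge.

/*
 * Illustrative userspace replacement for USER_SCHED (assumed paths):
 * create one cpu-controller cgroup per uid and attach tasks to it by
 * writing their pid to the group's "tasks" file.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>

static int put_task_in_uid_group(pid_t pid, uid_t uid)
{
	char path[256];
	FILE *f;

	/* e.g. /cgroup/cpu/uid_1000 -- the mount point is an assumption */
	snprintf(path, sizeof(path), "/cgroup/cpu/uid_%u", (unsigned int)uid);
	if (mkdir(path, 0755) && errno != EEXIST)
		return -1;

	strncat(path, "/tasks", sizeof(path) - strlen(path) - 1);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", (int)pid);
	return fclose(f);
}

int main(void)
{
	/* put the current process into the group for uid 1000 (illustrative) */
	return put_task_in_uid_group(getpid(), 1000) ? 1 : 0;
}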
@@ -124,7 +124,7 @@ extern int _cond_resched(void);
 #endif

 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-  void __might_sleep(char *file, int line, int preempt_offset);
+  void __might_sleep(const char *file, int line, int preempt_offset);
 /**
  * might_sleep - annotation for functions that can sleep
  *
@@ -138,7 +138,8 @@ extern int _cond_resched(void);
 # define might_sleep() \
 	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
 #else
-  static inline void __might_sleep(char *file, int line, int preempt_offset) { }
+  static inline void __might_sleep(const char *file, int line,
+				   int preempt_offset) { }
 # define might_sleep() do { might_resched(); } while (0)
 #endif
...
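The only change here is the const qualifier on __might_sleep()'s file argument, but since the kernel-doc above describes what the annotation is for, here is a minimal sketch of how a sleeping helper uses it; the function and lock names are made up for illustration.

/*
 * Illustrative only (not from this merge): a helper that may block
 * declares so up front with might_sleep(), so a caller in atomic
 * context is caught by CONFIG_DEBUG_SPINLOCK_SLEEP even on paths
 * where the helper would not actually have had to wait.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void example_do_slow_work(void)
{
	might_sleep();		/* complain if called from atomic context */
	mutex_lock(&example_lock);
	/* ... work that may block ... */
	mutex_unlock(&example_lock);
}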
@@ -98,9 +98,6 @@ static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 	fbc->count = amount;
 }

-#define __percpu_counter_add(fbc, amount, batch) \
-	percpu_counter_add(fbc, amount)
-
 static inline void
 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -109,6 +106,12 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 	preempt_enable();
 }

+static inline void
+__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+	percpu_counter_add(fbc, amount);
+}
+
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 {
 	return fbc->count;
...
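The new UP stub above exists so that callers can pass a batch unconditionally; the batch only matters on SMP, where __percpu_counter_add() takes fbc->lock to fold the per-cpu delta into the shared count once the delta exceeds the batch. The cpuacct change that motivates this ("Use bigger percpu counter batch values for stats counters") sits in one of the collapsed diffs below, so the following is only a hedged sketch with made-up names and an illustrative batch value.

/* Illustrative kernel-side sketch (names and batch value are assumptions). */
#include <linux/percpu_counter.h>

#define EXAMPLE_BATCH	4096	/* much larger than the default batch */

static struct percpu_counter example_cputime;

static int example_counter_init(void)
{
	return percpu_counter_init(&example_cputime, 0);
}

static void example_account(s64 delta)
{
	/*
	 * Large per-tick deltas with the default batch would overflow the
	 * per-cpu slack on almost every update and serialize on fbc->lock;
	 * a bigger batch trades slightly staler approximate reads for far
	 * fewer lock acquisitions.  On UP the batch is simply ignored.
	 */
	__percpu_counter_add(&example_cputime, delta, EXAMPLE_BATCH);
}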
@@ -740,14 +740,6 @@ struct user_struct {
 	uid_t uid;
 	struct user_namespace *user_ns;

-#ifdef CONFIG_USER_SCHED
-	struct task_group *tg;
-#ifdef CONFIG_SYSFS
-	struct kobject kobj;
-	struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
@@ -1087,7 +1079,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;

-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
@@ -1099,14 +1092,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -2520,13 +2505,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 extern void normalize_rt_tasks(void);

-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED

 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif

 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
...
@@ -461,57 +461,6 @@ config LOG_BUF_SHIFT
 config HAVE_UNSTABLE_SCHED_CLOCK
 	bool

-config GROUP_SCHED
-	bool "Group CPU scheduler"
-	depends on EXPERIMENTAL
-	default n
-	help
-	  This feature lets CPU scheduler recognize task groups and control CPU
-	  bandwidth allocation to such task groups.
-	  In order to create a group from arbitrary set of processes, use
-	  CONFIG_CGROUPS. (See Control Group support.)
-
-config FAIR_GROUP_SCHED
-	bool "Group scheduling for SCHED_OTHER"
-	depends on GROUP_SCHED
-	default GROUP_SCHED
-
-config RT_GROUP_SCHED
-	bool "Group scheduling for SCHED_RR/FIFO"
-	depends on EXPERIMENTAL
-	depends on GROUP_SCHED
-	default n
-	help
-	  This feature lets you explicitly allocate real CPU bandwidth
-	  to users or control groups (depending on the "Basis for grouping tasks"
-	  setting below. If enabled, it will also make it impossible to
-	  schedule realtime tasks for non-root users until you allocate
-	  realtime bandwidth for them.
-	  See Documentation/scheduler/sched-rt-group.txt for more information.
-
-choice
-	depends on GROUP_SCHED
-	prompt "Basis for grouping tasks"
-	default USER_SCHED
-
-config USER_SCHED
-	bool "user id"
-	help
-	  This option will choose userid as the basis for grouping
-	  tasks, thus providing equal CPU bandwidth to each user.
-
-config CGROUP_SCHED
-	bool "Control groups"
-	depends on CGROUPS
-	help
-	  This option allows you to create arbitrary task groups
-	  using the "cgroup" pseudo filesystem and control
-	  the cpu bandwidth allocated to each such task group.
-	  Refer to Documentation/cgroups/cgroups.txt for more
-	  information on "cgroup" pseudo filesystem.
-
-endchoice
-
 menuconfig CGROUPS
 	boolean "Control Group support"
 	help
@@ -632,6 +581,36 @@ config CGROUP_MEM_RES_CTLR_SWAP
 	  Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
 	  size is 4096bytes, 512k per 1Gbytes of swap.

+menuconfig CGROUP_SCHED
+	bool "Group CPU scheduler"
+	depends on EXPERIMENTAL && CGROUPS
+	default n
+	help
+	  This feature lets CPU scheduler recognize task groups and control CPU
+	  bandwidth allocation to such task groups. It uses cgroups to group
+	  tasks.
+
+if CGROUP_SCHED
+config FAIR_GROUP_SCHED
+	bool "Group scheduling for SCHED_OTHER"
+	depends on CGROUP_SCHED
+	default CGROUP_SCHED
+
+config RT_GROUP_SCHED
+	bool "Group scheduling for SCHED_RR/FIFO"
+	depends on EXPERIMENTAL
+	depends on CGROUP_SCHED
+	default n
+	help
+	  This feature lets you explicitly allocate real CPU bandwidth
+	  to users or control groups (depending on the "Basis for grouping tasks"
+	  setting below. If enabled, it will also make it impossible to
+	  schedule realtime tasks for non-root users until you allocate
+	  realtime bandwidth for them.
+	  See Documentation/scheduler/sched-rt-group.txt for more information.
+
+endif #CGROUP_SCHED
+
 endif # CGROUPS

 config MM_OWNER
...
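The RT_GROUP_SCHED help text carries a practical warning: with the option enabled, realtime tasks outside a group with bandwidth get no RT runtime until some is allocated. A hedged userspace sketch of that allocation step follows; the mount point, group name and values are assumptions (Documentation/scheduler/sched-rt-group.txt is the authoritative description of the interface).

/*
 * Illustrative only: grant a cgroup RT bandwidth by writing
 * cpu.rt_runtime_us.  Paths and numbers are assumptions.
 */
#include <stdio.h>

static int set_rt_runtime(const char *group, long runtime_us)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/cgroup/cpu/%s/cpu.rt_runtime_us", group);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", runtime_us);
	return fclose(f);
}

int main(void)
{
	/* allow tasks in "rtusers" 100ms of RT runtime per default 1s period */
	return set_rt_runtime("rtusers", 100000) ? 1 : 0;
}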
@@ -197,16 +197,8 @@ static int __init ksysfs_init(void)
 			goto group_exit;
 	}

-	/* create the /sys/kernel/uids/ directory */
-	error = uids_sysfs_init();
-	if (error)
-		goto notes_exit;
-
 	return 0;

-notes_exit:
-	if (notes_size > 0)
-		sysfs_remove_bin_file(kernel_kobj, &notes_attr);
 group_exit:
 	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
 kset_exit:
...
@@ -101,7 +101,7 @@ static void create_kthread(struct kthread_create_info *create)
  *
  * Description: This helper function creates and names a kernel
  * thread. The thread will be stopped: use wake_up_process() to start
- * it. See also kthread_run(), kthread_create_on_cpu().
+ * it. See also kthread_run().
  *
  * When woken, the thread will run @threadfn() with @data as its
  * argument. @threadfn() can either call do_exit() directly if it is a
...
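For reference, a minimal sketch of the API the comment describes (the thread function and names here are made up, not part of this merge): kthread_create() hands back a stopped thread that needs wake_up_process(), while kthread_run() bundles both steps.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_threadfn(void *data)
{
	/* park until woken or asked to stop */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *example_start(void)
{
	struct task_struct *tsk;

	tsk = kthread_create(example_threadfn, NULL, "example/%d", 0);
	if (!IS_ERR(tsk))
		wake_up_process(tsk);	/* kthread_run() would do both steps */
	return tsk;
}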
This diff is collapsed.
@@ -47,9 +47,7 @@ static int convert_prio(int prio)
 }

 #define for_each_cpupri_active(array, idx)                    \
-	for (idx = find_first_bit(array, CPUPRI_NR_PRIORITIES); \
-	     idx < CPUPRI_NR_PRIORITIES;                         \
-	     idx = find_next_bit(array, CPUPRI_NR_PRIORITIES, idx+1))
+	for_each_bit(idx, array, CPUPRI_NR_PRIORITIES)

 /**
  * cpupri_find - find the best (lowest-pri) CPU in the system
...
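A small illustration of what for_each_bit() does (the bitmap here is made up): it expands to the same find_first_bit()/find_next_bit() loop the old macro spelled out by hand, visiting only the set bits.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

static void example_walk(void)
{
	DECLARE_BITMAP(active, 16);
	int idx;

	bitmap_zero(active, 16);
	__set_bit(3, active);
	__set_bit(9, active);

	for_each_bit(idx, active, 16)	/* visits 3, then 9 */
		printk(KERN_DEBUG "bit %d is set\n", idx);
}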
This diff is collapsed.
@@ -44,24 +44,6 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }

-#ifdef CONFIG_SMP
-static unsigned long
-load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
-}
-#endif
-
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
@@ -97,7 +79,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 	check_preempt_curr(rq, p, 0);
 }

-unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
 {
 	return 0;
 }
@@ -119,9 +101,6 @@ static const struct sched_class idle_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
-
-	.load_balance		= load_balance_idle,
-	.move_one_task		= move_one_task_idle,
 #endif

 	.set_curr_task		= set_curr_task_idle,
...
@@ -194,17 +194,20 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return rt_se->my_q;
 }

-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];

 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
-			enqueue_rt_entity(rt_se);
+			enqueue_rt_entity(rt_se, false);
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -212,7 +215,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	struct sched_rt_entity *rt_se = rt_rq->rt_se;
+	int this_cpu = smp_processor_id();
+	struct sched_rt_entity *rt_se;
+
+	rt_se = rt_rq->tg->rt_se[this_cpu];

 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
@@ -803,7 +809,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	dec_rt_group(rt_se, rt_rq);
 }

-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +825,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;

-	list_add_tail(&rt_se->run_list, queue);
+	if (head)
+		list_add(&rt_se->run_list, queue);
+	else
+		list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);

 	inc_rt_tasks(rt_se, rt_rq);
@@ -856,11 +865,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 	}
 }

-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
-		__enqueue_rt_entity(rt_se);
+		__enqueue_rt_entity(rt_se, head);
 }

 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,21 +880,22 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		struct rt_rq *rt_rq = group_rt_rq(rt_se);

 		if (rt_rq && rt_rq->rt_nr_running)
-			__enqueue_rt_entity(rt_se);
+			__enqueue_rt_entity(rt_se, false);
 	}
 }

 /*
  * Adding/removing a task to/from a priority array:
  */
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;

 	if (wakeup)
 		rt_se->timeout = 0;

-	enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se, head);

 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1481,24 +1491,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }

-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1721,7 +1713,7 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }

-unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
+static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 {
 	/*
 	 * Time slice is 0 for SCHED_FIFO tasks
@@ -1746,8 +1738,6 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,

-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
...
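The head flag added above is plumbed from enqueue_task_rt() down to the run-list insertion; the caller that actually passes head=true ("Queue a deboosted task to the head of the RT prio queue") lives in the collapsed kernel/sched.c diff, so the following is only a hedged sketch of the intent, with an illustrative helper name.

/*
 * Hedged sketch, not the actual kernel/sched.c change: when a
 * priority-boosted task is deboosted back to a lower RT priority,
 * requeueing it at the head of that priority's list lets it finish its
 * critical section ahead of equal-priority tasks instead of being sent
 * to the back of the queue.
 */
#include <linux/sched.h>

static void example_requeue_after_deboost(struct rq *rq,
					  struct task_struct *p,
					  int oldprio)
{
	/* p->prio already holds the new (numerically larger) priority */
	bool head = oldprio < p->prio;

	p->sched_class->enqueue_task(rq, p, 0, head);
}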
@@ -571,11 +571,6 @@ static int set_user(struct cred *new)
 	if (!new_user)
 		return -EAGAIN;

-	if (!task_can_switch_user(new_user, current)) {
-		free_uid(new_user);
-		return -EINVAL;
-	}
-
 	if (atomic_read(&new_user->processes) >=
 			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
 			new_user != INIT_USER) {
...
This diff is collapsed.