Commit d07e75a6 authored by Ingo Molnar

Merge branch 'sched/cleanups' into sched/core

Merge reason: these bits, formerly in sched/urgent, are too late for v3.10.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 424c93fe 8527632d
@@ -11,7 +11,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
-obj-y += core.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o
+obj-y += core.o proc.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o
 obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
...
This diff is collapsed.
@@ -113,6 +113,24 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
 
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+        lw->weight += inc;
+        lw->inv_weight = 0;
+}
+
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+        lw->weight -= dec;
+        lw->inv_weight = 0;
+}
+
+static inline void update_load_set(struct load_weight *lw, unsigned long w)
+{
+        lw->weight = w;
+        lw->inv_weight = 0;
+}
+
 /*
  * Increase the granularity value when there are more CPUs,
  * because with more CPUs the 'effective latency' as visible
...
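The three helpers added above are moved verbatim from kernel/sched/sched.h (see the removal hunk further down) into kernel/sched/fair.c, their only remaining user. Below is a minimal userspace sketch of what they do, with struct load_weight reduced to the two fields the helpers touch; the nice-level weights used in main() (1024 for nice 0, 820 for nice 1) come from the scheduler's weight table, and the harness itself is illustrative, not kernel code:

#include <stdio.h>

/* Reduced stand-in for the kernel's struct load_weight. */
struct load_weight {
        unsigned long weight;     /* accumulated weight of queued entities */
        unsigned long inv_weight; /* cached inverse; 0 means "recompute" */
};

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;     /* invalidate the cached inverse */
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

int main(void)
{
        struct load_weight rq_load = { 0, 0 };

        update_load_add(&rq_load, 1024);  /* enqueue a nice-0 task  */
        update_load_add(&rq_load, 820);   /* enqueue a nice-1 task  */
        update_load_sub(&rq_load, 1024);  /* dequeue the first task */
        printf("weight=%lu inv_weight=%lu\n", rq_load.weight, rq_load.inv_weight);
        return 0;
}

Zeroing inv_weight on every change marks the cached inverse as stale so the divide-avoidance path recomputes it lazily on the next use, rather than paying for the recomputation on every enqueue/dequeue.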
This diff is collapsed.
@@ -10,8 +10,16 @@
 #include "cpupri.h"
 #include "cpuacct.h"
 
+struct rq;
+
 extern __read_mostly int scheduler_running;
 
+extern unsigned long calc_load_update;
+extern atomic_long_t calc_load_tasks;
+
+extern long calc_load_fold_active(struct rq *this_rq);
+extern void update_cpu_load_active(struct rq *this_rq);
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
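The comment closing this hunk introduces the nice-to-priority conversion that kernel/sched/sched.h implements right below it with NICE_TO_PRIO()/PRIO_TO_NICE(). A self-contained sketch of that mapping (MAX_RT_PRIO is 100 in this kernel, so static priorities span 100..139; the main() harness is illustrative only):

#include <stdio.h>

#define MAX_RT_PRIO     100
#define MAX_PRIO        (MAX_RT_PRIO + 40)

/* User-nice [-20 .. 19] maps linearly onto static priority [100 .. 139]. */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)

int main(void)
{
        printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20)); /* 100 */
        printf("nice   0 -> prio %d\n", NICE_TO_PRIO(0));   /* 120 */
        printf("nice  19 -> prio %d\n", NICE_TO_PRIO(19));  /* 139 */
        return 0;
}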
@@ -884,24 +892,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #define WF_FORK 0x02 /* child wakeup after fork */
 #define WF_MIGRATED 0x4 /* internal use, task got migrated */
 
-static inline void update_load_add(struct load_weight *lw, unsigned long inc)
-{
-        lw->weight += inc;
-        lw->inv_weight = 0;
-}
-
-static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
-{
-        lw->weight -= dec;
-        lw->inv_weight = 0;
-}
-
-static inline void update_load_set(struct load_weight *lw, unsigned long w)
-{
-        lw->weight = w;
-        lw->inv_weight = 0;
-}
-
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
...
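The comment truncated at the end of this hunk (it continues in the unexpanded context) describes the per-nice load weights: each task's contribution to its runqueue load is scaled by its nice value, anchored at 1024 for nice 0, with each nice step changing the weight by roughly 25% (about 10% of CPU relative to neighboring nice levels). A sketch that derives approximate weights from that rule; the kernel's actual prio_to_weight[] table is hand-rounded, so entries differ slightly from these computed values:

#include <stdio.h>

#define NICE_0_LOAD     1024    /* weight of a nice-0 SCHED_NORMAL task */

int main(void)
{
        double w = NICE_0_LOAD;
        int nice;

        /* Walk from nice 0 toward -20: each step multiplies the weight
         * by ~1.25, so nice -20 lands near the table's 88761. */
        for (nice = 0; nice >= -20; nice--) {
                printf("nice %3d -> weight ~%.0f\n", nice, w);
                w *= 1.25;
        }
        return 0;
}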