Commit a7f75d3b authored by Linus Torvalds


Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: re-tune NUMA topologies
  sched: stop wake_affine from causing serious imbalance
  sched: fix sched_clock_cpu()
  revert ("sched: fair-group: SMP-nice for group scheduling")
  sched: cleanup
  show_schedstat(): fix memleak
  sched: unite unlikely pairs in rt_policy() and schedule_debug()
  revert ("sched: fair: weight calculations")
parents 3897b82c 67159306
@@ -766,7 +766,6 @@ struct sched_domain {
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
-	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
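The first_cpu cache removed above was introduced by the fair-group SMP-nice work that this series reverts. As a hedged illustration (not part of the commit; the helper name is hypothetical), code that still needs the first CPU of a domain can derive it on demand from the span using the cpumask helpers of this kernel generation:

static inline int sched_domain_first_cpu(struct sched_domain *sd)
{
	/* derive the value when needed instead of caching it in the struct */
	return first_cpu(sd->span);
}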
@@ -166,7 +166,9 @@ void arch_update_cpu_topology(void);
 	.busy_idx		= 3,			\
 	.idle_idx		= 3,			\
 	.flags			= SD_LOAD_BALANCE	\
-				| SD_SERIALIZE,		\
+				| SD_BALANCE_NEWIDLE	\
+				| SD_WAKE_AFFINE	\
+				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 64,			\
 }
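This hunk widens the node-level (NUMA) domain initializer: besides SD_LOAD_BALANCE and SD_SERIALIZE, node domains now also permit newly-idle balancing and affine wakeups. As a hedged sketch (the helper names below are hypothetical, not from the commit), scheduler paths gate their behaviour on these per-domain flag bits:

static inline int sd_allows_affine_wakeup(const struct sched_domain *sd)
{
	/* wake_affine-style placement is only tried in domains with this flag */
	return (sd->flags & SD_WAKE_AFFINE) != 0;
}

static inline int sd_allows_newidle_balance(const struct sched_domain *sd)
{
	/* a CPU going idle only pulls work within domains carrying this flag */
	return (sd->flags & SD_BALANCE_NEWIDLE) != 0;
}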
This diff is collapsed.
@@ -59,22 +59,26 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
+static __read_mostly int sched_clock_running;
+
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
-	u64 now = 0;
+	unsigned long now_jiffies = jiffies;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-		scd->prev_jiffies = jiffies;
-		scd->prev_raw = now;
-		scd->tick_raw = now;
+		scd->prev_jiffies = now_jiffies;
+		scd->prev_raw = 0;
+		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
 	}
+
+	sched_clock_running = 1;
 }
 
 /*
@@ -136,6 +140,9 @@ u64 sched_clock_cpu(int cpu)
 	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
@@ -174,6 +181,9 @@ void sched_clock_tick(void)
 	struct sched_clock_data *scd = this_scd();
 	u64 now, now_gtod;
 
+	if (unlikely(!sched_clock_running))
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
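The sched_clock_cpu() fix uses a common init-guard pattern: a __read_mostly flag is set only once the per-CPU clock state has been initialized, and callers that arrive earlier get a harmless default instead of reading uninitialized data. A minimal user-space sketch of the same pattern (an analogue for illustration, not the kernel code):

#include <stdio.h>

static int clock_running;          /* analogue of sched_clock_running */
static unsigned long long base_ns; /* stand-in for the real clock state */

static unsigned long long my_clock(void)
{
	if (!clock_running)
		return 0ULL;       /* early caller: safe default, no uninitialized reads */
	return base_ns;            /* real bookkeeping would happen here */
}

static void my_clock_init(void)
{
	base_ns = 1000000ULL;      /* pretend we sampled a hardware counter */
	clock_running = 1;         /* only now do queries return real values */
}

int main(void)
{
	printf("before init: %llu\n", my_clock());
	my_clock_init();
	printf("after init:  %llu\n", my_clock());
	return 0;
}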
@@ -167,11 +167,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #endif
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
-#endif
-#endif
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
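The dropped lines printed cfs_rq->shares, a field that goes away with the group-scheduling revert. For context, the remaining SEQ_printf line relies on a left-justified 30-character field so every ".name : value" row lines up; a small hedged illustration with plain printf (not the kernel's SEQ_printf macro):

#include <stdio.h>

int main(void)
{
	long nr_spread_over = 3;

	/* "%-30s" left-justifies the field name in a 30-column slot,
	 * which is what keeps the sched_debug values aligned */
	printf("  .%-30s: %ld\n", "nr_spread_over", nr_spread_over);
	return 0;
}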
This diff is collapsed.
@@ -513,8 +513,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	 */
 	for_each_sched_rt_entity(rt_se)
 		enqueue_rt_entity(rt_se);
-
-	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -534,8 +532,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 		if (rt_rq && rt_rq->rt_nr_running)
 			enqueue_rt_entity(rt_se);
 	}
-
-	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
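Both hunks drop the inc_cpu_load()/dec_cpu_load() calls that the reverted SMP-nice patch had added to the RT enqueue/dequeue paths. The removed pattern is simple aggregate-weight accounting kept in sync with enqueue and dequeue; a hedged, self-contained sketch of that pattern (toy names, not kernel code):

struct toy_rq {
	unsigned long total_weight;	/* sum of the weights of queued tasks */
};

static void toy_enqueue(struct toy_rq *rq, unsigned long weight)
{
	rq->total_weight += weight;	/* account the task's weight on enqueue */
}

static void toy_dequeue(struct toy_rq *rq, unsigned long weight)
{
	rq->total_weight -= weight;	/* and drop it again on dequeue */
}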
@@ -67,6 +67,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		preempt_enable();
 #endif
 	}
+	kfree(mask_str);
 	return 0;
 }
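The show_schedstat() fix plugs a memory leak: mask_str is allocated with kmalloc() near the top of the function but was never freed on the normal return path. A user-space sketch of the bug shape and the fix (malloc/free stand in for kmalloc/kfree; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int show_stats(FILE *out)
{
	char *mask_str = malloc(64);           /* stand-in for kmalloc(mask_len, GFP_KERNEL) */
	if (!mask_str)
		return -1;

	snprintf(mask_str, 64, "%08x", 0xffu); /* stand-in for formatting a cpumask */
	fprintf(out, "cpumask: %s\n", mask_str);

	free(mask_str);                        /* the fix: release the buffer before returning 0 */
	return 0;
}

int main(void)
{
	return show_stats(stdout);
}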