Commit 4cb14bc8 authored by Heiko Carstens, committed by Ingo Molnar

topology, s390: Add z11 cpu topology support

Use the extended cpu topology information that z11 machines provide
to improve the scheduler's decision making.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100831082844.604956770@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b40d8ed4
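
The patch below replaces the single per-core linked list (struct core_info) with a generic struct mask_info list that is instantiated once per topology level: one chain for cores and, under CONFIG_SCHED_BOOK, a second chain for books. cpu_group_map() walks a chain to find the mask of CPUs that share a core or a book with a given CPU. The stand-alone sketch that follows is illustrative only, not part of the patch: it models cpumask_t as a plain 64-bit word and uses made-up CPU numbers, but performs the same list walk.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the kernel's mask_info chain: one node per core/book. */
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	uint64_t mask;			/* stands in for cpumask_t */
};

/* Same walk as cpu_group_map(): return the mask of the node containing cpu. */
static uint64_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	while (info) {
		if (info->mask & (1ULL << cpu))
			return info->mask;
		info = info->next;
	}
	return 1ULL << cpu;		/* fallback: the cpu is alone in its group */
}

int main(void)
{
	/* Two books, CPUs 0-3 in book 0 and CPUs 4-7 in book 1. */
	struct mask_info book1 = { NULL, 1, 0x00f0 };
	struct mask_info book0 = { &book1, 0, 0x000f };

	printf("cpu 5 book mask: %#llx\n",
	       (unsigned long long)cpu_group_map(&book0, 5));
	return 0;
}

In the kernel the same walk is done once per level: cpu_group_map(&core_info, cpu) fills cpu_core_map[] and cpu_group_map(&book_info, cpu) fills cpu_book_map[], which the topology macros in asm/topology.h then expose.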
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -198,6 +198,13 @@ config HOTPLUG_CPU
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
+config SCHED_BOOK
+	bool "Book scheduler support"
+	depends on SMP
+	help
+	  Book scheduler support improves the CPU scheduler's decision making
+	  when dealing with machines that have several books.
+
 config MATHEMU
 	bool "IEEE FPU emulation"
 	depends on MARCH_G5
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -3,15 +3,32 @@
 
 #include <linux/cpumask.h>
 
-#define mc_capable()	(1)
-
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
-
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
 #define topology_core_id(cpu)		(cpu_core_id[cpu])
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
+#define mc_capable()			(1)
+
+#ifdef CONFIG_SCHED_BOOK
+
+extern unsigned char cpu_book_id[NR_CPUS];
+extern cpumask_t cpu_book_map[NR_CPUS];
+
+static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+{
+	return &cpu_book_map[cpu];
+}
+
+#define topology_book_id(cpu)		(cpu_book_id[cpu])
+#define topology_book_cpumask(cpu)	(&cpu_book_map[cpu])
+
+#endif /* CONFIG_SCHED_BOOK */
 
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -30,6 +47,8 @@ static inline void s390_init_cpu_topology(void)
 };
 #endif
 
+#define SD_BOOK_INIT	SD_CPU_INIT
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@ struct tl_info {
 	union tl_entry tle[0];
 };
 
-struct core_info {
-	struct core_info *next;
+struct mask_info {
+	struct mask_info *next;
 	unsigned char id;
 	cpumask_t mask;
 };
@@ -66,7 +66,6 @@ struct core_info {
 static int topology_enabled;
 static void topology_work_fn(struct work_struct *work);
 static struct tl_info *tl_info;
-static struct core_info core_info;
 static int machine_has_topology;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
 static DEFINE_SPINLOCK(topology_lock);
 
+static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
-	struct core_info *core = &core_info;
-	unsigned long flags;
 	cpumask_t mask;
 
 	cpus_clear(mask);
 	if (!topology_enabled || !machine_has_topology)
 		return cpu_possible_map;
-	spin_lock_irqsave(&topology_lock, flags);
-	while (core) {
-		if (cpu_isset(cpu, core->mask)) {
-			mask = core->mask;
+	while (info) {
+		if (cpu_isset(cpu, info->mask)) {
+			mask = info->mask;
 			break;
 		}
-		core = core->next;
+		info = info->next;
 	}
-	spin_unlock_irqrestore(&topology_lock, flags);
 	if (cpus_empty(mask))
 		mask = cpumask_of_cpu(cpu);
 	return mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
-	return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+			     struct mask_info *core)
 {
 	unsigned int cpu;
 
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 
 		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
 		for_each_present_cpu(lcpu) {
-			if (cpu_logical_map(lcpu) == rcpu) {
-				cpu_set(lcpu, core->mask);
-				cpu_core_id[lcpu] = core->id;
-				smp_cpu_polarization[lcpu] = tl_cpu->pp;
-			}
+			if (cpu_logical_map(lcpu) != rcpu)
+				continue;
+#ifdef CONFIG_SCHED_BOOK
+			cpu_set(lcpu, book->mask);
+			cpu_book_id[lcpu] = book->id;
+#endif
+			cpu_set(lcpu, core->mask);
+			cpu_core_id[lcpu] = core->id;
+			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
 }
 
-static void clear_cores(void)
+static void clear_masks(void)
 {
-	struct core_info *core = &core_info;
+	struct mask_info *info;
 
-	while (core) {
-		cpus_clear(core->mask);
-		core = core->next;
+	info = &core_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
+	}
+#ifdef CONFIG_SCHED_BOOK
+	info = &book_info;
+	while (info) {
+		cpus_clear(info->mask);
+		info = info->next;
 	}
+#endif
 }
 
 static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle)
 
 static void tl_to_cores(struct tl_info *info)
 {
+#ifdef CONFIG_SCHED_BOOK
+	struct mask_info *book = &book_info;
+#else
+	struct mask_info *book = NULL;
+#endif
+	struct mask_info *core = &core_info;
 	union tl_entry *tle, *end;
-	struct core_info *core = &core_info;
 
 	spin_lock_irq(&topology_lock);
-	clear_cores();
+	clear_masks();
 	tle = info->tle;
 	end = (union tl_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
 		switch (tle->nl) {
-		case 5:
-		case 4:
-		case 3:
+#ifdef CONFIG_SCHED_BOOK
 		case 2:
+			book = book->next;
+			book->id = tle->container.id;
 			break;
+#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_core(&tle->cpu, core);
+			add_cpus_to_mask(&tle->cpu, book, core);
 			break;
 		default:
-			clear_cores();
+			clear_masks();
 			machine_has_topology = 0;
 			goto out;
 		}
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc)
 
 static void update_cpu_core_map(void)
 {
+	unsigned long flags;
 	int cpu;
 
-	for_each_possible_cpu(cpu)
-		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+	spin_lock_irqsave(&topology_lock, flags);
+	for_each_possible_cpu(cpu) {
+		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+	}
+	spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+	int rc;
+
+	rc = stsi(info, 15, 1, 3);
+	if (rc != -ENOSYS)
+		return;
+#endif
+	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void)
 		topology_update_polarization_simple();
 		return 0;
 	}
-	stsi(info, 15, 1, 2);
+	store_topology(info);
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@ static int __init init_topology_update(void)
 }
 __initcall(init_topology_update);
 
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+	int i, nr_masks;
+
+	nr_masks = info->mag[NR_MAG - offset];
+	for (i = 0; i < info->mnest - offset; i++)
+		nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+	nr_masks = max(nr_masks, 1);
+	for (i = 0; i < nr_masks; i++) {
+		mask->next = alloc_bootmem(sizeof(struct mask_info));
+		mask = mask->next;
+	}
+}
+
 void __init s390_init_cpu_topology(void)
 {
 	unsigned long long facility_bits;
 	struct tl_info *info;
-	struct core_info *core;
-	int nr_cores;
 	int i;
 
 	if (stfle(&facility_bits, 1) <= 0)
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void)
 	tl_info = alloc_bootmem_pages(PAGE_SIZE);
 	info = tl_info;
-	stsi(info, 15, 1, 2);
-
-	nr_cores = info->mag[NR_MAG - 2];
-	for (i = 0; i < info->mnest - 2; i++)
-		nr_cores *= info->mag[NR_MAG - 3 - i];
+	store_topology(info);
 
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-
-	core = &core_info;
-	for (i = 0; i < nr_cores; i++) {
-		core->next = alloc_bootmem(sizeof(struct core_info));
-		core = core->next;
-		if (!core)
-			goto error;
-	}
-	return;
-error:
-	machine_has_topology = 0;
+	alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+	alloc_masks(info, &book_info, 3);
+#endif
 }
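
Two details of the new code deserve a worked example. store_topology() first tries stsi(info, 15, 1, 3), the SYSIB selector that also describes the book level, and falls back to stsi(info, 15, 1, 2) when stsi returns -ENOSYS because the machine or hypervisor does not support it. alloc_masks() then sizes each mask_info chain from the mag[] array of the topology information block: for the core chain (offset 2) it multiplies the per-level maxima above the CPU level, giving the maximum number of cores; for the book chain (offset 3) the product starts one level higher and yields the maximum number of books. The helper below mirrors the loop in alloc_masks(); the mag[] layout (highest level first, mag[NR_MAG - 1] being the CPU level) and the example numbers are assumptions for illustration, not values taken from the patch.

#include <stdio.h>

#define NR_MAG 6

/* Hypothetical stand-alone model of alloc_masks()'s sizing logic. */
static int nr_masks(const unsigned char mag[NR_MAG], int mnest, int offset)
{
	int i, n = mag[NR_MAG - offset];

	for (i = 0; i < mnest - offset; i++)
		n *= mag[NR_MAG - offset - 1 - i];
	return n > 1 ? n : 1;		/* same clamp as max(nr_masks, 1) */
}

int main(void)
{
	/* Assumed machine: 3 nesting levels, 4 books, 8 cores/book, 4 cpus/core. */
	unsigned char mag[NR_MAG] = { 0, 0, 0, 4, 8, 4 };

	printf("core-level nodes: %d\n", nr_masks(mag, 3, 2));	/* 8 * 4 = 32 */
	printf("book-level nodes: %d\n", nr_masks(mag, 3, 3));	/* 4 */
	return 0;
}

With these counts the boot-time code allocates one mask_info node per possible container at each level, so tl_to_cores() can simply advance along the preallocated chain as it parses container entries.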