Commit d1e57508 authored by Heiko Carstens's avatar Heiko Carstens Committed by Martin Schwidefsky

s390/topology: cleanup topology code

Mainly merge all different per-cpu arrays into a single array which
holds all topology information per logical cpu.
Also fix the broken core vs socket variable naming and simplify the
locking a bit.
When running in environments without topology information also
invent book, socket and core ids, so that not all ids are zero.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 0a4ccc99
...@@ -8,32 +8,34 @@ struct cpu; ...@@ -8,32 +8,34 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK #ifdef CONFIG_SCHED_BOOK
/*
 * Per-logical-cpu topology information, merged from the former separate
 * cpu_core_id/cpu_socket_id/cpu_book_id and cpu_core_map/cpu_book_map
 * arrays into one struct per cpu.
 */
struct cpu_topology_s390 {
	unsigned short core_id;		/* physical core id of this cpu */
	unsigned short socket_id;	/* socket (physical package) id */
	unsigned short book_id;		/* book id (s390 container above socket) */
	cpumask_t core_mask;		/* cpus sharing this cpu's socket */
	cpumask_t book_mask;		/* cpus sharing this cpu's book */
};

extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
extern unsigned char cpu_core_id[NR_CPUS]; #define mc_capable() 1
extern cpumask_t cpu_core_map[NR_CPUS];
static inline const struct cpumask *cpu_coregroup_mask(int cpu) static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{ {
return &cpu_core_map[cpu]; return &cpu_topology[cpu].core_mask;
} }
#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define mc_capable() (1)
extern unsigned char cpu_book_id[NR_CPUS];
extern cpumask_t cpu_book_map[NR_CPUS];
static inline const struct cpumask *cpu_book_mask(int cpu) static inline const struct cpumask *cpu_book_mask(int cpu)
{ {
return &cpu_book_map[cpu]; return &cpu_topology[cpu].book_mask;
} }
#define topology_book_id(cpu) (cpu_book_id[cpu])
#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
int topology_cpu_init(struct cpu *); int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc); int topology_set_cpu_management(int fc);
void topology_schedule_update(void); void topology_schedule_update(void);
......
...@@ -29,48 +29,38 @@ struct mask_info { ...@@ -29,48 +29,38 @@ struct mask_info {
cpumask_t mask; cpumask_t mask;
}; };
static int topology_enabled = 1; static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work); static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info; static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info core_info; static int topology_enabled = 1;
cpumask_t cpu_core_map[NR_CPUS]; static DECLARE_WORK(topology_work, topology_work_fn);
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];
/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info; static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS]; struct cpu_topology_s390 cpu_topology[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{ {
cpumask_t mask; cpumask_t mask;
cpumask_clear(&mask); cpumask_copy(&mask, cpumask_of(cpu));
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) { if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
cpumask_copy(&mask, cpumask_of(cpu));
return mask; return mask;
for (; info; info = info->next) {
if (cpumask_test_cpu(cpu, &info->mask))
return info->mask;
} }
while (info) {
if (cpumask_test_cpu(cpu, &info->mask)) {
mask = info->mask;
break;
}
info = info->next;
}
if (cpumask_empty(&mask))
cpumask_copy(&mask, cpumask_of(cpu));
return mask; return mask;
} }
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book, struct mask_info *book,
struct mask_info *core, struct mask_info *socket,
int one_core_per_cpu) int one_socket_per_cpu)
{ {
unsigned int cpu; unsigned int cpu;
...@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, ...@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
lcpu = smp_find_processor_id(rcpu); lcpu = smp_find_processor_id(rcpu);
if (lcpu >= 0) { if (lcpu < 0)
cpumask_set_cpu(lcpu, &book->mask); continue;
cpu_book_id[lcpu] = book->id; cpumask_set_cpu(lcpu, &book->mask);
cpumask_set_cpu(lcpu, &core->mask); cpu_topology[lcpu].book_id = book->id;
cpu_core_id[lcpu] = rcpu; cpumask_set_cpu(lcpu, &socket->mask);
if (one_core_per_cpu) { cpu_topology[lcpu].core_id = rcpu;
cpu_socket_id[lcpu] = rcpu; if (one_socket_per_cpu) {
core = core->next; cpu_topology[lcpu].socket_id = rcpu;
} else { socket = socket->next;
cpu_socket_id[lcpu] = core->id; } else {
} cpu_topology[lcpu].socket_id = socket->id;
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
} }
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
} }
return core; return socket;
} }
static void clear_masks(void) static void clear_masks(void)
{ {
struct mask_info *info; struct mask_info *info;
info = &core_info; info = &socket_info;
while (info) { while (info) {
cpumask_clear(&info->mask); cpumask_clear(&info->mask);
info = info->next; info = info->next;
...@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle) ...@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1); return (union topology_entry *)((struct topology_container *)tle + 1);
} }
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{ {
struct mask_info *core = &core_info; struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info; struct mask_info *book = &book_info;
union topology_entry *tle, *end; union topology_entry *tle, *end;
...@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) ...@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
book->id = tle->container.id; book->id = tle->container.id;
break; break;
case 1: case 1:
core = core->next; socket = socket->next;
core->id = tle->container.id; socket->id = tle->container.id;
break; break;
case 0: case 0:
add_cpus_to_mask(&tle->cpu, book, core, 0); add_cpus_to_mask(&tle->cpu, book, socket, 0);
break; break;
default: default:
clear_masks(); clear_masks();
...@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) ...@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
} }
} }
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{ {
struct mask_info *core = &core_info; struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info; struct mask_info *book = &book_info;
union topology_entry *tle, *end; union topology_entry *tle, *end;
...@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) ...@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
book->id = tle->container.id; book->id = tle->container.id;
break; break;
case 0: case 0:
core = add_cpus_to_mask(&tle->cpu, book, core, 1); socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
break; break;
default: default:
clear_masks(); clear_masks();
...@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) ...@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
} }
} }
static void tl_to_cores(struct sysinfo_15_1_x *info) static void tl_to_masks(struct sysinfo_15_1_x *info)
{ {
struct cpuid cpu_id; struct cpuid cpu_id;
get_cpu_id(&cpu_id);
spin_lock_irq(&topology_lock); spin_lock_irq(&topology_lock);
get_cpu_id(&cpu_id);
clear_masks(); clear_masks();
switch (cpu_id.machine) { switch (cpu_id.machine) {
case 0x2097: case 0x2097:
case 0x2098: case 0x2098:
__tl_to_cores_z10(info); __tl_to_masks_z10(info);
break; break;
default: default:
__tl_to_cores_generic(info); __tl_to_masks_generic(info);
} }
spin_unlock_irq(&topology_lock); spin_unlock_irq(&topology_lock);
} }
...@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc) ...@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
return rc; return rc;
} }
static void update_cpu_core_map(void) static void update_cpu_masks(void)
{ {
unsigned long flags; unsigned long flags;
int cpu; int cpu;
spin_lock_irqsave(&topology_lock, flags); spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
cpu_topology[cpu].core_id = cpu;
cpu_topology[cpu].socket_id = cpu;
cpu_topology[cpu].book_id = cpu;
}
} }
spin_unlock_irqrestore(&topology_lock, flags); spin_unlock_irqrestore(&topology_lock, flags);
} }
...@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void) ...@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
int cpu; int cpu;
if (!MACHINE_HAS_TOPOLOGY) { if (!MACHINE_HAS_TOPOLOGY) {
update_cpu_core_map(); update_cpu_masks();
topology_update_polarization_simple(); topology_update_polarization_simple();
return 0; return 0;
} }
store_topology(info); store_topology(info);
tl_to_cores(info); tl_to_masks(info);
update_cpu_core_map(); update_cpu_masks();
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu); dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE); kobject_uevent(&dev->kobj, KOBJ_CHANGE);
...@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void) ...@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
for (i = 0; i < TOPOLOGY_NR_MAG; i++) for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(KERN_CONT " %d", info->mag[i]); printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest); printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &core_info, 1); alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2); alloc_masks(info, &book_info, 2);
} }
...@@ -454,7 +449,7 @@ static int __init topology_init(void) ...@@ -454,7 +449,7 @@ static int __init topology_init(void)
} }
set_topology_timer(); set_topology_timer();
out: out:
update_cpu_core_map(); update_cpu_masks();
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
} }
device_initcall(topology_init); device_initcall(topology_init);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment