Commit 3a3814c2 authored by Michael Holzheu's avatar Michael Holzheu Committed by Martin Schwidefsky

s390/topology: remove topology lock

Since we are already protected by the "sched_domains_mutex" lock, we can
safely remove the topology lock.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent f341b8df
...@@ -40,8 +40,10 @@ static struct sysinfo_15_1_x *tl_info; ...@@ -40,8 +40,10 @@ static struct sysinfo_15_1_x *tl_info;
static int topology_enabled = 1; static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn); static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the socket and book linked lists */ /*
static DEFINE_SPINLOCK(topology_lock); * Socket/Book linked lists and per_cpu(cpu_topology) updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info; static struct mask_info socket_info;
static struct mask_info book_info; static struct mask_info book_info;
...@@ -191,7 +193,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info) ...@@ -191,7 +193,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info)
{ {
struct cpuid cpu_id; struct cpuid cpu_id;
spin_lock_irq(&topology_lock);
get_cpu_id(&cpu_id); get_cpu_id(&cpu_id);
clear_masks(); clear_masks();
switch (cpu_id.machine) { switch (cpu_id.machine) {
...@@ -202,7 +203,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info) ...@@ -202,7 +203,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info)
default: default:
__tl_to_masks_generic(info); __tl_to_masks_generic(info);
} }
spin_unlock_irq(&topology_lock);
} }
static void topology_update_polarization_simple(void) static void topology_update_polarization_simple(void)
...@@ -247,10 +247,8 @@ int topology_set_cpu_management(int fc) ...@@ -247,10 +247,8 @@ int topology_set_cpu_management(int fc)
static void update_cpu_masks(void) static void update_cpu_masks(void)
{ {
unsigned long flags;
int cpu; int cpu;
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
...@@ -262,7 +260,6 @@ static void update_cpu_masks(void) ...@@ -262,7 +260,6 @@ static void update_cpu_masks(void)
per_cpu(cpu_topology, cpu).book_id = cpu; per_cpu(cpu_topology, cpu).book_id = cpu;
} }
} }
spin_unlock_irqrestore(&topology_lock, flags);
numa_update_cpu_topology(); numa_update_cpu_topology();
} }
......
...@@ -51,6 +51,11 @@ enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA}; ...@@ -51,6 +51,11 @@ enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};
static int emu_nodes = 1; static int emu_nodes = 1;
/* NUMA stripe size */ /* NUMA stripe size */
static unsigned long emu_size; static unsigned long emu_size;
/*
* Node to core pinning information updates are protected by
* "sched_domains_mutex".
*/
/* Pinned core to node mapping */ /* Pinned core to node mapping */
static int cores_to_node_id[CONFIG_NR_CPUS]; static int cores_to_node_id[CONFIG_NR_CPUS];
/* Total number of pinned cores */ /* Total number of pinned cores */
...@@ -393,7 +398,7 @@ static void print_node_to_core_map(void) ...@@ -393,7 +398,7 @@ static void print_node_to_core_map(void)
* Transfer physical topology into a NUMA topology and modify CPU masks * Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology. * according to the NUMA topology.
* *
* This function is called under the CPU hotplug lock. * Must be called with "sched_domains_mutex" lock held.
*/ */
static void emu_update_cpu_topology(void) static void emu_update_cpu_topology(void)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment