Commit 8078f4d6 authored by Thomas Gleixner

x86/cpu/topology: Rename smp_num_siblings

It's a really non-intuitive name. Rename it to __max_threads_per_core, whose
meaning is obvious.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mhklinux@outlook.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Link: https://lore.kernel.org/r/20240213210253.011307973@linutronix.de



parent 3205c983
@@ -181,7 +181,7 @@ static inline u64 p4_clear_ht_bit(u64 config)
 static inline int p4_ht_active(void)
 {
 #ifdef CONFIG_SMP
-	return smp_num_siblings > 1;
+	return __max_threads_per_core > 1;
 #endif
 	return 0;
 }
@@ -189,7 +189,7 @@ static inline int p4_ht_active(void)
 static inline int p4_ht_thread(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (smp_num_siblings == 2)
+	if (__max_threads_per_core == 2)
 		return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
 #endif
 	return 0;
...
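A note on the p4_ht_thread() logic in the hunk above: with two threads per core, the first CPU in a core's sibling mask is HT thread 0 and the remaining sibling is thread 1. Below is a standalone userspace sketch of that idea; the even/odd pairing in sibling_first() is a hypothetical stand-in for cpu_sibling_map, not how the kernel derives siblings.

    #include <stdio.h>

    /* Hypothetical stand-in for cpumask_first(cpu_sibling_map):
     * assumes siblings come in even/odd pairs {2n, 2n + 1}. */
    static int sibling_first(int cpu)
    {
            return cpu & ~1;
    }

    /* Mirrors p4_ht_thread(): any CPU other than the first sibling
     * is HT thread 1. */
    static int ht_thread(int cpu, int threads_per_core)
    {
            if (threads_per_core == 2)
                    return cpu != sibling_first(cpu);
            return 0;
    }

    int main(void)
    {
            printf("cpu 4 -> HT thread %d\n", ht_thread(4, 2)); /* 0: primary */
            printf("cpu 5 -> HT thread %d\n", ht_thread(5, 2)); /* 1: secondary */
            return 0;
    }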
@@ -8,8 +8,6 @@
 #include <asm/current.h>
 #include <asm/thread_info.h>
 
-extern unsigned int smp_num_siblings;
-
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
...
@@ -145,6 +145,7 @@ extern const struct cpumask *cpu_clustergroup_mask(int cpu);
 extern unsigned int __max_dies_per_package;
 extern unsigned int __max_logical_packages;
+extern unsigned int __max_threads_per_core;
 
 static inline unsigned int topology_max_packages(void)
 {
...
@@ -73,8 +73,8 @@
 u32 elf_hwcap2 __read_mostly;
 
 /* Number of siblings per CPU package */
-unsigned int smp_num_siblings __ro_after_init = 1;
-EXPORT_SYMBOL(smp_num_siblings);
+unsigned int __max_threads_per_core __ro_after_init = 1;
+EXPORT_SYMBOL(__max_threads_per_core);
 
 unsigned int __max_dies_per_package __ro_after_init = 1;
 EXPORT_SYMBOL(__max_dies_per_package);
@@ -2251,7 +2251,7 @@ void __init arch_cpu_finalize_init(void)
 	 * identify_boot_cpu() initialized SMT support information, let the
 	 * core code know.
 	 */
-	cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
+	cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);
 
 	if (!IS_ENABLED(CONFIG_SMP)) {
 		pr_info("CPU: ");
...
@@ -30,7 +30,7 @@ static int cpu_debug_show(struct seq_file *m, void *p)
 	seq_printf(m, "amd_nodes_per_pkg: %u\n", topology_amd_nodes_per_pkg());
 	seq_printf(m, "max_cores: %u\n", c->x86_max_cores);
 	seq_printf(m, "max_dies_per_pkg: %u\n", __max_dies_per_package);
-	seq_printf(m, "smp_num_siblings: %u\n", smp_num_siblings);
+	seq_printf(m, "max_threads_per_core:%u\n", __max_threads_per_core);
 	return 0;
 }
...
@@ -433,7 +433,7 @@ static u32 get_nbc_for_node(int node_id)
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	u32 cores_per_node;
 
-	cores_per_node = (c->x86_max_cores * smp_num_siblings) / topology_amd_nodes_per_pkg();
+	cores_per_node = (c->x86_max_cores * __max_threads_per_core) / topology_amd_nodes_per_pkg();
 
 	return cores_per_node * node_id;
 }
...
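The cores_per_node computation above is plain arithmetic: logical CPUs per package (cores times threads per core) divided by AMD nodes per package, then scaled by node_id to get that node's base core. A worked example with hypothetical topology values (8 cores, 2 threads per core, 2 nodes per package):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical package: 8 cores, 2 threads/core, 2 AMD nodes. */
            unsigned int max_cores = 8, threads_per_core = 2, nodes_per_pkg = 2;
            unsigned int cores_per_node = (max_cores * threads_per_core) / nodes_per_pkg;

            for (unsigned int node = 0; node < nodes_per_pkg; node++)
                    printf("node %u base core: %u\n", node, cores_per_node * node);
            /* Prints: node 0 base core: 0, node 1 base core: 8 */
            return 0;
    }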
@@ -76,7 +76,7 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 #ifdef CONFIG_SMP
 static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid)
 {
-	if (!(apicid & (smp_num_siblings - 1)))
+	if (!(apicid & (__max_threads_per_core - 1)))
 		cpumask_set_cpu(cpu, &__cpu_primary_thread_mask);
 }
 #else
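The test in cpu_mark_primary_thread() works because the thread count at this topology level is a power of two, so __max_threads_per_core - 1 forms a bit mask over the thread-ID bits at the bottom of the APIC ID; the primary thread of each core has those bits clear. A minimal userspace illustration of the mask (the APIC ID values are made up):

    #include <stdio.h>

    /* Same test as the hunk above: the low thread-ID bits of the
     * APIC ID are zero only on the first thread of a core. */
    static int is_primary_thread(unsigned int apicid, unsigned int threads_per_core)
    {
            return !(apicid & (threads_per_core - 1));
    }

    int main(void)
    {
            /* With 2 threads/core the mask is 0x1: even APIC IDs are primary. */
            for (unsigned int apicid = 0; apicid < 8; apicid++)
                    printf("apicid %u: %s\n", apicid,
                           is_primary_thread(apicid, 2) ? "primary" : "secondary");
            return 0;
    }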
@@ -429,8 +429,8 @@ void __init topology_init_possible_cpus(void)
 	 * Can't use order delta here as order(cnta) can be equal
 	 * order(cntb) even if cnta != cntb.
 	 */
-	smp_num_siblings = DIV_ROUND_UP(cntb, cnta);
-	pr_info("Max. threads per core: %3u\n", smp_num_siblings);
+	__max_threads_per_core = DIV_ROUND_UP(cntb, cnta);
+	pr_info("Max. threads per core: %3u\n", __max_threads_per_core);
 
 	pr_info("Allowing %u present CPUs plus %u hotplug CPUs\n", assigned, disabled);
 	if (topo_info.nr_rejected_cpus)
...
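The comment in this hunk deserves a concrete case: an order (ceil(log2)) delta between the two APIC ID counts cannot recover the thread count, because distinct counts can share the same order, so the code divides the raw counts with DIV_ROUND_UP instead. A small sketch with hypothetical counts cnta = 3 and cntb = 4:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* ceil(log2(x)): a userspace stand-in for a count order. */
    static unsigned int count_order(unsigned int x)
    {
            unsigned int order = 0;
            while ((1u << order) < x)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int cnta = 3, cntb = 4; /* hypothetical APIC ID counts */

            /* Both counts have order 2, so the delta is 0... */
            printf("order delta:  %u\n", count_order(cntb) - count_order(cnta));
            /* ...but dividing the raw counts yields 2 threads per core. */
            printf("threads/core: %u\n", DIV_ROUND_UP(cntb, cnta));
            return 0;
    }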
@@ -936,7 +936,7 @@ static __cpuidle void mwait_idle(void)
 void select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && __max_threads_per_core > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
 	if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
...
@@ -563,7 +563,7 @@ static void __init build_sched_topology(void)
 void set_cpu_sibling_map(int cpu)
 {
-	bool has_smt = smp_num_siblings > 1;
+	bool has_smt = __max_threads_per_core > 1;
 	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
...