Commit 5d88aa85 authored by Jesse Larrew, committed by Benjamin Herrenschmidt

powerpc/pseries: Update CPU maps when device tree is updated

Platform events such as partition migration or the new PRRN firmware
feature can cause the NUMA characteristics of a CPU to change, and these
changes will be reflected in the device tree nodes for the affected
CPUs.

This patch registers a handler for Open Firmware device tree updates
and reconfigures the CPU and node maps whenever the associativity
changes. Currently, this is accomplished by marking the affected CPUs in
the cpu_associativity_changes_mask and allowing
arch_update_cpu_topology() to retrieve the new associativity information
using hcall_vphn().

Protecting the NUMA cpu maps from concurrent access during an update
operation will be addressed in a subsequent patch in this series.
Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 8002b0c5
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -66,7 +67,7 @@ enum { ...@@ -66,7 +67,7 @@ enum {
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO | FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
FW_FEATURE_TYPE1_AFFINITY, FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
FW_FEATURE_PSERIES_ALWAYS = 0, FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
FW_FEATURE_POWERNV_ALWAYS = 0, FW_FEATURE_POWERNV_ALWAYS = 0,
......
...@@ -128,6 +128,7 @@ struct of_drconf_cell { ...@@ -128,6 +128,7 @@ struct of_drconf_cell {
#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */ #define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
#define OV5_XCMO 0x0440 /* Page Coalescing */ #define OV5_XCMO 0x0440 /* Page Coalescing */
#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */ #define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
#define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */ #define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */
#define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */ #define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */
#define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */ #define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */
......
...@@ -1257,7 +1257,8 @@ u64 memory_hotplug_max(void) ...@@ -1257,7 +1257,8 @@ u64 memory_hotplug_max(void)
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask; static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled; static int vphn_enabled;
static void set_topology_timer(void); static int prrn_enabled;
static void reset_topology_timer(void);
/* /*
* Store the current values of the associativity change counters in the * Store the current values of the associativity change counters in the
...@@ -1293,11 +1294,9 @@ static void setup_cpu_associativity_change_counters(void) ...@@ -1293,11 +1294,9 @@ static void setup_cpu_associativity_change_counters(void)
*/ */
static int update_cpu_associativity_changes_mask(void) static int update_cpu_associativity_changes_mask(void)
{ {
int cpu, nr_cpus = 0; int cpu;
cpumask_t *changes = &cpu_associativity_changes_mask; cpumask_t *changes = &cpu_associativity_changes_mask;
cpumask_clear(changes);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
int i, changed = 0; int i, changed = 0;
u8 *counts = vphn_cpu_change_counts[cpu]; u8 *counts = vphn_cpu_change_counts[cpu];
...@@ -1311,11 +1310,10 @@ static int update_cpu_associativity_changes_mask(void) ...@@ -1311,11 +1310,10 @@ static int update_cpu_associativity_changes_mask(void)
} }
if (changed) { if (changed) {
cpumask_set_cpu(cpu, changes); cpumask_set_cpu(cpu, changes);
nr_cpus++;
} }
} }
return nr_cpus; return cpumask_weight(changes);
} }
/* /*
...@@ -1416,7 +1414,7 @@ int arch_update_cpu_topology(void) ...@@ -1416,7 +1414,7 @@ int arch_update_cpu_topology(void)
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct device *dev; struct device *dev;
for_each_cpu(cpu,&cpu_associativity_changes_mask) { for_each_cpu(cpu, &cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity); vphn_get_associativity(cpu, associativity);
nid = associativity_to_nid(associativity); nid = associativity_to_nid(associativity);
...@@ -1438,6 +1436,7 @@ int arch_update_cpu_topology(void) ...@@ -1438,6 +1436,7 @@ int arch_update_cpu_topology(void)
dev = get_cpu_device(cpu); dev = get_cpu_device(cpu);
if (dev) if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE); kobject_uevent(&dev->kobj, KOBJ_CHANGE);
cpumask_clear_cpu(cpu, &cpu_associativity_changes_mask);
changed = 1; changed = 1;
} }
...@@ -1457,37 +1456,80 @@ void topology_schedule_update(void) ...@@ -1457,37 +1456,80 @@ void topology_schedule_update(void)
static void topology_timer_fn(unsigned long ignored) static void topology_timer_fn(unsigned long ignored)
{ {
if (!vphn_enabled) if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
return;
if (update_cpu_associativity_changes_mask() > 0)
topology_schedule_update(); topology_schedule_update();
set_topology_timer(); else if (vphn_enabled) {
if (update_cpu_associativity_changes_mask() > 0)
topology_schedule_update();
reset_topology_timer();
}
} }
static struct timer_list topology_timer = static struct timer_list topology_timer =
TIMER_INITIALIZER(topology_timer_fn, 0, 0); TIMER_INITIALIZER(topology_timer_fn, 0, 0);
static void set_topology_timer(void) static void reset_topology_timer(void)
{ {
topology_timer.data = 0; topology_timer.data = 0;
topology_timer.expires = jiffies + 60 * HZ; topology_timer.expires = jiffies + 60 * HZ;
add_timer(&topology_timer); mod_timer(&topology_timer, topology_timer.expires);
}
static void stage_topology_update(int core_id)
{
cpumask_or(&cpu_associativity_changes_mask,
&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
reset_topology_timer();
}
static int dt_update_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
struct of_prop_reconfig *update;
int rc = NOTIFY_DONE;
switch (action) {
case OF_RECONFIG_ADD_PROPERTY:
case OF_RECONFIG_UPDATE_PROPERTY:
update = (struct of_prop_reconfig *)data;
if (!of_prop_cmp(update->dn->type, "cpu")) {
u32 core_id;
of_property_read_u32(update->dn, "reg", &core_id);
stage_topology_update(core_id);
rc = NOTIFY_OK;
}
break;
}
return rc;
} }
static struct notifier_block dt_update_nb = {
.notifier_call = dt_update_callback,
};
/* /*
* Start polling for VPHN associativity changes. * Start polling for associativity changes.
*/ */
int start_topology_update(void) int start_topology_update(void)
{ {
int rc = 0; int rc = 0;
/* Disabled until races with load balancing are fixed */ if (firmware_has_feature(FW_FEATURE_PRRN)) {
if (0 && firmware_has_feature(FW_FEATURE_VPHN) && if (!prrn_enabled) {
get_lppaca()->shared_proc) { prrn_enabled = 1;
vphn_enabled = 1; vphn_enabled = 0;
setup_cpu_associativity_change_counters(); rc = of_reconfig_notifier_register(&dt_update_nb);
init_timer_deferrable(&topology_timer); }
set_topology_timer(); } else if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
rc = 1; get_lppaca()->shared_proc) {
/* Disabled until races with load balancing are fixed */
if (!vphn_enabled) {
prrn_enabled = 0;
vphn_enabled = 1;
setup_cpu_associativity_change_counters();
init_timer_deferrable(&topology_timer);
reset_topology_timer();
}
} }
return rc; return rc;
...@@ -1499,7 +1541,16 @@ __initcall(start_topology_update); ...@@ -1499,7 +1541,16 @@ __initcall(start_topology_update);
*/ */
int stop_topology_update(void) int stop_topology_update(void)
{ {
vphn_enabled = 0; int rc = 0;
return del_timer_sync(&topology_timer);
if (prrn_enabled) {
prrn_enabled = 0;
rc = of_reconfig_notifier_unregister(&dt_update_nb);
} else if (vphn_enabled) {
vphn_enabled = 0;
rc = del_timer_sync(&topology_timer);
}
return rc;
} }
#endif /* CONFIG_PPC_SPLPAR */ #endif /* CONFIG_PPC_SPLPAR */
...@@ -110,6 +110,7 @@ struct vec5_fw_feature { ...@@ -110,6 +110,7 @@ struct vec5_fw_feature {
static __initdata struct vec5_fw_feature static __initdata struct vec5_fw_feature
vec5_fw_features_table[] = { vec5_fw_features_table[] = {
{FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY}, {FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
{FW_FEATURE_PRRN, OV5_PRRN},
}; };
void __init fw_vec5_feature_init(const char *vec5, unsigned long len) void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment