Commit 43238382 authored by travis@sgi.com, committed by Ingo Molnar

x86: change size of node ids from u8 to s16

Change the size of node ids for X86_64 from u8 to s16 to
accommodate more than 32k nodes and allow for NUMA_NO_NODE
(-1) to be sign extended to int.

Cc: David Rientjes <rientjes@google.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 409a7b85
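A minimal stand-alone sketch (not kernel code) of the sign-extension point the message makes: a u16 sentinel of ~0 is promoted to 65535 when compared against an int node id, while an s16 value of -1 sign-extends cleanly, so tests like node == NUMA_NO_NODE only work once the sentinel is defined as (-1).

#include <stdio.h>

int main(void)
{
	unsigned short old_no_node = (unsigned short)~0;	/* old definition: ((u16)(~0)) == 65535 */
	short new_no_node = -1;					/* new s16-style sentinel */
	int node = -1;						/* node id widened to int */

	printf("%d\n", node == old_no_node);	/* 0: 65535 != -1 after integer promotion */
	printf("%d\n", node == new_no_node);	/* 1: -1 sign-extends to int */
	return 0;
}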
@@ -848,6 +848,7 @@ config NUMA_EMU
 config NODES_SHIFT
 	int
+	range 1 15 if X86_64
 	default "6" if X86_64
 	default "4" if X86_NUMAQ
 	default "3"
...
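Why the new upper bound is 15: the kernel derives its node count as MAX_NUMNODES = 1 << CONFIG_NODES_SHIFT, and 1 << 15 = 32768 is the largest node space whose ids (0..32767) still fit in an s16 alongside the -1 sentinel. A small arithmetic sketch, with NODES_SHIFT standing in for the Kconfig value:

#include <stdio.h>

#define NODES_SHIFT	15			/* upper bound from the Kconfig range above */
#define MAX_NUMNODES	(1 << NODES_SHIFT)	/* how the node count is derived from the shift */

int main(void)
{
	/* Largest valid node id is MAX_NUMNODES - 1 = 32767 (SHRT_MAX),
	 * so every id plus the -1 NUMA_NO_NODE sentinel fits in an s16. */
	printf("MAX_NUMNODES = %d, last id = %d\n", MAX_NUMNODES, MAX_NUMNODES - 1);
	return 0;
}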
@@ -31,15 +31,15 @@ bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 struct memnode memnode;
-u16 x86_cpu_to_node_map_init[NR_CPUS] = {
+int x86_cpu_to_node_map_init[NR_CPUS] = {
 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 void *x86_cpu_to_node_map_early_ptr;
-DEFINE_PER_CPU(u16, x86_cpu_to_node_map) = NUMA_NO_NODE;
+DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
-u16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
+s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
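The [0 ... NR_CPUS-1] = NUMA_NO_NODE initializers kept as context above use GCC's designated range-initializer extension, which fills every element with the sentinel at compile time. A stand-alone sketch (the array size and name are illustrative, not the kernel's):

#include <stdio.h>

#define NUMA_NO_NODE	(-1)
#define NR_ENTRIES	8		/* stand-in for NR_CPUS / MAX_LOCAL_APIC */

/* GCC/Clang range designator: every element starts out as NUMA_NO_NODE. */
static short example_map[NR_ENTRIES] = {
	[0 ... NR_ENTRIES - 1] = NUMA_NO_NODE
};

int main(void)
{
	printf("%d %d\n", example_map[0], example_map[NR_ENTRIES - 1]);	/* -1 -1 */
	return 0;
}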
@@ -63,7 +63,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
 	unsigned long addr, end;
 	int i, res = -1;
-	memset(memnodemap, 0xff, memnodemapsize);
+	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
 	for (i = 0; i < numnodes; i++) {
 		addr = nodes[i].start;
 		end = nodes[i].end;
@@ -72,7 +72,7 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
 		if ((end >> shift) >= memnodemapsize)
 			return 0;
 		do {
-			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;
 			memnodemap[addr >> shift] = i;
 			addr += (1UL << shift);
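Why the memset size is now scaled and the sentinel test switched from 0xff to NUMA_NO_NODE: the map entries are two bytes wide, and byte-filling them with 0xff leaves 0xffff, i.e. -1, in every s16 slot. A stand-alone sketch of that behaviour (not the kernel code):

#include <stdio.h>
#include <string.h>

#define NUMA_NO_NODE	(-1)

int main(void)
{
	short memnodemap[4];

	/* memset counts bytes, hence the sizeof(s16) * entries scaling. */
	memset(memnodemap, 0xff, sizeof(short) * 4);

	/* Every 16-bit entry is 0xffff, which reads back as -1 == NUMA_NO_NODE. */
	printf("%d\n", memnodemap[0] == NUMA_NO_NODE);	/* 1 */
	return 0;
}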
@@ -533,7 +533,7 @@ __cpuinit void numa_add_cpu(int cpu)
 void __cpuinit numa_set_node(int cpu, int node)
 {
-	u16 *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
+	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
 	cpu_pda(cpu)->nodenumber = node;
...
@@ -15,9 +15,9 @@
 struct memnode {
 	int shift;
 	unsigned int mapsize;
-	u8 *map;
-	u8 embedded_map[64-16];
-} ____cacheline_aligned; /* total size = 64 bytes */
+	s16 *map;
+	s16 embedded_map[64-8];
+} ____cacheline_aligned; /* total size = 128 bytes */
 extern struct memnode memnode;
 #define memnode_shift memnode.shift
 #define memnodemap memnode.map
...
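The embedded map moves from 64-16 = 48 one-byte slots to 64-8 = 56 two-byte slots, so the struct grows from one cache line to two. A hedged size check of that arithmetic on an LP64 target (the mirror struct and names are illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Layout mirror of struct memnode after the change, assuming LP64:
 * 4 (shift) + 4 (mapsize) + 8 (map pointer) = 16 bytes of header,
 * plus (64 - 8) * sizeof(int16_t) = 112 bytes of embedded map = 128 total. */
struct memnode_sketch {
	int shift;
	unsigned int mapsize;
	int16_t *map;
	int16_t embedded_map[64 - 8];
};

int main(void)
{
	assert(sizeof(struct memnode_sketch) == 128);
	printf("sizeof = %zu\n", sizeof(struct memnode_sketch));
	return 0;
}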
@@ -20,7 +20,7 @@ extern void numa_set_node(int cpu, int node);
 extern void srat_reserve_add_area(int nodeid);
 extern int hotadd_percent;
-extern u16 apicid_to_node[MAX_LOCAL_APIC];
+extern s16 apicid_to_node[MAX_LOCAL_APIC];
 extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
 extern unsigned long numa_free_all_bootmem(void);
...
@@ -31,11 +31,11 @@
 /* Mappings between logical cpu number and node number */
 #ifdef CONFIG_X86_32
-extern u8 cpu_to_node_map[];
+extern int cpu_to_node_map[];
 #else
-DECLARE_PER_CPU(u16, x86_cpu_to_node_map);
-extern u16 x86_cpu_to_node_map_init[];
+DECLARE_PER_CPU(int, x86_cpu_to_node_map);
+extern int x86_cpu_to_node_map_init[];
 extern void *x86_cpu_to_node_map_early_ptr;
 /* Returns the number of the current Node. */
 #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id()))
@@ -43,7 +43,7 @@ extern void *x86_cpu_to_node_map_early_ptr;
 extern cpumask_t node_to_cpumask_map[];
-#define NUMA_NO_NODE ((u16)(~0))
+#define NUMA_NO_NODE (-1)
 /* Returns the number of the node containing CPU 'cpu' */
 #ifdef CONFIG_X86_32
@@ -56,7 +56,7 @@ static inline int cpu_to_node(int cpu)
 #else /* CONFIG_X86_64 */
 static inline int early_cpu_to_node(int cpu)
 {
-	u16 *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
+	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
 	if (cpu_to_node_map)
 		return cpu_to_node_map[cpu];
...
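The early map is passed around as a void pointer and only typed at the point of use, which is why both early_cpu_to_node() above and numa_set_node() earlier had to switch their local cast from u16 * to int * in step with the per-CPU variable. A reduced stand-alone sketch of that pattern (the setup values are illustrative, not the kernel's):

#include <stdio.h>

#define NUMA_NO_NODE	(-1)
#define NR_CPUS		4

static int x86_cpu_to_node_map_init[NR_CPUS] = {
	NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE
};
static void *x86_cpu_to_node_map_early_ptr = x86_cpu_to_node_map_init;

/* The local cast must match the element type of the early map: int, not u16. */
static int early_cpu_to_node(int cpu)
{
	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;

	if (cpu_to_node_map)
		return cpu_to_node_map[cpu];
	return NUMA_NO_NODE;
}

int main(void)
{
	printf("%d\n", early_cpu_to_node(0));	/* -1 until a real node id is set */
	return 0;
}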