Commit 88a2a4ac authored by Eric Dumazet, committed by Linus Torvalds

[PATCH] percpu data: only iterate over possible CPUs

percpu_data blindly allocates bootmem memory to store NR_CPUS instances of
cpudata, instead of allocating memory only for possible cpus.

As a preparation for changing that, we need to convert various 0 -> NR_CPUS
loops to use for_each_cpu().

(The above only applies to users of asm-generic/percpu.h.  powerpc has gone it
alone and is presently only allocating memory for present CPUs, so it's
currently corrupting memory).
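
For reference, the conversion applied throughout the patch follows a single pattern, sketched below. The per-cpu variable and the init function are made-up names for illustration only (not taken from the patch); the sketch assumes the for_each_cpu() of this kernel generation, which iterates over cpu_possible_map rather than over every index up to NR_CPUS.

/* Hypothetical per-cpu variable, not part of this patch. */
static DEFINE_PER_CPU(struct list_head, example_done_q);

static void __init example_init(void)
{
        int i;

        /* Old style: walks all NR_CPUS slots, including CPUs that can never exist. */
        for (i = 0; i < NR_CPUS; i++)
                INIT_LIST_HEAD(&per_cpu(example_done_q, i));

        /* New style: walks only the possible CPUs. */
        for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(example_done_q, i));
}
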
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jens Axboe <axboe@suse.de>
Cc: Anton Blanchard <anton@samba.org>
Acked-by: William Irwin <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent cef50769
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
         if (nmi_watchdog == NMI_LOCAL_APIC)
                 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
-        for (cpu = 0; cpu < NR_CPUS; cpu++)
+        for_each_cpu(cpu)
                 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
         local_irq_enable();
         mdelay((10*1000)/nmi_hz); // wait 10 ticks

@@ -3453,7 +3453,7 @@ int __init blk_dev_init(void)
         iocontext_cachep = kmem_cache_create("blkdev_ioc",
                         sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
-        for (i = 0; i < NR_CPUS; i++)
+        for_each_cpu(i)
                 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);

@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
         if (error)
                 goto cleanup_sysctl;
-        for (i = 0; i < NR_CPUS; i++)
+        for_each_cpu(i)
                 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
         devfs_mk_dir("scsi");

@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
         int i;
-        /* Really early - can't use for_each_cpu */
-        for (i = 0; i < NR_CPUS; i++)
+        for_each_cpu(i)
                 fdtable_defer_list_init(i);
 }

@@ -6109,7 +6109,7 @@ void __init sched_init(void)
         runqueue_t *rq;
         int i, j, k;
-        for (i = 0; i < NR_CPUS; i++) {
+        for_each_cpu(i) {
                 prio_array_t *array;
                 rq = cpu_rq(i);

@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
         int cpu = 0;
-        memset(ret, 0, sizeof(*ret));
+        memset(ret, 0, nr * sizeof(unsigned long));
         cpus_and(*cpumask, *cpumask, cpu_online_map);
         cpu = first_cpu(*cpumask);
         while (cpu < NR_CPUS) {
                 unsigned long *in, *out, off;
-                if (!cpu_isset(cpu, *cpumask))
-                        continue;
                 in = (unsigned long *)&per_cpu(page_states, cpu);
                 cpu = next_cpu(cpu, *cpumask);
-                if (cpu < NR_CPUS)
+                if (likely(cpu < NR_CPUS))
                         prefetch(&per_cpu(page_states, cpu));
                 out = (unsigned long *)ret;

@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-        boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 /*
  * Dynamically allocate memory for the

@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
          * Initialise the packet receive queues.
          */
-        for (i = 0; i < NR_CPUS; i++) {
+        for_each_cpu(i) {
                 struct softnet_data *queue;
                 queue = &per_cpu(softnet_data, i);

@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
         int i;
-        for (i = 0; i < NR_CPUS; i++) {
+        for_each_cpu(i) {
                 struct nrnd_state *state = &per_cpu(net_rand_state,i);
                 __net_srandom(state, i+jiffies);
         }

@@ -133,7 +133,7 @@ static int net_random_reseed(void)
         unsigned long seed[NR_CPUS];
         get_random_bytes(seed, sizeof(seed));
-        for (i = 0; i < NR_CPUS; i++) {
+        for_each_cpu(i) {
                 struct nrnd_state *state = &per_cpu(net_rand_state,i);
                 __net_srandom(state, seed[i]);
         }

@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
         int res = 0;
         int cpu;
-        for (cpu = 0; cpu < NR_CPUS; cpu++)
+        for_each_cpu(cpu)
                 res += proto->stats[cpu].inuse;
         return res;

@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
         int res = 0;
         int cpu;
-        for (cpu=0; cpu<NR_CPUS; cpu++)
+        for_each_cpu(cpu)
                 res += proto->stats[cpu].inuse;
         return res;

@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
         int cpu;
         int counter = 0;
-        for (cpu = 0; cpu < NR_CPUS; cpu++)
+        for_each_cpu(cpu)
                 counter += per_cpu(sockets_in_use, cpu);
         /* It can be negative, by the way. 8) */