Commit e1d12f32 authored by Andrew Morton, committed by Linus Torvalds

kernel/smp.c: cleanups

We sometimes use "struct call_single_data *data" and sometimes "struct
call_single_data *csd".  Use "csd" consistently.

We sometimes use "struct call_function_data *data" and sometimes "struct
call_function_data *cfd".  Use "cfd" consistently.

Also, avoid some 80-col layout tricks.
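
For example, in the wait path of smp_call_function_many() the per-cpu csd
lookup was previously wrapped mid-initializer to stay under 80 columns;
splitting the declaration from the assignment reads better:

	/* before: initializer wrapped to fit 80 columns */
	struct call_single_data *csd =
		per_cpu_ptr(data->csd, cpu);

	/* after: plain declaration, then assignment */
	struct call_single_data *csd;

	csd = per_cpu_ptr(cfd->csd, cpu);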

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Shaohua Li <shli@fusionio.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 74e3d1e1
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -100,16 +100,16 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void csd_lock_wait(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *csd)
 {
-	while (data->flags & CSD_FLAG_LOCK)
+	while (csd->flags & CSD_FLAG_LOCK)
 		cpu_relax();
 }
 
-static void csd_lock(struct call_single_data *data)
+static void csd_lock(struct call_single_data *csd)
 {
-	csd_lock_wait(data);
-	data->flags |= CSD_FLAG_LOCK;
+	csd_lock_wait(csd);
+	csd->flags |= CSD_FLAG_LOCK;
 
 	/*
 	 * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@ static void csd_lock(struct call_single_data *data)
 	smp_mb();
 }
 
-static void csd_unlock(struct call_single_data *data)
+static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
 	 */
 	smp_mb();
 
-	data->flags &= ~CSD_FLAG_LOCK;
+	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
 /*
@@ -137,7 +137,7 @@ static void csd_unlock(struct call_single_data *data)
  * ->func, ->info, and ->flags set.
  */
 static
-void generic_exec_single(int cpu, struct call_single_data *data, int wait)
+void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 	unsigned long flags;
@@ -145,7 +145,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
-	list_add_tail(&data->list, &dst->list);
+	list_add_tail(&csd->list, &dst->list);
 	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
@@ -163,7 +163,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
-		csd_lock_wait(data);
+		csd_lock_wait(csd);
 }
 
 /*
@@ -173,7 +173,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 void generic_smp_call_function_single_interrupt(void)
 {
 	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	unsigned int data_flags;
 	LIST_HEAD(list);
 
 	/*
@@ -186,25 +185,26 @@ void generic_smp_call_function_single_interrupt(void)
 	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
-		struct call_single_data *data;
+		struct call_single_data *csd;
+		unsigned int csd_flags;
 
-		data = list_entry(list.next, struct call_single_data, list);
-		list_del(&data->list);
+		csd = list_entry(list.next, struct call_single_data, list);
+		list_del(&csd->list);
 
 		/*
-		 * 'data' can be invalid after this call if flags == 0
+		 * 'csd' can be invalid after this call if flags == 0
 		 * (when called through generic_exec_single()),
 		 * so save them away before making the call:
 		 */
-		data_flags = data->flags;
+		csd_flags = csd->flags;
 
-		data->func(data->info);
+		csd->func(csd->info);
 
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single():
 		 */
-		if (data_flags & CSD_FLAG_LOCK)
-			csd_unlock(data);
+		if (csd_flags & CSD_FLAG_LOCK)
+			csd_unlock(csd);
 	}
 }
 
@@ -249,16 +249,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 		local_irq_restore(flags);
 	} else {
 		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *data = &d;
+			struct call_single_data *csd = &d;
 
 			if (!wait)
-				data = &__get_cpu_var(csd_data);
+				csd = &__get_cpu_var(csd_data);
 
-			csd_lock(data);
+			csd_lock(csd);
 
-			data->func = func;
-			data->info = info;
-			generic_exec_single(cpu, data, wait);
+			csd->func = func;
+			csd->info = info;
+			generic_exec_single(cpu, csd, wait);
 		} else {
 			err = -ENXIO;	/* CPU not online */
 		}
@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
  * pre-allocated data structure. Useful for embedding @data inside
  * other structures, for instance.
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data,
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
 				int wait)
 {
 	unsigned int this_cpu;
@@ -343,11 +343,11 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
-		data->func(data->info);
+		csd->func(csd->info);
 		local_irq_restore(flags);
 	} else {
-		csd_lock(data);
-		generic_exec_single(cpu, data, wait);
+		csd_lock(csd);
+		generic_exec_single(cpu, csd, wait);
 	}
 	put_cpu();
 }
@@ -369,7 +369,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait)
 {
-	struct call_function_data *data;
+	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
@@ -401,24 +401,24 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
-	data = &__get_cpu_var(cfd_data);
+	cfd = &__get_cpu_var(cfd_data);
 
-	cpumask_and(data->cpumask, mask, cpu_online_mask);
-	cpumask_clear_cpu(this_cpu, data->cpumask);
+	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
+	cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
-	if (unlikely(!cpumask_weight(data->cpumask)))
+	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
 	/*
-	 * After we put an entry into the list, data->cpumask
-	 * may be cleared again when another CPU sends another IPI for
-	 * a SMP function call, so data->cpumask will be zero.
+	 * After we put an entry into the list, cfd->cpumask may be cleared
+	 * again when another CPU sends another IPI for a SMP function call, so
+	 * cfd->cpumask will be zero.
 	 */
-	cpumask_copy(data->cpumask_ipi, data->cpumask);
+	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
 
-	for_each_cpu(cpu, data->cpumask) {
-		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
+	for_each_cpu(cpu, cfd->cpumask) {
+		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
+		struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 		unsigned long flags;
@@ -433,12 +433,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(data->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
 	if (wait) {
-		for_each_cpu(cpu, data->cpumask) {
-			struct call_single_data *csd =
-				per_cpu_ptr(data->csd, cpu);
+		for_each_cpu(cpu, cfd->cpumask) {
+			struct call_single_data *csd;
+
+			csd = per_cpu_ptr(cfd->csd, cpu);
 			csd_lock_wait(csd);
 		}
 	}
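
For context, the csd machinery touched above sits behind the
smp_call_function_single() entry point. A minimal caller-side sketch; the
callback, its payload, and the caller function are hypothetical, for
illustration only:

	#include <linux/smp.h>

	/* Hypothetical callback: runs on the target CPU, in IPI context */
	static void bump_counter(void *info)
	{
		unsigned int *counter = info;

		(*counter)++;
	}

	static void example_caller(int target_cpu)
	{
		unsigned int counter = 0;

		/* wait=1: block until bump_counter() has run on target_cpu */
		smp_call_function_single(target_cpu, bump_counter, &counter, 1);
	}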