Commit 104699c0 authored by KOSAKI Motohiro, committed by Benjamin Herrenschmidt

powerpc: Convert old cpumask API into new one

Adopt the new cpumask API.

Most of the changes are trivial. The most important one is the line below, because we plan to change the implementation of task->cpus_allowed:

-       ctx->cpus_allowed = current->cpus_allowed;
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 48404f2e
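
For reference, the old fixed-size cpumask_t operations map onto the new struct cpumask accessors as in the sketch below. This is a minimal, self-contained illustration of the pattern applied throughout this diff, not code from the commit: the mask demo_mask and the function are hypothetical and exist only for the example, while the calls themselves (cpumask_clear, cpumask_set_cpu, cpumask_test_cpu, cpumask_clear_cpu, cpumask_weight, cpumask_copy, tsk_cpus_allowed) are the ones this commit introduces into the powerpc code.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

/* Hypothetical mask, used only to illustrate the conversion pattern. */
static cpumask_t demo_mask;

static void demo_cpumask_conversion(int cpu)
{
	/* old: demo_mask = CPU_MASK_NONE; */
	cpumask_clear(&demo_mask);

	/* old: cpu_set(cpu, demo_mask); */
	cpumask_set_cpu(cpu, &demo_mask);

	/* old: if (cpu_isset(cpu, demo_mask)) cpu_clear(cpu, demo_mask); */
	if (cpumask_test_cpu(cpu, &demo_mask))
		cpumask_clear_cpu(cpu, &demo_mask);

	/* old: cpus_weight(demo_mask) */
	pr_info("%u cpus set\n", cpumask_weight(&demo_mask));

	/* old: demo_mask = current->cpus_allowed;
	 * the accessor keeps callers independent of how
	 * task->cpus_allowed is implemented.
	 */
	cpumask_copy(&demo_mask, tsk_cpus_allowed(current));
}

Static initializers change in the same spirit: masks previously initialized with CPU_MASK_NONE use the CPU_BITS_NONE initializer, as the cbe_local_mask hunks below show.
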
@@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask;
  * This can typically be used for things like IPI for tlb invalidations
  * since those need to be done only once per core/TLB
  */
-static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
+static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 {
 	cpumask_t	tmp, res;
 	int		i;
-	res = CPU_MASK_NONE;
+	cpumask_clear(&res);
 	for (i = 0; i < NR_CPUS; i += threads_per_core) {
-		cpus_shift_left(tmp, threads_core_mask, i);
-		if (cpus_intersects(threads, tmp))
-			cpu_set(i, res);
+		cpumask_shift_left(&tmp, &threads_core_mask, i);
+		if (cpumask_intersects(threads, &tmp))
+			cpumask_set_cpu(i, &res);
 	}
 	return res;
 }
@@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void)
 static inline cpumask_t cpu_online_cores_map(void)
 {
-	return cpu_thread_mask_to_cores(cpu_online_map);
+	return cpu_thread_mask_to_cores(cpu_online_mask);
 }
 #ifdef CONFIG_SMP
......
@@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
 extern cpumask_t cpus_in_sr;
 static inline int kexec_sr_activated(int cpu)
 {
-	return cpu_isset(cpu,cpus_in_sr);
+	return cpumask_test_cpu(cpu, &cpus_in_sr);
 }
 struct kimage;
......
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
 		return;
 	hard_irq_disable();
-	if (!cpu_isset(cpu, cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
-	cpu_set(cpu, cpus_in_crash);
+	cpumask_set_cpu(cpu, &cpus_in_crash);
 	/*
 	 * Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * Tell the kexec CPU that entered via soft-reset and ready
 	 * to go down.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr)) {
-		cpu_clear(cpu, cpus_in_sr);
+	if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		atomic_inc(&enter_on_soft_reset);
 	}
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
 	 * This barrier is needed to make sure that all CPUs are stopped.
 	 * If not, soft-reset will be invoked to bring other CPUs.
 	 */
-	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+	while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
 		cpu_relax();
 	if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
 {
 	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
-	cpu_clear(cpu, cpus_in_sr);
+	cpumask_clear_cpu(cpu, &cpus_in_sr);
 	while (atomic_read(&enter_on_soft_reset) != ncpus)
 		cpu_relax();
 }
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 */
 	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -144,20 +144,20 @@ static void crash_kexec_prepare_cpus(int cpu)
 	 * user to do soft reset such that we get all.
 	 * Soft-reset will be used until better mechanism is implemented.
 	 */
-	if (cpus_weight(cpus_in_crash) < ncpus) {
+	if (cpumask_weight(&cpus_in_crash) < ncpus) {
 		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
-		       ncpus - cpus_weight(cpus_in_crash));
+		       ncpus - cpumask_weight(&cpus_in_crash));
 		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
-		cpus_in_sr = CPU_MASK_NONE;
+		cpumask_clear(&cpus_in_sr);
 		atomic_set(&enter_on_soft_reset, 0);
-		while (cpus_weight(cpus_in_crash) < ncpus)
+		while (cpumask_weight(&cpus_in_crash) < ncpus)
 			cpu_relax();
 	}
 	/*
 	 * Make sure all CPUs are entered via soft-reset if the kdump is
 	 * invoked using soft-reset.
 	 */
-	if (cpu_isset(cpu, cpus_in_sr))
+	if (cpumask_test_cpu(cpu, &cpus_in_sr))
 		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
@@ -210,7 +210,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 		 * exited using 'x'(exit and recover) or
 		 * kexec_should_crash() failed for all running tasks.
 		 */
-		cpu_clear(cpu, cpus_in_sr);
+		cpumask_clear_cpu(cpu, &cpus_in_sr);
 		local_irq_restore(flags);
 		return;
 	}
@@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 	 * then start kexec boot.
 	 */
 	crash_soft_reset_check(cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 	machine_kexec(kexec_crash_image);
@@ -253,7 +253,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 void crash_kexec_secondary(struct pt_regs *regs)
 {
-	cpus_in_sr = CPU_MASK_NONE;
+	cpumask_clear(&cpus_in_sr);
 }
 #endif	/* CONFIG_SMP */
@@ -345,7 +345,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crashing_cpu = smp_processor_id();
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 	crash_kexec_wait_realmode(crashing_cpu);
 	machine_kexec_mask_interrupts();
......
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
 	int i;
 	threads_per_core = tpc;
-	threads_core_mask = CPU_MASK_NONE;
+	cpumask_clear(&threads_core_mask);
 	/* This implementation only supports power of 2 number of threads
 	 * for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
 	BUG_ON(tpc != (1 << threads_shift));
 	for (i = 0; i < tpc; i++)
-		cpu_set(i, threads_core_mask);
+		cpumask_set_cpu(i, &threads_core_mask);
 	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
 	       tpc, tpc > 1 ? "s" : "");
......
@@ -513,7 +513,7 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -614,7 +614,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 * se we pin us down to CPU 0 for a short while
 	 */
 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-	cpumask_copy(old_mask, &current->cpus_allowed);
+	cpumask_copy(old_mask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 	if (smp_ops && smp_ops->setup_cpu)
......
@@ -221,7 +221,7 @@ void system_reset_exception(struct pt_regs *regs)
 	}
 #ifdef CONFIG_KEXEC
-	cpu_set(smp_processor_id(), cpus_in_sr);
+	cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
 #endif
 	die("System Reset", regs, SIGABRT);
......
@@ -1452,7 +1452,7 @@ int arch_update_cpu_topology(void)
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	struct sys_device *sysdev;
-	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+	for_each_cpu(cpu,&cpu_associativity_changes_mask) {
 		vphn_get_associativity(cpu, associativity);
 		nid = associativity_to_nid(associativity);
......
@@ -85,7 +85,7 @@ static void smp_beatic_message_pass(int target, int msg)
 static int __init smp_beatic_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 static void __devinit smp_beatic_setup_cpu(int cpu)
......
@@ -45,8 +45,8 @@ static struct cbe_thread_map
 	unsigned int cbe_id;
 } cbe_thread_map[NR_CPUS];
-static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
-static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
+static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
 u32 cbe_node_to_cpu(int node)
 {
-	return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+	return cpumask_first(&cbe_local_mask[node]);
 }
 EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void)
 			thread->regs = map;
 			thread->cbe_id = cbe_id;
 			map->be_node = thread->be_node;
-			cpu_set(i, cbe_local_mask[cbe_id]);
+			cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
 			if(thread->thread_id == 0)
-				cpu_set(i, cbe_first_online_cpu);
+				cpumask_set_cpu(i, &cbe_first_online_cpu);
 		}
 	}
......
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 	unsigned int pcpu;
 	int start_cpu;
-	if (cpu_isset(lcpu, of_spin_map))
+	if (cpumask_test_cpu(lcpu, &of_spin_map))
 		/* Already started by OF and sitting in spin loop */
 		return 1;
@@ -123,7 +123,7 @@ static int __init smp_iic_probe(void)
 {
 	iic_request_IPIs();
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 static void __devinit smp_cell_setup_cpu(int cpu)
@@ -188,13 +188,12 @@ void __init smp_init_cell(void)
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) {
 			if (cpu_thread_in_core(i) == 0)
-				cpu_set(i, of_spin_map);
-		}
-	} else {
-		of_spin_map = cpu_present_map;
-	}
+				cpumask_set_cpu(i, &of_spin_map);
+		}
+	} else
+		cpumask_copy(&of_spin_map, cpu_present_mask);
-	cpu_clear(boot_cpuid, of_spin_map);
+	cpumask_clear_cpu(boot_cpuid, &of_spin_map);
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
......
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * runqueue. The context will be rescheduled on the proper node
 	 * if it is timesliced or preempted.
 	 */
-	ctx->cpus_allowed = current->cpus_allowed;
+	cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
 	/* Save the current cpu id for spu interrupt routing. */
 	ctx->last_ran = raw_smp_processor_id();
......
@@ -281,7 +281,7 @@ static int pseries_add_processor(struct device_node *np)
 	}
 	for_each_cpu(cpu, tmp) {
-		BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
+		BUG_ON(cpu_present(cpu));
 		set_cpu_present(cpu, true);
 		set_hard_smp_processor_id(cpu, *intserv++);
 	}
......
@@ -334,7 +334,7 @@ static void release_output_lock(void)
 int cpus_are_in_xmon(void)
 {
-	return !cpus_empty(cpus_in_xmon);
+	return !cpumask_empty(&cpus_in_xmon);
 }
 #endif
@@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 #ifdef CONFIG_SMP
 	cpu = smp_processor_id();
-	if (cpu_isset(cpu, cpus_in_xmon)) {
+	if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 		get_output_lock();
 		excprint(regs);
 		printf("cpu 0x%x: Exception %lx %s in xmon, "
@@ -396,7 +396,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	}
 	xmon_fault_jmp[cpu] = recurse_jmp;
-	cpu_set(cpu, cpus_in_xmon);
+	cpumask_set_cpu(cpu, &cpus_in_xmon);
 	bp = NULL;
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -440,7 +440,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		smp_send_debugger_break(MSG_ALL_BUT_SELF);
 		/* wait for other cpus to come in */
 		for (timeout = 100000000; timeout != 0; --timeout) {
-			if (cpus_weight(cpus_in_xmon) >= ncpus)
+			if (cpumask_weight(&cpus_in_xmon) >= ncpus)
 				break;
 			barrier();
 		}
@@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		}
 	}
  leave:
-	cpu_clear(cpu, cpus_in_xmon);
+	cpumask_clear_cpu(cpu, &cpus_in_xmon);
 	xmon_fault_jmp[cpu] = NULL;
 #else
 	/* UP is simple... */
@@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs)
 static int xmon_ipi(struct pt_regs *regs)
 {
 #ifdef CONFIG_SMP
-	if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
+	if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
 		xmon_core(regs, 1);
 #endif
 	return 0;
@@ -976,7 +976,7 @@ static int cpu_cmd(void)
 		printf("cpus stopped:");
 		count = 0;
 		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-			if (cpu_isset(cpu, cpus_in_xmon)) {
+			if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 				if (count == 0)
 					printf(" %x", cpu);
 				++count;
@@ -992,7 +992,7 @@ static int cpu_cmd(void)
 		return 0;
 	}
 	/* try to switch to cpu specified */
-	if (!cpu_isset(cpu, cpus_in_xmon)) {
+	if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 		printf("cpu 0x%x isn't in xmon\n", cpu);
 		return 0;
 	}
......