Commit e7986739 authored by Mike Travis

x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers

Impact: cleanup, change parameter passing

  * Change genapic interfaces to accept cpumask_t pointers where possible.

  * Modify external callers to use cpumask_t pointers in function calls.

  * Create a new send_IPI_mask_allbutself which is the same as the
    send_IPI_mask functions but leaves smp_processor_id() out of the
    list (see the first sketch after this list).  This removes another
    common need for a temporary cpumask_t variable.

  * Functions that used a temp cpumask_t variable for:

	cpumask_t allbutme = cpu_online_map;

	cpu_clear(smp_processor_id(), allbutme);
	if (!cpus_empty(allbutme))
		...

    become:

	if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu)))
		...

  * Other minor code optimizations (like using cpus_clear instead of
    CPU_MASK_NONE, etc.; see the second sketch after this list).
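
    As an illustration, the new helper (abridged from the <asm/ipi.h>
    hunk in the diff below; the irq save/restore around the loop is
    elided here) walks the mask and skips the sending cpu, so callers
    no longer copy and edit a mask:

	static inline void
	send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
	{
		unsigned int query_cpu;
		unsigned int this_cpu = smp_processor_id();

		/* no temporary cpumask_t copy is needed */
		for_each_cpu_mask_nr(query_cpu, *mask)
			if (query_cpu != this_cpu)
				__send_IPI_dest_field(
					per_cpu(x86_cpu_to_apicid, query_cpu),
					vector, APIC_DEST_PHYSICAL);
	}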
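
    For example, the vector_allocation_domain functions (abridged from
    the x2apic and uv hunks below) no longer build and return a local
    initialized from CPU_MASK_NONE; they write through the caller's
    pointer instead:

	/* old */
	cpumask_t domain = CPU_MASK_NONE;
	cpu_set(cpu, domain);
	return domain;

	/* new */
	cpus_clear(*retmask);
	cpu_set(cpu, *retmask);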

Applies to linux-2.6.tip/master.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent 36f5101a
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,12 +119,12 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;
 
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
...
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
...
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -13,12 +14,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const cpumask_t *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,13 @@ struct genapic {
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
...
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,17 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const cpumask_t *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
...
@@ -117,7 +117,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +128,28 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu_mask_nr(query_cpu, *mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
@@ -61,9 +61,9 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpus_addr(*cpumask)[0];
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +88,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -98,8 +98,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
@@ -131,7 +130,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
...
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(&cpu_online_map, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(&cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
...
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,7 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	return (int) 0xF;
 }
...
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_ipi)(const cpumask_t *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_ops.send_call_func_ipi(mask);
+	smp_ops.send_call_func_ipi(&mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const cpumask_t *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
...
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
	 * dest_LowestPrio mode logical clustered apic interrupt routing
	 * Just start on cpu 0.  IRQ balancing will spread load
	 */
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
...
 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_SUMMIT_IPI_H */
@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const struct cpumask *mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const struct cpumask *mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
...
@@ -77,10 +77,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(safe_smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, NMI_VECTOR);
+	send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
...
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static cpumask_t flat_target_cpus(void)
+static const cpumask_t *flat_target_cpus(void)
 {
-	return cpu_online_map;
+	return &cpu_online_map;
 }
 
-static cpumask_t flat_vector_allocation_domain(int cpu)
+static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -45,8 +45,7 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
 }
 
 /*
@@ -69,9 +68,8 @@ static void flat_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
-	unsigned long mask = cpus_addr(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -79,20 +77,40 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
 	local_irq_restore(flags);
 }
 
+static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(*cpumask)[0];
+
+	_flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(*cpumask)[0];
+	int cpu = smp_processor_id();
+
+	if (cpu < BITS_PER_LONG)
+		clear_bit(cpu, &mask);
+	_flat_send_IPI_mask(mask, vector);
+}
+
 static void flat_send_IPI_allbutself(int vector)
 {
+	int cpu = smp_processor_id();
 #ifdef	CONFIG_HOTPLUG_CPU
 	int hotplug = 1;
 #else
 	int hotplug = 0;
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
-		cpumask_t allbutme = cpu_online_map;
+		if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
+			unsigned long mask = cpus_addr(cpu_online_map)[0];
 
-		cpu_clear(smp_processor_id(), allbutme);
+			if (cpu < BITS_PER_LONG)
+				clear_bit(cpu, &mask);
 
-		if (!cpus_empty(allbutme))
-			flat_send_IPI_mask(allbutme, vector);
+			_flat_send_IPI_mask(mask, vector);
+		}
 	} else if (num_online_cpus() > 1) {
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
 	}
@@ -101,7 +119,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(cpu_online_map, vector);
+		flat_send_IPI_mask(&cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -135,9 +153,9 @@ static int flat_apic_id_registered(void)
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+	return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static unsigned int phys_pkg_id(int index_msb)
@@ -157,6 +175,7 @@ struct genapic apic_flat = {
 	.send_IPI_all = flat_send_IPI_all,
 	.send_IPI_allbutself = flat_send_IPI_allbutself,
 	.send_IPI_mask = flat_send_IPI_mask,
+	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
@@ -188,35 +207,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static cpumask_t physflat_target_cpus(void)
+static const cpumask_t *physflat_target_cpus(void)
 {
-	return cpu_online_map;
+	return &cpu_online_map;
 }
 
-static cpumask_t physflat_vector_allocation_domain(int cpu)
+static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	return cpumask_of_cpu(cpu);
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
-static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
+static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_allbutself(int vector)
+static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+					      int vector)
 {
-	cpumask_t allbutme = cpu_online_map;
+	send_IPI_mask_allbutself(cpumask, vector);
+}
 
-	cpu_clear(smp_processor_id(), allbutme);
-	physflat_send_IPI_mask(allbutme, vector);
+static void physflat_send_IPI_allbutself(int vector)
+{
+	send_IPI_mask_allbutself(&cpu_online_map, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(cpu_online_map, vector);
+	physflat_send_IPI_mask(&cpu_online_map, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -224,7 +247,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
@@ -243,6 +266,7 @@ struct genapic apic_physflat = {
 	.send_IPI_all = physflat_send_IPI_all,
 	.send_IPI_allbutself = physflat_send_IPI_allbutself,
 	.send_IPI_mask = physflat_send_IPI_mask,
+	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
...
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
 
-static cpumask_t x2apic_target_cpus(void)
+static const cpumask_t *x2apic_target_cpus(void)
 {
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,52 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
-		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-				       vector, APIC_DEST_LOGICAL);
-	}
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		__x2apic_send_IPI_dest(
+			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+			vector, APIC_DEST_LOGICAL);
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	cpu_clear(smp_processor_id(), mask);
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+				vector, APIC_DEST_LOGICAL);
+	local_irq_restore(flags);
+}
 
-	if (!cpus_empty(mask))
-		x2apic_send_IPI_mask(mask, vector);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
+
+	local_irq_save(flags);
+	for_each_online_cpu(query_cpu)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+				vector, APIC_DEST_LOGICAL);
+	local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(cpu_online_map, vector);
+	x2apic_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -89,7 +108,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -97,8 +116,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
-	cpu = first_cpu(cpumask);
-	if ((unsigned)cpu < NR_CPUS)
+	cpu = first_cpu(*cpumask);
+	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
 	else
 		return BAD_APICID;
@@ -150,6 +169,7 @@ struct genapic apic_x2apic_cluster = {
 	.send_IPI_all = x2apic_send_IPI_all,
 	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
 	.send_IPI_mask = x2apic_send_IPI_mask,
+	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
 	.send_IPI_self = x2apic_send_IPI_self,
 	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
...
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
 
-static cpumask_t x2apic_target_cpus(void)
+static const cpumask_t *x2apic_target_cpus(void)
 {
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -54,32 +53,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 	x2apic_icr_write(cfg, apicid);
 }
 
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
+	for_each_cpu_mask_nr(query_cpu, *mask) {
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	cpu_clear(smp_processor_id(), mask);
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask) {
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	}
+	local_irq_restore(flags);
+}
 
-	if (!cpus_empty(mask))
-		x2apic_send_IPI_mask(mask, vector);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
+
+	local_irq_save(flags);
+	for_each_online_cpu(query_cpu)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(cpu_online_map, vector);
+	x2apic_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -87,7 +107,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -95,8 +115,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
-	cpu = first_cpu(cpumask);
-	if ((unsigned)cpu < NR_CPUS)
+	cpu = first_cpu(*cpumask);
+	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
@@ -145,6 +165,7 @@ struct genapic apic_x2apic_phys = {
 	.send_IPI_all = x2apic_send_IPI_all,
 	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
 	.send_IPI_mask = x2apic_send_IPI_mask,
+	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
 	.send_IPI_self = x2apic_send_IPI_self,
 	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
...
@@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
 
-static cpumask_t uv_target_cpus(void)
+static const cpumask_t *uv_target_cpus(void)
 {
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
-static cpumask_t uv_vector_allocation_domain(int cpu)
+static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector)
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
-static void uv_send_IPI_mask(cpumask_t mask, int vector)
+static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	unsigned int cpu;
 
-	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, mask))
+	for_each_cpu_mask_nr(cpu, *mask)
+		uv_send_IPI_one(cpu, vector);
+}
+
+static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+{
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	for_each_cpu_mask_nr(cpu, *mask)
+		if (cpu != this_cpu)
 			uv_send_IPI_one(cpu, vector);
 }
 
 static void uv_send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
 
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		uv_send_IPI_mask(mask, vector);
+	for_each_online_cpu(cpu)
+		if (cpu != this_cpu)
+			uv_send_IPI_one(cpu, vector);
 }
 
 static void uv_send_IPI_all(int vector)
 {
-	uv_send_IPI_mask(cpu_online_map, vector);
+	uv_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int uv_apic_id_registered(void)
@@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -164,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
@@ -218,6 +226,7 @@ struct genapic apic_x2apic_uv_x = {
 	.send_IPI_all = uv_send_IPI_all,
 	.send_IPI_allbutself = uv_send_IPI_allbutself,
 	.send_IPI_mask = uv_send_IPI_mask,
+	.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
 	.send_IPI_self = uv_send_IPI_self,
 	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
...
This diff is collapsed.
@@ -116,9 +116,9 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
 /*
  * This is only used on smaller machines.
  */
-void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector)
 {
-	unsigned long mask = cpus_addr(cpumask)[0];
+	unsigned long mask = cpus_addr(*cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -127,7 +127,7 @@ void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
 	local_irq_restore(flags);
 }
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector)
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned int query_cpu;
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
	 */
 
 	local_irq_save(flags);
-	for_each_possible_cpu(query_cpu) {
-		if (cpu_isset(query_cpu, mask)) {
-			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-					      vector);
-		}
-	}
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
+	local_irq_restore(flags);
+}
+
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
+					      vector);
 	local_irq_restore(flags);
 }
...
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
-	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+	send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+	send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void native_send_call_func_ipi(cpumask_t mask)
+void native_send_call_func_ipi(const cpumask_t *mask)
 {
 	cpumask_t allbutself;
 
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
-	if (cpus_equal(mask, allbutself) &&
+	if (cpus_equal(*mask, allbutself) &&
 	    cpus_equal(cpu_online_map, cpu_callout_map))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
...
@@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
 
 	while (!cpus_empty(flush_cpumask))
 		/* nothing. lockup detection does not belong here */
...
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 
 	while (!cpus_empty(f->flush_cpumask))
 		cpu_relax();
...
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ }
 };
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	return cpumask_of_cpu(cpu);
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 static int probe_bigsmp(void)
...
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 }
 #endif
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
@@ -24,7 +24,7 @@ static int probe_summit(void)
 	return 0;
 }
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
@@ -196,7 +196,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Restrict the possible_map according to max_cpus. */
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
+		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
 		cpu_clear(cpu, cpu_possible_map);
 	}
@@ -408,24 +408,22 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector)
 {
 	unsigned cpu;
 
-	cpus_and(mask, mask, cpu_online_map);
-
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, mask, &cpu_online_map)
 		xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(const cpumask_t *mask)
 {
 	int cpu;
 
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, *mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
@@ -435,7 +433,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+	xen_send_IPI_mask(&cpumask_of_cpu(cpu),
+			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
...