Commit 91cd9cb7 authored by Thomas Gleixner

x86/apic: Move cpumask and to core code

All implementations of apic->cpu_mask_to_apicid_and() first AND the two
incoming cpumasks to compute the set of CPUs to search for the target.

Move that AND operation to the call site and rename the callback to
cpu_mask_to_apicid().

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235446.641575516@linutronix.de
parent 52b166af
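
In short, the AND moves out of every callback and into the single caller. A minimal sketch of the changed calling convention, mirroring the vector.c hunk below (the names err/searchmask/dest_apicid are illustrative only):

	/* Before: each apic callback ANDed the two masks internally. */
	err = apic->cpu_mask_to_apicid_and(mask, searchmask, &dest_apicid);

	/* After: the caller ANDs once; the callback takes a single mask. */
	cpumask_and(searchmask, searchmask, mask);
	err = apic->cpu_mask_to_apicid(searchmask, &dest_apicid);
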
@@ -296,9 +296,8 @@ struct apic {
 	/* Can't be NULL on 64-bit */
 	unsigned long (*set_apic_id)(unsigned int id);
 
-	int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-				      const struct cpumask *andmask,
-				      unsigned int *apicid);
+	int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
+				  unsigned int *apicid);
 
 	/* ipi */
 	void (*send_IPI)(int cpu, int vector);
@@ -540,12 +539,10 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 #endif
 
-extern int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				       const struct cpumask *andmask,
-				       unsigned int *apicid);
-extern int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-					   const struct cpumask *andmask,
-					   unsigned int *apicid);
+extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
+				   unsigned int *apicid);
+extern int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
+				      unsigned int *apicid);
 
 static inline void
 flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
...
@@ -2201,11 +2201,9 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				   const struct cpumask *andmask,
-				   unsigned int *apicid)
+int default_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
 {
-	unsigned int cpu = cpumask_first_and(cpumask, andmask);
+	unsigned int cpu = cpumask_first(mask);
 
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
@@ -2213,13 +2211,9 @@ int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	return 0;
 }
 
-int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask,
-				unsigned int *apicid)
+int flat_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
 {
-	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
-				 cpumask_bits(andmask)[0] &
-				 APIC_ALL_CPUS;
+	unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
 
 	if (!cpu_mask)
 		return -EINVAL;
...
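
A note on flat_cpu_mask_to_apicid() above: unlike the default variant, it returns a logical-mode destination bitmask rather than one CPU's APIC ID; in flat logical mode each of the first eight CPUs owns one bit of the 8-bit destination field, which is why masking word 0 of the cpumask with APIC_ALL_CPUS (0xff) suffices. A standalone userspace sketch of that computation (illustrative only, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define APIC_ALL_CPUS 0xffu	/* 8 destination bits in flat logical mode */

	int main(void)
	{
		/* Word 0 of the (already ANDed) cpumask: CPUs 1 and 3 set. */
		uint64_t mask_word0 = (1u << 1) | (1u << 3);
		unsigned int apicid = mask_word0 & APIC_ALL_CPUS;

		/* CPU n owns bit n, so 0x0a addresses CPUs 1 and 3 at once. */
		printf("dest apicid: 0x%02x\n", apicid);
		return 0;
	}
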
@@ -172,7 +172,7 @@ static struct apic apic_flat __ro_after_init = {
 	.get_apic_id			= flat_get_apic_id,
 	.set_apic_id			= set_apic_id,
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single,
 	.send_IPI_mask			= flat_send_IPI_mask,
@@ -268,7 +268,7 @@ static struct apic apic_physflat __ro_after_init = {
 	.get_apic_id			= flat_get_apic_id,
 	.set_apic_id			= set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single_phys,
 	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
...
@@ -141,7 +141,7 @@ struct apic apic_noop __ro_after_init = {
 	.get_apic_id			= noop_get_apic_id,
 	.set_apic_id			= NULL,
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 	.send_IPI			= noop_send_IPI,
 	.send_IPI_mask			= noop_send_IPI_mask,
...
@@ -267,7 +267,7 @@ static const struct apic apic_numachip1 __refconst = {
 	.get_apic_id			= numachip1_get_apic_id,
 	.set_apic_id			= numachip1_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= numachip_send_IPI_one,
 	.send_IPI_mask			= numachip_send_IPI_mask,
@@ -318,7 +318,7 @@ static const struct apic apic_numachip2 __refconst = {
 	.get_apic_id			= numachip2_get_apic_id,
 	.set_apic_id			= numachip2_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= numachip_send_IPI_one,
 	.send_IPI_mask			= numachip_send_IPI_mask,
...
@@ -172,7 +172,7 @@ static struct apic apic_bigsmp __ro_after_init = {
 	.get_apic_id			= bigsmp_get_apic_id,
 	.set_apic_id			= NULL,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single_phys,
 	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
...
@@ -102,7 +102,7 @@ static struct apic apic_default __ro_after_init = {
 	.get_apic_id			= default_get_apic_id,
 	.set_apic_id			= NULL,
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single,
 	.send_IPI_mask			= default_send_IPI_mask_logical,
...
@@ -141,7 +141,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 		/*
 		 * Clear the offline cpus from @vector_cpumask for searching
 		 * and verify whether the result overlaps with @mask. If true,
-		 * then the call to apic->cpu_mask_to_apicid_and() will
+		 * then the call to apic->cpu_mask_to_apicid() will
 		 * succeed as well. If not, no point in trying to find a
 		 * vector in this mask.
 		 */
@@ -225,8 +225,8 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 * vector_searchmask is a subset of d->domain and has the offline
 	 * cpus masked out.
 	 */
-	BUG_ON(apic->cpu_mask_to_apicid_and(mask, vector_searchmask,
-					    &d->cfg.dest_apicid));
+	cpumask_and(vector_searchmask, vector_searchmask, mask);
+	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, &d->cfg.dest_apicid));
 	return 0;
 }
...
@@ -104,22 +104,20 @@ static void x2apic_send_IPI_all(int vector)
 }
 
 static int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask,
-			      unsigned int *apicid)
+x2apic_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
 {
 	unsigned int cpu;
 	u32 dest = 0;
 	u16 cluster;
 
-	cpu = cpumask_first_and(cpumask, andmask);
+	cpu = cpumask_first(mask);
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
 	dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
 	cluster = x2apic_cluster(cpu);
 
-	for_each_cpu_and(cpu, cpumask, andmask) {
+	for_each_cpu(cpu, mask) {
 		if (cluster != x2apic_cluster(cpu))
 			continue;
 		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
@@ -249,7 +247,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
 	.get_apic_id			= x2apic_get_apic_id,
 	.set_apic_id			= x2apic_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
 
 	.send_IPI			= x2apic_send_IPI,
 	.send_IPI_mask			= x2apic_send_IPI_mask,
...
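
The cluster variant above is the one case where the result is built from more than the first CPU: the first CPU in the mask selects a cluster, and the logical IDs of every mask CPU in that cluster are ORed together. A userspace toy model of that loop (the per-cpu tables and values are illustrative, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	#define NR_CPUS 8

	/* Stand-ins for x86_cpu_to_logical_apicid and x2apic_cluster(cpu). */
	static const uint32_t logical_id[NR_CPUS] = { 1, 2, 4, 8, 1, 2, 4, 8 };
	static const uint16_t cluster_of[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

	/* Models x2apic_cpu_mask_to_apicid(): the first cpu in the mask
	 * selects the cluster; every mask cpu in that cluster is ORed in. */
	static int mask_to_apicid(uint8_t mask, uint32_t *apicid)
	{
		uint32_t dest = 0;
		int first = -1, cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!(mask & (1u << cpu)))
				continue;
			if (first < 0)
				first = cpu;
			if (cluster_of[cpu] == cluster_of[first])
				dest |= logical_id[cpu];
		}
		if (first < 0)
			return -1;	/* empty mask: -EINVAL in the kernel */
		*apicid = dest;
		return 0;
	}

	int main(void)
	{
		uint32_t apicid;

		/* CPUs 1, 2 (cluster 0) and 5 (cluster 1): only cluster 0 counts. */
		if (!mask_to_apicid(0x26, &apicid))
			printf("dest apicid: 0x%x\n", apicid);	/* prints 0x6 */
		return 0;
	}
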
@@ -127,7 +127,7 @@ static struct apic apic_x2apic_phys __ro_after_init = {
 	.get_apic_id			= x2apic_get_apic_id,
 	.set_apic_id			= x2apic_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= x2apic_send_IPI,
 	.send_IPI_mask			= x2apic_send_IPI_mask,
...
@@ -526,11 +526,9 @@ static void uv_init_apic_ldr(void)
 }
 
 static int
-uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			  const struct cpumask *andmask,
-			  unsigned int *apicid)
+uv_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
 {
-	int ret = default_cpu_mask_to_apicid_and(cpumask, andmask, apicid);
+	int ret = default_cpu_mask_to_apicid(mask, apicid);
 
 	if (!ret)
 		*apicid |= uv_apicid_hibits;
@@ -603,7 +601,7 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
 	.get_apic_id			= x2apic_get_apic_id,
 	.set_apic_id			= set_apic_id,
 
-	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
 
 	.send_IPI			= uv_send_IPI_one,
 	.send_IPI_mask			= uv_send_IPI_mask,
...
@@ -178,7 +178,7 @@ static struct apic xen_pv_apic = {
 	.get_apic_id			= xen_get_apic_id,
 	.set_apic_id			= xen_set_apic_id, /* Can be NULL on 32-bit. */
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 #ifdef CONFIG_SMP
 	.send_IPI_mask			= xen_send_IPI_mask,
...
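
Why the conversion is purely mechanical: taking the first bit of a pre-ANDed mask is the same as cpumask_first_and() on the two source masks, so every converted callback behaves exactly as before. A standalone toy (userspace C, a single 64-bit word standing in for a cpumask) demonstrating the equivalence:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* First set bit, or 64 if empty -- models cpumask_first(). */
	static int first_bit(uint64_t m)
	{
		return m ? __builtin_ctzll(m) : 64;
	}

	int main(void)
	{
		uint64_t cpumask = 0xf0;	/* CPUs 4-7 */
		uint64_t andmask = 0x3c;	/* CPUs 2-5 */

		/* Old scheme: the callback ANDed the two masks internally. */
		int old_cpu = first_bit(cpumask & andmask);

		/* New scheme: the caller ANDs first; the callback only
		 * takes the first bit of the combined mask. */
		uint64_t combined = cpumask & andmask;
		int new_cpu = first_bit(combined);

		assert(old_cpu == new_cpu);
		printf("target cpu: %d\n", old_cpu);	/* prints 4 */
		return 0;
	}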