Commit 9171c670 authored by Linus Torvalds's avatar Linus Torvalds

Merge branches 'irq-urgent-for-linus' and 'smp-hotplug-for-linus' of...

Merge branches 'irq-urgent-for-linus' and 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq and smpboot updates from Thomas Gleixner:
 "Just cleanup patches with no functional change and a fix for suspend
  issues."

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq: Introduce irq_do_set_affinity() to reduce duplicated code
  genirq: Add IRQS_PENDING for nested and simple irq

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smpboot, idle: Fix comment mismatch over idle_threads_init()
  smpboot, idle: Optimize calls to smp_processor_id() in idle_threads_init()
...@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq) ...@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
kstat_incr_irqs_this_cpu(irq, desc); kstat_incr_irqs_this_cpu(irq, desc);
action = desc->action; action = desc->action;
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock; goto out_unlock;
}
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock); raw_spin_unlock_irq(&desc->lock);
...@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) ...@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc); kstat_incr_irqs_this_cpu(irq, desc);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock; goto out_unlock;
}
handle_irq_event(desc); handle_irq_event(desc);
......
...@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); ...@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc); extern void irq_set_thread_affinity(struct irq_desc *desc);
extern int irq_do_set_affinity(struct irq_data *data,
const struct cpumask *dest, bool force);
/* Inline functions for support of irq chips on slow busses */ /* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc) static inline void chip_bus_lock(struct irq_desc *desc)
{ {
......
...@@ -142,16 +142,13 @@ static inline void ...@@ -142,16 +142,13 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif #endif
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{ {
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct irq_desc *desc = irq_data_to_desc(data); struct irq_desc *desc = irq_data_to_desc(data);
int ret = 0; struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
ret = chip->irq_set_affinity(data, mask, false); ret = chip->irq_set_affinity(data, mask, false);
switch (ret) { switch (ret) {
case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK:
...@@ -160,6 +157,21 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) ...@@ -160,6 +157,21 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
irq_set_thread_affinity(desc); irq_set_thread_affinity(desc);
ret = 0; ret = 0;
} }
return ret;
}
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct irq_desc *desc = irq_data_to_desc(data);
int ret = 0;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
ret = irq_do_set_affinity(data, mask, false);
} else { } else {
irqd_set_move_pending(data); irqd_set_move_pending(data);
irq_copy_pending(desc, mask); irq_copy_pending(desc, mask);
...@@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); ...@@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
static int static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{ {
struct irq_chip *chip = irq_desc_get_chip(desc);
struct cpumask *set = irq_default_affinity; struct cpumask *set = irq_default_affinity;
int ret, node = desc->irq_data.node; int node = desc->irq_data.node;
/* Excludes PER_CPU and NO_BALANCE interrupts */ /* Excludes PER_CPU and NO_BALANCE interrupts */
if (!irq_can_set_affinity(irq)) if (!irq_can_set_affinity(irq))
...@@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) ...@@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
if (cpumask_intersects(mask, nodemask)) if (cpumask_intersects(mask, nodemask))
cpumask_and(mask, mask, nodemask); cpumask_and(mask, mask, nodemask);
} }
ret = chip->irq_set_affinity(&desc->irq_data, mask, false); irq_do_set_affinity(&desc->irq_data, mask, false);
switch (ret) {
case IRQ_SET_MASK_OK:
cpumask_copy(desc->irq_data.affinity, mask);
case IRQ_SET_MASK_OK_NOCOPY:
irq_set_thread_affinity(desc);
}
return 0; return 0;
} }
#else #else
......
...@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata) ...@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
* For correct operation this depends on the caller * For correct operation this depends on the caller
* masking the irqs. * masking the irqs.
*/ */
if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
< nr_cpu_ids)) { irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
int ret = chip->irq_set_affinity(&desc->irq_data,
desc->pending_mask, false);
switch (ret) {
case IRQ_SET_MASK_OK:
cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
case IRQ_SET_MASK_OK_NOCOPY:
irq_set_thread_affinity(desc);
}
}
cpumask_clear(desc->pending_mask); cpumask_clear(desc->pending_mask);
} }
......
...@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void) ...@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void)
per_cpu(idle_threads, smp_processor_id()) = current; per_cpu(idle_threads, smp_processor_id()) = current;
} }
/**
* idle_init - Initialize the idle thread for a cpu
* @cpu: The cpu for which the idle thread should be initialized
*
* Creates the thread if it does not exist.
*/
static inline void idle_init(unsigned int cpu) static inline void idle_init(unsigned int cpu)
{ {
struct task_struct *tsk = per_cpu(idle_threads, cpu); struct task_struct *tsk = per_cpu(idle_threads, cpu);
...@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu) ...@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu)
} }
/** /**
* idle_thread_init - Initialize the idle thread for a cpu * idle_threads_init - Initialize idle threads for all cpus
* @cpu: The cpu for which the idle thread should be initialized
*
* Creates the thread if it does not exist.
*/ */
void __init idle_threads_init(void) void __init idle_threads_init(void)
{ {
unsigned int cpu; unsigned int cpu, boot_cpu;
boot_cpu = smp_processor_id();
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu != smp_processor_id()) if (cpu != boot_cpu)
idle_init(cpu); idle_init(cpu);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment