Commit 5d18081d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix two cpufreq issues, one in the core and one in the
  intel_pstate driver:

   - Fix CPU device node reference counting in the cpufreq core (Miquel
     Sabaté Solà)

   - Turn the spinlock used by the intel_pstate driver in hard IRQ
     context into a raw one to prevent the driver from crashing when
     PREEMPT_RT is enabled (Uwe Kleine-König)"

* tag 'pm-6.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: Avoid a bad reference count on CPU node
  cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
parents cc70ce8f c0f02536
@@ -1845,7 +1845,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
 #define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
@@ -1868,7 +1868,7 @@ void notify_hwp_interrupt(void)
 	if (!(value & status_mask))
 		return;
 
-	spin_lock_irqsave(&hwp_notify_lock, flags);
+	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
 
 	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
 		goto ack_intr;
@@ -1876,13 +1876,13 @@ void notify_hwp_interrupt(void)
 	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
 			      msecs_to_jiffies(10));
 
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 
 	return;
 
ack_intr:
 	wrmsrl_safe(MSR_HWP_STATUS, 0);
-	spin_unlock_irqrestore(&hwp_notify_lock, flags);
+	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
 }
 
 static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
@@ -1895,9 +1895,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
-	spin_lock_irq(&hwp_notify_lock);
+	raw_spin_lock_irq(&hwp_notify_lock);
 	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-	spin_unlock_irq(&hwp_notify_lock);
+	raw_spin_unlock_irq(&hwp_notify_lock);
 
 	if (cancel_work)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
@@ -1912,10 +1912,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
 		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
 
-		spin_lock_irq(&hwp_notify_lock);
+		raw_spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
-		spin_unlock_irq(&hwp_notify_lock);
+		raw_spin_unlock_irq(&hwp_notify_lock);
 
 		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
 			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
......
@@ -1107,10 +1107,9 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 				    const char *cell_name,
 				    struct of_phandle_args *args)
 {
-	struct device_node *cpu_np;
 	int ret;
 
-	cpu_np = of_cpu_device_node_get(cpu);
+	struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
 	if (!cpu_np)
 		return -ENODEV;
 
@@ -1118,9 +1117,6 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
 			       args);
 	if (ret < 0)
 		return ret;
 
-	of_node_put(cpu_np);
-
 	return 0;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment