Commit 64ca738f authored by Thomas Gleixner, committed by Zhang Rui

thermal/x86_pkg_temp: Move work scheduled flag into package struct

The boolean flag indicating whether work is scheduled for a package
is kept in separately allocated storage, which is resized when the number of
detected packages grows.

With the proper locking in place this is a completely pointless exercise
because we can simply stick it into the per package struct.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
parent ab47bd96
...@@ -61,6 +61,7 @@ struct pkg_device { ...@@ -61,6 +61,7 @@ struct pkg_device {
struct list_head list; struct list_head list;
u16 phys_proc_id; u16 phys_proc_id;
u16 cpu; u16 cpu;
bool work_scheduled;
u32 tj_max; u32 tj_max;
u32 msr_pkg_therm_low; u32 msr_pkg_therm_low;
u32 msr_pkg_therm_high; u32 msr_pkg_therm_high;
...@@ -82,11 +83,6 @@ static DEFINE_MUTEX(thermal_zone_mutex); ...@@ -82,11 +83,6 @@ static DEFINE_MUTEX(thermal_zone_mutex);
/* Interrupt to work function schedule queue */ /* Interrupt to work function schedule queue */
static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work); static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work);
/* To track if the work is already scheduled on a package */
static u8 *pkg_work_scheduled;
static u16 max_phy_id;
/* Debug counters to show using debugfs */ /* Debug counters to show using debugfs */
static struct dentry *debugfs; static struct dentry *debugfs;
static unsigned int pkg_interrupt_cnt; static unsigned int pkg_interrupt_cnt;
...@@ -294,7 +290,7 @@ static inline void disable_pkg_thres_interrupt(void) ...@@ -294,7 +290,7 @@ static inline void disable_pkg_thres_interrupt(void)
static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{ {
struct thermal_zone_device *tzone = NULL; struct thermal_zone_device *tzone = NULL;
int phy_id, cpu = smp_processor_id(); int cpu = smp_processor_id();
struct pkg_device *pkgdev; struct pkg_device *pkgdev;
u64 msr_val, wr_val; u64 msr_val, wr_val;
...@@ -308,8 +304,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) ...@@ -308,8 +304,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
mutex_unlock(&thermal_zone_mutex); mutex_unlock(&thermal_zone_mutex);
return; return;
} }
pkgdev->work_scheduled = false;
pkg_work_scheduled[phy_id] = 0;
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1); wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
...@@ -334,7 +329,6 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) ...@@ -334,7 +329,6 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
static int pkg_thermal_notify(u64 msr_val) static int pkg_thermal_notify(u64 msr_val)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
int phy_id = topology_physical_package_id(cpu);
struct pkg_device *pkgdev; struct pkg_device *pkgdev;
unsigned long flags; unsigned long flags;
...@@ -345,8 +339,8 @@ static int pkg_thermal_notify(u64 msr_val) ...@@ -345,8 +339,8 @@ static int pkg_thermal_notify(u64 msr_val)
/* Work is per package, so scheduling it once is enough. */ /* Work is per package, so scheduling it once is enough. */
pkgdev = pkg_temp_thermal_get_dev(cpu); pkgdev = pkg_temp_thermal_get_dev(cpu);
if (pkgdev && pkg_work_scheduled && !pkg_work_scheduled[phy_id]) { if (pkgdev && !pkgdev->work_scheduled) {
pkg_work_scheduled[phy_id] = 1; pkgdev->work_scheduled = true;
schedule_delayed_work_on(cpu, schedule_delayed_work_on(cpu,
&per_cpu(pkg_temp_thermal_threshold_work, cpu), &per_cpu(pkg_temp_thermal_threshold_work, cpu),
msecs_to_jiffies(notify_delay_ms)); msecs_to_jiffies(notify_delay_ms));
...@@ -361,8 +355,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) ...@@ -361,8 +355,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
u32 tj_max, eax, ebx, ecx, edx; u32 tj_max, eax, ebx, ecx, edx;
struct pkg_device *pkgdev; struct pkg_device *pkgdev;
int thres_count, err; int thres_count, err;
unsigned long flags;
u8 *temp;
cpuid(6, &eax, &ebx, &ecx, &edx); cpuid(6, &eax, &ebx, &ecx, &edx);
thres_count = ebx & 0x07; thres_count = ebx & 0x07;
...@@ -382,20 +374,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) ...@@ -382,20 +374,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
if (!pkgdev) if (!pkgdev)
return -ENOMEM; return -ENOMEM;
spin_lock_irqsave(&pkg_temp_lock, flags);
if (topology_physical_package_id(cpu) > max_phy_id)
max_phy_id = topology_physical_package_id(cpu);
temp = krealloc(pkg_work_scheduled,
(max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
if (!temp) {
spin_unlock_irqrestore(&pkg_temp_lock, flags);
kfree(pkgdev);
return -ENOMEM;
}
pkg_work_scheduled = temp;
pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
spin_unlock_irqrestore(&pkg_temp_lock, flags);
pkgdev->phys_proc_id = topology_physical_package_id(cpu); pkgdev->phys_proc_id = topology_physical_package_id(cpu);
pkgdev->cpu = cpu; pkgdev->cpu = cpu;
pkgdev->tj_max = tj_max; pkgdev->tj_max = tj_max;
...@@ -554,7 +532,6 @@ static int __init pkg_temp_thermal_init(void) ...@@ -554,7 +532,6 @@ static int __init pkg_temp_thermal_init(void)
for_each_online_cpu(i) for_each_online_cpu(i)
put_core_offline(i); put_core_offline(i);
cpu_notifier_register_done(); cpu_notifier_register_done();
kfree(pkg_work_scheduled);
return -ENODEV; return -ENODEV;
} }
module_init(pkg_temp_thermal_init) module_init(pkg_temp_thermal_init)
...@@ -572,8 +549,6 @@ static void __exit pkg_temp_thermal_exit(void) ...@@ -572,8 +549,6 @@ static void __exit pkg_temp_thermal_exit(void)
put_core_offline(i); put_core_offline(i);
cpu_notifier_register_done(); cpu_notifier_register_done();
kfree(pkg_work_scheduled);
debugfs_remove_recursive(debugfs); debugfs_remove_recursive(debugfs);
} }
module_exit(pkg_temp_thermal_exit) module_exit(pkg_temp_thermal_exit)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment