Commit ef1d7151 authored by Akinobu Mita, committed by Linus Torvalds

cpu hotplug: intel_cacheinfo: fix cpu hotplug error handling

- Fix a resource leak in the error path of detect_cache_attributes()

- Don't register the hotcpu notifier when cache_add_dev() returns an error

- Introduce a cache_dev_map cpumask to track whether the cache interface
  for a CPU has been successfully added by cache_add_dev().

  cache_add_dev() may fail with an out-of-memory error.  The
  cache_dev_map cpumask lets us avoid calling cache_remove_dev() on an
  uninitialized cache interface when a CPU_DEAD event is delivered (see
  the sketch below).

  (We cannot move cache_add_dev() from the CPU_ONLINE event handler to
  the CPU_UP_PREPARE event handler, because cache_add_dev() needs to
  execute cpuid on the target CPU and store the results, which requires
  that CPU to be online.)
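
  With cache_dev_map, a failed cache_add_dev() simply leaves the CPU's
  bit clear, and the later CPU_DEAD notification becomes a no-op.  A
  simplified sketch of the resulting callback (the real handler also
  covers the CPU_ONLINE_FROZEN/CPU_DEAD_FROZEN variants):

	static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
						    unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;
		struct sys_device *sys_dev = get_cpu_sysdev(cpu);

		switch (action) {
		case CPU_ONLINE:
			/* may fail; cpu is then never set in cache_dev_map */
			cache_add_dev(sys_dev);
			break;
		case CPU_DEAD:
			/* bails out early if cpu is not in cache_dev_map */
			cache_remove_dev(sys_dev);
			break;
		}
		return NOTIFY_OK;
	}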

[nix.or.die@googlemail.com: fix a section mismatch warning]
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Andi Kleen <ak@suse.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Gabriel Craciunescu <nix.or.die@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d435d862
@@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 
 static void free_cache_attributes(unsigned int cpu)
 {
+	int i;
+
+	for (i = 0; i < num_cache_leaves; i++)
+		cache_remove_shared_cpu_map(cpu, i);
+
 	kfree(cpuid4_info[cpu]);
 	cpuid4_info[cpu] = NULL;
 }
@@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu)
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long		j;
 	int			retval;
 	cpumask_t		oldmask;
 
 	if (num_cache_leaves == 0)
@@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		goto out;
 
 	/* Do cpuid and store the results */
+	retval = 0;
 	for (j = 0; j < num_cache_leaves; j++) {
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
 		retval = cpuid4_cache_lookup(j, this_leaf);
-		if (unlikely(retval < 0))
+		if (unlikely(retval < 0)) {
+			int i;
+
+			for (i = 0; i < j; i++)
+				cache_remove_shared_cpu_map(cpu, i);
 			break;
+		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
 	set_cpus_allowed(current, oldmask);
 
 out:
-	if (retval)
-		free_cache_attributes(cpu);
+	if (retval) {
+		kfree(cpuid4_info[cpu]);
+		cpuid4_info[cpu] = NULL;
+	}
 
 	return retval;
 }
@@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = {
 	.sysfs_ops = &sysfs_ops,
 };
 
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(cache_kobject[cpu]);
 	kfree(index_kobject[cpu]);
@@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 {
+	int err;
+
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	detect_cache_attributes(cpu);
-	if (cpuid4_info[cpu] == NULL)
-		return -ENOENT;
+	err = detect_cache_attributes(cpu);
+	if (err)
+		return err;
 
 	/* Allocate all required memory */
 	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
@@ -705,13 +718,15 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 	return -ENOMEM;
 }
 
+static cpumask_t cache_dev_map = CPU_MASK_NONE;
+
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
 	struct _index_kobject *this_object;
-	int retval = 0;
+	int retval;
 
 	retval = cpuid4_cache_sysfs_init(cpu);
 	if (unlikely(retval < 0))
@@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	kobject_set_name(cache_kobject[cpu], "%s", "cache");
 	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
 	retval = kobject_register(cache_kobject[cpu]);
+	if (retval < 0) {
+		cpuid4_cache_sysfs_exit(cpu);
+		return retval;
+	}
 
 	for (i = 0; i < num_cache_leaves; i++) {
 		this_object = INDEX_KOBJECT_PTR(cpu,i);
@@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 			break;
 		}
 	}
+	if (!retval)
+		cpu_set(cpu, cache_dev_map);
+
 	return retval;
 }
@@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	if (cpuid4_info[cpu] == NULL)
 		return;
-	for (i = 0; i < num_cache_leaves; i++) {
-		cache_remove_shared_cpu_map(cpu, i);
+	if (!cpu_isset(cpu, cache_dev_map))
+		return;
+	cpu_clear(cpu, cache_dev_map);
+
+	for (i = 0; i < num_cache_leaves; i++)
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-	}
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
-	return;
 }
 
 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
@@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
 {
 	.notifier_call = cacheinfo_cpu_callback,
 };
 
 static int __cpuinit cache_sysfs_init(void)
@@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void)
 	if (num_cache_leaves == 0)
 		return 0;
 
-	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
 	for_each_online_cpu(i) {
-		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+		int err;
+		struct sys_device *sys_dev = get_cpu_sysdev(i);
 
-		cache_add_dev(sys_dev);
+		err = cache_add_dev(sys_dev);
+		if (err)
+			return err;
 	}
+
+	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
 	return 0;
 }