Commit 8f553c49 authored by Thomas Gleixner

cpu/hotplug: Provide cpus_read|write_[un]lock()

The counting 'rwsem' hackery of get|put_online_cpus() is going to be
replaced by percpu rwsem.

Rename the functions to make it clear that it's locking and not some
refcount style interface. These new functions will be used for the
preparatory patches which make the code ready for the percpu rwsem
conversion.

Rename all instances in the cpu hotplug code while at it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081547.080397752@linutronix.de
parent 08332893
...@@ -99,12 +99,10 @@ static inline void cpu_maps_update_done(void) ...@@ -99,12 +99,10 @@ static inline void cpu_maps_update_done(void)
extern struct bus_type cpu_subsys; extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */ extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpu_hotplug_begin(void); extern void cpus_read_lock(void);
extern void cpu_hotplug_done(void); extern void cpus_read_unlock(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void); extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void); extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu); void clear_tasks_mm_cpumask(int cpu);
...@@ -112,13 +110,19 @@ int cpu_down(unsigned int cpu); ...@@ -112,13 +110,19 @@ int cpu_down(unsigned int cpu);
#else /* CONFIG_HOTPLUG_CPU */ #else /* CONFIG_HOTPLUG_CPU */
static inline void cpu_hotplug_begin(void) {} static inline void cpus_write_lock(void) { }
static inline void cpu_hotplug_done(void) {} static inline void cpus_write_unlock(void) { }
#define get_online_cpus() do { } while (0) static inline void cpus_read_lock(void) { }
#define put_online_cpus() do { } while (0) static inline void cpus_read_unlock(void) { }
#define cpu_hotplug_disable() do { } while (0) static inline void cpu_hotplug_disable(void) { }
#define cpu_hotplug_enable() do { } while (0) static inline void cpu_hotplug_enable(void) { }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* !CONFIG_HOTPLUG_CPU */
/* Wrappers which go away once all code is converted */
static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
static inline void get_online_cpus(void) { cpus_read_lock(); }
static inline void put_online_cpus(void) { cpus_read_unlock(); }
#ifdef CONFIG_PM_SLEEP_SMP #ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary); extern int freeze_secondary_cpus(int primary);
......
...@@ -235,7 +235,7 @@ static struct { ...@@ -235,7 +235,7 @@ static struct {
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void) void cpus_read_lock(void)
{ {
might_sleep(); might_sleep();
if (cpu_hotplug.active_writer == current) if (cpu_hotplug.active_writer == current)
...@@ -245,9 +245,9 @@ void get_online_cpus(void) ...@@ -245,9 +245,9 @@ void get_online_cpus(void)
atomic_inc(&cpu_hotplug.refcount); atomic_inc(&cpu_hotplug.refcount);
mutex_unlock(&cpu_hotplug.lock); mutex_unlock(&cpu_hotplug.lock);
} }
EXPORT_SYMBOL_GPL(get_online_cpus); EXPORT_SYMBOL_GPL(cpus_read_lock);
void put_online_cpus(void) void cpus_read_unlock(void)
{ {
int refcount; int refcount;
...@@ -264,7 +264,7 @@ void put_online_cpus(void) ...@@ -264,7 +264,7 @@ void put_online_cpus(void)
cpuhp_lock_release(); cpuhp_lock_release();
} }
EXPORT_SYMBOL_GPL(put_online_cpus); EXPORT_SYMBOL_GPL(cpus_read_unlock);
/* /*
* This ensures that the hotplug operation can begin only when the * This ensures that the hotplug operation can begin only when the
...@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus); ...@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
* get_online_cpus() not an api which is called all that often. * get_online_cpus() not an api which is called all that often.
* *
*/ */
void cpu_hotplug_begin(void) void cpus_write_lock(void)
{ {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
...@@ -306,7 +306,7 @@ void cpu_hotplug_begin(void) ...@@ -306,7 +306,7 @@ void cpu_hotplug_begin(void)
finish_wait(&cpu_hotplug.wq, &wait); finish_wait(&cpu_hotplug.wq, &wait);
} }
void cpu_hotplug_done(void) void cpus_write_unlock(void)
{ {
cpu_hotplug.active_writer = NULL; cpu_hotplug.active_writer = NULL;
mutex_unlock(&cpu_hotplug.lock); mutex_unlock(&cpu_hotplug.lock);
...@@ -773,7 +773,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, ...@@ -773,7 +773,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
if (!cpu_present(cpu)) if (!cpu_present(cpu))
return -EINVAL; return -EINVAL;
cpu_hotplug_begin(); cpus_write_lock();
cpuhp_tasks_frozen = tasks_frozen; cpuhp_tasks_frozen = tasks_frozen;
...@@ -811,7 +811,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, ...@@ -811,7 +811,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
} }
out: out:
cpu_hotplug_done(); cpus_write_unlock();
return ret; return ret;
} }
...@@ -893,7 +893,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) ...@@ -893,7 +893,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
struct task_struct *idle; struct task_struct *idle;
int ret = 0; int ret = 0;
cpu_hotplug_begin(); cpus_write_lock();
if (!cpu_present(cpu)) { if (!cpu_present(cpu)) {
ret = -EINVAL; ret = -EINVAL;
...@@ -941,7 +941,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) ...@@ -941,7 +941,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
target = min((int)target, CPUHP_BRINGUP_CPU); target = min((int)target, CPUHP_BRINGUP_CPU);
ret = cpuhp_up_callbacks(cpu, st, target); ret = cpuhp_up_callbacks(cpu, st, target);
out: out:
cpu_hotplug_done(); cpus_write_unlock();
return ret; return ret;
} }
...@@ -1424,7 +1424,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, ...@@ -1424,7 +1424,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
if (sp->multi_instance == false) if (sp->multi_instance == false)
return -EINVAL; return -EINVAL;
get_online_cpus(); cpus_read_lock();
mutex_lock(&cpuhp_state_mutex); mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi) if (!invoke || !sp->startup.multi)
...@@ -1453,7 +1453,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, ...@@ -1453,7 +1453,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
hlist_add_head(node, &sp->list); hlist_add_head(node, &sp->list);
unlock: unlock:
mutex_unlock(&cpuhp_state_mutex); mutex_unlock(&cpuhp_state_mutex);
put_online_cpus(); cpus_read_unlock();
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance); EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
...@@ -1486,7 +1486,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, ...@@ -1486,7 +1486,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
if (cpuhp_cb_check(state) || !name) if (cpuhp_cb_check(state) || !name)
return -EINVAL; return -EINVAL;
get_online_cpus(); cpus_read_lock();
mutex_lock(&cpuhp_state_mutex); mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown, ret = cpuhp_store_callbacks(state, name, startup, teardown,
...@@ -1522,7 +1522,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, ...@@ -1522,7 +1522,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
} }
out: out:
mutex_unlock(&cpuhp_state_mutex); mutex_unlock(&cpuhp_state_mutex);
put_online_cpus(); cpus_read_unlock();
/* /*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the * If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success. * dynamically allocated state in case of success.
...@@ -1544,7 +1544,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, ...@@ -1544,7 +1544,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
if (!sp->multi_instance) if (!sp->multi_instance)
return -EINVAL; return -EINVAL;
get_online_cpus(); cpus_read_lock();
mutex_lock(&cpuhp_state_mutex); mutex_lock(&cpuhp_state_mutex);
if (!invoke || !cpuhp_get_teardown_cb(state)) if (!invoke || !cpuhp_get_teardown_cb(state))
...@@ -1565,7 +1565,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, ...@@ -1565,7 +1565,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
remove: remove:
hlist_del(node); hlist_del(node);
mutex_unlock(&cpuhp_state_mutex); mutex_unlock(&cpuhp_state_mutex);
put_online_cpus(); cpus_read_unlock();
return 0; return 0;
} }
...@@ -1587,7 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) ...@@ -1587,7 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
BUG_ON(cpuhp_cb_check(state)); BUG_ON(cpuhp_cb_check(state));
get_online_cpus(); cpus_read_lock();
mutex_lock(&cpuhp_state_mutex); mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) { if (sp->multi_instance) {
...@@ -1615,7 +1615,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) ...@@ -1615,7 +1615,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
remove: remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false); cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
mutex_unlock(&cpuhp_state_mutex); mutex_unlock(&cpuhp_state_mutex);
put_online_cpus(); cpus_read_unlock();
} }
EXPORT_SYMBOL(__cpuhp_remove_state); EXPORT_SYMBOL(__cpuhp_remove_state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment