Commit 29c6d1bb authored by Sebastian Andrzej Siewior, committed by Thomas Gleixner

md/raid5: Convert to hotplug state machine

Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Neil Brown <neilb@suse.com>
Cc: linux-raid@vger.kernel.org
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160818125731.27256-10-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 84a3f4db
...@@ -6330,22 +6330,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu ...@@ -6330,22 +6330,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
return 0; return 0;
} }
static void raid5_free_percpu(struct r5conf *conf) static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
{ {
unsigned long cpu; struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
return 0;
}
static void raid5_free_percpu(struct r5conf *conf)
{
if (!conf->percpu) if (!conf->percpu)
return; return;
#ifdef CONFIG_HOTPLUG_CPU cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
unregister_cpu_notifier(&conf->cpu_notify);
#endif
get_online_cpus();
for_each_possible_cpu(cpu)
free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu); free_percpu(conf->percpu);
} }
...@@ -6364,64 +6362,28 @@ static void free_conf(struct r5conf *conf) ...@@ -6364,64 +6362,28 @@ static void free_conf(struct r5conf *conf)
kfree(conf); kfree(conf);
} }
#ifdef CONFIG_HOTPLUG_CPU static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{ {
struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
long cpu = (long)hcpu;
struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
switch (action) { if (alloc_scratch_buffer(conf, percpu)) {
case CPU_UP_PREPARE: pr_err("%s: failed memory allocation for cpu%u\n",
case CPU_UP_PREPARE_FROZEN: __func__, cpu);
if (alloc_scratch_buffer(conf, percpu)) { return -ENOMEM;
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
}
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
} }
return NOTIFY_OK; return 0;
} }
#endif
static int raid5_alloc_percpu(struct r5conf *conf) static int raid5_alloc_percpu(struct r5conf *conf)
{ {
unsigned long cpu;
int err = 0; int err = 0;
conf->percpu = alloc_percpu(struct raid5_percpu); conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu) if (!conf->percpu)
return -ENOMEM; return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
conf->cpu_notify.notifier_call = raid456_cpu_notify;
conf->cpu_notify.priority = 0;
err = register_cpu_notifier(&conf->cpu_notify);
if (err)
return err;
#endif
get_online_cpus();
for_each_present_cpu(cpu) {
err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
if (err) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
break;
}
}
put_online_cpus();
if (!err) { if (!err) {
conf->scribble_disks = max(conf->raid_disks, conf->scribble_disks = max(conf->raid_disks,
conf->previous_raid_disks); conf->previous_raid_disks);
...@@ -7953,10 +7915,21 @@ static struct md_personality raid4_personality = ...@@ -7953,10 +7915,21 @@ static struct md_personality raid4_personality =
static int __init raid5_init(void) static int __init raid5_init(void)
{ {
int ret;
raid5_wq = alloc_workqueue("raid5wq", raid5_wq = alloc_workqueue("raid5wq",
WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
if (!raid5_wq) if (!raid5_wq)
return -ENOMEM; return -ENOMEM;
ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
"md/raid5:prepare",
raid456_cpu_up_prepare,
raid456_cpu_dead);
if (ret) {
destroy_workqueue(raid5_wq);
return ret;
}
register_md_personality(&raid6_personality); register_md_personality(&raid6_personality);
register_md_personality(&raid5_personality); register_md_personality(&raid5_personality);
register_md_personality(&raid4_personality); register_md_personality(&raid4_personality);
...@@ -7968,6 +7941,7 @@ static void raid5_exit(void) ...@@ -7968,6 +7941,7 @@ static void raid5_exit(void)
unregister_md_personality(&raid6_personality); unregister_md_personality(&raid6_personality);
unregister_md_personality(&raid5_personality); unregister_md_personality(&raid5_personality);
unregister_md_personality(&raid4_personality); unregister_md_personality(&raid4_personality);
cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
destroy_workqueue(raid5_wq); destroy_workqueue(raid5_wq);
} }
......
...@@ -512,9 +512,7 @@ struct r5conf { ...@@ -512,9 +512,7 @@ struct r5conf {
} __percpu *percpu; } __percpu *percpu;
int scribble_disks; int scribble_disks;
int scribble_sectors; int scribble_sectors;
#ifdef CONFIG_HOTPLUG_CPU struct hlist_node node;
struct notifier_block cpu_notify;
#endif
/* /*
* Free stripes pool * Free stripes pool
......
...@@ -27,6 +27,7 @@ enum cpuhp_state { ...@@ -27,6 +27,7 @@ enum cpuhp_state {
CPUHP_SMPCFD_PREPARE, CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE, CPUHP_RELAY_PREPARE,
CPUHP_SLAB_PREPARE, CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP, CPUHP_RCUTREE_PREP,
CPUHP_NOTIFY_PREPARE, CPUHP_NOTIFY_PREPARE,
CPUHP_TIMERS_DEAD, CPUHP_TIMERS_DEAD,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment