Commit 72adae23 authored by Xiao Ni, committed by Song Liu

md: Change active_io to percpu

Currently active_io is an atomic counter. It counts how many I/Os are
in the submission path, and it is incremented and decremented for every
request. But it only needs to be checked against zero when suspending
the array, so we can switch it from an atomic counter to a percpu
refcount to improve performance.
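
For the hot path, the difference looks roughly like this (a minimal
compilable sketch, not code from this patch; the demo_* names and the
kernel-module context are assumptions for illustration):

    #include <linux/atomic.h>
    #include <linux/percpu-refcount.h>

    static atomic_t demo_count = ATOMIC_INIT(0);
    static struct percpu_ref demo_ref;  /* assumed initialized elsewhere */

    static void demo_io_before(void)
    {
            /* Old scheme: every I/O dirties one shared cacheline twice,
             * which bounces between CPUs under parallel submission. */
            atomic_inc(&demo_count);
            /* ... submit the I/O ... */
            atomic_dec(&demo_count);
    }

    static void demo_io_after(void)
    {
            /* New scheme: a CPU-local increment while the ref is live;
             * it only fails after the ref has been killed (suspend). */
            if (!percpu_ref_tryget_live(&demo_ref))
                    return;
            /* ... submit the I/O ... */
            percpu_ref_put(&demo_ref);
    }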

After switching active_io to a percpu refcount, its state is used to
judge whether the array is suspended, and md_handle_request no longer
needs to wake up ->sb_wait itself; that is done by the release callback
registered when active_io is initialized. The field mddev->suspended is
now only used to count how many users have requested to suspend the
array.
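
The lifecycle the patch builds on can be sketched with the percpu_ref
API alone (again a hedged sketch in assumed kernel-module context; the
demo_* names are illustrative, and demo_wait plays the role that
->sb_wait plays in the patch):

    #include <linux/gfp.h>
    #include <linux/percpu-refcount.h>
    #include <linux/wait.h>

    static struct percpu_ref demo_ref;
    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

    /* Release callback: runs once the count reaches zero after
     * percpu_ref_kill(); wakes the waiter in demo_suspend(). */
    static void demo_release(struct percpu_ref *ref)
    {
            wake_up(&demo_wait);
    }

    static int demo_init(void)
    {
            /* PERCPU_REF_ALLOW_REINIT permits percpu_ref_resurrect()
             * after a kill, which demo_resume() relies on. */
            return percpu_ref_init(&demo_ref, demo_release,
                                   PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
    }

    static void demo_suspend(void)
    {
            /* Fail all further percpu_ref_tryget_live() calls ... */
            percpu_ref_kill(&demo_ref);
            /* ... and wait for in-flight users to drop their refs. */
            wait_event(demo_wait, percpu_ref_is_zero(&demo_ref));
    }

    static void demo_resume(void)
    {
            /* Re-arm the percpu counters so I/O can proceed again. */
            percpu_ref_resurrect(&demo_ref);
    }

    static void demo_teardown(void)
    {
            percpu_ref_exit(&demo_ref);
    }

Note the two predicates the patch uses: percpu_ref_is_dying() turns
true as soon as percpu_ref_kill() is called, which is what the new
is_md_suspended() checks, while percpu_ref_is_zero() only turns true
once the last reference is dropped, which is what mddev_suspend()
waits for.
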
Signed-off-by: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <song@kernel.org>
parent d1932913
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -382,10 +382,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 
 static bool is_md_suspended(struct mddev *mddev)
 {
-	if (mddev->suspended)
-		return true;
-	else
-		return false;
+	return percpu_ref_is_dying(&mddev->active_io);
 }
 
 /* Rather than calling directly into the personality make_request function,
  * IO requests come here first so that we can check if the device is
@@ -412,12 +409,10 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
 void md_handle_request(struct mddev *mddev, struct bio *bio)
 {
 check_suspended:
-	rcu_read_lock();
 	if (is_suspended(mddev, bio)) {
 		DEFINE_WAIT(__wait);
 		/* Bail out if REQ_NOWAIT is set for the bio */
 		if (bio->bi_opf & REQ_NOWAIT) {
-			rcu_read_unlock();
 			bio_wouldblock_error(bio);
 			return;
 		}
@@ -426,23 +421,19 @@ void md_handle_request(struct mddev *mddev, struct bio *bio)
 					TASK_UNINTERRUPTIBLE);
 			if (!is_suspended(mddev, bio))
 				break;
-			rcu_read_unlock();
 			schedule();
-			rcu_read_lock();
 		}
 		finish_wait(&mddev->sb_wait, &__wait);
 	}
-	atomic_inc(&mddev->active_io);
-	rcu_read_unlock();
+	if (!percpu_ref_tryget_live(&mddev->active_io))
+		goto check_suspended;
 
 	if (!mddev->pers->make_request(mddev, bio)) {
-		atomic_dec(&mddev->active_io);
-		wake_up(&mddev->sb_wait);
+		percpu_ref_put(&mddev->active_io);
 		goto check_suspended;
 	}
 
-	if (atomic_dec_and_test(&mddev->active_io) && is_md_suspended(mddev))
-		wake_up(&mddev->sb_wait);
+	percpu_ref_put(&mddev->active_io);
 }
 EXPORT_SYMBOL(md_handle_request);
@@ -490,11 +481,10 @@ void mddev_suspend(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);
 	if (mddev->suspended++)
 		return;
-	synchronize_rcu();
 	wake_up(&mddev->sb_wait);
 	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
-	smp_mb__after_atomic();
-	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
+	percpu_ref_kill(&mddev->active_io);
+	wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
 	mddev->pers->quiesce(mddev, 1);
 	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
 	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
@@ -512,6 +502,7 @@ void mddev_resume(struct mddev *mddev)
 	lockdep_assert_held(&mddev->reconfig_mutex);
 	if (--mddev->suspended)
 		return;
+	percpu_ref_resurrect(&mddev->active_io);
 	wake_up(&mddev->sb_wait);
 	mddev->pers->quiesce(mddev, 0);
@@ -690,7 +681,6 @@ void mddev_init(struct mddev *mddev)
 	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
-	atomic_set(&mddev->active_io, 0);
 	spin_lock_init(&mddev->lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
@@ -5767,6 +5757,12 @@ static void md_safemode_timeout(struct timer_list *t)
 }
 
 static int start_dirty_degraded;
 
+static void active_io_release(struct percpu_ref *ref)
+{
+	struct mddev *mddev = container_of(ref, struct mddev, active_io);
+	wake_up(&mddev->sb_wait);
+}
+
 int md_run(struct mddev *mddev)
 {
@@ -5847,10 +5843,15 @@ int md_run(struct mddev *mddev)
 		nowait = nowait && bdev_nowait(rdev->bdev);
 	}
 
+	err = percpu_ref_init(&mddev->active_io, active_io_release,
+				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (err)
+		return err;
+
 	if (!bioset_initialized(&mddev->bio_set)) {
 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
-			return err;
+			goto exit_active_io;
 	}
 	if (!bioset_initialized(&mddev->sync_set)) {
 		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
@@ -6038,6 +6039,8 @@ int md_run(struct mddev *mddev)
 	bioset_exit(&mddev->sync_set);
 exit_bio_set:
 	bioset_exit(&mddev->bio_set);
+exit_active_io:
+	percpu_ref_exit(&mddev->active_io);
 	return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
@@ -6262,6 +6265,7 @@ void md_stop(struct mddev *mddev)
 	 */
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
+	percpu_ref_exit(&mddev->active_io);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
 }
@@ -7835,6 +7839,7 @@ static void md_free_disk(struct gendisk *disk)
 	struct mddev *mddev = disk->private_data;
 
 	percpu_ref_exit(&mddev->writes_pending);
+	percpu_ref_exit(&mddev->active_io);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);

--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -315,7 +315,7 @@ struct mddev {
 	unsigned long			sb_flags;
 
 	int				suspended;
-	atomic_t			active_io;
+	struct percpu_ref		active_io;
 	int				ro;
 	int				sysfs_active; /* set when sysfs deletes
 						 * are happening, so run/