Commit daae161f authored by Mariusz Tkaczyk, committed by Song Liu

md: raid1/raid10: drop pending_cnt

Those counters are not necessary after commit 11bb45e8aaf6 ("md: drop queue
limitation for RAID1 and RAID10"). Remove them from all code (conf and
plug structs). raid1_plug_cb and raid10_plug_cb are identical, so move
definition of raid1_plug_cb to common raid1-10 definitions and use it for
RAID10 too.
Signed-off-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
Signed-off-by: Song Liu <song@kernel.org>
parent a7637069
...@@ -28,6 +28,11 @@ struct resync_pages { ...@@ -28,6 +28,11 @@ struct resync_pages {
struct page *pages[RESYNC_PAGES]; struct page *pages[RESYNC_PAGES];
}; };
/*
 * Plug callback context shared by RAID1 and RAID10 (moved here to the
 * common raid1-10 definitions so both personalities use one struct).
 */
struct raid1_plug_cb {
struct blk_plug_cb cb;	/* generic block-layer plug callback */
struct bio_list pending;	/* write bios queued while the plug is held */
};
static void rbio_pool_free(void *rbio, void *data) static void rbio_pool_free(void *rbio, void *data)
{ {
kfree(rbio); kfree(rbio);
......
...@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf) ...@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
struct bio *bio; struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list); bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
/* /*
...@@ -1167,12 +1166,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, ...@@ -1167,12 +1166,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
bio_put(behind_bio); bio_put(behind_bio);
} }
struct raid1_plug_cb {
struct blk_plug_cb cb;
struct bio_list pending;
int pending_cnt;
};
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{ {
struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
...@@ -1184,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) ...@@ -1184,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) { if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending); bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
...@@ -1588,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1588,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
plug = NULL; plug = NULL;
if (plug) { if (plug) {
bio_list_add(&plug->pending, mbio); bio_list_add(&plug->pending, mbio);
plug->pending_cnt++;
} else { } else {
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio); bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} }
...@@ -3058,7 +3048,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) ...@@ -3058,7 +3048,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_barrier); init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list); bio_list_init(&conf->pending_bio_list);
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1; conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO; err = -EIO;
......
...@@ -87,7 +87,6 @@ struct r1conf { ...@@ -87,7 +87,6 @@ struct r1conf {
/* queue pending writes to be submitted on unplug */ /* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list; struct bio_list pending_bio_list;
int pending_count;
/* for use when syncing mirrors: /* for use when syncing mirrors:
* We don't allow both normal IO and resync/recovery IO at * We don't allow both normal IO and resync/recovery IO at
......
...@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf) ...@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf)
struct bio *bio; struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list); bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
/* /*
...@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio, ...@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
return rdev->new_data_offset; return rdev->new_data_offset;
} }
struct raid10_plug_cb {
struct blk_plug_cb cb;
struct bio_list pending;
int pending_cnt;
};
static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{ {
struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb, struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
cb);
struct mddev *mddev = plug->cb.data; struct mddev *mddev = plug->cb.data;
struct r10conf *conf = mddev->private; struct r10conf *conf = mddev->private;
struct bio *bio; struct bio *bio;
...@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) ...@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
if (from_schedule || current->bio_list) { if (from_schedule || current->bio_list) {
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending); bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
...@@ -1238,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, ...@@ -1238,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
const unsigned long do_fua = (bio->bi_opf & REQ_FUA); const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags; unsigned long flags;
struct blk_plug_cb *cb; struct blk_plug_cb *cb;
struct raid10_plug_cb *plug = NULL; struct raid1_plug_cb *plug = NULL;
struct r10conf *conf = mddev->private; struct r10conf *conf = mddev->private;
struct md_rdev *rdev; struct md_rdev *rdev;
int devnum = r10_bio->devs[n_copy].devnum; int devnum = r10_bio->devs[n_copy].devnum;
...@@ -1280,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, ...@@ -1280,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
if (cb) if (cb)
plug = container_of(cb, struct raid10_plug_cb, cb); plug = container_of(cb, struct raid1_plug_cb, cb);
else else
plug = NULL; plug = NULL;
if (plug) { if (plug) {
bio_list_add(&plug->pending, mbio); bio_list_add(&plug->pending, mbio);
plug->pending_cnt++;
} else { } else {
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio); bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} }
......
...@@ -75,7 +75,6 @@ struct r10conf { ...@@ -75,7 +75,6 @@ struct r10conf {
/* queue pending writes and submit them on unplug */ /* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list; struct bio_list pending_bio_list;
int pending_count;
spinlock_t resync_lock; spinlock_t resync_lock;
atomic_t nr_pending; atomic_t nr_pending;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment