Commit eac58d08 authored by Logan Gunthorpe, committed by Jens Axboe

md: Use enum for overloaded magic numbers used by mddev->curr_resync

Comments in the code document special values used for
mddev->curr_resync. Make this clearer by using an enum to label these
values.

The only functional change is that a couple of places used the wrong
comparison operator, which implied that 3 is another special value. They
are all fixed so that a value of 3 or greater means an active resync.
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f28c5c3
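
The change maps the old magic numbers 0, 1, 2 and 3 onto the named states shown in the md.h hunk below and tightens the comparisons around the value 3. A minimal standalone sketch of the resulting semantics (the resync_is_active() helper and the demo program are hypothetical and only illustrate the patch; the enum values and the >= MD_RESYNC_ACTIVE comparison come straight from it):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the states the patch adds to md.h. */
enum {
	MD_RESYNC_NONE    = 0,	/* no resync in progress */
	MD_RESYNC_YIELDED = 1,	/* yielded to a conflicting resync */
	MD_RESYNC_DELAYED = 2,	/* checking for conflicts with another sync */
	MD_RESYNC_ACTIVE  = 3,	/* this value or greater: resync in progress */
};

/* Hypothetical helper, not part of the patch: curr_resync only holds a
 * sector count once it reaches MD_RESYNC_ACTIVE or beyond. */
static bool resync_is_active(unsigned long long curr_resync)
{
	return curr_resync >= MD_RESYNC_ACTIVE;
}

int main(void)
{
	printf("%d %d %d\n",
	       resync_is_active(MD_RESYNC_DELAYED),	/* 0: still arbitrating */
	       resync_is_active(MD_RESYNC_ACTIVE),	/* 1: active, nothing synced yet */
	       resync_is_active(12345));		/* 1: active at sector 12345 */
	return 0;
}

The old checks "curr_resync <= 3" and "curr_resync > 3" treated 3 itself as a fourth special value even though 3 already means an active resync; comparing with < and >= against MD_RESYNC_ACTIVE makes that boundary explicit.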
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5001,7 +5001,7 @@ static ssize_t
 sync_speed_show(struct mddev *mddev, char *page)
 {
 	unsigned long resync, dt, db;
-	if (mddev->curr_resync == 0)
+	if (mddev->curr_resync == MD_RESYNC_NONE)
 		return sprintf(page, "none\n");
 	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
 	dt = (jiffies - mddev->resync_mark) / HZ;
@@ -5020,8 +5020,8 @@ sync_completed_show(struct mddev *mddev, char *page)
 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return sprintf(page, "none\n");
 
-	if (mddev->curr_resync == 1 ||
-	    mddev->curr_resync == 2)
+	if (mddev->curr_resync == MD_RESYNC_YIELDED ||
+	    mddev->curr_resync == MD_RESYNC_DELAYED)
 		return sprintf(page, "delayed\n");
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -8018,7 +8018,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 		max_sectors = mddev->dev_sectors;
 
 	resync = mddev->curr_resync;
-	if (resync <= 3) {
+	if (resync < MD_RESYNC_ACTIVE) {
 		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
 			/* Still cleaning up */
 			resync = max_sectors;
@@ -8027,7 +8027,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 	} else
 		resync -= atomic_read(&mddev->recovery_active);
 
-	if (resync == 0) {
+	if (resync == MD_RESYNC_NONE) {
 		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
 			struct md_rdev *rdev;
@@ -8051,7 +8051,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 		}
 		return 0;
 	}
-	if (resync < 3) {
+	if (resync < MD_RESYNC_ACTIVE) {
 		seq_printf(seq, "\tresync=DELAYED");
 		return 1;
 	}
@@ -8729,13 +8729,7 @@ void md_do_sync(struct md_thread *thread)
 	mddev->last_sync_action = action ?: desc;
 
-	/* we overload curr_resync somewhat here.
-	 * 0 == not engaged in resync at all
-	 * 2 == checking that there is no conflict with another sync
-	 * 1 == like 2, but have yielded to allow conflicting resync to
-	 *    commence
-	 * other == active in resync - this many blocks
-	 *
+	/*
 	 * Before starting a resync we must have set curr_resync to
 	 * 2, and then checked that every "conflicting" array has curr_resync
 	 * less than ours.  When we find one that is the same or higher
@@ -8747,7 +8741,7 @@ void md_do_sync(struct md_thread *thread)
 	do {
 		int mddev2_minor = -1;
-		mddev->curr_resync = 2;
+		mddev->curr_resync = MD_RESYNC_DELAYED;
 
 	try_again:
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
@@ -8759,12 +8753,14 @@ void md_do_sync(struct md_thread *thread)
 			&& mddev2->curr_resync
 			&& match_mddev_units(mddev, mddev2)) {
 				DEFINE_WAIT(wq);
-				if (mddev < mddev2 && mddev->curr_resync == 2) {
+				if (mddev < mddev2 &&
+				    mddev->curr_resync == MD_RESYNC_DELAYED) {
 					/* arbitrarily yield */
-					mddev->curr_resync = 1;
+					mddev->curr_resync = MD_RESYNC_YIELDED;
 					wake_up(&resync_wait);
 				}
-				if (mddev > mddev2 && mddev->curr_resync == 1)
+				if (mddev > mddev2 &&
+				    mddev->curr_resync == MD_RESYNC_YIELDED)
 					/* no need to wait here, we can wait the next
 					 * time 'round when curr_resync == 2
 					 */
@@ -8792,7 +8788,7 @@ void md_do_sync(struct md_thread *thread)
 				finish_wait(&resync_wait, &wq);
 			}
 		}
-	} while (mddev->curr_resync < 2);
+	} while (mddev->curr_resync < MD_RESYNC_DELAYED);
 
 	j = 0;
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -8876,7 +8872,7 @@ void md_do_sync(struct md_thread *thread)
 			 desc, mdname(mddev));
 		mddev->curr_resync = j;
 	} else
-		mddev->curr_resync = 3; /* no longer delayed */
+		mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
 	mddev->curr_resync_completed = j;
 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	md_new_event();
@@ -9011,14 +9007,14 @@ void md_do_sync(struct md_thread *thread)
 	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    mddev->curr_resync > 3) {
+	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
 		mddev->curr_resync_completed = mddev->curr_resync;
 		sysfs_notify_dirent_safe(mddev->sysfs_completed);
 	}
 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
-	    mddev->curr_resync > 3) {
+	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 				if (mddev->curr_resync >= mddev->recovery_cp) {
@@ -9082,7 +9078,7 @@ void md_do_sync(struct md_thread *thread)
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
-	mddev->curr_resync = 0;
+	mddev->curr_resync = MD_RESYNC_NONE;
 	spin_unlock(&mddev->lock);
 
 	wake_up(&resync_wait);
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -288,6 +288,21 @@ struct serial_info {
 	sector_t _subtree_last; /* highest sector in subtree of rb node */
 };
 
+/*
+ * mddev->curr_resync stores the current sector of the resync but
+ * also has some overloaded values.
+ */
+enum {
+	/* No resync in progress */
+	MD_RESYNC_NONE = 0,
+	/* Yielded to allow another conflicting resync to commence */
+	MD_RESYNC_YIELDED = 1,
+	/* Delayed to check that there is no conflict with another sync */
+	MD_RESYNC_DELAYED = 2,
+	/* Any value greater than or equal to this is in an active resync */
+	MD_RESYNC_ACTIVE = 3,
+};
+
 struct mddev {
 	void				*private;
 	struct md_personality		*pers;
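
The arbitration change in the @@ -8759 hunk breaks ties between two conflicting arrays by comparing the mddev pointers: the lower-addressed array drops from MD_RESYNC_DELAYED to MD_RESYNC_YIELDED so the resync on the other array can run first. A small standalone sketch of that tie-break (the toy_mddev struct, variable names and printout are hypothetical; the state values and the pointer comparison mirror the patch):

#include <stdio.h>

enum { MD_RESYNC_NONE, MD_RESYNC_YIELDED, MD_RESYNC_DELAYED, MD_RESYNC_ACTIVE };

struct toy_mddev {		/* stand-in for struct mddev */
	int curr_resync;
};

int main(void)
{
	struct toy_mddev arrays[2] = {
		{ .curr_resync = MD_RESYNC_DELAYED },
		{ .curr_resync = MD_RESYNC_DELAYED },
	};
	struct toy_mddev *mddev = &arrays[0], *mddev2 = &arrays[1];

	/* Same tie-break as md_do_sync(): the lower-addressed array yields
	 * so the conflicting resync on the other array can proceed. */
	if (mddev < mddev2 && mddev->curr_resync == MD_RESYNC_DELAYED)
		mddev->curr_resync = MD_RESYNC_YIELDED;

	printf("mddev=%d mddev2=%d\n",
	       mddev->curr_resync, mddev2->curr_resync);	/* mddev=1 mddev2=2 */
	return 0;
}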