Commit 2aa82191 authored by Goldwyn Rodrigues's avatar Goldwyn Rodrigues

md-cluster: Perform a lazy update

In a clustered environment, a change such as marking a device faulty,
can be recorded by any of the nodes. This is communicated to all the
nodes and re-recording such a change is unnecessary, and quite often
pretty disruptive.

With this patch, just before the update, we check for the changes,
and if they are already recorded in the superblock, we abort the
update after clearing all the flags.
Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
parent 70bcecdb
...@@ -2199,6 +2199,46 @@ static void sync_sbs(struct mddev *mddev, int nospares) ...@@ -2199,6 +2199,46 @@ static void sync_sbs(struct mddev *mddev, int nospares)
} }
} }
static bool does_sb_need_changing(struct mddev *mddev)
{
struct md_rdev *rdev;
struct mdp_superblock_1 *sb;
int role;
/* Find a good rdev */
rdev_for_each(rdev, mddev)
if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
break;
/* No good device found. */
if (!rdev)
return false;
sb = page_address(rdev->sb_page);
/* Check if a device has become faulty or a spare become active */
rdev_for_each(rdev, mddev) {
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
/* Device activated? */
if (role == 0xffff && rdev->raid_disk >=0 &&
!test_bit(Faulty, &rdev->flags))
return true;
/* Device turned faulty? */
if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
return true;
}
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
(mddev->recovery_cp != le64_to_cpu(sb->resync_offset)) ||
(mddev->layout != le64_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
return false;
}
void md_update_sb(struct mddev *mddev, int force_change) void md_update_sb(struct mddev *mddev, int force_change)
{ {
struct md_rdev *rdev; struct md_rdev *rdev;
...@@ -2211,6 +2251,18 @@ void md_update_sb(struct mddev *mddev, int force_change) ...@@ -2211,6 +2251,18 @@ void md_update_sb(struct mddev *mddev, int force_change)
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags);
return; return;
} }
if (mddev_is_clustered(mddev)) {
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
md_cluster_ops->metadata_update_start(mddev);
/* Has someone else already updated the sb? */
if (!does_sb_need_changing(mddev)) {
md_cluster_ops->metadata_update_cancel(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
return;
}
}
repeat: repeat:
/* First make sure individual recovery_offsets are correct */ /* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) { rdev_for_each(rdev, mddev) {
...@@ -2359,6 +2411,9 @@ void md_update_sb(struct mddev *mddev, int force_change) ...@@ -2359,6 +2411,9 @@ void md_update_sb(struct mddev *mddev, int force_change)
clear_bit(BlockedBadBlocks, &rdev->flags); clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait); wake_up(&rdev->blocked_wait);
} }
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
} }
EXPORT_SYMBOL(md_update_sb); EXPORT_SYMBOL(md_update_sb);
...@@ -2496,13 +2551,9 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) ...@@ -2496,13 +2551,9 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (mddev_is_clustered(mddev)) if (mddev_is_clustered(mddev))
md_cluster_ops->remove_disk(mddev, rdev); md_cluster_ops->remove_disk(mddev, rdev);
md_kick_rdev_from_array(rdev); md_kick_rdev_from_array(rdev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (mddev->pers) if (mddev->pers)
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
md_new_event(mddev); md_new_event(mddev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
err = 0; err = 0;
} }
} else if (cmd_match(buf, "writemostly")) { } else if (cmd_match(buf, "writemostly")) {
...@@ -4063,12 +4114,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len) ...@@ -4063,12 +4114,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
if (err) if (err)
return err; return err;
if (mddev->pers) { if (mddev->pers) {
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors); err = update_size(mddev, sectors);
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
} else { } else {
if (mddev->dev_sectors == 0 || if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors) mddev->dev_sectors > sectors)
...@@ -5306,8 +5353,6 @@ static void md_clean(struct mddev *mddev) ...@@ -5306,8 +5353,6 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev) static void __md_stop_writes(struct mddev *mddev)
{ {
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq); flush_workqueue(md_misc_wq);
if (mddev->sync_thread) { if (mddev->sync_thread) {
...@@ -5326,8 +5371,6 @@ static void __md_stop_writes(struct mddev *mddev) ...@@ -5326,8 +5371,6 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1; mddev->in_sync = 1;
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
} }
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
} }
void md_stop_writes(struct mddev *mddev) void md_stop_writes(struct mddev *mddev)
...@@ -6015,9 +6058,6 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) ...@@ -6015,9 +6058,6 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
md_new_event(mddev); md_new_event(mddev);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
return 0; return 0;
busy: busy:
if (mddev_is_clustered(mddev)) if (mddev_is_clustered(mddev))
...@@ -6073,14 +6113,12 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) ...@@ -6073,14 +6113,12 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
goto abort_export; goto abort_export;
} }
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags); clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1; rdev->desc_nr = -1;
rdev->saved_raid_disk = -1; rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev); err = bind_rdev_to_array(rdev, mddev);
if (err) if (err)
goto abort_clustered; goto abort_export;
/* /*
* The rest should better be atomic, we can have disk failures * The rest should better be atomic, we can have disk failures
...@@ -6090,9 +6128,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) ...@@ -6090,9 +6128,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
rdev->raid_disk = -1; rdev->raid_disk = -1;
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
/* /*
* Kick recovery, maybe this spare has to be added to the * Kick recovery, maybe this spare has to be added to the
* array immediately. * array immediately.
...@@ -6102,9 +6137,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) ...@@ -6102,9 +6137,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_new_event(mddev); md_new_event(mddev);
return 0; return 0;
abort_clustered:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
abort_export: abort_export:
export_rdev(rdev); export_rdev(rdev);
return err; return err;
...@@ -6422,8 +6454,6 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) ...@@ -6422,8 +6454,6 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
return rv; return rv;
} }
} }
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2); rv = update_size(mddev, (sector_t)info->size * 2);
...@@ -6481,12 +6511,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) ...@@ -6481,12 +6511,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
} }
} }
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
return rv; return rv;
err: err:
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_cancel(mddev);
return rv; return rv;
} }
...@@ -7599,11 +7625,7 @@ int md_allow_write(struct mddev *mddev) ...@@ -7599,11 +7625,7 @@ int md_allow_write(struct mddev *mddev)
mddev->safemode == 0) mddev->safemode == 0)
mddev->safemode = 1; mddev->safemode = 1;
spin_unlock(&mddev->lock); spin_unlock(&mddev->lock);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0); md_update_sb(mddev, 0);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state); sysfs_notify_dirent_safe(mddev->sysfs_state);
} else } else
spin_unlock(&mddev->lock); spin_unlock(&mddev->lock);
...@@ -8182,13 +8204,8 @@ void md_check_recovery(struct mddev *mddev) ...@@ -8182,13 +8204,8 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state); sysfs_notify_dirent_safe(mddev->sysfs_state);
} }
if (mddev->flags & MD_UPDATE_SB_FLAGS) { if (mddev->flags & MD_UPDATE_SB_FLAGS)
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0); md_update_sb(mddev, 0);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
...@@ -8286,8 +8303,6 @@ void md_reap_sync_thread(struct mddev *mddev) ...@@ -8286,8 +8303,6 @@ void md_reap_sync_thread(struct mddev *mddev)
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags);
} }
} }
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape) mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev); mddev->pers->finish_reshape(mddev);
...@@ -8300,8 +8315,6 @@ void md_reap_sync_thread(struct mddev *mddev) ...@@ -8300,8 +8315,6 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1; rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
if (mddev_is_clustered(mddev))
md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery); clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment