Commit 7abe8493 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md

Pull md updates from Shaohua Li:
 "This mainly improves raid10 cluster and fixes some bugs:

   - raid10 cluster improvements from Guoqing

   - Memory leak fixes from Jack and Xiao

   - raid10 hang fix from Alex

   - raid5 block faulty device fix from Mariusz

   - metadata update fix from Neil

   - Invalid disk role fix from me

   - Other cleanups"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  MD: Memory leak when flush bio size is zero
  md: fix memleak for mempool
  md-cluster: remove suspend_info
  md-cluster: send BITMAP_NEEDS_SYNC message if reshaping is interrupted
  md-cluster/bitmap: don't call md_bitmap_sync_with_cluster during reshaping stage
  md-cluster/raid10: don't call remove_and_add_spares during reshaping stage
  md-cluster/raid10: call update_size in md_reap_sync_thread
  md-cluster: introduce resync_info_get interface for sanity check
  md-cluster/raid10: support add disk under grow mode
  md-cluster/raid10: resize all the bitmaps before start reshape
  MD: fix invalid stored role for a disk - try2
  md/bitmap: use mddev_suspend/resume instead of ->quiesce()
  md: remove redundant code that is no longer reachable
  md: allow metadata updates while suspending an array - fix
  MD: fix invalid stored role for a disk
  md/raid10: Fix raid10 replace hang when new added disk faulty
  raid5: block failing device if raid will be failed
parents 71f4d95b af9b926d
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2288,9 +2288,9 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 			goto out;
 		}
 		if (mddev->pers) {
-			mddev->pers->quiesce(mddev, 1);
+			mddev_suspend(mddev);
 			md_bitmap_destroy(mddev);
-			mddev->pers->quiesce(mddev, 0);
+			mddev_resume(mddev);
 		}
 		mddev->bitmap_info.offset = 0;
 		if (mddev->bitmap_info.file) {
@@ -2327,8 +2327,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 			mddev->bitmap_info.offset = offset;
 			if (mddev->pers) {
 				struct bitmap *bitmap;
-				mddev->pers->quiesce(mddev, 1);
 				bitmap = md_bitmap_create(mddev, -1);
+				mddev_suspend(mddev);
 				if (IS_ERR(bitmap))
 					rv = PTR_ERR(bitmap);
 				else {
@@ -2337,11 +2337,12 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 					if (rv)
 						mddev->bitmap_info.offset = 0;
 				}
-				mddev->pers->quiesce(mddev, 0);
 				if (rv) {
 					md_bitmap_destroy(mddev);
+					mddev_resume(mddev);
 					goto out;
 				}
+				mddev_resume(mddev);
 			}
 		}
 	}
This diff is collapsed.
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -14,6 +14,7 @@ struct md_cluster_operations {
 	int (*leave)(struct mddev *mddev);
 	int (*slot_number)(struct mddev *mddev);
 	int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+	void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi);
 	int (*metadata_update_start)(struct mddev *mddev);
 	int (*metadata_update_finish)(struct mddev *mddev);
 	void (*metadata_update_cancel)(struct mddev *mddev);
@@ -26,6 +27,7 @@ struct md_cluster_operations {
 	int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
 	void (*load_bitmaps)(struct mddev *mddev, int total_slots);
 	int (*gather_bitmaps)(struct md_rdev *rdev);
+	int (*resize_bitmaps)(struct mddev *mddev, sector_t newsize, sector_t oldsize);
 	int (*lock_all_bitmaps)(struct mddev *mddev);
 	void (*unlock_all_bitmaps)(struct mddev *mddev);
 	void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,10 +452,11 @@ static void md_end_flush(struct bio *fbio)
 	rdev_dec_pending(rdev, mddev);

 	if (atomic_dec_and_test(&fi->flush_pending)) {
-		if (bio->bi_iter.bi_size == 0)
+		if (bio->bi_iter.bi_size == 0) {
 			/* an empty barrier - all done */
 			bio_endio(bio);
-		else {
+			mempool_free(fi, mddev->flush_pool);
+		} else {
 			INIT_WORK(&fi->flush_work, submit_flushes);
 			queue_work(md_wq, &fi->flush_work);
 		}
@@ -509,10 +510,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 	rcu_read_unlock();

 	if (atomic_dec_and_test(&fi->flush_pending)) {
-		if (bio->bi_iter.bi_size == 0)
+		if (bio->bi_iter.bi_size == 0) {
 			/* an empty barrier - all done */
 			bio_endio(bio);
-		else {
+			mempool_free(fi, mddev->flush_pool);
+		} else {
 			INIT_WORK(&fi->flush_work, submit_flushes);
 			queue_work(md_wq, &fi->flush_work);
 		}
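
Both hunks above fix the same leak in two callers: fi comes from mddev->flush_pool, and the zero-size (empty-barrier) branch completed the bio without ever returning the object to the pool. A minimal userspace sketch of the bug class, with calloc/free and a plain counter standing in for the kernel's mempool and atomic_t (illustrative names, not kernel code):

/*
 * Sketch: the completion path must release the pooled flush_info on
 * *every* branch, including the empty-barrier fast path.
 */
#include <stdio.h>
#include <stdlib.h>

struct flush_info {
	int flush_pending;	/* stands in for the atomic_t */
	size_t bi_size;		/* stands in for bio->bi_iter.bi_size */
};

static void end_flush(struct flush_info *fi)
{
	if (--fi->flush_pending == 0) {
		if (fi->bi_size == 0) {
			/* an empty barrier - all done */
			puts("bio_endio(bio)");
			free(fi);	/* the added mempool_free(); the leak was skipping this */
		} else {
			puts("queue_work(md_wq, &fi->flush_work)");
			free(fi);	/* in the kernel, the submit_flushes() path frees it */
		}
	}
}

int main(void)
{
	struct flush_info *fi = calloc(1, sizeof(*fi));

	if (!fi)
		return 1;
	fi->flush_pending = 1;
	fi->bi_size = 0;	/* the case that used to leak */
	end_flush(fi);
	return 0;
}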
@@ -5904,14 +5906,6 @@ static void __md_stop(struct mddev *mddev)
 	mddev->to_remove = &md_redundancy_group;
 	module_put(pers->owner);
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-}
-
-void md_stop(struct mddev *mddev)
-{
-	/* stop the array and free an attached data structures.
-	 * This is called from dm-raid
-	 */
-	__md_stop(mddev);
 	if (mddev->flush_bio_pool) {
 		mempool_destroy(mddev->flush_bio_pool);
 		mddev->flush_bio_pool = NULL;
@@ -5920,6 +5914,14 @@ void md_stop(struct mddev *mddev)
 		mempool_destroy(mddev->flush_pool);
 		mddev->flush_pool = NULL;
 	}
+}
+
+void md_stop(struct mddev *mddev)
+{
+	/* stop the array and free an attached data structures.
+	 * This is called from dm-raid
+	 */
+	__md_stop(mddev);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
 }
@@ -8370,9 +8372,17 @@ void md_do_sync(struct md_thread *thread)
 		else if (!mddev->bitmap)
 			j = mddev->recovery_cp;

-	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 		max_sectors = mddev->resync_max_sectors;
-	else {
+		/*
+		 * If the original node aborts reshaping then we continue the
+		 * reshaping, so set j again to avoid restart reshape from the
+		 * first beginning
+		 */
+		if (mddev_is_clustered(mddev) &&
+		    mddev->reshape_position != MaxSector)
+			j = mddev->reshape_position;
+	} else {
 		/* recovery follows the physical size of devices */
 		max_sectors = mddev->dev_sectors;
 		j = MaxSector;
@@ -8623,8 +8633,10 @@ void md_do_sync(struct md_thread *thread)
 		mddev_lock_nointr(mddev);
 		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
 		mddev_unlock(mddev);
-		set_capacity(mddev->gendisk, mddev->array_sectors);
-		revalidate_disk(mddev->gendisk);
+		if (!mddev_is_clustered(mddev)) {
+			set_capacity(mddev->gendisk, mddev->array_sectors);
+			revalidate_disk(mddev->gendisk);
+		}
 	}

 	spin_lock(&mddev->lock);
@@ -8790,6 +8802,18 @@ static void md_start_sync(struct work_struct *ws)
  */
 void md_check_recovery(struct mddev *mddev)
 {
+	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+		/* Write superblock - thread that called mddev_suspend()
+		 * holds reconfig_mutex for us.
+		 */
+		set_bit(MD_UPDATING_SB, &mddev->flags);
+		smp_mb__after_atomic();
+		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+			md_update_sb(mddev, 0);
+		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+		wake_up(&mddev->sb_wait);
+	}
+
 	if (mddev->suspended)
 		return;

@@ -8949,16 +8973,6 @@ void md_check_recovery(struct mddev *mddev)
 	unlock:
 		wake_up(&mddev->sb_wait);
 		mddev_unlock(mddev);
-	} else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
-		/* Write superblock - thread that called mddev_suspend()
-		 * holds reconfig_mutex for us.
-		 */
-		set_bit(MD_UPDATING_SB, &mddev->flags);
-		smp_mb__after_atomic();
-		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
-			md_update_sb(mddev, 0);
-		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
-		wake_up(&mddev->sb_wait);
 	}
 }
 EXPORT_SYMBOL(md_check_recovery);
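
Moving this block ahead of the mddev->suspended early return ensures the pending superblock write still happens while the array is suspending: the md thread advertises MD_UPDATING_SB, issues a full barrier, and only writes if MD_ALLOW_SB_UPDATE is still set, while the suspending thread (which holds reconfig_mutex) waits on sb_wait. A rough userspace analogue of that set/fence/re-check handshake using C11 atomics (flag names are illustrative, not the kernel API):

/*
 * Sketch (userspace, C11 atomics) of the handshake in the hunk above.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int allow_sb_update;	/* ~ MD_ALLOW_SB_UPDATE, set around mddev_suspend() */
static atomic_int updating_sb;		/* ~ MD_UPDATING_SB, owned by the md thread */

static void check_recovery_side(void)
{
	if (atomic_load(&allow_sb_update)) {
		atomic_store(&updating_sb, 1);
		/* ~ smp_mb__after_atomic(): order the store before the re-check */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&allow_sb_update))	/* suspend may have ended meanwhile */
			puts("md_update_sb(mddev, 0)");
		atomic_store(&updating_sb, 0);		/* ~ clear_bit_unlock() */
		puts("wake_up(&mddev->sb_wait)");	/* waiter re-checks updating_sb */
	}
}

int main(void)
{
	atomic_store(&allow_sb_update, 1);
	check_recovery_side();
	return 0;
}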
@@ -8966,6 +8980,8 @@ EXPORT_SYMBOL(md_check_recovery);
 void md_reap_sync_thread(struct mddev *mddev)
 {
 	struct md_rdev *rdev;
+	sector_t old_dev_sectors = mddev->dev_sectors;
+	bool is_reshaped = false;

 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
@@ -8980,8 +8996,11 @@ void md_reap_sync_thread(struct mddev *mddev)
 		}
 	}
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    mddev->pers->finish_reshape)
+	    mddev->pers->finish_reshape) {
 		mddev->pers->finish_reshape(mddev);
+		if (mddev_is_clustered(mddev))
+			is_reshaped = true;
+	}

 	/* If array is no-longer degraded, then any saved_raid_disk
 	 * information must be scrapped.
@@ -9002,6 +9021,14 @@ void md_reap_sync_thread(struct mddev *mddev)
 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	/*
+	 * We call md_cluster_ops->update_size here because sync_size could
+	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
+	 * so it is time to update size across cluster.
+	 */
+	if (mddev_is_clustered(mddev) && is_reshaped
+				      && !test_bit(MD_CLOSING, &mddev->flags))
+		md_cluster_ops->update_size(mddev, old_dev_sectors);
 	wake_up(&resync_wait);
 	/* flag recovery needed just to double check */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9201,8 +9228,12 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 		}

 		if (role != rdev2->raid_disk) {
-			/* got activated */
-			if (rdev2->raid_disk == -1 && role != 0xffff) {
+			/*
+			 * got activated except reshape is happening.
+			 */
+			if (rdev2->raid_disk == -1 && role != 0xffff &&
+			    !(le32_to_cpu(sb->feature_map) &
+			      MD_FEATURE_RESHAPE_ACTIVE)) {
 				rdev2->saved_raid_disk = role;
 				ret = remove_and_add_spares(mddev, rdev2);
 				pr_info("Activated spare: %s\n",
@@ -9228,6 +9259,30 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
 		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));

+	/*
+	 * Since mddev->delta_disks has already updated in update_raid_disks,
+	 * so it is time to check reshape.
+	 */
+	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+		/*
+		 * reshape is happening in the remote node, we need to
+		 * update reshape_position and call start_reshape.
+		 */
+		mddev->reshape_position = sb->reshape_position;
+		if (mddev->pers->update_reshape_pos)
+			mddev->pers->update_reshape_pos(mddev);
+		if (mddev->pers->start_reshape)
+			mddev->pers->start_reshape(mddev);
+	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+		   mddev->reshape_position != MaxSector &&
+		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+		/* reshape is just done in another node. */
+		mddev->reshape_position = MaxSector;
+		if (mddev->pers->update_reshape_pos)
+			mddev->pers->update_reshape_pos(mddev);
+	}
+
 	/* Finally set the event to be up to date */
 	mddev->events = le64_to_cpu(sb->events);
 }
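
The hunk above lets a node follow a remote-initiated reshape purely from the incoming superblock: if MD_RESYNCING_REMOTE is set and the sb advertises MD_FEATURE_RESHAPE_ACTIVE, the node adopts the remote reshape_position and joins; if the flag has vanished while a reshape was in flight, the remote node finished and the position resets to MaxSector. The decision logic, sketched standalone (illustrative flags and types, not the kernel's):

/*
 * Sketch: the two clustered-reshape branches as a pure decision function.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SECTOR UINT64_MAX	/* ~ MaxSector, the "no reshape" value */

static void follow_remote_reshape(bool resyncing_remote, bool sb_reshape_active,
				  uint64_t sb_pos, uint64_t *reshape_pos)
{
	if (resyncing_remote && sb_reshape_active) {
		/* remote node started a reshape: adopt its position and join */
		*reshape_pos = sb_pos;
		puts("update_reshape_pos(); start_reshape()");
	} else if (resyncing_remote && *reshape_pos != MAX_SECTOR &&
		   !sb_reshape_active) {
		/* remote node finished: reset to the idle value */
		*reshape_pos = MAX_SECTOR;
		puts("update_reshape_pos()");
	}
}

int main(void)
{
	uint64_t pos = MAX_SECTOR;

	follow_remote_reshape(true, true, 12345, &pos);	/* join the reshape */
	follow_remote_reshape(true, false, 0, &pos);	/* reshape finished */
	return 0;
}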
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -557,6 +557,7 @@ struct md_personality
 	int (*check_reshape) (struct mddev *mddev);
 	int (*start_reshape) (struct mddev *mddev);
 	void (*finish_reshape) (struct mddev *mddev);
+	void (*update_reshape_pos) (struct mddev *mddev);
 	/* quiesce suspends or resumes internal processing.
 	 * 1 - stop new actions and wait for action io to complete
 	 * 0 - return to normal behaviour
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	 */
 	if (rdev->saved_raid_disk >= 0 &&
 	    rdev->saved_raid_disk >= first &&
+	    rdev->saved_raid_disk < conf->raid_disks &&
 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
 		first = last = rdev->saved_raid_disk;

--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
+#include <linux/raid/md_p.h>
 #include <trace/events/block.h>
 #include "md.h"
 #include "raid10.h"
@@ -1808,6 +1809,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		first = last = rdev->raid_disk;

 	if (rdev->saved_raid_disk >= first &&
+	    rdev->saved_raid_disk < conf->geo.raid_disks &&
 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
 		mirror = rdev->saved_raid_disk;
 	else
@@ -3079,6 +3081,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			sector_t sect;
 			int must_sync;
 			int any_working;
+			int need_recover = 0;
+			int need_replace = 0;
 			struct raid10_info *mirror = &conf->mirrors[i];
 			struct md_rdev *mrdev, *mreplace;

@@ -3086,11 +3090,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			mrdev = rcu_dereference(mirror->rdev);
 			mreplace = rcu_dereference(mirror->replacement);

-			if ((mrdev == NULL ||
-			     test_bit(Faulty, &mrdev->flags) ||
-			     test_bit(In_sync, &mrdev->flags)) &&
-			    (mreplace == NULL ||
-			     test_bit(Faulty, &mreplace->flags))) {
+			if (mrdev != NULL &&
+			    !test_bit(Faulty, &mrdev->flags) &&
+			    !test_bit(In_sync, &mrdev->flags))
+				need_recover = 1;
+			if (mreplace != NULL &&
+			    !test_bit(Faulty, &mreplace->flags))
+				need_replace = 1;
+
+			if (!need_recover && !need_replace) {
 				rcu_read_unlock();
 				continue;
 			}
@@ -3213,7 +3221,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			r10_bio->devs[1].devnum = i;
 			r10_bio->devs[1].addr = to_addr;

-			if (!test_bit(In_sync, &mrdev->flags)) {
+			if (need_recover) {
 				bio = r10_bio->devs[1].bio;
 				bio->bi_next = biolist;
 				biolist = bio;
@@ -3230,16 +3238,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio = r10_bio->devs[1].repl_bio;
 			if (bio)
 				bio->bi_end_io = NULL;
-			/* Note: if mreplace != NULL, then bio
+			/* Note: if need_replace, then bio
 			 * cannot be NULL as r10buf_pool_alloc will
 			 * have allocated it.
-			 * So the second test here is pointless.
-			 * But it keeps semantic-checkers happy, and
-			 * this comment keeps human reviewers
-			 * happy.
 			 */
-			if (mreplace == NULL || bio == NULL ||
-			    test_bit(Faulty, &mreplace->flags))
+			if (!need_replace)
 				break;
 			bio->bi_next = biolist;
 			biolist = bio;
@@ -4286,12 +4289,46 @@ static int raid10_start_reshape(struct mddev *mddev)
 	spin_unlock_irq(&conf->device_lock);

 	if (mddev->delta_disks && mddev->bitmap) {
-		ret = md_bitmap_resize(mddev->bitmap,
-				       raid10_size(mddev, 0, conf->geo.raid_disks),
-				       0, 0);
+		struct mdp_superblock_1 *sb = NULL;
+		sector_t oldsize, newsize;
+
+		oldsize = raid10_size(mddev, 0, 0);
+		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
+
+		if (!mddev_is_clustered(mddev)) {
+			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+			if (ret)
+				goto abort;
+			else
+				goto out;
+		}
+
+		rdev_for_each(rdev, mddev) {
+			if (rdev->raid_disk > -1 &&
+			    !test_bit(Faulty, &rdev->flags))
+				sb = page_address(rdev->sb_page);
+		}
+
+		/*
+		 * some node is already performing reshape, and no need to
+		 * call md_bitmap_resize again since it should be called when
+		 * receiving BITMAP_RESIZE msg
+		 */
+		if ((sb && (le32_to_cpu(sb->feature_map) &
+			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
+			goto out;
+
+		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
 		if (ret)
 			goto abort;
+
+		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+		if (ret) {
+			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
+			goto abort;
+		}
 	}
+out:
 	if (mddev->delta_disks > 0) {
 		rdev_for_each(rdev, mddev)
 			if (rdev->raid_disk < 0 &&
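
For clustered arrays, the hunk above resizes the local bitmap first and then asks the other nodes to do the same through the new resize_bitmaps op; if the cluster call fails, the local bitmap is resized back so every node stays consistent. The shape of that resize-with-rollback, as a hedged sketch (local_resize/cluster_resize are hypothetical stand-ins for md_bitmap_resize and md_cluster_ops->resize_bitmaps):

/*
 * Sketch (userspace): grow locally, broadcast, roll back on failure.
 */
#include <stdio.h>

static int local_resize(long sectors)
{
	printf("bitmap -> %ld sectors\n", sectors);
	return 0;
}

static int cluster_resize(long newsize, long oldsize)
{
	return -1;	/* simulate a remote node failing to resize */
}

static int resize_all_bitmaps(long oldsize, long newsize)
{
	int ret;

	if (oldsize == newsize)
		return 0;			/* nothing to broadcast, as in the hunk */

	ret = local_resize(newsize);		/* grow the local bitmap first */
	if (ret)
		return ret;

	ret = cluster_resize(newsize, oldsize);	/* then ask the other nodes */
	if (ret)
		local_resize(oldsize);		/* roll back so every node agrees */
	return ret;
}

int main(void)
{
	return resize_all_bitmaps(1024, 2048) ? 1 : 0;
}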
@@ -4568,6 +4605,32 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;

+	/*
+	 * Broadcast RESYNC message to other nodes, so all nodes would not
+	 * write to the region to avoid conflict.
+	 */
+	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
+		struct mdp_superblock_1 *sb = NULL;
+		int sb_reshape_pos = 0;
+
+		conf->cluster_sync_low = sector_nr;
+		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
+		sb = page_address(rdev->sb_page);
+		if (sb) {
+			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
+			/*
+			 * Set cluster_sync_low again if next address for array
+			 * reshape is less than cluster_sync_low. Since we can't
+			 * update cluster_sync_low until it has finished reshape.
+			 */
+			if (sb_reshape_pos < conf->cluster_sync_low)
+				conf->cluster_sync_low = sb_reshape_pos;
+		}
+
+		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+						   conf->cluster_sync_high);
+	}
+
 	/* Now find the locations in the new layout */
 	__raid10_find_phys(&conf->geo, r10_bio);

@@ -4719,6 +4782,19 @@ static void end_reshape(struct r10conf *conf)
 	conf->fullsync = 0;
 }

+static void raid10_update_reshape_pos(struct mddev *mddev)
+{
+	struct r10conf *conf = mddev->private;
+	sector_t lo, hi;
+
+	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
+	    || mddev->reshape_position == MaxSector)
+		conf->reshape_progress = mddev->reshape_position;
+	else
+		WARN_ON_ONCE(1);
+}
+
 static int handle_reshape_read_error(struct mddev *mddev,
 				     struct r10bio *r10_bio)
 {
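
raid10_update_reshape_pos() only trusts a reshape_position received from another node if it lies inside the resync window [lo, hi] reported by the cluster layer, or equals MaxSector (no reshape in progress); anything else trips WARN_ON_ONCE. That sanity check, standalone (uint64_t stands in for sector_t):

/*
 * Sketch: the window check from raid10_update_reshape_pos().
 */
#include <assert.h>
#include <stdint.h>

#define MAX_SECTOR UINT64_MAX	/* ~ MaxSector */

static int reshape_pos_valid(uint64_t pos, uint64_t lo, uint64_t hi)
{
	/* accept only a position inside the advertised window,
	 * or the "no reshape" idle value */
	return (pos >= lo && pos <= hi) || pos == MAX_SECTOR;
}

int main(void)
{
	assert(reshape_pos_valid(4096, 0, 8192));	/* inside the window */
	assert(reshape_pos_valid(MAX_SECTOR, 0, 0));	/* reshape finished  */
	assert(!reshape_pos_valid(9000, 0, 8192));	/* would WARN_ON_ONCE */
	return 0;
}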
@@ -4887,6 +4963,7 @@ static struct md_personality raid10_personality =
 	.check_reshape	= raid10_check_reshape,
 	.start_reshape	= raid10_start_reshape,
 	.finish_reshape	= raid10_finish_reshape,
+	.update_reshape_pos = raid10_update_reshape_pos,
 	.congested	= raid10_congested,
 };
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3151,8 +3151,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
 	return 0;

-	rcu_assign_pointer(conf->log, NULL);
-	md_unregister_thread(&log->reclaim_thread);
 reclaim_thread:
 	mempool_exit(&log->meta_pool);
 out_mempool:
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2681,6 +2681,18 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 	pr_debug("raid456: error called\n");

 	spin_lock_irqsave(&conf->device_lock, flags);
+
+	if (test_bit(In_sync, &rdev->flags) &&
+	    mddev->degraded == conf->max_degraded) {
+		/*
+		 * Don't allow to achieve failed state
+		 * Don't try to recover this device
+		 */
+		conf->recovery_disabled = mddev->recovery_disabled;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+		return;
+	}
+
 	set_bit(Faulty, &rdev->flags);
 	clear_bit(In_sync, &rdev->flags);
 	mddev->degraded = raid5_calc_degraded(conf);
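
The raid5_error() hunk refuses to mark one more in-sync device Faulty when the array is already at its redundancy limit (degraded == max_degraded: one missing device for raid4/raid5, two for raid6), since doing so would fail the whole array; instead the device is kept and recovery for it is disabled. The guard condition, sketched standalone (hypothetical struct, not the kernel's r5conf):

/*
 * Sketch: when failing an in-sync device must be blocked.
 */
#include <stdbool.h>
#include <stdio.h>

struct array_state {
	int degraded;		/* devices currently missing */
	int max_degraded;	/* 1 for raid4/raid5, 2 for raid6 */
};

static bool must_block_failure(const struct array_state *s, bool dev_in_sync)
{
	return dev_in_sync && s->degraded == s->max_degraded;
}

int main(void)
{
	struct array_state raid5 = { .degraded = 1, .max_degraded = 1 };

	if (must_block_failure(&raid5, true))
		puts("keep device In_sync, set recovery_disabled, bail out");
	return 0;
}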