Commit 8b48ec23 authored by Guoqing Jiang, committed by Song Liu

md: don't unregister sync_thread with reconfig_mutex held

Unregistering the sync_thread doesn't need to hold reconfig_mutex, since it
doesn't reconfigure the array.

Moreover, holding the mutex there can deadlock raid5 as follows:

1. Process A tried to reap the sync thread with reconfig_mutex held after
   echoing "idle" to sync_action.
2. The raid5 sync thread was blocked because there were too many active
   stripes.
3. SB_CHANGE_PENDING was set (because write IO came in from the upper
   layer), so the number of active stripes could not be decreased.
4. SB_CHANGE_PENDING could not be cleared, since md_check_recovery was
   unable to take reconfig_mutex (held by process A).

More details are in the link:
https://lore.kernel.org/linux-raid/5ed54ffc-ce82-bf66-4eff-390cb23bc1ac@molgen.mpg.de/T/#t
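
Pieced together from the call sites touched below, the cycle looks roughly
like this (a condensed sketch of the pre-patch paths, not the literal code):

  /* Process A: action_store("idle") in md.c, reconfig_mutex held via mddev_lock() */
  set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  md_reap_sync_thread(mddev);     /* waits for the raid5 sync thread to exit */
  mddev_unlock(mddev);            /* never reached */

  /* raid5 sync thread: blocked waiting for active stripes to drain, which
   * cannot happen while MD_SB_CHANGE_PENDING is set. */

  /* md_check_recovery(): would write the superblock and clear
   * MD_SB_CHANGE_PENDING, but it only does so under mddev_trylock(),
   * which fails while process A holds reconfig_mutex. */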

Also add a parameter to md_reap_sync_thread(), since it can be called from
dm-raid, which doesn't hold reconfig_mutex.
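
With the new flag, callers that hold reconfig_mutex pass true so the mutex is
dropped around the thread teardown, while dm-raid's raid_message() passes
false; condensed from the md.c hunk below:

  void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
  {
          ...
          if (reconfig_mutex_held)
                  mddev_unlock(mddev);
          /* resync has finished, collect result */
          md_unregister_thread(&mddev->sync_thread);
          if (reconfig_mutex_held)
                  mddev_lock_nointr(mddev);
          ...
  }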
Reported-and-tested-by: Donald Buczek <buczek@molgen.mpg.de>
Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Signed-off-by: Song Liu <song@kernel.org>
parent 537b9f2b
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
 	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_reap_sync_thread(mddev);
+			md_reap_sync_thread(mddev, false);
 		}
 	} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
 		return -EBUSY;
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4844,7 +4844,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 				flush_workqueue(md_misc_wq);
 			if (mddev->sync_thread) {
 				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-				md_reap_sync_thread(mddev);
+				md_reap_sync_thread(mddev, true);
 			}
 			mddev_unlock(mddev);
 		}
@@ -6213,7 +6213,7 @@ static void __md_stop_writes(struct mddev *mddev)
 	flush_workqueue(md_misc_wq);
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		md_reap_sync_thread(mddev);
+		md_reap_sync_thread(mddev, true);
 	}
 
 	del_timer_sync(&mddev->safemode_timer);
@@ -9324,7 +9324,7 @@ void md_check_recovery(struct mddev *mddev)
 		 * ->spare_active and clear saved_raid_disk
 		 */
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		md_reap_sync_thread(mddev);
+		md_reap_sync_thread(mddev, true);
 		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9359,7 +9359,7 @@ void md_check_recovery(struct mddev *mddev)
 			goto unlock;
 		}
 		if (mddev->sync_thread) {
-			md_reap_sync_thread(mddev);
+			md_reap_sync_thread(mddev, true);
 			goto unlock;
 		}
 		/* Set RUNNING before clearing NEEDED to avoid
@@ -9432,14 +9432,18 @@ void md_check_recovery(struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_check_recovery);
 
-void md_reap_sync_thread(struct mddev *mddev)
+void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
 {
 	struct md_rdev *rdev;
 	sector_t old_dev_sectors = mddev->dev_sectors;
 	bool is_reshaped = false;
 
+	if (reconfig_mutex_held)
+		mddev_unlock(mddev);
 	/* resync has finished, collect result */
 	md_unregister_thread(&mddev->sync_thread);
+	if (reconfig_mutex_held)
+		mddev_lock_nointr(mddev);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
 	    mddev->degraded != mddev->raid_disks) {
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
 extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev);
+extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
 extern int mddev_init_writes_pending(struct mddev *mddev);
 extern bool md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);