Commit 06d91a5f authored by NeilBrown, committed by Linus Torvalds

[PATCH] md: improve locking on 'safemode' and move superblock writes

When md marks the superblock dirty before a write, it calls
generic_make_request (to write the superblock) from within
generic_make_request (to write the first dirty block), which could cause
problems later.

With this patch, the superblock write is always done by the helper thread, and
write requests are delayed until that write completes.

Also, the locking around marking the array dirty and writing the superblock is
improved to avoid possible races.
Signed-off-by: Neil Brown <neilb@cse.unsw.edu.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fca4d848
...@@ -218,6 +218,8 @@ static mddev_t * mddev_find(dev_t unit) ...@@ -218,6 +218,8 @@ static mddev_t * mddev_find(dev_t unit)
INIT_LIST_HEAD(&new->all_mddevs); INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer); init_timer(&new->safemode_timer);
atomic_set(&new->active, 1); atomic_set(&new->active, 1);
bio_list_init(&new->write_list);
spin_lock_init(&new->write_lock);
new->queue = blk_alloc_queue(GFP_KERNEL); new->queue = blk_alloc_queue(GFP_KERNEL);
if (!new->queue) { if (!new->queue) {
...@@ -1251,9 +1253,11 @@ static void md_update_sb(mddev_t * mddev) ...@@ -1251,9 +1253,11 @@ static void md_update_sb(mddev_t * mddev)
int err, count = 100; int err, count = 100;
struct list_head *tmp; struct list_head *tmp;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
int sync_req;
mddev->sb_dirty = 0;
repeat: repeat:
spin_lock(&mddev->write_lock);
sync_req = mddev->in_sync;
mddev->utime = get_seconds(); mddev->utime = get_seconds();
mddev->events ++; mddev->events ++;
...@@ -1272,8 +1276,12 @@ static void md_update_sb(mddev_t * mddev) ...@@ -1272,8 +1276,12 @@ static void md_update_sb(mddev_t * mddev)
* do not write anything to disk if using * do not write anything to disk if using
* nonpersistent superblocks * nonpersistent superblocks
*/ */
if (!mddev->persistent) if (!mddev->persistent) {
mddev->sb_dirty = 0;
spin_unlock(&mddev->write_lock);
return; return;
}
spin_unlock(&mddev->write_lock);
dprintk(KERN_INFO dprintk(KERN_INFO
"md: updating %s RAID superblock on device (in sync %d)\n", "md: updating %s RAID superblock on device (in sync %d)\n",
...@@ -1304,6 +1312,15 @@ static void md_update_sb(mddev_t * mddev) ...@@ -1304,6 +1312,15 @@ static void md_update_sb(mddev_t * mddev)
printk(KERN_ERR \ printk(KERN_ERR \
"md: excessive errors occurred during superblock update, exiting\n"); "md: excessive errors occurred during superblock update, exiting\n");
} }
spin_lock(&mddev->write_lock);
if (mddev->in_sync != sync_req) {
/* have to write it out again */
spin_unlock(&mddev->write_lock);
goto repeat;
}
mddev->sb_dirty = 0;
spin_unlock(&mddev->write_lock);
} }
/* /*
...@@ -3178,19 +3195,31 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok) ...@@ -3178,19 +3195,31 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
} }
void md_write_start(mddev_t *mddev) /* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, queue bi for later writing
* and return 0, else return 1 and it will be written now
*/
int md_write_start(mddev_t *mddev, struct bio *bi)
{ {
if (!atomic_read(&mddev->writes_pending)) { if (bio_data_dir(bi) != WRITE)
mddev_lock_uninterruptible(mddev); return 1;
if (mddev->in_sync) {
mddev->in_sync = 0; atomic_inc(&mddev->writes_pending);
del_timer(&mddev->safemode_timer); spin_lock(&mddev->write_lock);
md_update_sb(mddev); if (mddev->in_sync == 0 && mddev->sb_dirty == 0) {
} spin_unlock(&mddev->write_lock);
atomic_inc(&mddev->writes_pending); return 1;
mddev_unlock(mddev); }
} else bio_list_add(&mddev->write_list, bi);
atomic_inc(&mddev->writes_pending);
if (mddev->in_sync) {
mddev->in_sync = 0;
mddev->sb_dirty = 1;
}
spin_unlock(&mddev->write_lock);
md_wakeup_thread(mddev->thread);
return 0;
} }
void md_write_end(mddev_t *mddev) void md_write_end(mddev_t *mddev)
...@@ -3472,6 +3501,7 @@ void md_check_recovery(mddev_t *mddev) ...@@ -3472,6 +3501,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->sb_dirty || mddev->sb_dirty ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) || test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
mddev->write_list.head ||
(mddev->safemode == 1) || (mddev->safemode == 1) ||
(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
&& !mddev->in_sync && mddev->recovery_cp == MaxSector) && !mddev->in_sync && mddev->recovery_cp == MaxSector)
...@@ -3480,7 +3510,9 @@ void md_check_recovery(mddev_t *mddev) ...@@ -3480,7 +3510,9 @@ void md_check_recovery(mddev_t *mddev)
if (mddev_trylock(mddev)==0) { if (mddev_trylock(mddev)==0) {
int spares =0; int spares =0;
struct bio *blist;
spin_lock(&mddev->write_lock);
if (mddev->safemode && !atomic_read(&mddev->writes_pending) && if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
!mddev->in_sync && mddev->recovery_cp == MaxSector) { !mddev->in_sync && mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1; mddev->in_sync = 1;
...@@ -3488,9 +3520,22 @@ void md_check_recovery(mddev_t *mddev) ...@@ -3488,9 +3520,22 @@ void md_check_recovery(mddev_t *mddev)
} }
if (mddev->safemode == 1) if (mddev->safemode == 1)
mddev->safemode = 0; mddev->safemode = 0;
blist = bio_list_get(&mddev->write_list);
spin_unlock(&mddev->write_lock);
if (mddev->sb_dirty) if (mddev->sb_dirty)
md_update_sb(mddev); md_update_sb(mddev);
while (blist) {
struct bio *b = blist;
blist = blist->bi_next;
b->bi_next = NULL;
generic_make_request(b);
/* we already counted this, so need to un-count */
md_write_end(mddev);
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */ /* resync/recovery still happening */
......
...@@ -530,6 +530,8 @@ static int make_request(request_queue_t *q, struct bio * bio) ...@@ -530,6 +530,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
* thread has put up a bar for new requests. * thread has put up a bar for new requests.
* Continue immediately if no resync is active currently. * Continue immediately if no resync is active currently.
*/ */
if (md_write_start(mddev, bio)==0)
return 0;
spin_lock_irq(&conf->resync_lock); spin_lock_irq(&conf->resync_lock);
wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, ); wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
conf->nr_pending++; conf->nr_pending++;
...@@ -611,7 +613,7 @@ static int make_request(request_queue_t *q, struct bio * bio) ...@@ -611,7 +613,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
rcu_read_unlock(); rcu_read_unlock();
atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->remaining, 1);
md_write_start(mddev);
for (i = 0; i < disks; i++) { for (i = 0; i < disks; i++) {
struct bio *mbio; struct bio *mbio;
if (!r1_bio->bios[i]) if (!r1_bio->bios[i])
......
...@@ -700,6 +700,9 @@ static int make_request(request_queue_t *q, struct bio * bio) ...@@ -700,6 +700,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
return 0; return 0;
} }
if (md_write_start(mddev, bio) == 0)
return 0;
/* /*
* Register the new request and wait if the reconstruction * Register the new request and wait if the reconstruction
* thread has put up a bar for new requests. * thread has put up a bar for new requests.
...@@ -774,7 +777,7 @@ static int make_request(request_queue_t *q, struct bio * bio) ...@@ -774,7 +777,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
rcu_read_unlock(); rcu_read_unlock();
atomic_set(&r10_bio->remaining, 1); atomic_set(&r10_bio->remaining, 1);
md_write_start(mddev);
for (i = 0; i < conf->copies; i++) { for (i = 0; i < conf->copies; i++) {
struct bio *mbio; struct bio *mbio;
int d = r10_bio->devs[i].devnum; int d = r10_bio->devs[i].devnum;
......
...@@ -1411,6 +1411,9 @@ static int make_request (request_queue_t *q, struct bio * bi) ...@@ -1411,6 +1411,9 @@ static int make_request (request_queue_t *q, struct bio * bi)
sector_t logical_sector, last_sector; sector_t logical_sector, last_sector;
struct stripe_head *sh; struct stripe_head *sh;
if (md_write_start(mddev, bi)==0)
return 0;
if (bio_data_dir(bi)==WRITE) { if (bio_data_dir(bi)==WRITE) {
disk_stat_inc(mddev->gendisk, writes); disk_stat_inc(mddev->gendisk, writes);
disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi)); disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
...@@ -1423,8 +1426,7 @@ static int make_request (request_queue_t *q, struct bio * bi) ...@@ -1423,8 +1426,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
last_sector = bi->bi_sector + (bi->bi_size>>9); last_sector = bi->bi_sector + (bi->bi_size>>9);
bi->bi_next = NULL; bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
if ( bio_data_dir(bi) == WRITE )
md_write_start(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w); DEFINE_WAIT(w);
......
...@@ -1570,6 +1570,9 @@ static int make_request (request_queue_t *q, struct bio * bi) ...@@ -1570,6 +1570,9 @@ static int make_request (request_queue_t *q, struct bio * bi)
sector_t logical_sector, last_sector; sector_t logical_sector, last_sector;
struct stripe_head *sh; struct stripe_head *sh;
if (md_write_start(mddev, bi)==0)
return 0;
if (bio_data_dir(bi)==WRITE) { if (bio_data_dir(bi)==WRITE) {
disk_stat_inc(mddev->gendisk, writes); disk_stat_inc(mddev->gendisk, writes);
disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi)); disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
...@@ -1583,8 +1586,7 @@ static int make_request (request_queue_t *q, struct bio * bi) ...@@ -1583,8 +1586,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
bi->bi_next = NULL; bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
if ( bio_data_dir(bi) == WRITE )
md_write_start(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w); DEFINE_WAIT(w);
......
...@@ -69,7 +69,7 @@ extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), ...@@ -69,7 +69,7 @@ extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
extern void md_unregister_thread (mdk_thread_t *thread); extern void md_unregister_thread (mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev); extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev); extern int md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev); extern void md_write_end(mddev_t *mddev);
extern void md_handle_safemode(mddev_t *mddev); extern void md_handle_safemode(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
......
...@@ -15,6 +15,9 @@ ...@@ -15,6 +15,9 @@
#ifndef _MD_K_H #ifndef _MD_K_H
#define _MD_K_H #define _MD_K_H
/* and dm-bio-list.h is not under include/linux because.... ??? */
#include "../../../drivers/md/dm-bio-list.h"
#define MD_RESERVED 0UL #define MD_RESERVED 0UL
#define LINEAR 1UL #define LINEAR 1UL
#define RAID0 2UL #define RAID0 2UL
...@@ -252,6 +255,10 @@ struct mddev_s ...@@ -252,6 +255,10 @@ struct mddev_s
atomic_t recovery_active; /* blocks scheduled, but not written */ atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait; wait_queue_head_t recovery_wait;
sector_t recovery_cp; sector_t recovery_cp;
spinlock_t write_lock;
struct bio_list write_list;
unsigned int safemode; /* if set, update "clean" superblock unsigned int safemode; /* if set, update "clean" superblock
* when no writes pending. * when no writes pending.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment