Commit 4367af55 authored by NeilBrown

md/raid1: clear bad-block record when write succeeds.

If we succeed in writing to a block that was recorded as
being bad, we clear the bad-block record.

This requires some delayed handling as the bad-block-list update has
to happen in process-context.

Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Namhyung Kim <namhyung@gmail.com>
parent 1f68f0c4
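
To make the deferral explicit, here is a minimal user-space sketch of the pattern this patch introduces. It is an illustration only, under the assumption that the write-completion callback (interrupt context in md/raid1) may merely record the success, while a separate handler (the role raid1d plays) clears the bad-block records from process context. The fake_* names, NDISKS and struct fake_rbio are invented for the example and do not exist in the kernel; only IO_BLOCKED, IO_MADE_GOOD and BIO_SPECIAL mirror the definitions added in the header hunk at the end of this diff.

/*
 * Sketch only: write_completed() simulates "interrupt context" recording a
 * successful write over a known-bad block; retry_handler() simulates
 * "process context" clearing the bad-block record afterwards.
 */
#include <stdio.h>

#define NDISKS 2

/* Sentinel pointers stored in the per-disk bio slots, as in the header hunk below. */
#define IO_BLOCKED   ((void *)1)
#define IO_MADE_GOOD ((void *)2)
#define BIO_SPECIAL(p) ((unsigned long)(p) <= 2)

struct fake_rbio {
    long sector;
    int sectors;
    int made_good;          /* stands in for the R1BIO_MadeGood state bit */
    void *bios[NDISKS];     /* real bios or one of the sentinel values */
};

/* Stand-in for rdev_clear_badblocks(); only legal in "process context". */
static void fake_clear_badblocks(int disk, long sector, int sectors)
{
    printf("disk %d: clear bad-block record at %ld+%d\n",
           disk, sector, sectors);
}

/* "Interrupt context": record the success, touch no bad-block list. */
static void write_completed(struct fake_rbio *rb, int disk, int was_bad)
{
    if (was_bad) {
        rb->bios[disk] = IO_MADE_GOOD;
        rb->made_good = 1;
    } else {
        rb->bios[disk] = NULL;
    }
}

/* "Process context": walk the slots and clear the recorded bad blocks. */
static void retry_handler(struct fake_rbio *rb)
{
    for (int d = 0; d < NDISKS; d++)
        if (rb->bios[d] == IO_MADE_GOOD)
            fake_clear_badblocks(d, rb->sector, rb->sectors);
}

int main(void)
{
    struct fake_rbio rb = { .sector = 1024, .sectors = 8 };

    write_completed(&rb, 0, 1);     /* disk 0 had a bad-block record */
    write_completed(&rb, 1, 0);     /* disk 1 did not */
    if (rb.made_good)
        retry_handler(&rb);         /* deferred work, as raid1d does */
    return 0;
}
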
@@ -163,7 +163,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct bio **bio = r1_bio->bios + i;
-		if (*bio && *bio != IO_BLOCKED)
+		if (!BIO_SPECIAL(*bio))
 			bio_put(*bio);
 		*bio = NULL;
 	}
@@ -337,7 +337,10 @@ static void r1_bio_write_done(r1bio_t *r1_bio)
 				!test_bit(R1BIO_Degraded, &r1_bio->state),
 				test_bit(R1BIO_BehindIO, &r1_bio->state));
 		md_write_end(r1_bio->mddev);
-		raid_end_bio_io(r1_bio);
+		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
+			reschedule_retry(r1_bio);
+		else
+			raid_end_bio_io(r1_bio);
 	}
 }
@@ -363,7 +366,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
 		md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
 		/* an I/O failed, we can't clear the bitmap */
 		set_bit(R1BIO_Degraded, &r1_bio->state);
-	} else
+	} else {
 		/*
 		 * Set R1BIO_Uptodate in our master bio, so that we
 		 * will return a good error code for to the higher
@@ -374,8 +377,20 @@ static void raid1_end_write_request(struct bio *bio, int error)
 		 * to user-side. So if something waits for IO, then it
 		 * will wait for the 'master' bio.
 		 */
+		sector_t first_bad;
+		int bad_sectors;
+
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 
+		/* Maybe we can clear some bad blocks. */
+		if (is_badblock(conf->mirrors[mirror].rdev,
+				r1_bio->sector, r1_bio->sectors,
+				&first_bad, &bad_sectors)) {
+			r1_bio->bios[mirror] = IO_MADE_GOOD;
+			set_bit(R1BIO_MadeGood, &r1_bio->state);
+		}
+	}
 
 	update_head_pos(mirror, r1_bio);
 
 	if (behind) {
@@ -402,7 +417,9 @@ static void raid1_end_write_request(struct bio *bio, int error)
 			}
 		}
 	}
-	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+	if (r1_bio->bios[mirror] == NULL)
+		rdev_dec_pending(conf->mirrors[mirror].rdev,
+				 conf->mddev);
 
 	/*
 	 * Let's see if all mirrored write operations have finished
@@ -1340,6 +1357,8 @@ static void end_sync_write(struct bio *bio, int error)
 	conf_t *conf = mddev->private;
 	int i;
 	int mirror=0;
+	sector_t first_bad;
+	int bad_sectors;
 
 	for (i = 0; i < conf->raid_disks; i++)
 		if (r1_bio->bios[i] == bio) {
@@ -1358,14 +1377,22 @@ static void end_sync_write(struct bio *bio, int error)
 			sectors_to_go -= sync_blocks;
 		} while (sectors_to_go > 0);
 		md_error(mddev, conf->mirrors[mirror].rdev);
-	}
+	} else if (is_badblock(conf->mirrors[mirror].rdev,
+			       r1_bio->sector,
+			       r1_bio->sectors,
+			       &first_bad, &bad_sectors))
+		set_bit(R1BIO_MadeGood, &r1_bio->state);
 
 	update_head_pos(mirror, r1_bio);
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		sector_t s = r1_bio->sectors;
-		put_buf(r1_bio);
-		md_done_sync(mddev, s, uptodate);
+		int s = r1_bio->sectors;
+		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
+			reschedule_retry(r1_bio);
+		else {
+			put_buf(r1_bio);
+			md_done_sync(mddev, s, uptodate);
+		}
 	}
 }
@@ -1727,9 +1754,39 @@ static void raid1d(mddev_t *mddev)
 		mddev = r1_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R1BIO_IsSync, &r1_bio->state))
-			sync_request_write(mddev, r1_bio);
-		else if (test_bit(R1BIO_ReadError, &r1_bio->state)) {
+		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+			if (test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+				int m;
+				int s = r1_bio->sectors;
+				for (m = 0; m < conf->raid_disks ; m++) {
+					struct bio *bio = r1_bio->bios[m];
+					if (bio->bi_end_io != NULL &&
+					    test_bit(BIO_UPTODATE,
+						     &bio->bi_flags)) {
+						rdev = conf->mirrors[m].rdev;
+						rdev_clear_badblocks(
+							rdev,
+							r1_bio->sector,
+							r1_bio->sectors);
+					}
+				}
+				put_buf(r1_bio);
+				md_done_sync(mddev, s, 1);
+			} else
+				sync_request_write(mddev, r1_bio);
+		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+			int m;
+			for (m = 0; m < conf->raid_disks ; m++)
+				if (r1_bio->bios[m] == IO_MADE_GOOD) {
+					rdev = conf->mirrors[m].rdev;
+					rdev_clear_badblocks(
+						rdev,
+						r1_bio->sector,
+						r1_bio->sectors);
+					rdev_dec_pending(rdev, mddev);
+				}
+			raid_end_bio_io(r1_bio);
+		} else if (test_bit(R1BIO_ReadError, &r1_bio->state)) {
 			int disk;
 			int max_sectors;
...
@@ -116,7 +116,14 @@ struct r1bio_s {
  * correct the read error. To keep track of bad blocks on a per-bio
  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  */
-#define IO_BLOCKED ((struct bio*)1)
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting bios[n] to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
 
 /* bits for r1bio.state */
 #define R1BIO_Uptodate	0
@@ -135,6 +142,10 @@ struct r1bio_s {
  * Record that bi_end_io was called with this flag...
  */
 #define	R1BIO_Returned 6
+/* If a write for this request means we can clear some
+ * known-bad-block records, we set this flag
+ */
+#define	R1BIO_MadeGood 7
 
 extern int md_raid1_congested(mddev_t *mddev, int bits);
...