Commit fe6a19d4 authored by Yu Kuai, committed by Song Liu

md/md-bitmap: merge md_bitmap_start_sync() into bitmap_operations

So that the implementation won't be exposed, and it'll be possible
to invent a new bitmap by replacing bitmap_operations.

Also change the parameter from bitmap to mddev, to avoid accessing the
bitmap outside md-bitmap.c as much as possible.

Also fix lots of code style.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20240826074452.1490072-26-yukuai1@huaweicloud.com
Signed-off-by: Song Liu <song@kernel.org>
parent 3486015f
...@@ -1579,24 +1579,26 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset, ...@@ -1579,24 +1579,26 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
} }
} }
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
int degraded) sector_t *blocks, bool degraded)
{ {
bitmap_counter_t *bmc; bitmap_counter_t *bmc;
int rv; bool rv;
if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
*blocks = 1024; *blocks = 1024;
return 1; /* always resync if no bitmap */ return true; /* always resync if no bitmap */
} }
spin_lock_irq(&bitmap->counts.lock); spin_lock_irq(&bitmap->counts.lock);
rv = false;
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
rv = 0;
if (bmc) { if (bmc) {
/* locked */ /* locked */
if (RESYNC(*bmc)) if (RESYNC(*bmc)) {
rv = 1; rv = true;
else if (NEEDED(*bmc)) { } else if (NEEDED(*bmc)) {
rv = 1; rv = true;
if (!degraded) { /* don't set/clear bits if degraded */ if (!degraded) { /* don't set/clear bits if degraded */
*bmc |= RESYNC_MASK; *bmc |= RESYNC_MASK;
*bmc &= ~NEEDED_MASK; *bmc &= ~NEEDED_MASK;
...@@ -1604,11 +1606,12 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t ...@@ -1604,11 +1606,12 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
} }
} }
spin_unlock_irq(&bitmap->counts.lock); spin_unlock_irq(&bitmap->counts.lock);
return rv; return rv;
} }
int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, static bool bitmap_start_sync(struct mddev *mddev, sector_t offset,
int degraded) sector_t *blocks, bool degraded)
{ {
/* bitmap_start_sync must always report on multiples of whole /* bitmap_start_sync must always report on multiples of whole
* pages, otherwise resync (which is very PAGE_SIZE based) will * pages, otherwise resync (which is very PAGE_SIZE based) will
...@@ -1617,19 +1620,19 @@ int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *block ...@@ -1617,19 +1620,19 @@ int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *block
* At least PAGE_SIZE>>9 blocks are covered. * At least PAGE_SIZE>>9 blocks are covered.
* Return the 'or' of the result. * Return the 'or' of the result.
*/ */
int rv = 0; bool rv = false;
sector_t blocks1; sector_t blocks1;
*blocks = 0; *blocks = 0;
while (*blocks < (PAGE_SIZE>>9)) { while (*blocks < (PAGE_SIZE>>9)) {
rv |= __bitmap_start_sync(bitmap, offset, rv |= __bitmap_start_sync(mddev->bitmap, offset,
&blocks1, degraded); &blocks1, degraded);
offset += blocks1; offset += blocks1;
*blocks += blocks1; *blocks += blocks1;
} }
return rv; return rv;
} }
EXPORT_SYMBOL(md_bitmap_start_sync);
void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{ {
...@@ -1723,7 +1726,7 @@ void md_bitmap_sync_with_cluster(struct mddev *mddev, ...@@ -1723,7 +1726,7 @@ void md_bitmap_sync_with_cluster(struct mddev *mddev,
WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n"); WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
for (sector = old_hi; sector < new_hi; ) { for (sector = old_hi; sector < new_hi; ) {
md_bitmap_start_sync(bitmap, sector, &blocks, 0); bitmap_start_sync(mddev, sector, &blocks, false);
sector += blocks; sector += blocks;
} }
WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n"); WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
...@@ -2005,7 +2008,7 @@ static int bitmap_load(struct mddev *mddev) ...@@ -2005,7 +2008,7 @@ static int bitmap_load(struct mddev *mddev)
*/ */
while (sector < mddev->resync_max_sectors) { while (sector < mddev->resync_max_sectors) {
sector_t blocks; sector_t blocks;
md_bitmap_start_sync(bitmap, sector, &blocks, 0); bitmap_start_sync(mddev, sector, &blocks, false);
sector += blocks; sector += blocks;
} }
md_bitmap_close_sync(bitmap); md_bitmap_close_sync(bitmap);
...@@ -2734,6 +2737,7 @@ static struct bitmap_operations bitmap_ops = { ...@@ -2734,6 +2737,7 @@ static struct bitmap_operations bitmap_ops = {
.startwrite = bitmap_startwrite, .startwrite = bitmap_startwrite,
.endwrite = bitmap_endwrite, .endwrite = bitmap_endwrite,
.start_sync = bitmap_start_sync,
.update_sb = bitmap_update_sb, .update_sb = bitmap_update_sb,
.get_stats = bitmap_get_stats, .get_stats = bitmap_get_stats,
......
...@@ -259,6 +259,8 @@ struct bitmap_operations { ...@@ -259,6 +259,8 @@ struct bitmap_operations {
unsigned long sectors, bool behind); unsigned long sectors, bool behind);
void (*endwrite)(struct mddev *mddev, sector_t offset, void (*endwrite)(struct mddev *mddev, sector_t offset,
unsigned long sectors, bool success, bool behind); unsigned long sectors, bool success, bool behind);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*update_sb)(struct bitmap *bitmap); void (*update_sb)(struct bitmap *bitmap);
int (*get_stats)(struct bitmap *bitmap, struct md_bitmap_stats *stats); int (*get_stats)(struct bitmap *bitmap, struct md_bitmap_stats *stats);
...@@ -268,7 +270,6 @@ struct bitmap_operations { ...@@ -268,7 +270,6 @@ struct bitmap_operations {
void mddev_set_bitmap_ops(struct mddev *mddev); void mddev_set_bitmap_ops(struct mddev *mddev);
/* these are exported */ /* these are exported */
int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted); void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void md_bitmap_close_sync(struct bitmap *bitmap); void md_bitmap_close_sync(struct bitmap *bitmap);
void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force); void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
......
...@@ -2755,7 +2755,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2755,7 +2755,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int wonly = -1; int wonly = -1;
int write_targets = 0, read_targets = 0; int write_targets = 0, read_targets = 0;
sector_t sync_blocks; sector_t sync_blocks;
int still_degraded = 0; bool still_degraded = false;
int good_sectors = RESYNC_SECTORS; int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */ int min_bad = 0; /* number of sectors that are bad in all devices */
int idx = sector_to_idx(sector_nr); int idx = sector_to_idx(sector_nr);
...@@ -2797,7 +2797,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2797,7 +2797,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks.. /* before building a request, check if we can skip these blocks..
* This call the bitmap_start_sync doesn't actually record anything * This call the bitmap_start_sync doesn't actually record anything
*/ */
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */ /* We can skip this block, and probably several more */
*skipped = 1; *skipped = 1;
...@@ -2848,7 +2848,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2848,7 +2848,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (rdev == NULL || if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) { test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks) if (i < conf->raid_disks)
still_degraded = 1; still_degraded = true;
} else if (!test_bit(In_sync, &rdev->flags)) { } else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_opf = REQ_OP_WRITE; bio->bi_opf = REQ_OP_WRITE;
bio->bi_end_io = end_sync_write; bio->bi_end_io = end_sync_write;
...@@ -2972,7 +2972,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2972,7 +2972,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0) if (len == 0)
break; break;
if (sync_blocks == 0) { if (sync_blocks == 0) {
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
&sync_blocks, still_degraded) && &sync_blocks, still_degraded) &&
!conf->fullsync && !conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
......
...@@ -3289,10 +3289,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3289,10 +3289,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio = NULL; r10_bio = NULL;
for (i = 0 ; i < conf->geo.raid_disks; i++) { for (i = 0 ; i < conf->geo.raid_disks; i++) {
int still_degraded; bool still_degraded;
struct r10bio *rb2; struct r10bio *rb2;
sector_t sect; sector_t sect;
int must_sync; bool must_sync;
int any_working; int any_working;
struct raid10_info *mirror = &conf->mirrors[i]; struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace; struct md_rdev *mrdev, *mreplace;
...@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!mrdev && !mreplace) if (!mrdev && !mreplace)
continue; continue;
still_degraded = 0; still_degraded = false;
/* want to reconstruct this device */ /* want to reconstruct this device */
rb2 = r10_bio; rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i); sect = raid10_find_virt(conf, sector_nr, i);
...@@ -3322,8 +3322,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3322,8 +3322,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in * we only need to recover the block if it is set in
* the bitmap * the bitmap
*/ */
must_sync = md_bitmap_start_sync(mddev->bitmap, sect, must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
&sync_blocks, 1); &sync_blocks,
true);
if (sync_blocks < max_sync) if (sync_blocks < max_sync)
max_sync = sync_blocks; max_sync = sync_blocks;
if (!must_sync && if (!must_sync &&
...@@ -3361,12 +3362,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3361,12 +3362,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
struct md_rdev *rdev = conf->mirrors[j].rdev; struct md_rdev *rdev = conf->mirrors[j].rdev;
if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
still_degraded = 1; still_degraded = false;
break; break;
} }
} }
must_sync = md_bitmap_start_sync(mddev->bitmap, sect, must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
&sync_blocks, still_degraded); &sync_blocks, still_degraded);
any_working = 0; any_working = 0;
...@@ -3544,8 +3545,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3544,8 +3545,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mddev_is_clustered(mddev) && mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
&sync_blocks, mddev->degraded) && &sync_blocks,
mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) { &mddev->recovery)) {
/* We can skip this block */ /* We can skip this block */
......
...@@ -6485,7 +6485,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6485,7 +6485,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
struct r5conf *conf = mddev->private; struct r5conf *conf = mddev->private;
struct stripe_head *sh; struct stripe_head *sh;
sector_t sync_blocks; sector_t sync_blocks;
int still_degraded = 0; bool still_degraded = false;
int i; int i;
if (sector_nr >= max_sector) { if (sector_nr >= max_sector) {
...@@ -6530,7 +6530,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6530,7 +6530,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
} }
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync && !conf->fullsync &&
!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
true) &&
sync_blocks >= RAID5_STRIPE_SECTORS(conf)) { sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
/* we can skip this block, and probably more */ /* we can skip this block, and probably more */
do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf)); do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
...@@ -6558,10 +6559,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6558,10 +6559,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
struct md_rdev *rdev = conf->disks[i].rdev; struct md_rdev *rdev = conf->disks[i].rdev;
if (rdev == NULL || test_bit(Faulty, &rdev->flags)) if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1; still_degraded = true;
} }
md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state); set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment