Commit e64e4018 authored by Andy Shevchenko, committed by Dmitry Torokhov

md: Avoid namespace collision with bitmap API

bitmap API (include/linux/bitmap.h) has 'bitmap' prefix for its methods.

On the other hand, the MD bitmap API is a special case.
Add an 'md' prefix to it to avoid a namespace collision.

No functional changes intended.
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
parent 5cc9cdf6
...@@ -3859,7 +3859,7 @@ static int __load_dirty_region_bitmap(struct raid_set *rs) ...@@ -3859,7 +3859,7 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
/* Try loading the bitmap unless "raid0", which does not have one */ /* Try loading the bitmap unless "raid0", which does not have one */
if (!rs_is_raid0(rs) && if (!rs_is_raid0(rs) &&
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
r = bitmap_load(&rs->md); r = md_bitmap_load(&rs->md);
if (r) if (r)
DMERR("Failed to load bitmap"); DMERR("Failed to load bitmap");
} }
...@@ -3987,8 +3987,8 @@ static int raid_preresume(struct dm_target *ti) ...@@ -3987,8 +3987,8 @@ static int raid_preresume(struct dm_target *ti)
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
to_bytes(rs->requested_bitmap_chunk_sectors), 0); to_bytes(rs->requested_bitmap_chunk_sectors), 0);
if (r) if (r)
DMERR("Failed to resize bitmap"); DMERR("Failed to resize bitmap");
} }
......
This diff is collapsed.
...@@ -236,43 +236,43 @@ struct bitmap { ...@@ -236,43 +236,43 @@ struct bitmap {
/* the bitmap API */ /* the bitmap API */
/* these are used only by md/bitmap */ /* these are used only by md/bitmap */
struct bitmap *bitmap_create(struct mddev *mddev, int slot); struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
int bitmap_load(struct mddev *mddev); int md_bitmap_load(struct mddev *mddev);
void bitmap_flush(struct mddev *mddev); void md_bitmap_flush(struct mddev *mddev);
void bitmap_destroy(struct mddev *mddev); void md_bitmap_destroy(struct mddev *mddev);
void bitmap_print_sb(struct bitmap *bitmap); void md_bitmap_print_sb(struct bitmap *bitmap);
void bitmap_update_sb(struct bitmap *bitmap); void md_bitmap_update_sb(struct bitmap *bitmap);
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap); void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
int bitmap_setallbits(struct bitmap *bitmap); int md_bitmap_setallbits(struct bitmap *bitmap);
void bitmap_write_all(struct bitmap *bitmap); void md_bitmap_write_all(struct bitmap *bitmap);
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
/* these are exported */ /* these are exported */
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int behind); unsigned long sectors, int behind);
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
unsigned long sectors, int success, int behind); unsigned long sectors, int success, int behind);
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded); int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted); void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap); void md_bitmap_close_sync(struct bitmap *bitmap);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force); void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
void bitmap_sync_with_cluster(struct mddev *mddev, void md_bitmap_sync_with_cluster(struct mddev *mddev,
sector_t old_lo, sector_t old_hi, sector_t old_lo, sector_t old_hi,
sector_t new_lo, sector_t new_hi); sector_t new_lo, sector_t new_hi);
void bitmap_unplug(struct bitmap *bitmap); void md_bitmap_unplug(struct bitmap *bitmap);
void bitmap_daemon_work(struct mddev *mddev); void md_bitmap_daemon_work(struct mddev *mddev);
int bitmap_resize(struct bitmap *bitmap, sector_t blocks, int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, int init); int chunksize, int init);
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot); struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
int bitmap_copy_from_slot(struct mddev *mddev, int slot, int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
sector_t *lo, sector_t *hi, bool clear_bits); sector_t *lo, sector_t *hi, bool clear_bits);
void bitmap_free(struct bitmap *bitmap); void md_bitmap_free(struct bitmap *bitmap);
void bitmap_wait_behind_writes(struct mddev *mddev); void md_bitmap_wait_behind_writes(struct mddev *mddev);
#endif #endif
#endif #endif
...@@ -326,7 +326,7 @@ static void recover_bitmaps(struct md_thread *thread) ...@@ -326,7 +326,7 @@ static void recover_bitmaps(struct md_thread *thread)
str, ret); str, ret);
goto clear_bit; goto clear_bit;
} }
ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true); ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
if (ret) { if (ret) {
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto clear_bit; goto clear_bit;
...@@ -480,9 +480,7 @@ static void process_suspend_info(struct mddev *mddev, ...@@ -480,9 +480,7 @@ static void process_suspend_info(struct mddev *mddev,
* resync thread is running in another node, * resync thread is running in another node,
* so we don't need to do the resync again * so we don't need to do the resync again
* with the same section */ * with the same section */
bitmap_sync_with_cluster(mddev, cinfo->sync_low, md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
cinfo->sync_hi,
lo, hi);
cinfo->sync_low = lo; cinfo->sync_low = lo;
cinfo->sync_hi = hi; cinfo->sync_hi = hi;
...@@ -829,7 +827,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) ...@@ -829,7 +827,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
} }
/* Read the disk bitmap sb and check if it needs recovery */ /* Read the disk bitmap sb and check if it needs recovery */
ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false); ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
if (ret) { if (ret) {
pr_warn("md-cluster: Could not gather bitmaps from slot %d", i); pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
lockres_free(bm_lockres); lockres_free(bm_lockres);
...@@ -1127,13 +1125,13 @@ static int cluster_check_sync_size(struct mddev *mddev) ...@@ -1127,13 +1125,13 @@ static int cluster_check_sync_size(struct mddev *mddev)
bm_lockres = lockres_init(mddev, str, NULL, 1); bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) { if (!bm_lockres) {
pr_err("md-cluster: Cannot initialize %s\n", str); pr_err("md-cluster: Cannot initialize %s\n", str);
bitmap_free(bitmap); md_bitmap_free(bitmap);
return -1; return -1;
} }
bm_lockres->flags |= DLM_LKF_NOQUEUE; bm_lockres->flags |= DLM_LKF_NOQUEUE;
rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (!rv) if (!rv)
bitmap_update_sb(bitmap); md_bitmap_update_sb(bitmap);
lockres_free(bm_lockres); lockres_free(bm_lockres);
sb = kmap_atomic(bitmap->storage.sb_page); sb = kmap_atomic(bitmap->storage.sb_page);
...@@ -1141,11 +1139,11 @@ static int cluster_check_sync_size(struct mddev *mddev) ...@@ -1141,11 +1139,11 @@ static int cluster_check_sync_size(struct mddev *mddev)
sync_size = sb->sync_size; sync_size = sb->sync_size;
else if (sync_size != sb->sync_size) { else if (sync_size != sb->sync_size) {
kunmap_atomic(sb); kunmap_atomic(sb);
bitmap_free(bitmap); md_bitmap_free(bitmap);
return -1; return -1;
} }
kunmap_atomic(sb); kunmap_atomic(sb);
bitmap_free(bitmap); md_bitmap_free(bitmap);
} }
return (my_sync_size == sync_size) ? 0 : -1; return (my_sync_size == sync_size) ? 0 : -1;
...@@ -1442,7 +1440,7 @@ static int gather_bitmaps(struct md_rdev *rdev) ...@@ -1442,7 +1440,7 @@ static int gather_bitmaps(struct md_rdev *rdev)
for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) { for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
if (sn == (cinfo->slot_number - 1)) if (sn == (cinfo->slot_number - 1))
continue; continue;
err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false); err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
if (err) { if (err) {
pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
goto out; goto out;
......
...@@ -2560,7 +2560,7 @@ void md_update_sb(struct mddev *mddev, int force_change) ...@@ -2560,7 +2560,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
if (mddev->queue) if (mddev->queue)
blk_add_trace_msg(mddev->queue, "md md_update_sb"); blk_add_trace_msg(mddev->queue, "md md_update_sb");
rewrite: rewrite:
bitmap_update_sb(mddev->bitmap); md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) { rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
...@@ -4372,10 +4372,10 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len) ...@@ -4372,10 +4372,10 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
if (buf == end) break; if (buf == end) break;
} }
if (*end && !isspace(*end)) break; if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end); buf = skip_spaces(end);
} }
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out: out:
mddev_unlock(mddev); mddev_unlock(mddev);
return len; return len;
...@@ -5588,7 +5588,7 @@ int md_run(struct mddev *mddev) ...@@ -5588,7 +5588,7 @@ int md_run(struct mddev *mddev)
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) { (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
struct bitmap *bitmap; struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1); bitmap = md_bitmap_create(mddev, -1);
if (IS_ERR(bitmap)) { if (IS_ERR(bitmap)) {
err = PTR_ERR(bitmap); err = PTR_ERR(bitmap);
pr_warn("%s: failed to create bitmap (%d)\n", pr_warn("%s: failed to create bitmap (%d)\n",
...@@ -5603,7 +5603,7 @@ int md_run(struct mddev *mddev) ...@@ -5603,7 +5603,7 @@ int md_run(struct mddev *mddev)
pers->free(mddev, mddev->private); pers->free(mddev, mddev->private);
mddev->private = NULL; mddev->private = NULL;
module_put(pers->owner); module_put(pers->owner);
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
goto abort; goto abort;
} }
if (mddev->queue) { if (mddev->queue) {
...@@ -5688,9 +5688,9 @@ static int do_md_run(struct mddev *mddev) ...@@ -5688,9 +5688,9 @@ static int do_md_run(struct mddev *mddev)
err = md_run(mddev); err = md_run(mddev);
if (err) if (err)
goto out; goto out;
err = bitmap_load(mddev); err = md_bitmap_load(mddev);
if (err) { if (err) {
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
goto out; goto out;
} }
...@@ -5832,7 +5832,7 @@ static void __md_stop_writes(struct mddev *mddev) ...@@ -5832,7 +5832,7 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0); mddev->pers->quiesce(mddev, 0);
} }
bitmap_flush(mddev); md_bitmap_flush(mddev);
if (mddev->ro == 0 && if (mddev->ro == 0 &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) || ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
...@@ -5854,7 +5854,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); ...@@ -5854,7 +5854,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev) static void mddev_detach(struct mddev *mddev)
{ {
bitmap_wait_behind_writes(mddev); md_bitmap_wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce) { if (mddev->pers && mddev->pers->quiesce) {
mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0); mddev->pers->quiesce(mddev, 0);
...@@ -5867,7 +5867,7 @@ static void mddev_detach(struct mddev *mddev) ...@@ -5867,7 +5867,7 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev) static void __md_stop(struct mddev *mddev)
{ {
struct md_personality *pers = mddev->pers; struct md_personality *pers = mddev->pers;
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
mddev_detach(mddev); mddev_detach(mddev);
/* Ensure ->event_work is done */ /* Ensure ->event_work is done */
flush_workqueue(md_misc_wq); flush_workqueue(md_misc_wq);
...@@ -6681,21 +6681,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd) ...@@ -6681,21 +6681,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (fd >= 0) { if (fd >= 0) {
struct bitmap *bitmap; struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1); bitmap = md_bitmap_create(mddev, -1);
mddev_suspend(mddev); mddev_suspend(mddev);
if (!IS_ERR(bitmap)) { if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap; mddev->bitmap = bitmap;
err = bitmap_load(mddev); err = md_bitmap_load(mddev);
} else } else
err = PTR_ERR(bitmap); err = PTR_ERR(bitmap);
if (err) { if (err) {
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
fd = -1; fd = -1;
} }
mddev_resume(mddev); mddev_resume(mddev);
} else if (fd < 0) { } else if (fd < 0) {
mddev_suspend(mddev); mddev_suspend(mddev);
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
mddev_resume(mddev); mddev_resume(mddev);
} }
} }
...@@ -6981,15 +6981,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) ...@@ -6981,15 +6981,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset; mddev->bitmap_info.default_offset;
mddev->bitmap_info.space = mddev->bitmap_info.space =
mddev->bitmap_info.default_space; mddev->bitmap_info.default_space;
bitmap = bitmap_create(mddev, -1); bitmap = md_bitmap_create(mddev, -1);
mddev_suspend(mddev); mddev_suspend(mddev);
if (!IS_ERR(bitmap)) { if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap; mddev->bitmap = bitmap;
rv = bitmap_load(mddev); rv = md_bitmap_load(mddev);
} else } else
rv = PTR_ERR(bitmap); rv = PTR_ERR(bitmap);
if (rv) if (rv)
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
mddev_resume(mddev); mddev_resume(mddev);
} else { } else {
/* remove the bitmap */ /* remove the bitmap */
...@@ -7014,7 +7014,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) ...@@ -7014,7 +7014,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
md_cluster_ops->leave(mddev); md_cluster_ops->leave(mddev);
} }
mddev_suspend(mddev); mddev_suspend(mddev);
bitmap_destroy(mddev); md_bitmap_destroy(mddev);
mddev_resume(mddev); mddev_resume(mddev);
mddev->bitmap_info.offset = 0; mddev->bitmap_info.offset = 0;
} }
...@@ -7877,7 +7877,7 @@ static int md_seq_show(struct seq_file *seq, void *v) ...@@ -7877,7 +7877,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
} else } else
seq_printf(seq, "\n "); seq_printf(seq, "\n ");
bitmap_status(seq, mddev->bitmap); md_bitmap_status(seq, mddev->bitmap);
seq_printf(seq, "\n"); seq_printf(seq, "\n");
} }
...@@ -8748,7 +8748,7 @@ void md_check_recovery(struct mddev *mddev) ...@@ -8748,7 +8748,7 @@ void md_check_recovery(struct mddev *mddev)
return; return;
if (mddev->bitmap) if (mddev->bitmap)
bitmap_daemon_work(mddev); md_bitmap_daemon_work(mddev);
if (signal_pending(current)) { if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) { if (mddev->pers->sync_request && !mddev->external) {
...@@ -8885,7 +8885,7 @@ void md_check_recovery(struct mddev *mddev) ...@@ -8885,7 +8885,7 @@ void md_check_recovery(struct mddev *mddev)
* which has the bitmap stored on all devices. * which has the bitmap stored on all devices.
* So make sure all bitmap pages get written * So make sure all bitmap pages get written
*/ */
bitmap_write_all(mddev->bitmap); md_bitmap_write_all(mddev->bitmap);
} }
INIT_WORK(&mddev->del_work, md_start_sync); INIT_WORK(&mddev->del_work, md_start_sync);
queue_work(md_misc_wq, &mddev->del_work); queue_work(md_misc_wq, &mddev->del_work);
...@@ -9133,7 +9133,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) ...@@ -9133,7 +9133,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (ret) if (ret)
pr_info("md-cluster: resize failed\n"); pr_info("md-cluster: resize failed\n");
else else
bitmap_update_sb(mddev->bitmap); md_bitmap_update_sb(mddev->bitmap);
} }
/* Check for change of roles in the active devices */ /* Check for change of roles in the active devices */
......
...@@ -385,10 +385,10 @@ static void close_write(struct r1bio *r1_bio) ...@@ -385,10 +385,10 @@ static void close_write(struct r1bio *r1_bio)
r1_bio->behind_master_bio = NULL; r1_bio->behind_master_bio = NULL;
} }
/* clear the bitmap if all writes complete successfully */ /* clear the bitmap if all writes complete successfully */
bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
r1_bio->sectors, r1_bio->sectors,
!test_bit(R1BIO_Degraded, &r1_bio->state), !test_bit(R1BIO_Degraded, &r1_bio->state),
test_bit(R1BIO_BehindIO, &r1_bio->state)); test_bit(R1BIO_BehindIO, &r1_bio->state));
md_write_end(r1_bio->mddev); md_write_end(r1_bio->mddev);
} }
...@@ -781,7 +781,7 @@ static int raid1_congested(struct mddev *mddev, int bits) ...@@ -781,7 +781,7 @@ static int raid1_congested(struct mddev *mddev, int bits)
static void flush_bio_list(struct r1conf *conf, struct bio *bio) static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{ {
/* flush any pending bitmap writes to disk before proceeding w/ I/O */ /* flush any pending bitmap writes to disk before proceeding w/ I/O */
bitmap_unplug(conf->mddev->bitmap); md_bitmap_unplug(conf->mddev->bitmap);
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
...@@ -1470,10 +1470,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1470,10 +1470,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
alloc_behind_master_bio(r1_bio, bio); alloc_behind_master_bio(r1_bio, bio);
} }
bitmap_startwrite(bitmap, r1_bio->sector, md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
r1_bio->sectors, test_bit(R1BIO_BehindIO, &r1_bio->state));
test_bit(R1BIO_BehindIO,
&r1_bio->state));
first_clone = 0; first_clone = 0;
} }
...@@ -1881,8 +1879,7 @@ static void end_sync_write(struct bio *bio) ...@@ -1881,8 +1879,7 @@ static void end_sync_write(struct bio *bio)
long sectors_to_go = r1_bio->sectors; long sectors_to_go = r1_bio->sectors;
/* make sure these bits doesn't get cleared. */ /* make sure these bits doesn't get cleared. */
do { do {
bitmap_end_sync(mddev->bitmap, s, md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
&sync_blocks, 1);
s += sync_blocks; s += sync_blocks;
sectors_to_go -= sync_blocks; sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0); } while (sectors_to_go > 0);
...@@ -2629,12 +2626,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2629,12 +2626,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* We can find the current addess in mddev->curr_resync * We can find the current addess in mddev->curr_resync
*/ */
if (mddev->curr_resync < max_sector) /* aborted */ if (mddev->curr_resync < max_sector) /* aborted */
bitmap_end_sync(mddev->bitmap, mddev->curr_resync, md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1); &sync_blocks, 1);
else /* completed sync */ else /* completed sync */
conf->fullsync = 0; conf->fullsync = 0;
bitmap_close_sync(mddev->bitmap); md_bitmap_close_sync(mddev->bitmap);
close_sync(conf); close_sync(conf);
if (mddev_is_clustered(mddev)) { if (mddev_is_clustered(mddev)) {
...@@ -2654,7 +2651,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2654,7 +2651,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks.. /* before building a request, check if we can skip these blocks..
* This call the bitmap_start_sync doesn't actually record anything * This call the bitmap_start_sync doesn't actually record anything
*/ */
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */ /* We can skip this block, and probably several more */
*skipped = 1; *skipped = 1;
...@@ -2672,7 +2669,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2672,7 +2669,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* sector_nr + two times RESYNC_SECTORS * sector_nr + two times RESYNC_SECTORS
*/ */
bitmap_cond_end_sync(mddev->bitmap, sector_nr, md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
...@@ -2831,8 +2828,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2831,8 +2828,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0) if (len == 0)
break; break;
if (sync_blocks == 0) { if (sync_blocks == 0) {
if (!bitmap_start_sync(mddev->bitmap, sector_nr, if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, still_degraded) && &sync_blocks, still_degraded) &&
!conf->fullsync && !conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break; break;
...@@ -3171,7 +3168,7 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) ...@@ -3171,7 +3168,7 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize) mddev->array_sectors > newsize)
return -EINVAL; return -EINVAL;
if (mddev->bitmap) { if (mddev->bitmap) {
int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -438,10 +438,10 @@ static void raid10_end_read_request(struct bio *bio) ...@@ -438,10 +438,10 @@ static void raid10_end_read_request(struct bio *bio)
static void close_write(struct r10bio *r10_bio) static void close_write(struct r10bio *r10_bio)
{ {
/* clear the bitmap if all writes complete successfully */ /* clear the bitmap if all writes complete successfully */
bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
r10_bio->sectors, r10_bio->sectors,
!test_bit(R10BIO_Degraded, &r10_bio->state), !test_bit(R10BIO_Degraded, &r10_bio->state),
0); 0);
md_write_end(r10_bio->mddev); md_write_end(r10_bio->mddev);
} }
...@@ -915,7 +915,7 @@ static void flush_pending_writes(struct r10conf *conf) ...@@ -915,7 +915,7 @@ static void flush_pending_writes(struct r10conf *conf)
blk_start_plug(&plug); blk_start_plug(&plug);
/* flush any pending bitmap writes to disk /* flush any pending bitmap writes to disk
* before proceeding w/ I/O */ * before proceeding w/ I/O */
bitmap_unplug(conf->mddev->bitmap); md_bitmap_unplug(conf->mddev->bitmap);
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
...@@ -1100,7 +1100,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) ...@@ -1100,7 +1100,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* we aren't scheduling, so we can do the write-out directly. */ /* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending); bio = bio_list_get(&plug->pending);
bitmap_unplug(mddev->bitmap); md_bitmap_unplug(mddev->bitmap);
wake_up(&conf->wait_barrier); wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
...@@ -1517,7 +1517,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1517,7 +1517,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
} }
atomic_set(&r10_bio->remaining, 1); atomic_set(&r10_bio->remaining, 1);
bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
for (i = 0; i < conf->copies; i++) { for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio) if (r10_bio->devs[i].bio)
...@@ -2990,13 +2990,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -2990,13 +2990,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mddev->curr_resync < max_sector) { /* aborted */ if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
bitmap_end_sync(mddev->bitmap, mddev->curr_resync, md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1); &sync_blocks, 1);
else for (i = 0; i < conf->geo.raid_disks; i++) { else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect = sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i); raid10_find_virt(conf, mddev->curr_resync, i);
bitmap_end_sync(mddev->bitmap, sect, md_bitmap_end_sync(mddev->bitmap, sect,
&sync_blocks, 1); &sync_blocks, 1);
} }
} else { } else {
/* completed sync */ /* completed sync */
...@@ -3017,7 +3017,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3017,7 +3017,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} }
conf->fullsync = 0; conf->fullsync = 0;
} }
bitmap_close_sync(mddev->bitmap); md_bitmap_close_sync(mddev->bitmap);
close_sync(conf); close_sync(conf);
*skipped = 1; *skipped = 1;
return sectors_skipped; return sectors_skipped;
...@@ -3111,8 +3111,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3111,8 +3111,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in * we only need to recover the block if it is set in
* the bitmap * the bitmap
*/ */
must_sync = bitmap_start_sync(mddev->bitmap, sect, must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, 1); &sync_blocks, 1);
if (sync_blocks < max_sync) if (sync_blocks < max_sync)
max_sync = sync_blocks; max_sync = sync_blocks;
if (!must_sync && if (!must_sync &&
...@@ -3157,8 +3157,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3157,8 +3157,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} }
} }
must_sync = bitmap_start_sync(mddev->bitmap, sect, must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, still_degraded); &sync_blocks, still_degraded);
any_working = 0; any_working = 0;
for (j=0; j<conf->copies;j++) { for (j=0; j<conf->copies;j++) {
...@@ -3334,13 +3334,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, ...@@ -3334,13 +3334,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is * safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync. * updated in bitmap_cond_end_sync.
*/ */
bitmap_cond_end_sync(mddev->bitmap, sector_nr, md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) && mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
conf->cluster_sync_high));
if (!bitmap_start_sync(mddev->bitmap, sector_nr, if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) && &sync_blocks, mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) { &mddev->recovery)) {
/* We can skip this block */ /* We can skip this block */
...@@ -4015,7 +4014,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) ...@@ -4015,7 +4014,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > size) mddev->array_sectors > size)
return -EINVAL; return -EINVAL;
if (mddev->bitmap) { if (mddev->bitmap) {
int ret = bitmap_resize(mddev->bitmap, size, 0, 0); int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -4281,10 +4280,9 @@ static int raid10_start_reshape(struct mddev *mddev) ...@@ -4281,10 +4280,9 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) { if (mddev->delta_disks && mddev->bitmap) {
ret = bitmap_resize(mddev->bitmap, ret = md_bitmap_resize(mddev->bitmap,
raid10_size(mddev, 0, raid10_size(mddev, 0, conf->geo.raid_disks),
conf->geo.raid_disks), 0, 0);
0, 0);
if (ret) if (ret)
goto abort; goto abort;
} }
......
...@@ -324,10 +324,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf, ...@@ -324,10 +324,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) { if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags); set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]); r5c_return_dev_pending_writes(conf, &sh->dev[i]);
bitmap_endwrite(conf->mddev->bitmap, sh->sector, md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state), !test_bit(STRIPE_DEGRADED, &sh->state),
0); 0);
} }
} }
} }
......
...@@ -3295,8 +3295,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, ...@@ -3295,8 +3295,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
*/ */
set_bit(STRIPE_BITMAP_PENDING, &sh->state); set_bit(STRIPE_BITMAP_PENDING, &sh->state);
spin_unlock_irq(&sh->stripe_lock); spin_unlock_irq(&sh->stripe_lock);
bitmap_startwrite(conf->mddev->bitmap, sh->sector, md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0); STRIPE_SECTORS, 0);
spin_lock_irq(&sh->stripe_lock); spin_lock_irq(&sh->stripe_lock);
clear_bit(STRIPE_BITMAP_PENDING, &sh->state); clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
if (!sh->batch_head) { if (!sh->batch_head) {
...@@ -3386,8 +3386,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, ...@@ -3386,8 +3386,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi; bi = nextbi;
} }
if (bitmap_end) if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector, md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0); STRIPE_SECTORS, 0, 0);
bitmap_end = 0; bitmap_end = 0;
/* and fail all 'written' */ /* and fail all 'written' */
bi = sh->dev[i].written; bi = sh->dev[i].written;
...@@ -3432,8 +3432,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, ...@@ -3432,8 +3432,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
} }
} }
if (bitmap_end) if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector, md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0); STRIPE_SECTORS, 0, 0);
/* If we were in the middle of a write the parity block might /* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags * still be locked - so just clear all R5_LOCKED flags
*/ */
...@@ -3773,10 +3773,10 @@ static void handle_stripe_clean_event(struct r5conf *conf, ...@@ -3773,10 +3773,10 @@ static void handle_stripe_clean_event(struct r5conf *conf,
bio_endio(wbi); bio_endio(wbi);
wbi = wbi2; wbi = wbi2;
} }
bitmap_endwrite(conf->mddev->bitmap, sh->sector, md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state), !test_bit(STRIPE_DEGRADED, &sh->state),
0); 0);
if (head_sh->batch_head) { if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list, sh = list_first_entry(&sh->batch_list,
struct stripe_head, struct stripe_head,
...@@ -5533,10 +5533,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) ...@@ -5533,10 +5533,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
for (d = 0; for (d = 0;
d < conf->raid_disks - conf->max_degraded; d < conf->raid_disks - conf->max_degraded;
d++) d++)
bitmap_startwrite(mddev->bitmap, md_bitmap_startwrite(mddev->bitmap,
sh->sector, sh->sector,
STRIPE_SECTORS, STRIPE_SECTORS,
0); 0);
sh->bm_seq = conf->seq_flush + 1; sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state); set_bit(STRIPE_BIT_DELAY, &sh->state);
} }
...@@ -6014,11 +6014,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6014,11 +6014,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
} }
if (mddev->curr_resync < max_sector) /* aborted */ if (mddev->curr_resync < max_sector) /* aborted */
bitmap_end_sync(mddev->bitmap, mddev->curr_resync, md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1); &sync_blocks, 1);
else /* completed sync */ else /* completed sync */
conf->fullsync = 0; conf->fullsync = 0;
bitmap_close_sync(mddev->bitmap); md_bitmap_close_sync(mddev->bitmap);
return 0; return 0;
} }
...@@ -6047,7 +6047,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6047,7 +6047,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
} }
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync && !conf->fullsync &&
!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
sync_blocks >= STRIPE_SECTORS) { sync_blocks >= STRIPE_SECTORS) {
/* we can skip this block, and probably more */ /* we can skip this block, and probably more */
sync_blocks /= STRIPE_SECTORS; sync_blocks /= STRIPE_SECTORS;
...@@ -6055,7 +6055,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6055,7 +6055,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
} }
bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) { if (sh == NULL) {
...@@ -6078,7 +6078,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n ...@@ -6078,7 +6078,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
} }
rcu_read_unlock(); rcu_read_unlock();
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state); set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
...@@ -6279,7 +6279,7 @@ static void raid5d(struct md_thread *thread) ...@@ -6279,7 +6279,7 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */ /* Now is a good time to flush some bitmap updates */
conf->seq_flush++; conf->seq_flush++;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
bitmap_unplug(mddev->bitmap); md_bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush; conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list); activate_bit_delay(conf, conf->temp_inactive_list);
...@@ -7734,7 +7734,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) ...@@ -7734,7 +7734,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize) mddev->array_sectors > newsize)
return -EINVAL; return -EINVAL;
if (mddev->bitmap) { if (mddev->bitmap) {
int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
if (ret) if (ret)
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment