Commit aca105a6 authored by Linus Torvalds

Merge tag 'md/4.2-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
 "Some md fixes for 4.2

  Several are tagged for -stable.
  A few aren't because they are not very serious or because they are in
  the 'experimental' cluster code"

* tag 'md/4.2-fixes' of git://neil.brown.name/md:
  md/raid5: clear R5_NeedReplace when no longer needed.
  Fix read-balancing during node failure
  md-cluster: fix bitmap sub-offset in bitmap_read_sb
  md: Return error if request_module fails and returns positive value
  md: Skip cluster setup in case of error while reading bitmap
  md/raid1: fix test for 'was read error from last working device'.
  md: Skip cluster setup for dm-raid
  md: flush ->event_work before stopping array.
  md/raid10: always set reshape_safe when initializing reshape_position.
  md/raid5: avoid races when changing cache size.
parents 32fd3d4a e6030cb0
drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	bitmap_super_t *sb;
 	unsigned long chunksize, daemon_sleep, write_behind;
 
-	bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (bitmap->storage.sb_page == NULL)
 		return -ENOMEM;
 	bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	sb->state = cpu_to_le32(bitmap->flags);
 	bitmap->events_cleared = bitmap->mddev->events;
 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+	bitmap->mddev->bitmap_info.nodes = 0;
 
 	kunmap_atomic(sb);
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	unsigned long sectors_reserved = 0;
 	int err = -EINVAL;
 	struct page *sb_page;
+	loff_t offset = bitmap->mddev->bitmap_info.offset;
 
 	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
 		chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
 		/* to 4k blocks */
 		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
-		bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
 		pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
-			bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+			bitmap->cluster_slot, offset);
 	}
 
 	if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 				bitmap, bytes, sb_page);
 	} else {
 		err = read_sb_page(bitmap->mddev,
-				   bitmap->mddev->bitmap_info.offset,
+				   offset,
 				   sb_page,
 				   0, sizeof(bitmap_super_t));
 	}
@@ -611,8 +613,16 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
 	write_behind = le32_to_cpu(sb->write_behind);
 	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-	nodes = le32_to_cpu(sb->nodes);
-	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+	/* XXX: This is a hack to ensure that we don't use clustering
+	 *  in case:
+	 *	- dm-raid is in use and
+	 *	- the nodes written in bitmap_sb is erroneous.
+	 */
+	if (!bitmap->mddev->sync_super) {
+		nodes = le32_to_cpu(sb->nodes);
+		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+				sb->cluster_name, 64);
+	}
 
 	/* verify that the bitmap-specific fields are valid */
 	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	kunmap_atomic(sb);
 	/* Assiging chunksize is required for "re_read" */
 	bitmap->mddev->bitmap_info.chunksize = chunksize;
-	if (nodes && (bitmap->cluster_slot < 0)) {
+	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
 		err = md_setup_cluster(bitmap->mddev, nodes);
 		if (err) {
 			pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
 	if (IS_ERR(bitmap))
 		return PTR_ERR(bitmap);
 
-	rv = bitmap_read_sb(bitmap);
-	if (rv)
-		goto err;
-
 	rv = bitmap_init_from_disk(bitmap, 0);
 	if (rv)
 		goto err;
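A note on the sub-offset change in bitmap_read_sb() above (a reading of the hunk, not part of the commit message): with md-cluster each node gets its own copy of the bitmap, laid out back to back on disk. bm_blocks is the per-node bitmap size rounded up to 4 KiB blocks, so bm_blocks << 3 is that size in 512-byte sectors, and cluster_slot * (bm_blocks << 3) is where this node's copy starts. For example, with bm_blocks = 4 (16 KiB per node), slot 2 reads its superblock at bitmap_info.offset + 2 * (4 << 3) = bitmap_info.offset + 64 sectors. Keeping the result in a local offset instead of adding it into bitmap_info.offset means a repeated read of the superblock (the "re_read" path referred to further down in the function) no longer accumulates the shift.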
drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
 /* md_cluster_info flags */
 #define MD_CLUSTER_WAITING_FOR_NEWDISK		1
+#define MD_CLUSTER_SUSPEND_READ_BALANCING	2
 
 struct md_cluster_info {
@@ -275,6 +276,9 @@ static void recover_bitmaps(struct md_thread *thread)
 static void recover_prep(void *arg)
 {
+	struct mddev *mddev = arg;
+	struct md_cluster_info *cinfo = mddev->cluster_info;
+	set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
 static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
 	cinfo->slot_number = our_slot;
 	complete(&cinfo->completion);
+	clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
 static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
 	resync_send(mddev, RESYNCING, 0, 0);
 }
 
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+		sector_t lo, sector_t hi)
 {
 	struct md_cluster_info *cinfo = mddev->cluster_info;
 	int ret = 0;
 	struct suspend_info *s;
 
+	if ((direction == READ) &&
+		test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+		return 1;
+
 	spin_lock_irq(&cinfo->suspend_lock);
 	if (list_empty(&cinfo->suspend_list))
 		goto out;
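The md-cluster change above boils down to a flag that gates read-balancing while DLM slot recovery is in flight: recover_prep() sets MD_CLUSTER_SUSPEND_READ_BALANCING, recover_done() clears it, and area_resyncing() now takes a direction so READ callers are told the area is busy whenever the flag is set (raid1 then falls back to choose_first). A rough user-space sketch of that gating pattern, with invented names and C11 atomics standing in for the kernel's set_bit/test_bit, is:

/* Rough user-space analogue of the read gating added in md-cluster.c.
 * Not kernel code: names are invented, C11 atomics replace set_bit/test_bit. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool suspend_read_balancing;   /* cf. MD_CLUSTER_SUSPEND_READ_BALANCING */

static void recovery_prep(void)              /* cf. recover_prep() */
{
	atomic_store(&suspend_read_balancing, true);
}

static void recovery_done(void)              /* cf. recover_done() */
{
	atomic_store(&suspend_read_balancing, false);
}

/* cf. area_resyncing(mddev, READ, lo, hi): a read is told "busy" while
 * another node's bitmap is still being recovered. */
static bool read_area_busy(void)
{
	return atomic_load(&suspend_read_balancing);
}

int main(void)
{
	recovery_prep();
	printf("during recovery: busy=%d\n", read_area_busy());
	recovery_done();
	printf("after recovery:  busy=%d\n", read_area_busy());
	return 0;
}

The WRITE path is unaffected by the flag; it still only consults the suspend_list, which is why area_resyncing() checks the direction before taking the spinlock.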
drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
 	int (*metadata_update_start)(struct mddev *mddev);
 	int (*metadata_update_finish)(struct mddev *mddev);
 	int (*metadata_update_cancel)(struct mddev *mddev);
-	int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+	int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
 	int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
 	int (*add_new_disk_finish)(struct mddev *mddev);
 	int (*new_disk_ack)(struct mddev *mddev, bool ack);
drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
 {
 	struct md_personality *pers = mddev->pers;
 	mddev_detach(mddev);
+	/* Ensure ->event_work is done */
+	flush_workqueue(md_misc_wq);
 	spin_lock(&mddev->lock);
 	mddev->ready = 0;
 	mddev->pers = NULL;
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
 	err = request_module("md-cluster");
 	if (err) {
 		pr_err("md-cluster module not found.\n");
-		return err;
+		return -ENOENT;
 	}
 
 	spin_lock(&pers_lock);
drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (r1_bio->mddev->degraded == conf->raid_disks ||
 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
 			uptodate = 1;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 	    (mddev_is_clustered(conf->mddev) &&
-	     md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 		    this_sector + sectors)))
 		choose_first = 1;
 	else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	    ((bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
 	    (mddev_is_clustered(mddev) &&
-	     md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+	     md_cluster_ops->area_resyncing(mddev, WRITE,
+		     bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
 			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
 			    (mddev_is_clustered(mddev) &&
-			     !md_cluster_ops->area_resyncing(mddev,
+			     !md_cluster_ops->area_resyncing(mddev, WRITE,
 				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
 				break;
 			schedule();
drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 		/* far_copies must be 1 */
 		conf->prev.stride = conf->dev_sectors;
 	}
+	conf->reshape_safe = conf->reshape_progress;
 	spin_lock_init(&conf->device_lock);
 
 	INIT_LIST_HEAD(&conf->retry_list);
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
 		}
 		conf->offset_diff = min_offset_diff;
 
-		conf->reshape_safe = conf->reshape_progress;
 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 		conf->reshape_progress = size;
 	} else
 		conf->reshape_progress = 0;
+	conf->reshape_safe = conf->reshape_progress;
 	spin_unlock_irq(&conf->device_lock);
 
 	if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 		rdev->new_data_offset = rdev->data_offset;
 	smp_wmb();
 	conf->reshape_progress = MaxSector;
+	conf->reshape_safe = MaxSector;
 	mddev->reshape_position = MaxSector;
 	spin_unlock_irq(&conf->device_lock);
 	return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
 	md_finish_reshape(conf->mddev);
 	smp_wmb();
 	conf->reshape_progress = MaxSector;
+	conf->reshape_safe = MaxSector;
 	spin_unlock_irq(&conf->device_lock);
 
 	/* read-ahead size must cover two whole stripes, which is
drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	if (!sc)
 		return -ENOMEM;
 
+	/* Need to ensure auto-resizing doesn't interfere */
+	mutex_lock(&conf->cache_size_mutex);
+
 	for (i = conf->max_nr_stripes; i; i--) {
 		nsh = alloc_stripe(sc, GFP_KERNEL);
 		if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 			kmem_cache_free(sc, nsh);
 		}
 		kmem_cache_destroy(sc);
+		mutex_unlock(&conf->cache_size_mutex);
 		return -ENOMEM;
 	}
 	/* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	} else
 		err = -ENOMEM;
 
+	mutex_unlock(&conf->cache_size_mutex);
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 					 &first_bad, &bad_sectors))
 				set_bit(R5_ReadRepl, &dev->flags);
 			else {
-				if (rdev)
+				if (rdev && !test_bit(Faulty, &rdev->flags))
 					set_bit(R5_NeedReplace, &dev->flags);
+				else
+					clear_bit(R5_NeedReplace, &dev->flags);
 				rdev = rcu_dereference(conf->disks[i].rdev);
 				clear_bit(R5_ReadRepl, &dev->flags);
 			}
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
 	pr_debug("%d stripes handled\n", handled);
 
 	spin_unlock_irq(&conf->device_lock);
-	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+	    mutex_trylock(&conf->cache_size_mutex)) {
 		grow_one_stripe(conf, __GFP_NOWARN);
 		/* Set flag even if allocation failed. This helps
 		 * slow down allocation requests when mem is short
 		 */
 		set_bit(R5_DID_ALLOC, &conf->cache_state);
+		mutex_unlock(&conf->cache_size_mutex);
 	}
 
 	async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 		return -EINVAL;
 
 	conf->min_nr_stripes = size;
+	mutex_lock(&conf->cache_size_mutex);
 	while (size < conf->max_nr_stripes &&
 	       drop_one_stripe(conf))
 		;
+	mutex_unlock(&conf->cache_size_mutex);
 
 	err = md_allow_write(mddev);
 	if (err)
 		return err;
 
+	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
 		if (!grow_one_stripe(conf, GFP_KERNEL))
 			break;
+	mutex_unlock(&conf->cache_size_mutex);
 
 	return 0;
 }
@@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 				      struct shrink_control *sc)
 {
 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
-	int ret = 0;
-	while (ret < sc->nr_to_scan) {
-		if (drop_one_stripe(conf) == 0)
-			return SHRINK_STOP;
-		ret++;
+	unsigned long ret = SHRINK_STOP;
+
+	if (mutex_trylock(&conf->cache_size_mutex)) {
+		ret= 0;
+		while (ret < sc->nr_to_scan) {
+			if (drop_one_stripe(conf) == 0) {
+				ret = SHRINK_STOP;
+				break;
+			}
+			ret++;
+		}
+		mutex_unlock(&conf->cache_size_mutex);
 	}
 	return ret;
 }
@@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 		goto abort;
 	spin_lock_init(&conf->device_lock);
 	seqcount_init(&conf->gen_lock);
+	mutex_init(&conf->cache_size_mutex);
 	init_waitqueue_head(&conf->wait_for_quiescent);
 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
 		init_waitqueue_head(&conf->wait_for_stripe[i]);
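All of the raid5.c hunks above revolve around the new cache_size_mutex: resize_stripes() and raid5_set_cache_size() hold it while growing or shrinking the stripe cache, while raid5d() and raid5_cache_scan() only mutex_trylock() it, so the work-queue and memory-reclaim paths back off instead of sleeping behind a resize that may itself be waiting for memory. A minimal user-space sketch of that trylock-in-reclaim pattern (an analogy with invented names, pthreads standing in for the kernel mutex API):

/* Minimal sketch of the trylock-in-reclaim pattern used by raid5_cache_scan().
 * Analogy only: names are invented, pthreads replaces the kernel mutex API. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_stripes = 256;

/* Resize path: allowed to sleep, so it takes the lock unconditionally. */
static void set_cache_size(int size)
{
	pthread_mutex_lock(&cache_size_lock);
	nr_stripes = size;
	pthread_mutex_unlock(&cache_size_lock);
}

/* Reclaim path: must not block behind a resize in progress, so it only
 * trylocks and reports "nothing freed" when the lock is busy. */
static int cache_scan(int nr_to_scan)
{
	int freed = 0;

	if (pthread_mutex_trylock(&cache_size_lock) != 0)
		return 0;                       /* cf. returning SHRINK_STOP */
	while (freed < nr_to_scan && nr_stripes > 0) {
		nr_stripes--;                   /* cf. drop_one_stripe() */
		freed++;
	}
	pthread_mutex_unlock(&cache_size_lock);
	return freed;
}

int main(void)
{
	set_cache_size(512);
	printf("freed %d, %d stripes left\n", cache_scan(64), nr_stripes);
	return 0;
}

The important property in both places is that the reclaim/daemon side can always make progress or give up immediately, without waiting on the configuration side.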
drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
 	 */
 	int			active_name;
 	char			cache_name[2][32];
 	struct kmem_cache	*slab_cache; /* for allocating stripes */
+	struct mutex		cache_size_mutex; /* Protect changes to cache size */
 
 	int			seq_flush, seq_write;
 	int			quiesce;