Commit 3c28c9cc authored by Linus Torvalds

Merge tag 'md/4.5' of git://neil.brown.name/md

Pull md updates from Neil Brown:
 "Mostly clustered-raid1 and raid5 journal updates.  one Y2038 fix and
  other minor stuff.

  One patch removes me from the MAINTAINERS file and adds a record of my
  md maintainership to Credits"

Many thanks to Neil, who has been around for a _looong_ time.

* tag 'md/4.5' of git://neil.brown.name/md: (26 commits)
  md/raid: only permit hot-add of compatible integrity profiles
  Remove myself as MD Maintainer, and add to Credits.
  raid5-cache: handle journal hotadd in quiesce
  MD: add journal with array suspended
  md: set MD_HAS_JOURNAL in correct places
  md: Remove 'ready' field from mddev.
  md: remove unnecesary md_new_event_inintr
  raid5: allow r5l_io_unit allocations to fail
  raid5-cache: use a mempool for the metadata block
  raid5-cache: use a bio_set
  raid5-cache: add journal hot add/remove support
  drivers: md: use ktime_get_real_seconds()
  md: avoid warning for 32-bit sector_t
  raid5-cache: free meta_page earlier
  raid5-cache: simplify r5l_move_io_unit_list
  md: update comment for md_allow_write
  md-cluster: update comments for MD_CLUSTER_SEND_LOCKED_ALREADY
  md-cluster: Protect communication with mutexes
  md-cluster: Defer MD reloading to mddev->thread
  md-cluster: update the documentation
  ...
parents 4b43ea2a 1501efad
@@ -534,6 +534,7 @@ N: NeilBrown
 E: neil@brown.name
 P: 4096R/566281B9 1BC6 29EB D390 D870 7B5F 497A 39EC 9EDD 5662 81B9
 D: NFSD Maintainer 2000-2007
+D: MD Maintainer 2001-2016
 
 N: Zach Brown
 E: zab@zabbo.net
...
This diff is collapsed.
@@ -9999,7 +9999,6 @@ S: Supported
 F: drivers/media/pci/solo6x10/
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
-M: Neil Brown <neilb@suse.com>
 L: linux-raid@vger.kernel.org
 S: Supported
 F: drivers/md/
...
@@ -48,13 +48,29 @@ struct resync_info {
 #define MD_CLUSTER_SUSPEND_READ_BALANCING 2
 #define MD_CLUSTER_BEGIN_JOIN_CLUSTER 3
+/* Lock the send communication. This is done through
+ * bit manipulation as opposed to a mutex in order to
+ * accomodate lock and hold. See next comment.
+ */
+#define MD_CLUSTER_SEND_LOCK 4
+/* If cluster operations (such as adding a disk) must lock the
+ * communication channel, so as to perform extra operations
+ * (update metadata) and no other operation is allowed on the
+ * MD. Token needs to be locked and held until the operation
+ * completes witha md_update_sb(), which would eventually release
+ * the lock.
+ */
+#define MD_CLUSTER_SEND_LOCKED_ALREADY 5
 
 struct md_cluster_info {
         /* dlm lock space and resources for clustered raid. */
         dlm_lockspace_t *lockspace;
         int slot_number;
         struct completion completion;
+        struct mutex recv_mutex;
         struct dlm_lock_resource *bitmap_lockres;
+        struct dlm_lock_resource **other_bitmap_lockres;
         struct dlm_lock_resource *resync_lockres;
         struct list_head suspend_list;
         spinlock_t suspend_lock;
@@ -67,6 +83,7 @@ struct md_cluster_info {
         struct dlm_lock_resource *no_new_dev_lockres;
         struct md_thread *recv_thread;
         struct completion newdisk_completion;
+        wait_queue_head_t wait;
         unsigned long state;
 };
@@ -431,8 +448,10 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
 static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
 {
         struct md_cluster_info *cinfo = mddev->cluster_info;
-        md_reload_sb(mddev, le32_to_cpu(msg->raid_slot));
+        mddev->good_device_nr = le32_to_cpu(msg->raid_slot);
+        set_bit(MD_RELOAD_SB, &mddev->flags);
         dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+        md_wakeup_thread(mddev->thread);
 }
 
 static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
@@ -440,8 +459,11 @@ static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
         struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
                         le32_to_cpu(msg->raid_slot));
 
-        if (rdev)
-                md_kick_rdev_from_array(rdev);
+        if (rdev) {
+                set_bit(ClusterRemove, &rdev->flags);
+                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                md_wakeup_thread(mddev->thread);
+        }
         else
                 pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
                         __func__, __LINE__, le32_to_cpu(msg->raid_slot));
@@ -502,9 +524,11 @@ static void recv_daemon(struct md_thread *thread)
         struct cluster_msg msg;
         int ret;
 
+        mutex_lock(&cinfo->recv_mutex);
         /*get CR on Message*/
         if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
                 pr_err("md/raid1:failed to get CR on MESSAGE\n");
+                mutex_unlock(&cinfo->recv_mutex);
                 return;
         }
@@ -528,33 +552,45 @@ static void recv_daemon(struct md_thread *thread)
         ret = dlm_unlock_sync(message_lockres);
         if (unlikely(ret != 0))
                 pr_info("unlock msg failed return %d\n", ret);
+        mutex_unlock(&cinfo->recv_mutex);
 }
 
-/* lock_comm()
+/* lock_token()
  * Takes the lock on the TOKEN lock resource so no other
  * node can communicate while the operation is underway.
+ * If called again, and the TOKEN lock is alread in EX mode
+ * return success. However, care must be taken that unlock_comm()
+ * is called only once.
  */
-static int lock_comm(struct md_cluster_info *cinfo)
+static int lock_token(struct md_cluster_info *cinfo)
 {
         int error;
 
+        if (cinfo->token_lockres->mode == DLM_LOCK_EX)
+                return 0;
         error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
         if (error)
                 pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
                                 __func__, __LINE__, error);
+
+        /* Lock the receive sequence */
+        mutex_lock(&cinfo->recv_mutex);
         return error;
 }
 
+/* lock_comm()
+ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
+ */
+static int lock_comm(struct md_cluster_info *cinfo)
+{
+        wait_event(cinfo->wait,
+                   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
+
+        return lock_token(cinfo);
+}
+
 static void unlock_comm(struct md_cluster_info *cinfo)
 {
         WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
+        mutex_unlock(&cinfo->recv_mutex);
         dlm_unlock_sync(cinfo->token_lockres);
+        clear_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state);
+        wake_up(&cinfo->wait);
 }
 
 /* __sendmsg()
@@ -707,6 +743,8 @@ static int join(struct mddev *mddev, int nodes)
         spin_lock_init(&cinfo->suspend_lock);
         init_completion(&cinfo->completion);
         set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
+        init_waitqueue_head(&cinfo->wait);
+        mutex_init(&cinfo->recv_mutex);
 
         mddev->cluster_info = cinfo;
@@ -800,6 +838,7 @@ static void resync_bitmap(struct mddev *mddev)
                         __func__, __LINE__, err);
 }
 
+static void unlock_all_bitmaps(struct mddev *mddev);
 static int leave(struct mddev *mddev)
 {
         struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -820,6 +859,7 @@ static int leave(struct mddev *mddev)
         lockres_free(cinfo->ack_lockres);
         lockres_free(cinfo->no_new_dev_lockres);
         lockres_free(cinfo->bitmap_lockres);
+        unlock_all_bitmaps(mddev);
         dlm_release_lockspace(cinfo->lockspace, 2);
         return 0;
 }
@@ -835,9 +875,25 @@ static int slot_number(struct mddev *mddev)
         return cinfo->slot_number - 1;
 }
 
+/*
+ * Check if the communication is already locked, else lock the communication
+ * channel.
+ * If it is already locked, token is in EX mode, and hence lock_token()
+ * should not be called.
+ */
 static int metadata_update_start(struct mddev *mddev)
 {
-        return lock_comm(mddev->cluster_info);
+        struct md_cluster_info *cinfo = mddev->cluster_info;
+
+        wait_event(cinfo->wait,
+                   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state) ||
+                   test_and_clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state));
+
+        /* If token is already locked, return 0 */
+        if (cinfo->token_lockres->mode == DLM_LOCK_EX)
+                return 0;
+
+        return lock_token(cinfo);
 }
 
 static int metadata_update_finish(struct mddev *mddev)
@@ -862,6 +918,7 @@ static int metadata_update_finish(struct mddev *mddev)
                 ret = __sendmsg(cinfo, &cmsg);
         } else
                 pr_warn("md-cluster: No good device id found to send\n");
+        clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
         unlock_comm(cinfo);
         return ret;
 }
@@ -869,6 +926,7 @@ static int metadata_update_finish(struct mddev *mddev)
 
 static void metadata_update_cancel(struct mddev *mddev)
 {
         struct md_cluster_info *cinfo = mddev->cluster_info;
+        clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
         unlock_comm(cinfo);
 }
@@ -882,8 +940,16 @@ static int resync_start(struct mddev *mddev)
 static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 {
         struct md_cluster_info *cinfo = mddev->cluster_info;
+        struct resync_info ri;
         struct cluster_msg cmsg = {0};
 
+        /* do not send zero again, if we have sent before */
+        if (hi == 0) {
+                memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
+                if (le64_to_cpu(ri.hi) == 0)
+                        return 0;
+        }
+
         add_resync_info(cinfo->bitmap_lockres, lo, hi);
         /* Re-acquire the lock to refresh LVB */
         dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
@@ -954,14 +1020,30 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
                 ret = -ENOENT;
         if (ret)
                 unlock_comm(cinfo);
-        else
+        else {
                 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+                /* Since MD_CHANGE_DEVS will be set in add_bound_rdev which
+                 * will run soon after add_new_disk, the below path will be
+                 * invoked:
+                 *   md_wakeup_thread(mddev->thread)
+                 *      -> conf->thread (raid1d)
+                 *      -> md_check_recovery -> md_update_sb
+                 *      -> metadata_update_start/finish
+                 * MD_CLUSTER_SEND_LOCKED_ALREADY will be cleared eventually.
+                 *
+                 * For other failure cases, metadata_update_cancel and
+                 * add_new_disk_cancel also clear below bit as well.
+                 * */
+                set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
+                wake_up(&cinfo->wait);
+        }
         return ret;
 }
 
 static void add_new_disk_cancel(struct mddev *mddev)
 {
         struct md_cluster_info *cinfo = mddev->cluster_info;
+        clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
         unlock_comm(cinfo);
 }
@@ -986,7 +1068,59 @@ static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
         struct md_cluster_info *cinfo = mddev->cluster_info;
         cmsg.type = cpu_to_le32(REMOVE);
         cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
-        return __sendmsg(cinfo, &cmsg);
+        return sendmsg(cinfo, &cmsg);
+}
+
+static int lock_all_bitmaps(struct mddev *mddev)
+{
+        int slot, my_slot, ret, held = 1, i = 0;
+        char str[64];
+        struct md_cluster_info *cinfo = mddev->cluster_info;
+
+        cinfo->other_bitmap_lockres = kzalloc((mddev->bitmap_info.nodes - 1) *
+                                             sizeof(struct dlm_lock_resource *),
+                                             GFP_KERNEL);
+        if (!cinfo->other_bitmap_lockres) {
+                pr_err("md: can't alloc mem for other bitmap locks\n");
+                return 0;
+        }
+
+        my_slot = slot_number(mddev);
+        for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) {
+                if (slot == my_slot)
+                        continue;
+
+                memset(str, '\0', 64);
+                snprintf(str, 64, "bitmap%04d", slot);
+                cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1);
+                if (!cinfo->other_bitmap_lockres[i])
+                        return -ENOMEM;
+
+                cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE;
+                ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW);
+                if (ret)
+                        held = -1;
+                i++;
+        }
+
+        return held;
+}
+
+static void unlock_all_bitmaps(struct mddev *mddev)
+{
+        struct md_cluster_info *cinfo = mddev->cluster_info;
+        int i;
+
+        /* release other node's bitmap lock if they are existed */
+        if (cinfo->other_bitmap_lockres) {
+                for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
+                        if (cinfo->other_bitmap_lockres[i]) {
+                                dlm_unlock_sync(cinfo->other_bitmap_lockres[i]);
+                                lockres_free(cinfo->other_bitmap_lockres[i]);
+                        }
                }
+                kfree(cinfo->other_bitmap_lockres);
+        }
 }
 
 static int gather_bitmaps(struct md_rdev *rdev)
@@ -1034,6 +1168,8 @@ static struct md_cluster_operations cluster_ops = {
         .new_disk_ack = new_disk_ack,
         .remove_disk = remove_disk,
         .gather_bitmaps = gather_bitmaps,
+        .lock_all_bitmaps = lock_all_bitmaps,
+        .unlock_all_bitmaps = unlock_all_bitmaps,
 };
 
 static int __init cluster_init(void)
...
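The md-cluster hunks above move the actual superblock reload out of the receive daemon: process_metadata_update() now only records the slot number, sets MD_RELOAD_SB and wakes mddev->thread, which performs the reload later. What follows is a minimal userspace sketch of that defer-to-worker pattern, not kernel code; the names (RELOAD_SB, managed_thread, and so on) are illustrative only.

/*
 * Sketch: the receiver does cheap bookkeeping and wakes a managing thread,
 * which tests and clears the flag and does the heavy work in its own context.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RELOAD_SB (1u << 0)

static atomic_uint flags;
static atomic_int good_device_nr;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int stop;

static void *managed_thread(void *arg)          /* stands in for mddev->thread */
{
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
                /* test-and-clear the flag, like md_check_recovery() would */
                if (atomic_fetch_and(&flags, ~RELOAD_SB) & RELOAD_SB) {
                        printf("reloading superblock, good device %d\n",
                               atomic_load(&good_device_nr));
                        continue;
                }
                if (stop)
                        break;
                pthread_cond_wait(&wake, &lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* receive-daemon side: no reload is done in this context */
static void process_metadata_update(int raid_slot)
{
        atomic_store(&good_device_nr, raid_slot);
        atomic_fetch_or(&flags, RELOAD_SB);     /* like set_bit(MD_RELOAD_SB, ...) */
        pthread_mutex_lock(&lock);
        pthread_cond_signal(&wake);             /* like md_wakeup_thread()         */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, managed_thread, NULL);
        process_metadata_update(2);

        pthread_mutex_lock(&lock);
        stop = 1;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}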
@@ -24,6 +24,8 @@ struct md_cluster_operations {
         int (*new_disk_ack)(struct mddev *mddev, bool ack);
         int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
         int (*gather_bitmaps)(struct md_rdev *rdev);
+        int (*lock_all_bitmaps)(struct mddev *mddev);
+        void (*unlock_all_bitmaps)(struct mddev *mddev);
 };
 
 #endif /* _MD_CLUSTER_H */
This diff is collapsed.
@@ -162,6 +162,7 @@ enum flag_bits {
                                  * Usually, this device should be faster
                                  * than other devices in the array
                                  */
+        ClusterRemove,
 };
 
 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -200,6 +201,9 @@ struct mddev {
                                  */
 #define MD_JOURNAL_CLEAN 5      /* A raid with journal is already clean */
 #define MD_HAS_JOURNAL 6        /* The raid array has journal feature set */
+#define MD_RELOAD_SB 7          /* Reload the superblock because another node
+                                 * updated it.
+                                 */
 
         int suspended;
         atomic_t active_io;
@@ -208,8 +212,6 @@ struct mddev {
                                  * are happening, so run/
                                  * takeover/stop are not safe
                                  */
-        int ready;              /* See when safe to pass
-                                 * IO requests down */
         struct gendisk *gendisk;
 
         struct kobject kobj;
@@ -226,7 +228,7 @@ struct mddev {
                                  * managed externally */
         char metadata_type[17]; /* externally set*/
         int chunk_sectors;
-        time_t ctime, utime;
+        time64_t ctime, utime;
         int level, layout;
         char clevel[16];
         int raid_disks;
@@ -430,6 +432,7 @@ struct mddev {
         struct work_struct event_work;  /* used by dm to report failure event */
         void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
         struct md_cluster_info *cluster_info;
+        unsigned int good_device_nr;    /* good device num within cluster raid */
 };
 
 static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -623,7 +626,7 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
 extern int md_check_no_bitmap(struct mddev *mddev);
 extern int md_integrity_register(struct mddev *mddev);
-extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
+extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void mddev_init(struct mddev *mddev);
...
@@ -257,6 +257,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                         disk_stack_limits(mddev->gendisk, rdev->bdev,
                                           rdev->data_offset << 9);
 
+                        err = md_integrity_add_rdev(rdev, mddev);
+                        if (err)
+                                break;
                         spin_lock_irq(&conf->device_lock);
                         mddev->degraded--;
                         rdev->raid_disk = path;
@@ -264,9 +267,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                         spin_unlock_irq(&conf->device_lock);
                         rcu_assign_pointer(p->rdev, rdev);
                         err = 0;
-                        mddev_suspend(mddev);
-                        md_integrity_add_rdev(rdev, mddev);
-                        mddev_resume(mddev);
                         break;
                 }
 
...
@@ -1589,6 +1589,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
         if (mddev->recovery_disabled == conf->recovery_disabled)
                 return -EBUSY;
 
+        if (md_integrity_add_rdev(rdev, mddev))
+                return -ENXIO;
+
         if (rdev->raid_disk >= 0)
                 first = last = rdev->raid_disk;
 
@@ -1632,9 +1635,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                         break;
                 }
         }
-        mddev_suspend(mddev);
-        md_integrity_add_rdev(rdev, mddev);
-        mddev_resume(mddev);
         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
         print_conf(conf);
...
@@ -1698,6 +1698,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
         if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
                 return -EINVAL;
 
+        if (md_integrity_add_rdev(rdev, mddev))
+                return -ENXIO;
+
         if (rdev->raid_disk >= 0)
                 first = last = rdev->raid_disk;
 
@@ -1739,9 +1742,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                 rcu_assign_pointer(p->rdev, rdev);
                 break;
         }
-        mddev_suspend(mddev);
-        md_integrity_add_rdev(rdev, mddev);
-        mddev_resume(mddev);
         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 
...
@@ -34,6 +34,12 @@
 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
 
+/*
+ * We only need 2 bios per I/O unit to make progress, but ensure we
+ * have a few more available to not get too tight.
+ */
+#define R5L_POOL_SIZE 4
+
 struct r5l_log {
         struct md_rdev *rdev;
@@ -69,7 +75,12 @@ struct r5l_log {
         struct list_head finished_ios;  /* io_units which settle down in log disk */
         struct bio flush_bio;
 
+        struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */
         struct kmem_cache *io_kc;
+        mempool_t *io_pool;
+        struct bio_set *bs;
+        mempool_t *meta_pool;
 
         struct md_thread *reclaim_thread;
         unsigned long reclaim_target;   /* number of space that need to be
@@ -150,27 +161,6 @@ static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
         return log->device_size > used_size + size;
 }
 
-static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
-{
-        __free_page(io->meta_page);
-        kmem_cache_free(log->io_kc, io);
-}
-
-static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
-                                  enum r5l_io_unit_state state)
-{
-        struct r5l_io_unit *io;
-
-        while (!list_empty(from)) {
-                io = list_first_entry(from, struct r5l_io_unit, log_sibling);
-                /* don't change list order */
-                if (io->state >= state)
-                        list_move_tail(&io->log_sibling, to);
-                else
-                        break;
-        }
-}
-
 static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
                                     enum r5l_io_unit_state state)
 {
@@ -206,6 +196,20 @@ static void r5l_log_run_stripes(struct r5l_log *log)
         }
 }
 
+static void r5l_move_to_end_ios(struct r5l_log *log)
+{
+        struct r5l_io_unit *io, *next;
+
+        assert_spin_locked(&log->io_list_lock);
+
+        list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
+                /* don't change list order */
+                if (io->state < IO_UNIT_IO_END)
+                        break;
+                list_move_tail(&io->log_sibling, &log->io_end_ios);
+        }
+}
+
 static void r5l_log_endio(struct bio *bio)
 {
         struct r5l_io_unit *io = bio->bi_private;
@@ -216,12 +220,12 @@ static void r5l_log_endio(struct bio *bio)
                 md_error(log->rdev->mddev, log->rdev);
 
         bio_put(bio);
+        mempool_free(io->meta_page, log->meta_pool);
 
         spin_lock_irqsave(&log->io_list_lock, flags);
         __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
         if (log->need_cache_flush)
-                r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
-                                      IO_UNIT_IO_END);
+                r5l_move_to_end_ios(log);
         else
                 r5l_log_run_stripes(log);
         spin_unlock_irqrestore(&log->io_list_lock, flags);
@@ -255,7 +259,7 @@ static void r5l_submit_current_io(struct r5l_log *log)
 
 static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
-        struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
+        struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
 
         bio->bi_rw = WRITE;
         bio->bi_bdev = log->rdev->bdev;
@@ -286,15 +290,19 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
         struct r5l_io_unit *io;
         struct r5l_meta_block *block;
 
-        /* We can't handle memory allocate failure so far */
-        io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
+        io = mempool_alloc(log->io_pool, GFP_ATOMIC);
+        if (!io)
+                return NULL;
+        memset(io, 0, sizeof(*io));
+
         io->log = log;
         INIT_LIST_HEAD(&io->log_sibling);
         INIT_LIST_HEAD(&io->stripe_list);
         io->state = IO_UNIT_RUNNING;
 
-        io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO);
+        io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
         block = page_address(io->meta_page);
+        clear_page(block);
         block->magic = cpu_to_le32(R5LOG_MAGIC);
         block->version = R5LOG_VERSION;
         block->seq = cpu_to_le64(log->seq);
@@ -324,8 +332,12 @@ static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
             log->current_io->meta_offset + payload_size > PAGE_SIZE)
                 r5l_submit_current_io(log);
 
-        if (!log->current_io)
+        if (!log->current_io) {
                 log->current_io = r5l_new_meta(log);
+                if (!log->current_io)
+                        return -ENOMEM;
+        }
+
         return 0;
 }
@@ -370,11 +382,12 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
         r5_reserve_log_entry(log, io);
 }
 
-static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
+static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                            int data_pages, int parity_pages)
 {
         int i;
         int meta_size;
+        int ret;
         struct r5l_io_unit *io;
 
         meta_size =
@@ -383,7 +396,10 @@ static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                 sizeof(struct r5l_payload_data_parity) +
                 sizeof(__le32) * parity_pages;
 
-        r5l_get_meta(log, meta_size);
+        ret = r5l_get_meta(log, meta_size);
+        if (ret)
+                return ret;
+
         io = log->current_io;
 
         for (i = 0; i < sh->disks; i++) {
@@ -413,6 +429,8 @@ static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
         list_add_tail(&sh->log_list, &io->stripe_list);
         atomic_inc(&io->pending_stripe);
         sh->log_io = io;
+
+        return 0;
 }
 
 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
@@ -427,6 +445,7 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
         int meta_size;
         int reserve;
         int i;
+        int ret = 0;
 
         if (!log)
                 return -EAGAIN;
@@ -475,17 +494,22 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
         mutex_lock(&log->io_mutex);
         /* meta + data */
         reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
-        if (r5l_has_free_space(log, reserve))
-                r5l_log_stripe(log, sh, data_pages, parity_pages);
-        else {
+        if (!r5l_has_free_space(log, reserve)) {
                 spin_lock(&log->no_space_stripes_lock);
                 list_add_tail(&sh->log_list, &log->no_space_stripes);
                 spin_unlock(&log->no_space_stripes_lock);
 
                 r5l_wake_reclaim(log, reserve);
+        } else {
+                ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+                if (ret) {
+                        spin_lock_irq(&log->io_list_lock);
+                        list_add_tail(&sh->log_list, &log->no_mem_stripes);
+                        spin_unlock_irq(&log->io_list_lock);
+                }
         }
 
         mutex_unlock(&log->io_mutex);
         return 0;
 }
@@ -538,6 +562,21 @@ static sector_t r5l_reclaimable_space(struct r5l_log *log)
                                  log->next_checkpoint);
 }
 
+static void r5l_run_no_mem_stripe(struct r5l_log *log)
+{
+        struct stripe_head *sh;
+
+        assert_spin_locked(&log->io_list_lock);
+
+        if (!list_empty(&log->no_mem_stripes)) {
+                sh = list_first_entry(&log->no_mem_stripes,
+                                      struct stripe_head, log_list);
+                list_del_init(&sh->log_list);
+                set_bit(STRIPE_HANDLE, &sh->state);
+                raid5_release_stripe(sh);
+        }
+}
+
 static bool r5l_complete_finished_ios(struct r5l_log *log)
 {
         struct r5l_io_unit *io, *next;
@@ -554,7 +593,8 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
                 log->next_cp_seq = io->seq;
 
                 list_del(&io->log_sibling);
-                r5l_free_io_unit(log, io);
+                mempool_free(io, log->io_pool);
+                r5l_run_no_mem_stripe(log);
 
                 found = true;
         }
@@ -787,6 +827,13 @@ void r5l_quiesce(struct r5l_log *log, int state)
                 return;
         if (state == 0) {
                 log->in_teardown = 0;
+                /*
+                 * This is a special case for hotadd. In suspend, the array has
+                 * no journal. In resume, journal is initialized as well as the
+                 * reclaim thread.
+                 */
+                if (log->reclaim_thread)
+                        return;
                 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                         log->rdev->mddev, "reclaim");
         } else if (state == 1) {
@@ -806,10 +853,18 @@ void r5l_quiesce(struct r5l_log *log, int state)
 
 bool r5l_log_disk_error(struct r5conf *conf)
 {
+        struct r5l_log *log;
+        bool ret;
         /* don't allow write if journal disk is missing */
-        if (!conf->log)
-                return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
-        return test_bit(Faulty, &conf->log->rdev->flags);
+        rcu_read_lock();
+        log = rcu_dereference(conf->log);
+
+        if (!log)
+                ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+        else
+                ret = test_bit(Faulty, &log->rdev->flags);
+        rcu_read_unlock();
+        return ret;
 }
 
 struct r5l_recovery_ctx {
@@ -1160,23 +1215,45 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
         if (!log->io_kc)
                 goto io_kc;
 
+        log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
+        if (!log->io_pool)
+                goto io_pool;
+
+        log->bs = bioset_create(R5L_POOL_SIZE, 0);
+        if (!log->bs)
+                goto io_bs;
+
+        log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
+        if (!log->meta_pool)
+                goto out_mempool;
+
         log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                                  log->rdev->mddev, "reclaim");
         if (!log->reclaim_thread)
                 goto reclaim_thread;
         init_waitqueue_head(&log->iounit_wait);
 
+        INIT_LIST_HEAD(&log->no_mem_stripes);
+
         INIT_LIST_HEAD(&log->no_space_stripes);
         spin_lock_init(&log->no_space_stripes_lock);
 
         if (r5l_load_log(log))
                 goto error;
 
-        conf->log = log;
+        rcu_assign_pointer(conf->log, log);
+        set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
         return 0;
+
 error:
         md_unregister_thread(&log->reclaim_thread);
 reclaim_thread:
+        mempool_destroy(log->meta_pool);
+out_mempool:
+        bioset_free(log->bs);
+io_bs:
+        mempool_destroy(log->io_pool);
+io_pool:
         kmem_cache_destroy(log->io_kc);
 io_kc:
         kfree(log);
@@ -1186,6 +1263,9 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 void r5l_exit_log(struct r5l_log *log)
 {
         md_unregister_thread(&log->reclaim_thread);
+        mempool_destroy(log->meta_pool);
+        bioset_free(log->bs);
+        mempool_destroy(log->io_pool);
         kmem_cache_destroy(log->io_kc);
         kfree(log);
 }
@@ -772,8 +772,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
         int hash;
         int dd_idx;
 
-        if (!stripe_can_batch(sh))
-                return;
         /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
         tmp_sec = sh->sector;
         if (!sector_div(tmp_sec, conf->chunk_sectors))
@@ -7141,14 +7139,19 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
         struct disk_info *p = conf->disks + number;
 
         print_raid5_conf(conf);
-        if (test_bit(Journal, &rdev->flags)) {
+        if (test_bit(Journal, &rdev->flags) && conf->log) {
+                struct r5l_log *log;
                 /*
-                 * journal disk is not removable, but we need give a chance to
-                 * update superblock of other disks. Otherwise journal disk
-                 * will be considered as 'fresh'
+                 * we can't wait pending write here, as this is called in
+                 * raid5d, wait will deadlock.
                  */
-                set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                return -EINVAL;
+                if (atomic_read(&mddev->writes_pending))
+                        return -EBUSY;
+                log = conf->log;
+                conf->log = NULL;
+                synchronize_rcu();
+                r5l_exit_log(log);
+                return 0;
         }
         if (rdev == p->rdev)
                 rdevp = &p->rdev;
@@ -7212,8 +7215,21 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
         int first = 0;
         int last = conf->raid_disks - 1;
 
-        if (test_bit(Journal, &rdev->flags))
-                return -EINVAL;
+        if (test_bit(Journal, &rdev->flags)) {
+                char b[BDEVNAME_SIZE];
+                if (conf->log)
+                        return -EBUSY;
+
+                rdev->raid_disk = 0;
+                /*
+                 * The array is in readonly mode if journal is missing, so no
+                 * write requests running. We should be safe
+                 */
+                r5l_init_log(conf, rdev);
+                printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
+                       mdname(mddev), bdevname(rdev->bdev, b));
+                return 0;
+        }
         if (mddev->recovery_disabled == conf->recovery_disabled)
                 return -EBUSY;
...
@@ -80,7 +80,7 @@ typedef struct mdu_array_info_s {
         int major_version;
         int minor_version;
         int patch_version;
-        int ctime;
+        unsigned int ctime;
         int level;
         int size;
         int nr_disks;
@@ -91,7 +91,7 @@ typedef struct mdu_array_info_s {
         /*
          * Generic state information
          */
-        int utime;              /* 0 Superblock update time */
+        unsigned int utime;     /* 0 Superblock update time */
         int state;              /* 1 State bits (clean, ...) */
         int active_disks;       /* 2 Number of currently active disks */
         int working_disks;      /* 3 Number of working disks */
...
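The last two hunks are the Y2038 side of the series: mddev->ctime/utime become time64_t (filled via ktime_get_real_seconds(), per "drivers: md: use ktime_get_real_seconds()"), and the mdu_array_info_s ioctl fields become unsigned, which keeps the 32-bit ABI usable until 2106. A small standalone illustration of the overflow being avoided (plain userspace arithmetic, nothing kernel-specific):

/* Signed 32-bit seconds wrap in January 2038; 64-bit and unsigned do better. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t utime = 0x7fffffff;      /* 2038-01-19 03:14:07 UTC */

        printf("last representable 32-bit second: %lld\n", (long long)utime);
        printf("one second later as signed 32-bit: %d\n",
               (int)(int32_t)(utime + 1));              /* wraps negative */
        printf("one second later as 64-bit:        %lld\n",
               (long long)(utime + 1));                 /* fine */
        printf("one second later as unsigned int:  %u\n",
               (unsigned int)(utime + 1));              /* ok until 2106 */
        return 0;
}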