Commit 6d6e352c authored by Linus Torvalds

Merge tag 'md/3.13' of git://neil.brown.name/md

Pull md update from Neil Brown:
 "Mostly optimisations and obscure bug fixes.
   - raid5 gets less lock contention
   - raid1 gets less contention between normal-io and resync-io during
     resync"

* tag 'md/3.13' of git://neil.brown.name/md:
  md/raid5: Use conf->device_lock protect changing of multi-thread resources.
  md/raid5: Before freeing old multi-thread worker, it should flush them.
  md/raid5: For stripe with R5_ReadNoMerge, we replace REQ_FLUSH with REQ_NOMERGE.
  UAPI: include <asm/byteorder.h> in linux/raid/md_p.h
  raid1: Rewrite the implementation of iobarrier.
  raid1: Add some macros to make code clearly.
  raid1: Replace raise_barrier/lower_barrier with freeze_array/unfreeze_array when reconfiguring the array.
  raid1: Add a field array_frozen to indicate whether raid in freeze state.
  md: Convert use of typedef ctl_table to struct ctl_table
  md/raid5: avoid deadlock when raid5 array has unack badblocks during md_stop_writes.
  md: use MD_RECOVERY_INTR instead of kthread_should_stop in resync thread.
  md: fix some places where mddev_lock return value is not checked.
  raid5: Retry R5_ReadNoMerge flag when hit a read error.
  raid5: relieve lock contention in get_active_stripe()
  raid5: relieve lock contention in get_active_stripe()
  wait: add wait_event_cmd()
  md/raid5.c: add proper locking to error path of raid5_start_reshape.
  md: fix calculation of stacking limits on level change.
  raid5: Use slow_path to release stripe when mddev->thread is null
parents b4789b8e 60aaf933
@@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)
 static struct ctl_table_header *raid_table_header;
-static ctl_table raid_table[] = {
+static struct ctl_table raid_table[] = {
     {
         .procname = "speed_limit_min",
         .data = &sysctl_speed_limit_min,
@@ -130,7 +130,7 @@ static ctl_table raid_table[] = {
     { }
 };
-static ctl_table raid_dir_table[] = {
+static struct ctl_table raid_dir_table[] = {
     {
         .procname = "raid",
         .maxlen = 0,
@@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {
     { }
 };
-static ctl_table raid_root_table[] = {
+static struct ctl_table raid_root_table[] = {
     {
         .procname = "dev",
         .maxlen = 0,
@@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit)
     goto retry;
 }
-static inline int mddev_lock(struct mddev * mddev)
+static inline int __must_check mddev_lock(struct mddev * mddev)
 {
     return mutex_lock_interruptible(&mddev->reconfig_mutex);
 }
+/* Sometimes we need to take the lock in a situation where
+ * failure due to interrupts is not acceptable.
+ */
+static inline void mddev_lock_nointr(struct mddev * mddev)
+{
+    mutex_lock(&mddev->reconfig_mutex);
+}
 static inline int mddev_is_locked(struct mddev *mddev)
 {
     return mutex_is_locked(&mddev->reconfig_mutex);
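The new helper pairs with the __must_check annotation on mddev_lock(): callers that can back out are expected to handle -EINTR, while paths that must not fail take the mutex uninterruptibly. A minimal sketch of the two calling patterns (the function names here are hypothetical, not from the tree):

    /* interruptible path: propagate -EINTR if a signal arrives first */
    static int example_reconfig(struct mddev *mddev)
    {
        int err = mddev_lock(mddev);

        if (err)
            return err;
        /* ... reconfigure under mddev->reconfig_mutex ... */
        mddev_unlock(mddev);
        return 0;
    }

    /* must-succeed path (e.g. teardown): take the lock unconditionally */
    static void example_teardown(struct mddev *mddev)
    {
        mddev_lock_nointr(mddev);
        /* ... work that cannot be skipped ... */
        mddev_unlock(mddev);
    }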
@@ -2978,7 +2986,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
     for_each_mddev(mddev, tmp) {
         struct md_rdev *rdev2;
-        mddev_lock(mddev);
+        mddev_lock_nointr(mddev);
         rdev_for_each(rdev2, mddev)
             if (rdev->bdev == rdev2->bdev &&
                 rdev != rdev2 &&
@@ -2994,7 +3002,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
             break;
         }
     }
-    mddev_lock(my_mddev);
+    mddev_lock_nointr(my_mddev);
     if (overlap) {
         /* Someone else could have slipped in a size
          * change here, but doing so is just silly.
@@ -3580,6 +3588,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
         mddev->in_sync = 1;
         del_timer_sync(&mddev->safemode_timer);
     }
+    blk_set_stacking_limits(&mddev->queue->limits);
     pers->run(mddev);
     set_bit(MD_CHANGE_DEVS, &mddev->flags);
     mddev_resume(mddev);
@@ -5258,7 +5267,7 @@ static void __md_stop_writes(struct mddev *mddev)
 void md_stop_writes(struct mddev *mddev)
 {
-    mddev_lock(mddev);
+    mddev_lock_nointr(mddev);
     __md_stop_writes(mddev);
     mddev_unlock(mddev);
 }
@@ -5291,20 +5300,35 @@ EXPORT_SYMBOL_GPL(md_stop);
 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
     int err = 0;
+    int did_freeze = 0;
+    if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+        did_freeze = 1;
+        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+        md_wakeup_thread(mddev->thread);
+    }
+    if (mddev->sync_thread) {
+        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+        /* Thread might be blocked waiting for metadata update
+         * which will now never happen */
+        wake_up_process(mddev->sync_thread->tsk);
+    }
+    mddev_unlock(mddev);
+    wait_event(resync_wait, mddev->sync_thread == NULL);
+    mddev_lock_nointr(mddev);
     mutex_lock(&mddev->open_mutex);
-    if (atomic_read(&mddev->openers) > !!bdev) {
+    if (atomic_read(&mddev->openers) > !!bdev ||
+        mddev->sync_thread ||
+        (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
         printk("md: %s still in use.\n",mdname(mddev));
+        if (did_freeze) {
+            clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+            md_wakeup_thread(mddev->thread);
+        }
         err = -EBUSY;
         goto out;
     }
-    if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
-        /* Someone opened the device since we flushed it
-         * so page cache could be dirty and it is too late
-         * to flush. So abort
-         */
-        mutex_unlock(&mddev->open_mutex);
-        return -EBUSY;
-    }
     if (mddev->pers) {
         __md_stop_writes(mddev);
@@ -5315,7 +5339,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
         set_disk_ro(mddev->gendisk, 1);
         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
         sysfs_notify_dirent_safe(mddev->sysfs_state);
         err = 0;
     }
 out:
     mutex_unlock(&mddev->open_mutex);
@@ -5331,20 +5355,34 @@ static int do_md_stop(struct mddev * mddev, int mode,
 {
     struct gendisk *disk = mddev->gendisk;
     struct md_rdev *rdev;
+    int did_freeze = 0;
+    if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+        did_freeze = 1;
+        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+        md_wakeup_thread(mddev->thread);
+    }
+    if (mddev->sync_thread) {
+        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+        /* Thread might be blocked waiting for metadata update
+         * which will now never happen */
+        wake_up_process(mddev->sync_thread->tsk);
+    }
+    mddev_unlock(mddev);
+    wait_event(resync_wait, mddev->sync_thread == NULL);
+    mddev_lock_nointr(mddev);
     mutex_lock(&mddev->open_mutex);
     if (atomic_read(&mddev->openers) > !!bdev ||
-        mddev->sysfs_active) {
+        mddev->sysfs_active ||
+        mddev->sync_thread ||
+        (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
         printk("md: %s still in use.\n",mdname(mddev));
         mutex_unlock(&mddev->open_mutex);
-        return -EBUSY;
-    }
-    if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
-        /* Someone opened the device since we flushed it
-         * so page cache could be dirty and it is too late
-         * to flush. So abort
-         */
-        mutex_unlock(&mddev->open_mutex);
+        if (did_freeze) {
+            clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+            md_wakeup_thread(mddev->thread);
+        }
         return -EBUSY;
     }
     if (mddev->pers) {
@@ -6551,7 +6589,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
     wait_event(mddev->sb_wait,
                !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
                !test_bit(MD_CHANGE_PENDING, &mddev->flags));
-    mddev_lock(mddev);
+    mddev_lock_nointr(mddev);
 }
 } else {
     err = -EROFS;
@@ -7361,9 +7399,6 @@ void md_do_sync(struct md_thread *thread)
     mddev->curr_resync = 2;
 try_again:
-    if (kthread_should_stop())
-        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
     if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
         goto skip;
     for_each_mddev(mddev2, tmp) {
@@ -7388,7 +7423,7 @@ void md_do_sync(struct md_thread *thread)
          * be caught by 'softlockup'
          */
         prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
-        if (!kthread_should_stop() &&
+        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
             mddev2->curr_resync >= mddev->curr_resync) {
             printk(KERN_INFO "md: delaying %s of %s"
                    " until %s has finished (they"
@@ -7464,7 +7499,7 @@ void md_do_sync(struct md_thread *thread)
     last_check = 0;
     if (j>2) {
         printk(KERN_INFO
                "md: resuming %s of %s from checkpoint.\n",
                desc, mdname(mddev));
         mddev->curr_resync = j;
@@ -7501,7 +7536,8 @@ void md_do_sync(struct md_thread *thread)
             sysfs_notify(&mddev->kobj, NULL, "sync_completed");
         }
-        while (j >= mddev->resync_max && !kthread_should_stop()) {
+        while (j >= mddev->resync_max &&
+               !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
             /* As this condition is controlled by user-space,
              * we can block indefinitely, so use '_interruptible'
              * to avoid triggering warnings.
@@ -7509,17 +7545,18 @@ void md_do_sync(struct md_thread *thread)
             flush_signals(current); /* just in case */
             wait_event_interruptible(mddev->recovery_wait,
                                      mddev->resync_max > j
-                                     || kthread_should_stop());
+                                     || test_bit(MD_RECOVERY_INTR,
+                                                 &mddev->recovery));
         }
-        if (kthread_should_stop())
-            goto interrupted;
+        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+            break;
         sectors = mddev->pers->sync_request(mddev, j, &skipped,
                                             currspeed < speed_min(mddev));
         if (sectors == 0) {
             set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-            goto out;
+            break;
         }
         if (!skipped) { /* actual IO requested */
@@ -7556,10 +7593,8 @@ void md_do_sync(struct md_thread *thread)
             last_mark = next;
         }
-        if (kthread_should_stop())
-            goto interrupted;
+        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+            break;
         /*
          * this loop exits only if either when we are slower than
@@ -7582,11 +7617,12 @@ void md_do_sync(struct md_thread *thread)
             }
         }
     }
-    printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
+    printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
+           test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+           ? "interrupted" : "done");
     /*
      * this also signals 'finished resyncing' to md_stop
      */
- out:
     blk_finish_plug(&plug);
     wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -7640,16 +7676,6 @@ void md_do_sync(struct md_thread *thread)
     set_bit(MD_RECOVERY_DONE, &mddev->recovery);
     md_wakeup_thread(mddev->thread);
     return;
- interrupted:
-    /*
-     * got a signal, exit.
-     */
-    printk(KERN_INFO
-           "md: md_do_sync() got signal ... exiting\n");
-    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-    goto out;
 }
 EXPORT_SYMBOL_GPL(md_do_sync);
@@ -7894,6 +7920,7 @@ void md_reap_sync_thread(struct mddev *mddev)
     /* resync has finished, collect result */
     md_unregister_thread(&mddev->sync_thread);
+    wake_up(&resync_wait);
     if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
         !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
         /* success...*/
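With wake_up(&resync_wait) in md_reap_sync_thread() and MD_RECOVERY_INTR checked throughout md_do_sync(), a stopper no longer relies on kthread_should_stop(): it interrupts the resync and waits for the thread to be reaped, as md_set_readonly() and do_md_stop() above now do. A condensed sketch of that caller-side sequence (assumed to run in md.c under the reconfig mutex; the function name is made up):

    static void interrupt_and_wait_for_resync(struct mddev *mddev)
    {
        if (mddev->sync_thread) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            /* the thread may be blocked waiting for a metadata update */
            wake_up_process(mddev->sync_thread->tsk);
        }
        mddev_unlock(mddev);
        /* md_reap_sync_thread() clears ->sync_thread and wakes resync_wait */
        wait_event(resync_wait, mddev->sync_thread == NULL);
        mddev_lock_nointr(mddev);
    }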
......
This diff is collapsed.
@@ -41,6 +41,19 @@ struct r1conf {
      */
     sector_t next_resync;
+    /* When raid1 starts resync, we divide array into four partitions
+     * |---------|--------------|---------------------|-------------|
+     *        next_resync   start_next_window       end_window
+     * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
+     * end_window = start_next_window + NEXT_NORMALIO_DISTANCE
+     * current_window_requests means the count of normalIO between
+     *   start_next_window and end_window.
+     * next_window_requests means the count of normalIO after end_window.
+     * */
+    sector_t start_next_window;
+    int current_window_requests;
+    int next_window_requests;
     spinlock_t device_lock;
     /* list of 'struct r1bio' that need to be processed by raid1d,
@@ -65,6 +78,7 @@ struct r1conf {
     int nr_waiting;
     int nr_queued;
     int barrier;
+    int array_frozen;
     /* Set to 1 if a full sync is needed, (fresh device added).
      * Cleared when a sync completes.
@@ -111,6 +125,7 @@ struct r1bio {
      * in this BehindIO request
      */
     sector_t sector;
+    sector_t start_next_window;
     int sectors;
     unsigned long state;
     struct mddev *mddev;
......
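The window bookkeeping added to struct r1conf is plain sector arithmetic following the comment above. A small sketch of how a normal write might be counted against the windows (NORMALIO_DISTANCE_EXAMPLE and the helper are illustrative stand-ins; the real constant and classification live in raid1.c's barrier code):

    #define NORMALIO_DISTANCE_EXAMPLE 4096 /* sectors; hypothetical value */

    static void count_normal_io(struct r1conf *conf, sector_t sector)
    {
        sector_t end_window = conf->start_next_window +
                              NORMALIO_DISTANCE_EXAMPLE;

        if (sector >= conf->start_next_window && sector < end_window)
            conf->current_window_requests++; /* between start_next_window and end_window */
        else if (sector >= end_window)
            conf->next_window_requests++;    /* beyond end_window */
        /* I/O below start_next_window is handled by the existing
         * barrier/resync accounting and is not counted here. */
    }

For example, with next_resync at sector 100000 and a distance of 4096, start_next_window is 104096 and end_window is 108192: a write at sector 105000 bumps current_window_requests, while one at 110000 bumps next_window_requests.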
@@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
         set_bit(MD_CHANGE_DEVS, &mddev->flags);
         md_wakeup_thread(mddev->thread);
         wait_event(mddev->sb_wait, mddev->flags == 0 ||
-                   kthread_should_stop());
+                   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+            allow_barrier(conf);
+            return sectors_done;
+        }
         conf->reshape_safe = mddev->reshape_position;
         allow_barrier(conf);
     }
......
This diff is collapsed.
@@ -205,6 +205,7 @@ struct stripe_head {
     short pd_idx;       /* parity disk index */
     short qd_idx;       /* 'Q' disk index for raid6 */
     short ddf_layout;   /* use DDF ordering to calculate Q */
+    short hash_lock_index;
     unsigned long state;    /* state flags */
     atomic_t count;         /* nr of active thread/requests */
     int bm_seq;             /* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
     struct md_rdev *rdev, *replacement;
 };
+/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
+ * This is because we sometimes take all the spinlocks
+ * and creating that much locking depth can cause
+ * problems.
+ */
+#define NR_STRIPE_HASH_LOCKS 8
+#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
 struct r5worker {
     struct work_struct work;
     struct r5worker_group *group;
+    struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
     bool working;
 };
@@ -382,6 +392,8 @@ struct r5worker_group {
 struct r5conf {
     struct hlist_head *stripe_hashtbl;
+    /* only protect corresponding hash list and inactive_list */
+    spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
     struct mddev *mddev;
     int chunk_sectors;
     int level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
      * Free stripes pool
      */
     atomic_t active_stripes;
-    struct list_head inactive_list;
+    struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+    atomic_t empty_inactive_list_nr;
     struct llist_head released_stripes;
     wait_queue_head_t wait_for_stripe;
     wait_queue_head_t wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
      * the new thread here until we fully activate the array.
      */
     struct md_thread *thread;
+    struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
     struct r5worker_group *worker_groups;
     int group_cnt;
     int worker_cnt_per_group;
......
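The per-bucket locks above change how a stripe is protected: a stripe's hash bucket selects one of the NR_STRIPE_HASH_LOCKS spinlocks, and only slow paths take all of them, which is why the comment caps the count. A minimal sketch of the bucket-to-lock mapping (stripe_hash_bucket() is a hypothetical stand-in for the real hashing helper in raid5.c):

    static spinlock_t *lock_for_stripe(struct r5conf *conf, sector_t sector)
    {
        int hash = stripe_hash_bucket(sector) & STRIPE_HASH_LOCKS_MASK;

        /* this lock covers the matching hash chain and inactive_list[hash] */
        return conf->hash_locks + hash;
    }

get_active_stripe() can then serialise on a single hash lock in the common case instead of the global device_lock, which is where the "relieve lock contention" commits get their win.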
@@ -278,6 +278,31 @@ do { \
     __ret; \
 })
+#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
+    (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+                        cmd1; schedule(); cmd2)
+/**
+ * wait_event_cmd - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * cmd1: the command will be executed before sleep
+ * cmd2: the command will be executed after sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_cmd(wq, condition, cmd1, cmd2) \
+do { \
+    if (condition) \
+        break; \
+    __wait_event_cmd(wq, condition, cmd1, cmd2); \
+} while (0)
 #define __wait_event_interruptible(wq, condition) \
     ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
                   schedule())
......
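wait_event_cmd() exists so a caller can drop a lock before schedule() and retake it on wake-up without open-coding the wait loop, which is what the raid5 hash-lock code needs. An illustrative call shaped after that use (treat it as a sketch rather than a verbatim excerpt; conf and hash come from the surrounding raid5 context):

    /* sleep until a free stripe appears in this hash bucket, releasing
     * the per-bucket lock while asleep and re-taking it before rechecking */
    wait_event_cmd(conf->wait_for_stripe,
                   !list_empty(conf->inactive_list + hash),
                   spin_unlock_irq(conf->hash_locks + hash), /* cmd1: before sleep */
                   spin_lock_irq(conf->hash_locks + hash));  /* cmd2: after waking */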
@@ -16,6 +16,7 @@
 #define _MD_P_H
 #include <linux/types.h>
+#include <asm/byteorder.h>
 /*
  * RAID superblock.
......