Commit e4d84909 authored by Dan Williams

raid5: fix 2 bugs in ops_complete_biofill

1/ ops_complete_biofill tried to avoid calling handle_stripe since all the
state necessary to return read completions is available.  However the
process of determining whether more read requests are pending requires
locking the stripe (to block add_stripe_bio from updating dev->toread).
ops_complete_biofill can run in tasklet context, so rather than upgrading
all the stripe locks from spin_lock to spin_lock_bh this patch just
unconditionally reschedules handle_stripe after completing the read
request.

2/ ops_complete_biofill needlessly qualified processing R5_Wantfill with
dev->toread.  The result being that the 'biofill' pending bit is cleared
before handling the pending read-completions on dev->read.  R5_Wantfill can
be unconditionally handled because the 'biofill' pending bit prevents new
R5_Wantfill requests from being seen by ops_run_biofill and
ops_complete_biofill.
Found-by: Yuri Tikhonov <yur@emcraft.com>
[neilb@suse.de: simpler fix for bug 1 than moving code]
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 6247cdc2
...@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref) ...@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref; struct stripe_head *sh = stripe_head_ref;
struct bio *return_bi = NULL; struct bio *return_bi = NULL;
raid5_conf_t *conf = sh->raid_conf; raid5_conf_t *conf = sh->raid_conf;
int i, more_to_read = 0; int i;
pr_debug("%s: stripe %llu\n", __FUNCTION__, pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector); (unsigned long long)sh->sector);
...@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref) ...@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
/* clear completed biofills */ /* clear completed biofills */
for (i = sh->disks; i--; ) { for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i]; struct r5dev *dev = &sh->dev[i];
/* check if this stripe has new incoming reads */
if (dev->toread)
more_to_read++;
/* acknowledge completion of a biofill operation */ /* acknowledge completion of a biofill operation */
/* and check if we need to reply to a read request /* and check if we need to reply to a read request,
*/ * new R5_Wantfill requests are held off until
if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) { * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
*/
if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi, *rbi2; struct bio *rbi, *rbi2;
clear_bit(R5_Wantfill, &dev->flags);
/* The access to dev->read is outside of the /* The access to dev->read is outside of the
* spin_lock_irq(&conf->device_lock), but is protected * spin_lock_irq(&conf->device_lock), but is protected
...@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref) ...@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
return_io(return_bi); return_io(return_bi);
if (more_to_read) set_bit(STRIPE_HANDLE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh); release_stripe(sh);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment