Commit 9d6cb1b0 authored by Johannes Thumshirn, committed by David Sterba

btrfs: raid56: reduce indentation in lock_stripe_add

In lock_stripe_add() we're traversing the stripe hash list and checking
whether the current list element's raid_map is equal to the raid bio's
raid_map. If both are equal we continue processing.

If we check for inequality instead of equality and continue to the next
element, we can reduce one level of indentation.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 1d2e7c7c
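The change is an instance of the common early-continue (guard clause) pattern: invert the filter condition at the top of the loop body and skip to the next iteration, so the real work no longer needs to be nested inside the match test. A minimal standalone sketch of the same transformation, assuming a hypothetical singly linked list (struct item and the find_* helpers are illustrative names, not btrfs code):

#include <stddef.h>

struct item {
	int key;
	struct item *next;
};

/* Nested style: the match test wraps the whole loop body. */
static struct item *find_nested(struct item *head, int key)
{
	for (struct item *cur = head; cur; cur = cur->next) {
		if (cur->key == key) {
			/* ... the interesting work lives one level deep ... */
			return cur;
		}
	}
	return NULL;
}

/* Early-continue style: reject mismatches first, keep the body flat. */
static struct item *find_flat(struct item *head, int key)
{
	for (struct item *cur = head; cur; cur = cur->next) {
		if (cur->key != key)
			continue;
		/* ... the interesting work stays at the top level ... */
		return cur;
	}
	return NULL;
}

Both functions behave identically; the second form is what the patch applies inside list_for_each_entry(), freeing a level of indentation for the steal/merge logic that follows.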
@@ -682,10 +682,12 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 
 	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(cur, &h->hash_list, hash_list) {
-		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
-			spin_lock(&cur->bio_list_lock);
-
-			/* can we steal this cached rbio's pages? */
-			if (bio_list_empty(&cur->bio_list) &&
-			    list_empty(&cur->plug_list) &&
-			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
+			continue;
+
+		spin_lock(&cur->bio_list_lock);
+
+		/* Can we steal this cached rbio's pages? */
+		if (bio_list_empty(&cur->bio_list) &&
+		    list_empty(&cur->plug_list) &&
+		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
@@ -700,7 +702,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
-				goto lockit;
-			}
-
-			/* can we merge into the lock owner? */
-			if (rbio_can_merge(cur, rbio)) {
-				merge_rbio(cur, rbio);
-				spin_unlock(&cur->bio_list_lock);
+			goto lockit;
+		}
+
+		/* Can we merge into the lock owner? */
+		if (rbio_can_merge(cur, rbio)) {
+			merge_rbio(cur, rbio);
+			spin_unlock(&cur->bio_list_lock);
@@ -711,15 +713,11 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 
-			/*
-			 * we couldn't merge with the running
-			 * rbio, see if we can merge with the
-			 * pending ones. We don't have to
-			 * check for rmw_locked because there
-			 * is no way they are inside finish_rmw
-			 * right now
-			 */
-			list_for_each_entry(pending, &cur->plug_list,
-					    plug_list) {
-				if (rbio_can_merge(pending, rbio)) {
-					merge_rbio(pending, rbio);
-					spin_unlock(&cur->bio_list_lock);
+		/*
+		 * We couldn't merge with the running rbio, see if we can merge
+		 * with the pending ones. We don't have to check for rmw_locked
+		 * because there is no way they are inside finish_rmw right now
+		 */
+		list_for_each_entry(pending, &cur->plug_list, plug_list) {
+			if (rbio_can_merge(pending, rbio)) {
+				merge_rbio(pending, rbio);
+				spin_unlock(&cur->bio_list_lock);
@@ -729,16 +727,15 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
-				}
-			}
-
-			/* no merging, put us on the tail of the plug list,
-			 * our rbio will be started with the currently
-			 * running rbio unlocks
-			 */
-			list_add_tail(&rbio->plug_list, &cur->plug_list);
-			spin_unlock(&cur->bio_list_lock);
-			ret = 1;
-			goto out;
-		}
+			}
+		}
+
+		/*
+		 * No merging, put us on the tail of the plug list, our rbio
+		 * will be started with the currently running rbio unlocks
+		 */
+		list_add_tail(&rbio->plug_list, &cur->plug_list);
+		spin_unlock(&cur->bio_list_lock);
+		ret = 1;
+		goto out;
 	}
 lockit:
 	refcount_inc(&rbio->refs);
 	list_add(&rbio->hash_list, &h->hash_list);