Commit 7692c5dd authored by Jonathan E Brassow, committed by Linus Torvalds

[PATCH] device-mapper raid1: drop mark_region spinlock fix

The spinlock region_lock is held while calling mark_region, which can sleep.
Drop the spinlock before calling that function.

A region's state and inclusion in the clean list are altered by rh_inc and
rh_dec.  The state variable is set to RH_CLEAN in rh_dec, but only if
'pending' is zero.  It is set to RH_DIRTY in rh_inc, but not if it is already
so.  The changes to 'pending', the state, and the region's inclusion in the
clean list need to be atomic.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 233886dd
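As an illustration of the locking rule the fix follows (not the driver code itself), here is a minimal userspace sketch: a pthread mutex stands in for the kernel spinlock and usleep() for the dirty-log mark_region hook, and all names in it (struct region, region_inc, mark_region_may_sleep) are hypothetical stand-ins.  The state change, and in the driver the removal from the clean list, happen while the lock is held, and the lock is dropped before the call that may block, mirroring the rh_inc() hunk below.

/*
 * Illustrative only: pthread mutex stands in for rh->region_lock and
 * usleep() for the dirty-log mark_region() call, which may sleep.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum rstate { CLEAN, DIRTY };

struct region {
	pthread_mutex_t lock;		/* stand-in for region_lock */
	int pending;			/* stand-in for reg->pending */
	enum rstate state;
};

static void mark_region_may_sleep(struct region *reg)
{
	(void)reg;
	usleep(1000);			/* pretend to wait on log I/O */
}

static void region_inc(struct region *reg)
{
	pthread_mutex_lock(&reg->lock);
	reg->pending++;
	if (reg->state == CLEAN) {
		/* The state change (and, in the driver, the clean-list
		 * removal) happens while the lock is held ... */
		reg->state = DIRTY;
		pthread_mutex_unlock(&reg->lock);

		/* ... and the lock is dropped before the call that
		 * may block. */
		mark_region_may_sleep(reg);
		return;
	}
	pthread_mutex_unlock(&reg->lock);
}

int main(void)
{
	struct region reg = { PTHREAD_MUTEX_INITIALIZER, 0, CLEAN };

	region_inc(&reg);
	printf("pending=%d state=%s\n", reg.pending,
	       reg.state == DIRTY ? "DIRTY" : "CLEAN");
	return 0;
}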
@@ -376,16 +376,18 @@ static void rh_inc(struct region_hash *rh, region_t region)
 
 	read_lock(&rh->hash_lock);
 	reg = __rh_find(rh, region);
+
+	spin_lock_irq(&rh->region_lock);
 	atomic_inc(&reg->pending);
 
-	spin_lock_irq(&rh->region_lock);
 	if (reg->state == RH_CLEAN) {
-		rh->log->type->mark_region(rh->log, reg->key);
-
 		reg->state = RH_DIRTY;
 		list_del_init(&reg->list);	/* take off the clean list */
-	}
-	spin_unlock_irq(&rh->region_lock);
+		spin_unlock_irq(&rh->region_lock);
+
+		rh->log->type->mark_region(rh->log, reg->key);
+	} else
+		spin_unlock_irq(&rh->region_lock);
 
 	read_unlock(&rh->hash_lock);
 }
@@ -408,21 +410,17 @@ static void rh_dec(struct region_hash *rh, region_t region)
 	reg = __rh_lookup(rh, region);
 	read_unlock(&rh->hash_lock);
 
+	spin_lock_irqsave(&rh->region_lock, flags);
 	if (atomic_dec_and_test(&reg->pending)) {
-		spin_lock_irqsave(&rh->region_lock, flags);
-		if (atomic_read(&reg->pending)) { /* check race */
-			spin_unlock_irqrestore(&rh->region_lock, flags);
-			return;
-		}
 		if (reg->state == RH_RECOVERING) {
 			list_add_tail(&reg->list, &rh->quiesced_regions);
 		} else {
 			reg->state = RH_CLEAN;
 			list_add(&reg->list, &rh->clean_regions);
 		}
-		spin_unlock_irqrestore(&rh->region_lock, flags);
 		should_wake = 1;
 	}
+	spin_unlock_irqrestore(&rh->region_lock, flags);
 
 	if (should_wake)
 		wake();
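The rh_dec() hunk above applies the same rule from the other direction: by taking region_lock before atomic_dec_and_test(), the decrement, the zero test, the transition back to RH_CLEAN (or onto the quiesced list) and the list insertion are serialized against rh_inc(), so the old "check race" re-read of 'pending' is no longer needed.  Continuing the hypothetical userspace sketch above (same illustrative struct region, not the driver code), the dec side looks roughly like this:

/* Dec side of the illustrative sketch above; should_wake tells the
 * caller to wake the daemon, as wake() does in the driver. */
static int region_dec(struct region *reg)
{
	int should_wake = 0;

	pthread_mutex_lock(&reg->lock);
	if (--reg->pending == 0) {
		/* The zero test, the state change and (in the driver) the
		 * move to the clean or quiesced list form one atomic step. */
		reg->state = CLEAN;
		should_wake = 1;
	}
	pthread_mutex_unlock(&reg->lock);

	return should_wake;
}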