Commit df98b6e2 authored by Josef Bacik

Btrfs: fix how we merge extent states and deal with cached states

First, we can sometimes free the state we're merging, which means anybody who
calls merge_state() may have the state it passed in freed.  This is
problematic because we could end up caching that state, which makes caching
useless since the state will no longer be part of the tree.  So instead of
freeing the state passed into merge_state(), set its end to the other
state's end and free the other state.  This way we are sure to cache the
correct state.  Also, because we can merge states together, instead of only
using the cached state when its start equals the start we are looking for,
go ahead and use it whenever the start we are looking for falls within the
cached state's range.  Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
parent 2f356126
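
Before the diff, a minimal standalone C sketch of the merge direction the
first fix establishes.  This is not the kernel code: struct toy_state,
toy_merge() and main() are invented for illustration, and the rb-tree
bookkeeping, merge callbacks and refcounting are all elided.  The point is
only that merging now absorbs the right-hand neighbour into the caller's
state, so the pointer the caller may go on to cache stays valid.

#include <stdlib.h>

/* Simplified stand-in for struct extent_state: just an inclusive range. */
struct toy_state {
	unsigned long long start;
	unsigned long long end;
};

/*
 * Merge 'other' (the right-hand neighbour) into 'state' rather than the
 * reverse.  The caller's 'state' pointer remains valid afterwards; only
 * the absorbed neighbour is freed.
 */
static void toy_merge(struct toy_state *state, struct toy_state *other)
{
	if (other->start == state->end + 1) {
		state->end = other->end;	/* grow the caller's state */
		free(other);			/* free the neighbour, never 'state' */
	}
}

int main(void)
{
	struct toy_state *a = malloc(sizeof(*a));
	struct toy_state *b = malloc(sizeof(*b));

	a->start = 0;    a->end = 4095;
	b->start = 4096; b->end = 8191;
	toy_merge(a, b);	/* a now covers [0, 8191]; b is gone */
	free(a);
	return 0;
}
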
fs/btrfs/extent_io.c

@@ -279,11 +279,10 @@ static int merge_state(struct extent_io_tree *tree,
 		if (other->start == state->end + 1 &&
 		    other->state == state->state) {
 			merge_cb(tree, state, other);
-			other->start = state->start;
-			state->tree = NULL;
-			rb_erase(&state->rb_node, &tree->state);
-			free_extent_state(state);
-			state = NULL;
+			state->end = other->end;
+			other->tree = NULL;
+			rb_erase(&other->rb_node, &tree->state);
+			free_extent_state(other);
 		}
 	}

@@ -349,7 +348,6 @@ static int insert_state(struct extent_io_tree *tree,
 		       "%llu %llu\n", (unsigned long long)found->start,
 		       (unsigned long long)found->end,
 		       (unsigned long long)start, (unsigned long long)end);
-		free_extent_state(state);
 		return -EEXIST;
 	}
 	state->tree = tree;

@@ -498,7 +496,8 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 			cached_state = NULL;
 	}
 
-	if (cached && cached->tree && cached->start == start) {
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start) {
 		if (clear)
 			atomic_dec(&cached->refs);
 		state = cached;

@@ -740,7 +739,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	spin_lock(&tree->lock);
 	if (cached_state && *cached_state) {
 		state = *cached_state;
-		if (state->start == start && state->tree) {
+		if (state->start <= start && state->end > start &&
+		    state->tree) {
 			node = &state->rb_node;
 			goto hit_next;
 		}

@@ -781,13 +781,13 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		if (err)
 			goto out;
 
-		next_node = rb_next(node);
 		cache_state(state, cached_state);
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
 
 		start = last_end + 1;
+		next_node = rb_next(&state->rb_node);
 		if (next_node && start < end && prealloc && !need_resched()) {
 			state = rb_entry(next_node, struct extent_state,
 					 rb_node);

@@ -860,7 +860,6 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		 * Avoid to free 'prealloc' if it can be merged with
 		 * the later extent.
 		 */
-		atomic_inc(&prealloc->refs);
 		err = insert_state(tree, prealloc, start, this_end,
 				   &bits);
 		BUG_ON(err == -EEXIST);

@@ -870,7 +869,6 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 			goto out;
 		}
 		cache_state(prealloc, cached_state);
-		free_extent_state(prealloc);
 		prealloc = NULL;
 		start = this_end + 1;
 		goto search_again;

@@ -1562,7 +1560,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	if (cached && cached->tree && cached->start == start)
+	if (cached && cached->tree && cached->start <= start &&
+	    cached->end > start)
 		node = &cached->rb_node;
 	else
 		node = tree_search(tree, start);
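
The three cached-state hunks (clear_extent_bit, set_extent_bit and
test_range_bit) all apply the same containment test.  A sketch of that
predicate, using a hypothetical helper name (cached_state_covers) that does
not exist in the kernel source:

#include <stdbool.h>

/*
 * Hypothetical helper (for illustration only) expressing the relaxed cache
 * test the patch applies: the cached state is reusable whenever 'start'
 * falls inside its range, not only when the state begins exactly at
 * 'start'.  Note it keeps 'end > start', mirroring the hunks above.
 */
static bool cached_state_covers(unsigned long long cached_start,
				unsigned long long cached_end,
				unsigned long long start)
{
	return cached_start <= start && cached_end > start;
}

For example, a cached state spanning [0, 4095] now satisfies a lookup
starting at 2048, which the old cached->start == start test would have
rejected, forcing a redundant tree search.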