Commit 531f4b1a authored by Chris Mason

Merge branch 'for-chris' of git://github.com/sensille/linux into integration

Conflicts:
	fs/btrfs/ctree.h
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parents c06a0e12 7a26285e
@@ -7,6 +7,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
-	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
+	   reada.o

 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
@@ -1074,6 +1074,7 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_freespace_worker;
 	struct btrfs_workers submit_workers;
 	struct btrfs_workers caching_workers;
+	struct btrfs_workers readahead_workers;

 	/*
 	 * fixup workers take dirty pages that didn't properly go through
@@ -1158,6 +1159,10 @@ struct btrfs_fs_info {

 	struct btrfs_delayed_root *delayed_root;

+	/* readahead tree */
+	spinlock_t reada_lock;
+	struct radix_tree_root reada_tree;
+
 	/* next backup root to be overwritten */
 	int backup_root_index;
 };
@@ -2812,4 +2817,20 @@ int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
 			 struct btrfs_scrub_progress *progress);

+/* reada.c */
+struct reada_control {
+	struct btrfs_root	*root;		/* tree to prefetch */
+	struct btrfs_key	key_start;
+	struct btrfs_key	key_end;	/* exclusive */
+	atomic_t		elems;
+	struct kref		refcnt;
+	wait_queue_head_t	wait;
+};
+struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+			      struct btrfs_key *start, struct btrfs_key *end);
+int btrfs_reada_wait(void *handle);
+void btrfs_reada_detach(void *handle);
+int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			 u64 start, int err);
+
 #endif
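The exported interface above is deliberately small: a caller names a key range in one tree, receives a reada_control handle, and either blocks on completion or detaches. A minimal caller sketch, assuming a valid root and a whole-tree key range (both the helper name and the range are illustrative, not part of this commit):

	/* Sketch: prefetch an entire tree and block until the readahead
	 * finishes. Keys (0,0,0)..(-1,-1,-1) cover the whole tree;
	 * key_end is exclusive per the struct reada_control comment. */
	static int prefetch_whole_tree(struct btrfs_root *root)
	{
		struct reada_control *rc;
		struct btrfs_key start = { .objectid = 0, .type = 0,
					   .offset = 0 };
		struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
					 .offset = (u64)-1 };

		rc = btrfs_reada_add(root, &start, &end);
		if (IS_ERR(rc))
			return PTR_ERR(rc);
		/* blocks until all requested extents were read (or failed) */
		return btrfs_reada_wait(rc);
	}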
@@ -366,7 +366,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 	while (1) {
-		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
+		ret = read_extent_buffer_pages(io_tree, eb, start,
+					       WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
@@ -607,11 +608,47 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
 	end = eb->start + end - 1;
 err:
+	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+		btree_readahead_hook(root, eb, eb->start, ret);
+	}
+
 	free_extent_buffer(eb);
 out:
 	return ret;
 }
+static int btree_io_failed_hook(struct bio *failed_bio,
+			 struct page *page, u64 start, u64 end,
+			 struct extent_state *state)
+{
+	struct extent_io_tree *tree;
+	unsigned long len;
+	struct extent_buffer *eb;
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+	if (!page->private)
+		goto out;
+
+	len = page->private >> 2;
+	WARN_ON(len == 0);
+
+	eb = alloc_extent_buffer(tree, start, len, page);
+	if (eb == NULL)
+		goto out;
+
+	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+		btree_readahead_hook(root, eb, eb->start, -EIO);
+	}
+
+out:
+	return -EIO;	/* we fixed nothing */
+}
+
 static void end_workqueue_bio(struct bio *bio, int err)
 {
 	struct end_io_wq *end_io_wq = bio->bi_private;
@@ -973,11 +1010,43 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 	if (!buf)
 		return 0;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
-				 buf, 0, 0, btree_get_extent, 0);
+				 buf, 0, WAIT_NONE, btree_get_extent, 0);
 	free_extent_buffer(buf);
 	return ret;
 }
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 int mirror_num, struct extent_buffer **eb)
+{
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
+	int ret;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return 0;
+
+	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
+
+	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
+				       btree_get_extent, mirror_num);
+	if (ret) {
+		free_extent_buffer(buf);
+		return ret;
+	}
+
+	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
+		free_extent_buffer(buf);
+		return -EIO;
+	} else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+		*eb = buf;
+	} else {
+		free_extent_buffer(buf);
+	}
+	return 0;
+}
+
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize)
 {
@@ -1904,6 +1973,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->trans_no_join = 0;
 	fs_info->free_chunk_space = 0;

+	/* readahead state */
+	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
+	spin_lock_init(&fs_info->reada_lock);
+
 	fs_info->thread_pool_size = min_t(unsigned long,
 					  num_online_cpus() + 2, 8);
@@ -2103,6 +2176,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);

 	/*
 	 * endios are largely parallel and should have a very
@@ -2113,6 +2189,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
+	fs_info->readahead_workers.idle_thresh = 2;

 	btrfs_start_workers(&fs_info->workers, 1);
 	btrfs_start_workers(&fs_info->generic_worker, 1);
@@ -2126,6 +2203,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
 	btrfs_start_workers(&fs_info->delayed_workers, 1);
 	btrfs_start_workers(&fs_info->caching_workers, 1);
+	btrfs_start_workers(&fs_info->readahead_workers, 1);

 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2855,6 +2933,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->submit_workers);
 	btrfs_stop_workers(&fs_info->delayed_workers);
 	btrfs_stop_workers(&fs_info->caching_workers);
+	btrfs_stop_workers(&fs_info->readahead_workers);

 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -3363,6 +3442,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
+	.readpage_io_failed_hook = btree_io_failed_hook,
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
...
@@ -40,6 +40,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 				      u32 blocksize, u64 parent_transid);
 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 			 u64 parent_transid);
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						   u64 bytenr, u32 blocksize);
 int clean_tree_block(struct btrfs_trans_handle *trans,
...
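reada_tree_block_flagged() is the single-block primitive behind this machinery: it submits a page-lock-only read from a chosen mirror, tags the buffer with EXTENT_BUFFER_READAHEAD so the end-io hooks in disk-io.c report back through btree_readahead_hook(), and hands the buffer out only when it is already uptodate. A hedged caller sketch (the helper name and mirror choice are assumptions, not part of this commit):

	/* Sketch: opportunistically read ahead one tree block from mirror 1.
	 * If the block is already cached and verified we get a referenced
	 * extent_buffer back and must drop it; otherwise the read completes
	 * asynchronously and the end_io hooks notify the readahead code. */
	static void try_reada_one_block(struct btrfs_root *root, u64 bytenr,
					u32 blocksize)
	{
		struct extent_buffer *eb = NULL;

		if (reada_tree_block_flagged(root, bytenr, blocksize,
					     1, &eb) == 0 && eb)
			free_extent_buffer(eb);
	}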
@@ -1919,7 +1919,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		if (!uptodate && tree->ops &&
 		    tree->ops->readpage_io_failed_hook) {
 			ret = tree->ops->readpage_io_failed_hook(bio, page,
-							 start, end, NULL);
+							 start, end, state);
 			if (ret == 0) {
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -3551,8 +3551,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 }

 int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb,
-			     u64 start, int wait,
+			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num)
 {
 	unsigned long i;
@@ -3588,7 +3587,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		if (!wait) {
+		if (wait == WAIT_NONE) {
 			if (!trylock_page(page))
 				goto unlock_exit;
 		} else {
@@ -3632,7 +3631,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (bio)
 		submit_one_bio(READ, bio, mirror_num, bio_flags);

-	if (ret || !wait)
+	if (ret || wait != WAIT_COMPLETE)
 		return ret;

 	for (i = start_i; i < num_pages; i++) {
...
@@ -33,6 +33,7 @@
 #define EXTENT_BUFFER_BLOCKING 1
 #define EXTENT_BUFFER_DIRTY 2
 #define EXTENT_BUFFER_CORRUPT 3
+#define EXTENT_BUFFER_READAHEAD 4	/* this got triggered by readahead */

 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -252,6 +253,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
+#define WAIT_NONE	0
+#define WAIT_COMPLETE	1
+#define WAIT_PAGE_LOCK	2
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num);
...
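The former boolean wait argument of read_extent_buffer_pages() becomes a three-way mode because readahead needs a middle ground: submit the I/O under the page locks but return before it completes. A sketch of the mapping as inferred from the hunks above (the wrapper name is an assumption):

	/* Sketch of the three modes, as used in this commit:
	 *   WAIT_NONE      - trylock the pages, submit reads, return at once
	 *                    (the old wait == 0; readahead_tree_block)
	 *   WAIT_COMPLETE  - lock pages, submit, block until all reads finish
	 *                    (the old wait == 1; btree_read_extent_buffer_pages)
	 *   WAIT_PAGE_LOCK - lock pages and submit, but do not wait for
	 *                    completion (new; reada_tree_block_flagged) */
	static int submit_eb_read_async(struct extent_io_tree *io_tree,
					struct extent_buffer *eb, int mirror_num)
	{
		return read_extent_buffer_pages(io_tree, eb, 0, WAIT_PAGE_LOCK,
						btree_get_extent, mirror_num);
	}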
[new file fs/btrfs/reada.c — diff collapsed in the original view]
@@ -29,15 +29,12 @@
  * any can be found.
  *
  * Future enhancements:
- *  - To enhance the performance, better read-ahead strategies for the
- *    extent-tree can be employed.
  *  - In case an unrepairable extent is encountered, track which files are
  *    affected and report them
  *  - In case of a read error on files with nodatasum, map the file and read
  *    the extent to trigger a writeback of the good copy
  *  - track and record media errors, throw out bad devices
  *  - add a mode to also read unallocated space
- *  - make the prefetch cancellable
  */

 struct scrub_bio;
@@ -741,13 +738,16 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	int slot;
 	int i;
 	u64 nstripes;
-	int start_stripe;
 	struct extent_buffer *l;
 	struct btrfs_key key;
 	u64 physical;
 	u64 logical;
 	u64 generation;
 	u64 mirror_num;
+	struct reada_control *reada1;
+	struct reada_control *reada2;
+	struct btrfs_key key_start;
+	struct btrfs_key key_end;

 	u64 increment = map->stripe_len;
 	u64 offset;
@@ -779,81 +779,67 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 	if (!path)
 		return -ENOMEM;

-	path->reada = 2;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;

 	/*
-	 * find all extents for each stripe and just read them to get
-	 * them into the page cache
-	 * FIXME: we can do better. build a more intelligent prefetching
+	 * trigger the readahead for extent tree csum tree and wait for
+	 * completion. During readahead, the scrub is officially paused
+	 * to not hold off transaction commits
 	 */
 	logical = base + offset;
-	physical = map->stripes[num].physical;
-	ret = 0;
-	for (i = 0; i < nstripes; ++i) {
-		key.objectid = logical;
-		key.type = BTRFS_EXTENT_ITEM_KEY;
-		key.offset = (u64)0;
-
-		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-		if (ret < 0)
-			goto out_noplug;
-
-		/*
-		 * we might miss half an extent here, but that doesn't matter,
-		 * as it's only the prefetch
-		 */
-		while (1) {
-			l = path->nodes[0];
-			slot = path->slots[0];
-			if (slot >= btrfs_header_nritems(l)) {
-				ret = btrfs_next_leaf(root, path);
-				if (ret == 0)
-					continue;
-				if (ret < 0)
-					goto out_noplug;
-				break;
-			}
-			btrfs_item_key_to_cpu(l, &key, slot);
-
-			if (key.objectid >= logical + map->stripe_len)
-				break;
-
-			path->slots[0]++;
-		}
-		btrfs_release_path(path);
-		logical += increment;
-		physical += map->stripe_len;
-		cond_resched();
-	}
+	wait_event(sdev->list_wait,
+		   atomic_read(&sdev->in_flight) == 0);
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
+
+	/* FIXME it might be better to start readahead at commit root */
+	key_start.objectid = logical;
+	key_start.type = BTRFS_EXTENT_ITEM_KEY;
+	key_start.offset = (u64)0;
+	key_end.objectid = base + offset + nstripes * increment;
+	key_end.type = BTRFS_EXTENT_ITEM_KEY;
+	key_end.offset = (u64)0;
+	reada1 = btrfs_reada_add(root, &key_start, &key_end);
+
+	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key_start.type = BTRFS_EXTENT_CSUM_KEY;
+	key_start.offset = logical;
+	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key_end.type = BTRFS_EXTENT_CSUM_KEY;
+	key_end.offset = base + offset + nstripes * increment;
+	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
+
+	if (!IS_ERR(reada1))
+		btrfs_reada_wait(reada1);
+	if (!IS_ERR(reada2))
+		btrfs_reada_wait(reada2);
+
+	mutex_lock(&fs_info->scrub_lock);
+	while (atomic_read(&fs_info->scrub_pause_req)) {
+		mutex_unlock(&fs_info->scrub_lock);
+		wait_event(fs_info->scrub_pause_wait,
+			   atomic_read(&fs_info->scrub_pause_req) == 0);
+		mutex_lock(&fs_info->scrub_lock);
+	}
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	wake_up(&fs_info->scrub_pause_wait);
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
 	 * the scrub. This might currently (crc32) end up to be about 1MB
 	 */
-	start_stripe = 0;
 	blk_start_plug(&plug);
-again:
-	logical = base + offset + start_stripe * increment;
-	for (i = start_stripe; i < nstripes; ++i) {
-		ret = btrfs_lookup_csums_range(csum_root, logical,
-					       logical + map->stripe_len - 1,
-					       &sdev->csum_list, 1);
-		if (ret)
-			goto out;
-		logical += increment;
-		cond_resched();
-	}

 	/*
 	 * now find all extents for each stripe and scrub them
 	 */
-	logical = base + offset + start_stripe * increment;
-	physical = map->stripes[num].physical + start_stripe * map->stripe_len;
+	logical = base + offset;
+	physical = map->stripes[num].physical;
 	ret = 0;
-	for (i = start_stripe; i < nstripes; ++i) {
+	for (i = 0; i < nstripes; ++i) {
 		/*
 		 * canceled?
 		 */
@@ -882,11 +868,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 			atomic_dec(&fs_info->scrubs_paused);
 			mutex_unlock(&fs_info->scrub_lock);
 			wake_up(&fs_info->scrub_pause_wait);
-			scrub_free_csums(sdev);
-			start_stripe = i;
-			goto again;
 		}

+		ret = btrfs_lookup_csums_range(csum_root, logical,
+					       logical + map->stripe_len - 1,
+					       &sdev->csum_list, 1);
+		if (ret)
+			goto out;
+
 		key.objectid = logical;
 		key.type = BTRFS_EXTENT_ITEM_KEY;
 		key.offset = (u64)0;
@@ -982,7 +971,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,

 out:
 	blk_finish_plug(&plug);
-out_noplug:
 	btrfs_free_path(path);
 	return ret < 0 ? ret : 0;
 }
...
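scrub_stripe() above shows the blocking use of the readahead interface; a caller wanting purely opportunistic background prefetch could detach instead of waiting. A hypothetical sketch (the helper name is an assumption):

	/* Sketch: start readahead on a key range and let it run in the
	 * background; detaching drops our handle while the reads continue. */
	static void reada_fire_and_forget(struct btrfs_root *root,
					  struct btrfs_key *start,
					  struct btrfs_key *end)
	{
		struct reada_control *rc;

		rc = btrfs_reada_add(root, start, end);
		if (!IS_ERR(rc))
			btrfs_reada_detach(rc);
	}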
@@ -366,6 +366,14 @@ static noinline int device_list_add(const char *path,
 		}
 		INIT_LIST_HEAD(&device->dev_alloc_list);

+		/* init readahead state */
+		spin_lock_init(&device->reada_lock);
+		device->reada_curr_zone = NULL;
+		atomic_set(&device->reada_in_flight, 0);
+		device->reada_next = 0;
+		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
+		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
+
 		mutex_lock(&fs_devices->device_list_mutex);
 		list_add_rcu(&device->dev_list, &fs_devices->devices);
 		mutex_unlock(&fs_devices->device_list_mutex);
...
@@ -92,6 +92,14 @@ struct btrfs_device {
 	struct btrfs_work work;
 	struct rcu_head rcu;
 	struct work_struct rcu_work;
+
+	/* readahead state */
+	spinlock_t reada_lock;
+	atomic_t reada_in_flight;
+	u64 reada_next;
+	struct reada_zone *reada_curr_zone;
+	struct radix_tree_root reada_zones;
+	struct radix_tree_root reada_extents;
 };

 struct btrfs_fs_devices {
...