Commit a1b32a59 authored by Chris Mason

Btrfs: Add debugging checks to track down corrupted metadata

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 95819c05
...@@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, ...@@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
if (!ret && if (!ret &&
!verify_parent_transid(io_tree, eb, parent_transid)) !verify_parent_transid(io_tree, eb, parent_transid))
return ret; return ret;
printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
eb->start, eb->len); eb->start, eb->len);
if (num_copies == 1) if (num_copies == 1)
...@@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, ...@@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
found_start = btrfs_header_bytenr(eb); found_start = btrfs_header_bytenr(eb);
if (found_start != start) { if (found_start != start) {
printk("bad tree block start %llu %llu\n",
(unsigned long long)found_start,
(unsigned long long)eb->start);
ret = -EIO; ret = -EIO;
goto err; goto err;
} }
...@@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, ...@@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
if (ret == 0) { if (ret == 0) {
buf->flags |= EXTENT_UPTODATE; buf->flags |= EXTENT_UPTODATE;
} else {
WARN_ON(1);
} }
return buf; return buf;
......
...@@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); ...@@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur);
} }
/* the get_extent function already copied into the page */ /* the get_extent function already copied into the page */
if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) { if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
check_page_uptodate(tree, page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize; cur = cur + iosize;
page_offset += iosize; page_offset += iosize;
...@@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, ...@@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
* properly set. releasepage may drop page->private * properly set. releasepage may drop page->private
* on us if the page isn't already dirty. * on us if the page isn't already dirty.
*/ */
if (i == 0) {
lock_page(page); lock_page(page);
if (i == 0) {
set_page_extent_head(page, eb->len); set_page_extent_head(page, eb->len);
} else if (PagePrivate(page) && } else if (PagePrivate(page) &&
page->private != EXTENT_PAGE_PRIVATE) { page->private != EXTENT_PAGE_PRIVATE) {
lock_page(page);
set_page_extent_mapped(page); set_page_extent_mapped(page);
unlock_page(page);
} }
__set_page_dirty_nobuffers(extent_buffer_page(eb, i)); __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
if (i == 0) set_extent_dirty(tree, page_offset(page),
page_offset(page) + PAGE_CACHE_SIZE -1,
GFP_NOFS);
unlock_page(page); unlock_page(page);
} }
return set_extent_dirty(tree, eb->start, return 0;
eb->start + eb->len - 1, GFP_NOFS);
} }
EXPORT_SYMBOL(set_extent_buffer_dirty); EXPORT_SYMBOL(set_extent_buffer_dirty);
...@@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, ...@@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (all_uptodate) { if (all_uptodate) {
if (start_i == 0) if (start_i == 0)
eb->flags |= EXTENT_UPTODATE; eb->flags |= EXTENT_UPTODATE;
if (ret) {
printk("all up to date but ret is %d\n", ret);
}
goto unlock_exit; goto unlock_exit;
} }
...@@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, ...@@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
mirror_num); mirror_num);
if (err) { if (err) {
ret = err; ret = err;
printk("err %d from __extent_read_full_page\n", ret);
} }
} else { } else {
unlock_page(page); unlock_page(page);
...@@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, ...@@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
submit_one_bio(READ, bio, mirror_num); submit_one_bio(READ, bio, mirror_num);
if (ret || !wait) { if (ret || !wait) {
if (ret)
printk("ret %d wait %d returning\n", ret, wait);
return ret; return ret;
} }
for (i = start_i; i < num_pages; i++) { for (i = start_i; i < num_pages; i++) {
page = extent_buffer_page(eb, i); page = extent_buffer_page(eb, i);
wait_on_page_locked(page); wait_on_page_locked(page);
if (!PageUptodate(page)) { if (!PageUptodate(page)) {
printk("page not uptodate after wait_on_page_locked\n");
ret = -EIO; ret = -EIO;
} }
} }
......
...@@ -39,7 +39,8 @@ ...@@ -39,7 +39,8 @@
#include "compat.h" #include "compat.h"
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
int write_bytes,
struct page **prepared_pages, struct page **prepared_pages,
const char __user * buf) const char __user * buf)
{ {
...@@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, ...@@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
return page_fault ? -EFAULT : 0; return page_fault ? -EFAULT : 0;
} }
static void btrfs_drop_pages(struct page **pages, size_t num_pages) static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
{ {
size_t i; size_t i;
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
...@@ -359,7 +360,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, ...@@ -359,7 +360,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
return err; return err;
} }
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{ {
struct extent_map *em; struct extent_map *em;
struct extent_map *split = NULL; struct extent_map *split = NULL;
...@@ -515,7 +516,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) ...@@ -515,7 +516,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
* it is either truncated or split. Anything entirely inside the range * it is either truncated or split. Anything entirely inside the range
* is deleted from the tree. * is deleted from the tree.
*/ */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode, struct btrfs_root *root, struct inode *inode,
u64 start, u64 end, u64 inline_limit, u64 *hint_byte) u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{ {
...@@ -785,7 +786,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, ...@@ -785,7 +786,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
/* /*
* this gets pages into the page cache and locks them down * this gets pages into the page cache and locks them down
*/ */
static int prepare_pages(struct btrfs_root *root, struct file *file, static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
struct page **pages, size_t num_pages, struct page **pages, size_t num_pages,
loff_t pos, unsigned long first_index, loff_t pos, unsigned long first_index,
unsigned long last_index, size_t write_bytes) unsigned long last_index, size_t write_bytes)
......
...@@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void) ...@@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void)
return 0; return 0;
} }
static struct btrfs_device *__find_device(struct list_head *head, u64 devid, static noinline struct btrfs_device *__find_device(struct list_head *head,
u8 *uuid) u64 devid, u8 *uuid)
{ {
struct btrfs_device *dev; struct btrfs_device *dev;
struct list_head *cur; struct list_head *cur;
...@@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid, ...@@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
return NULL; return NULL;
} }
static struct btrfs_fs_devices *find_fsid(u8 *fsid) static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{ {
struct list_head *cur; struct list_head *cur;
struct btrfs_fs_devices *fs_devices; struct btrfs_fs_devices *fs_devices;
...@@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid) ...@@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid)
* the list if the block device is congested. This way, multiple devices * the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread. * can make progress from a single worker thread.
*/ */
int run_scheduled_bios(struct btrfs_device *device) static int noinline run_scheduled_bios(struct btrfs_device *device)
{ {
struct bio *pending; struct bio *pending;
struct backing_dev_info *bdi; struct backing_dev_info *bdi;
...@@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work) ...@@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work)
run_scheduled_bios(device); run_scheduled_bios(device);
} }
static int device_list_add(const char *path, static noinline int device_list_add(const char *path,
struct btrfs_super_block *disk_super, struct btrfs_super_block *disk_super,
u64 devid, struct btrfs_fs_devices **fs_devices_ret) u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{ {
...@@ -480,7 +480,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder, ...@@ -480,7 +480,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder,
* called very infrequently and that a given device has a small number * called very infrequently and that a given device has a small number
* of extents * of extents
*/ */
static int find_free_dev_extent(struct btrfs_trans_handle *trans, static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, struct btrfs_device *device,
struct btrfs_path *path, struct btrfs_path *path,
u64 num_bytes, u64 *start) u64 num_bytes, u64 *start)
...@@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, ...@@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
return ret; return ret;
} }
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, struct btrfs_device *device,
u64 chunk_tree, u64 chunk_objectid, u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset, u64 chunk_offset,
...@@ -693,7 +693,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, ...@@ -693,7 +693,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
return ret; return ret;
} }
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset) static noinline int find_next_chunk(struct btrfs_root *root,
u64 objectid, u64 *offset)
{ {
struct btrfs_path *path; struct btrfs_path *path;
int ret; int ret;
...@@ -735,8 +736,8 @@ static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset) ...@@ -735,8 +736,8 @@ static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
return ret; return ret;
} }
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path, static noinline int find_next_devid(struct btrfs_root *root,
u64 *objectid) struct btrfs_path *path, u64 *objectid)
{ {
int ret; int ret;
struct btrfs_key key; struct btrfs_key key;
...@@ -1103,7 +1104,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) ...@@ -1103,7 +1104,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
goto out; goto out;
} }
int btrfs_update_device(struct btrfs_trans_handle *trans, int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device) struct btrfs_device *device)
{ {
int ret; int ret;
...@@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, ...@@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
return 0; return 0;
} }
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes, static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
int sub_stripes) int num_stripes, int sub_stripes)
{ {
if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
return calc_size; return calc_size;
...@@ -2141,7 +2142,8 @@ struct async_sched { ...@@ -2141,7 +2142,8 @@ struct async_sched {
* This will add one bio to the pending list for a device and make sure * This will add one bio to the pending list for a device and make sure
* the work struct is scheduled. * the work struct is scheduled.
*/ */
int schedule_bio(struct btrfs_root *root, struct btrfs_device *device, static int noinline schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio) int rw, struct bio *bio)
{ {
int should_queue = 1; int should_queue = 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment