Commit ae2f5411 authored by Jens Axboe, committed by Chris Mason

btrfs: 32-bit type problems

An assorted set of casts to get rid of the warnings on 32-bit archs.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 44b36eb2
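
For context on the warnings being silenced: on a 32-bit arch a pointer is 32 bits, while values like the block group cache's private slot are u64, so casting a u64 straight back to a pointer makes gcc warn about a cast to pointer from an integer of different size. Going through unsigned long, which is always pointer-sized in the kernel, keeps both steps size-clean. A minimal userspace sketch of the retrieval direction, with uint64_t standing in for u64 (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct cache { int id; };

int main(void)
{
	struct cache c = { 42 };
	uint64_t stored = (unsigned long)&c;	/* pretend this was stashed earlier */

	/* (struct cache *)stored warns on 32-bit because the integer is wider
	 * than the pointer; narrowing to unsigned long first is exact there. */
	struct cache *p = (struct cache *)(unsigned long)stored;

	printf("%d\n", p->id);
	return 0;
}
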
@@ -1389,7 +1389,7 @@ int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf)
 	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
 	if (ret < 0) {
 		printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
-		       ret, BTRFS_LEAF_DATA_SIZE(root),
+		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
 		       leaf_space_used(leaf, 0, nritems), nritems);
 	}
 	return ret;
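
The hunk above keeps the printk argument in step with its format string: %lu takes an unsigned long, and the explicit cast gives the argument that type no matter what BTRFS_LEAF_DATA_SIZE() evaluates to on a particular configuration. A hedged userspace equivalent, where DATA_SIZE is a made-up stand-in rather than the real btrfs macro:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in; assume its result type varies with the config. */
#define DATA_SIZE(nodesize)	((size_t)(nodesize))

int main(void)
{
	int ret = -5;

	/* %lu requires an unsigned long argument; the cast guarantees the
	 * match whatever type DATA_SIZE() happens to carry. */
	printf("leaf free space ret %d, leaf data size %lu\n",
	       ret, (unsigned long)DATA_SIZE(4096));
	return 0;
}
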
@@ -149,7 +149,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
 	if (ret)
 		return NULL;
-	block_group = (struct btrfs_block_group_cache *)ptr;
+	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 	if (block_group->key.objectid <= bytenr && bytenr <=
@@ -279,7 +279,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
 		if (ret)
 			break;
-		cache = (struct btrfs_block_group_cache *)ptr;
+		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 		last = cache->key.objectid + cache->key.offset;
 		used = btrfs_block_group_used(&cache->item);
@@ -537,7 +537,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		if (ret)
 			break;
-		cache = (struct btrfs_block_group_cache *)ptr;
+		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
 		err = write_one_cache_group(trans, root,
 					    path, cache);
 		/*
@@ -1541,7 +1541,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 				found_key.objectid + found_key.offset - 1,
 				bit | EXTENT_LOCKED, GFP_NOFS);
 		set_state_private(block_group_cache, found_key.objectid,
-				  (u64)cache);
+				  (unsigned long)cache);
 		if (key.objectid >=
 		    btrfs_super_total_bytes(&info->super_copy))
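
The extent-tree hunks above are the two halves of one round trip: a block group pointer is stashed through set_state_private() as a u64 and later read back. The old (u64)cache cast still warned on 32-bit because gcc flags any pointer-to-integer cast whose sizes differ, even a widening one; (unsigned long)cache is size-matched, and the promotion to the u64 parameter then happens implicitly. A small sketch of the store side under those assumptions (set_private and the types are illustrative, not the btrfs API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the extent-state private slot. */
static uint64_t private_slot;

static void set_private(uint64_t val)
{
	private_slot = val;
}

struct block_group { uint64_t objectid; };

int main(void)
{
	struct block_group bg = { .objectid = 12345 };

	/* Passing &bg directly makes an integer from a pointer without a
	 * cast, and (uint64_t)&bg warns on 32-bit even though it widens.
	 * The pointer-sized unsigned long cast is silent, and the value
	 * then widens to the 64-bit parameter on its own. */
	set_private((unsigned long)&bg);

	printf("stored 0x%llx\n", (unsigned long long)private_slot);
	return 0;
}
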
@@ -2654,8 +2654,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
 					       src_off_in_page));
-		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
-			  dst_off_in_page));
+		cur = min_t(unsigned long, cur,
+			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
 		copy_pages(extent_buffer_page(dst, dst_i),
 			   extent_buffer_page(dst, src_i),
@@ -2707,7 +2707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		if (dst_i == 0)
 			dst_off_in_page += start_offset;
-		cur = min(len, src_off_in_page + 1);
+		cur = min_t(unsigned long, len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
 		move_pages(extent_buffer_page(dst, dst_i),
 			   extent_buffer_page(dst, src_i),
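
The remaining hunks trade min() for min_t(). The kernel's min() insists that both operands have the same type (its typeof/pointer-comparison trick emits a "comparison of distinct pointer types" warning otherwise), and which types happen to line up differs between 32-bit and 64-bit builds; min_t() casts both sides to one named type instead. A userspace sketch with simplified stand-ins for the kernel macros (the real ones also guard against double evaluation):

#include <stdio.h>

/* Simplified stand-ins. kmin() mirrors the kernel min(): the address
 * comparison is only there to make the compiler warn when the two
 * operand types differ. min_t() just casts both sides. */
#define kmin(x, y) ({				\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	(void)(&_x == &_y);			\
	_x < _y ? _x : _y; })

#define min_t(type, x, y) \
	((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	unsigned long cur = 4096;
	unsigned int off = 100;

	/* kmin(cur, off) would draw "comparison of distinct pointer types
	 * lacks a cast"; naming the comparison type resolves it. */
	unsigned long n = min_t(unsigned long, cur, off);

	printf("%lu\n", n);
	return 0;
}
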
@@ -188,7 +188,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
 	while (size > 0) {
 		page = pages[i];
 		kaddr = kmap_atomic(page, KM_USER0);
-		cur_size = min(PAGE_CACHE_SIZE - page_offset, size);
+		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
 		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_offset = 0;
@@ -1606,7 +1606,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 				extent_start;
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
 		map = kmap(page);
-		copy_size = min(PAGE_CACHE_SIZE - page_offset,
+		copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
 				size - extent_offset);
 		em->block_start = EXTENT_MAP_INLINE;
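
One last note on the two inode hunks: the type handed to min_t() matters. insert_inline_extent() works in page-sized chunks and picks size_t, while btrfs_get_extent() compares against a u64 quantity and names u64; choosing a 32-bit type there would truncate the 64-bit operand before the comparison. A small sketch of that pitfall (values and types picked for illustration, not taken from the btrfs sources):

#include <stdint.h>
#include <stdio.h>

#define min_t(type, x, y) \
	((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	uint64_t remaining = 0x100000000ULL;	/* 4 GiB still to copy */
	unsigned long chunk = 4096;		/* one page worth */

	/* Compare in 64 bits: the page-sized chunk wins, as intended. */
	uint64_t ok = min_t(uint64_t, chunk, remaining);

	/* Compare in 32 bits (what unsigned long is on a 32-bit arch):
	 * 'remaining' truncates to 0 and the "minimum" comes out 0. */
	uint64_t bad = min_t(uint32_t, chunk, remaining);

	printf("ok=%llu bad=%llu\n",
	       (unsigned long long)ok, (unsigned long long)bad);
	return 0;
}
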