Commit ae2f5411 authored by Jens Axboe, committed by Chris Mason

btrfs: 32-bit type problems

An assorted set of casts to get rid of the warnings on 32-bit archs.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 44b36eb2
...@@ -1389,7 +1389,7 @@ int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf) ...@@ -1389,7 +1389,7 @@ int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf)
ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
if (ret < 0) { if (ret < 0) {
printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n", printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
ret, BTRFS_LEAF_DATA_SIZE(root), ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
leaf_space_used(leaf, 0, nritems), nritems); leaf_space_used(leaf, 0, nritems), nritems);
} }
return ret; return ret;
......
...@@ -149,7 +149,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct ...@@ -149,7 +149,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
if (ret) if (ret)
return NULL; return NULL;
block_group = (struct btrfs_block_group_cache *)ptr; block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
if (block_group->key.objectid <= bytenr && bytenr <= if (block_group->key.objectid <= bytenr && bytenr <=
...@@ -279,7 +279,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, ...@@ -279,7 +279,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
if (ret) if (ret)
break; break;
cache = (struct btrfs_block_group_cache *)ptr; cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
last = cache->key.objectid + cache->key.offset; last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item); used = btrfs_block_group_used(&cache->item);
...@@ -537,7 +537,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, ...@@ -537,7 +537,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
if (ret) if (ret)
break; break;
cache = (struct btrfs_block_group_cache *)ptr; cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
err = write_one_cache_group(trans, root, err = write_one_cache_group(trans, root,
path, cache); path, cache);
/* /*
...@@ -1541,7 +1541,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) ...@@ -1541,7 +1541,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
found_key.objectid + found_key.offset - 1, found_key.objectid + found_key.offset - 1,
bit | EXTENT_LOCKED, GFP_NOFS); bit | EXTENT_LOCKED, GFP_NOFS);
set_state_private(block_group_cache, found_key.objectid, set_state_private(block_group_cache, found_key.objectid,
(u64)cache); (unsigned long)cache);
if (key.objectid >= if (key.objectid >=
btrfs_super_total_bytes(&info->super_copy)) btrfs_super_total_bytes(&info->super_copy))
......
...@@ -2654,8 +2654,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, ...@@ -2654,8 +2654,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
src_off_in_page)); src_off_in_page));
cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE - cur = min_t(unsigned long, cur,
dst_off_in_page)); (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
copy_pages(extent_buffer_page(dst, dst_i), copy_pages(extent_buffer_page(dst, dst_i),
extent_buffer_page(dst, src_i), extent_buffer_page(dst, src_i),
...@@ -2707,7 +2707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, ...@@ -2707,7 +2707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
if (dst_i == 0) if (dst_i == 0)
dst_off_in_page += start_offset; dst_off_in_page += start_offset;
cur = min(len, src_off_in_page + 1); cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1); cur = min(cur, dst_off_in_page + 1);
move_pages(extent_buffer_page(dst, dst_i), move_pages(extent_buffer_page(dst, dst_i),
extent_buffer_page(dst, src_i), extent_buffer_page(dst, src_i),
......
...@@ -188,7 +188,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans, ...@@ -188,7 +188,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
while (size > 0) { while (size > 0) {
page = pages[i]; page = pages[i];
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page, KM_USER0);
cur_size = min(PAGE_CACHE_SIZE - page_offset, size); cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size); write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
page_offset = 0; page_offset = 0;
......
...@@ -1606,7 +1606,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, ...@@ -1606,7 +1606,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
extent_start; extent_start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset; ptr = btrfs_file_extent_inline_start(item) + extent_offset;
map = kmap(page); map = kmap(page);
copy_size = min(PAGE_CACHE_SIZE - page_offset, copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
size - extent_offset); size - extent_offset);
em->block_start = EXTENT_MAP_INLINE; em->block_start = EXTENT_MAP_INLINE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment