Commit 55be71cf authored by Mika Kukkonen, committed by Linus Torvalds

[PATCH] Combined patch for remaining trivial sparse warnings in allnoconfig build

Well, one of these (fs/block_dev.c) is a little non-trivial, but I felt
throwing it away would be a shame (and I did add comments ;-).

Almost all of these have also been submitted earlier through other
channels, but have not been picked up (the only controversial one is
again the fs/block_dev.c patch, where Linus felt a better job would be
done with __ffs(), but I could not convince myself that it does the
same thing as the original code).
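
For what it's worth, here is a minimal userspace sketch, not part of the
patch, suggesting the two do agree for the power-of-two sizes that
set_blocksize() accepts; my_ffs() is a made-up stand-in for the kernel's
__ffs() (0-based index of the lowest set bit), and PAGE_SIZE is assumed
to be 4096:

#include <assert.h>
#include <stdio.h>

/* stand-in for the kernel's __ffs(): index of the lowest set bit */
static int my_ffs(unsigned long x)
{
	int bit = 0;

	while (!(x & 1)) {
		x >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	/* set_blocksize() only accepts powers of two in [512, 4096] */
	for (int size = 512; size <= 4096; size <<= 1) {
		int bits = 9;	/* 2^9 = 512, as in the patched loop */

		for (int s = size >> 10; s; s >>= 1)
			++bits;
		assert(bits == my_ffs(size));	/* equal for powers of two */
		printf("size=%4d bits=%d\n", size, bits);
	}
	return 0;
}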
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9e1674ef
@@ -219,7 +219,7 @@ long strnlen_user(const char __user *s, long n)
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
-__copy_user_intel(void *to, const void *from,unsigned long size)
+__copy_user_intel(void __user *to, const void *from, unsigned long size)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -326,7 +326,7 @@ __copy_user_intel(void *to, const void *from,unsigned long size)
 }
 
 static unsigned long
-__copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -425,9 +425,9 @@ __copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
  * them
  */
 unsigned long
-__copy_user_zeroing_intel(void *to, const void *from, unsigned long size);
+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
 unsigned long
-__copy_user_intel(void *to, const void *from,unsigned long size);
+__copy_user_intel(void __user *to, const void *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
@@ -562,9 +562,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long
 	}
 #endif
 	if (movsl_is_ok(to, from, n))
-		__copy_user((void *)to, from, n);
+		__copy_user(to, from, n);
 	else
-		n = __copy_user_intel((void *)to, from, n);
+		n = __copy_user_intel(to, from, n);
 	return n;
 }
 
@@ -572,9 +572,9 @@ unsigned long
 __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
 {
 	if (movsl_is_ok(to, from, n))
-		__copy_user_zeroing(to, (const void *) from, n);
+		__copy_user_zeroing(to, from, n);
 	else
-		n = __copy_user_zeroing_intel(to, (const void *) from, n);
+		n = __copy_user_zeroing_intel(to, from, n);
 	return n;
 }
...
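
The __user changes above are annotation-only: when sparse runs (with
__CHECKER__ defined), __user puts a pointer into a separate address
space, so mixing user and kernel pointers, or the casts that used to
paper over the difference, draw warnings. A minimal sketch of the
mechanism, using the __user definition from include/linux/compiler.h
and made-up function names; compile it with sparse to see the complaint:

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif

void wants_kernel_ptr(void *p);		/* hypothetical kernel-side helper */

void example(void __user *uptr)		/* hypothetical caller */
{
	wants_kernel_ptr(uptr);	/* sparse: incorrect type in argument 1
				 * (different address spaces) */
}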
@@ -211,7 +211,7 @@ struct request *elv_next_request(request_queue_t *q)
 	struct request *rq;
 	int ret;
 
-	while ((rq = __elv_next_request(q))) {
+	while ((rq = __elv_next_request(q)) != NULL) {
 		/*
 		 * just mark as started even if we don't start it, a request
 		 * that has been delayed should not be passed by new incoming
...
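
The only change here is spelling the loop condition as an explicit NULL
test, so neither checkers nor readers have to wonder whether the
assignment-as-truth-value was intentional; the same one-line pattern
recurs below in lib/kobject.c, lib/rbtree.c, mm/filemap.c and
mm/vmalloc.c. A trivial standalone illustration of the idiom:

#include <stdio.h>

static const char *items[] = { "alpha", "beta", NULL };

int main(void)
{
	const char **p = items;
	const char *item;

	/* assign and test in one step, with the comparison explicit */
	while ((item = *p++) != NULL)
		puts(item);
	return 0;
}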
@@ -1176,7 +1176,7 @@ EXPORT_SYMBOL(blk_remove_plug);
 /*
  * remove the plug and let it rip..
  */
-inline void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(request_queue_t *q)
 {
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
 		return;
...
@@ -64,8 +64,6 @@ static void kill_bdev(struct block_device *bdev)
 
 int set_blocksize(struct block_device *bdev, int size)
 {
-	int oldsize;
-
 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
 	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
 		return -EINVAL;
@@ -74,15 +72,13 @@ int set_blocksize(struct block_device *bdev, int size)
 	if (size < bdev_hardsect_size(bdev))
 		return -EINVAL;
 
-	oldsize = bdev->bd_block_size;
-	if (oldsize == size)
-		return 0;
-
-	/* Ok, we're actually changing the blocksize.. */
-	sync_blockdev(bdev);
-	bdev->bd_block_size = size;
-	bdev->bd_inode->i_blkbits = blksize_bits(size);
-	kill_bdev(bdev);
+	/* Don't change the size if it is same as current */
+	if (bdev->bd_block_size != size) {
+		sync_blockdev(bdev);
+		bdev->bd_block_size = size;
+		bdev->bd_inode->i_blkbits = blksize_bits(size);
+		kill_bdev(bdev);
+	}
 	return 0;
 }
 
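The size check kept as context above uses the classic trick that, for
nonzero x, x & (x-1) clears the lowest set bit, so the expression is
zero exactly when a single bit is set. A quick standalone illustration
(is_power_of_two() exists only for this example):

#include <stdio.h>

static int is_power_of_two(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;	/* exactly one bit set */
}

int main(void)
{
	printf("512:  %d\n", is_power_of_two(512));	/* 1 */
	printf("768:  %d\n", is_power_of_two(768));	/* 0: 512+256, two bits */
	printf("4096: %d\n", is_power_of_two(4096));	/* 1 */
	return 0;
}
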
@@ -90,12 +86,15 @@ EXPORT_SYMBOL(set_blocksize);
 
 int sb_set_blocksize(struct super_block *sb, int size)
 {
-	int bits;
-	if (set_blocksize(sb->s_bdev, size) < 0)
+	int bits = 9; /* 2^9 = 512 */
+
+	if (set_blocksize(sb->s_bdev, size))
 		return 0;
+	/* If we get here, we know size is power of two
+	 * and it's value is between 512 and PAGE_SIZE */
 	sb->s_blocksize = size;
-	for (bits = 9, size >>= 9; size >>= 1; bits++)
-		;
+	for (size >>= 10; size; size >>= 1)
+		++bits;
 	sb->s_blocksize_bits = bits;
 	return sb->s_blocksize;
 }
...
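
Since this loop is the part Linus questioned, here is a throwaway
userspace check, assuming a typical PAGE_SIZE of 4096, that the removed
and added computations of s_blocksize_bits agree for every size
set_blocksize() can accept:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumption: typical x86 page size */

int main(void)
{
	for (int size = 512; size <= PAGE_SIZE; size <<= 1) {
		int old_bits, new_bits = 9, s;

		/* old loop, from the removed lines (using a copy of size) */
		for (old_bits = 9, s = size >> 9; s >>= 1; old_bits++)
			;
		/* new loop, from the added lines */
		for (s = size >> 10; s; s >>= 1)
			++new_bits;

		assert(old_bits == new_bits);
		printf("size=%4d -> bits=%d\n", size, new_bits);
	}
	return 0;
}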
@@ -398,7 +398,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
 	return result;
 }
 
-static inline int __vfs_follow_link(struct nameidata *, const char *);
+static int __vfs_follow_link(struct nameidata *, const char *);
 
 /*
  * This limits recursive symlink follows to 8, while
@@ -2211,8 +2211,7 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 	return res;
 }
 
-static inline int
-__vfs_follow_link(struct nameidata *nd, const char *link)
+static int __vfs_follow_link(struct nameidata *nd, const char *link)
 {
 	int res = 0;
 	char *name;
...
@@ -592,7 +592,7 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
-extern inline void __generic_unplug_device(request_queue_t *);
+extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(request_queue_t *);
...
@@ -37,7 +37,7 @@ static int populate_dir(struct kobject * kobj)
 	int i;
 
 	if (t && t->default_attrs) {
-		for (i = 0; (attr = t->default_attrs[i]); i++) {
+		for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
 			if ((error = sysfs_create_file(kobj,attr)))
 				break;
 		}
...
@@ -235,7 +235,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		struct rb_node *old = node, *left;
 
 		node = node->rb_right;
-		while ((left = node->rb_left))
+		while ((left = node->rb_left) != NULL)
 			node = left;
 		child = node->rb_right;
 		parent = node->rb_parent;
...
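
For context, the loop above is rb_erase() locating the in-order
successor of a node that has two children: step right once, then follow
left links to the end. A stripped-down plain-BST sketch of the same
walk, without any red-black bookkeeping:

#include <stdio.h>

struct node {
	int key;
	struct node *left, *right;
};

int main(void)
{
	/* tree:   4
	 *        / \
	 *       2   8
	 *          /
	 *         6
	 *        /
	 *       5
	 */
	struct node n5 = { 5, NULL, NULL };
	struct node n6 = { 6, &n5, NULL };
	struct node n8 = { 8, &n6, NULL };
	struct node n2 = { 2, NULL, NULL };
	struct node n4 = { 4, &n2, &n8 };
	struct node *node = n4.right;		/* step right once... */
	struct node *left;

	while ((left = node->left) != NULL)	/* ...then run leftward */
		node = left;
	printf("successor of %d is %d\n", n4.key, node->key);	/* 5 */
	return 0;
}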
@@ -200,7 +200,7 @@ static int wait_on_page_writeback_range(struct address_space *mapping,
 	index = start;
 	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 			PAGECACHE_TAG_WRITEBACK,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
 		unsigned i;
 
 		for (i = 0; i < nr_pages; i++) {
...
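
A note on the untouched arithmetic in that condition: the lookup count
is written min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1 rather than
min(end - index + 1, PAGEVEC_SIZE) so that the +1 happens after the
clamp and cannot overflow when end is the largest possible page offset.
A standalone sketch, assuming PAGEVEC_SIZE is 16 as in kernels of this
era:

#include <stdio.h>

#define PAGEVEC_SIZE 16UL	/* assumption: value from this kernel era */

/* pages to request for the inclusive range [index, end], capped */
static unsigned long batch(unsigned long index, unsigned long end)
{
	unsigned long span = end - index;

	return (span < PAGEVEC_SIZE - 1 ? span : PAGEVEC_SIZE - 1) + 1;
}

int main(void)
{
	printf("%lu\n", batch(0, 5));		/* 6: pages 0..5 */
	printf("%lu\n", batch(0, 1000));	/* 16: clamped */
	printf("%lu\n", batch(0, ~0UL));	/* 16: +1 after the clamp */
	return 0;
}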
@@ -199,7 +199,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 	}
 
 	write_lock(&vmlist_lock);
-	for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
+	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
 		if ((unsigned long)tmp->addr < addr)
 			continue;
 		if ((size + addr) < addr)
@@ -260,7 +260,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	struct vm_struct **p, *tmp;
 
 	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
+	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
 		if (tmp->addr == addr)
 			goto found;
 	}
...
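
Both hunks walk the list through a pointer-to-pointer cursor, which
keeps a handle on the link being followed so that a later insert or
unlink needs no special case for the list head. A minimal standalone
version of the same shape:

#include <stdio.h>
#include <stdlib.h>

struct item {
	int val;
	struct item *next;
};

static struct item *head;

/* ascending insert: the head pointer and every interior next pointer
 * are updated through the same **p, exactly like the vmlist walk */
static void insert_sorted(struct item *n)
{
	struct item **p, *tmp;

	for (p = &head; (tmp = *p) != NULL; p = &tmp->next)
		if (tmp->val >= n->val)
			break;
	n->next = *p;
	*p = n;
}

int main(void)
{
	int vals[] = { 3, 1, 2 };

	for (int i = 0; i < 3; i++) {
		struct item *n = malloc(sizeof(*n));

		n->val = vals[i];
		insert_sorted(n);
	}
	for (struct item *it = head; it != NULL; it = it->next)
		printf("%d ", it->val);		/* prints: 1 2 3 */
	printf("\n");
	return 0;
}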