Commit 55be71cf authored by Mika Kukkonen, committed by Linus Torvalds

[PATCH] Combined patch for remaining trivial sparse warnings in allnoconfig build

Well, one of these (fs/block_dev.c) is a little non-trivial, but I felt
throwing it away would be a shame (and I did add comments ;-).

Also, almost all of these have been submitted earlier through other
channels, but have not been picked up (the only controversial one is
again the fs/block_dev.c patch, where Linus felt a better job would be
done with __ffs(), but I could not convince myself that it does the
same thing as the original code).
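
(For reference, a minimal user-space sketch of that disagreement, not part of the patch: loop_bits() is the computation sb_set_blocksize() ends up with below, and ffs_bits() is a made-up stand-in for the kernel's __ffs() built on GCC's __builtin_ctzl(). For the power-of-two sizes between 512 and PAGE_SIZE that set_blocksize() accepts, the two agree; they can only diverge on sizes that have already been rejected.)

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL                /* typical x86 value */

/* The loop sb_set_blocksize() uses after this patch. */
static int loop_bits(unsigned long size)
{
        int bits = 9;                   /* 2^9 = 512 */
        for (size >>= 10; size; size >>= 1)
                ++bits;
        return bits;
}

/* Stand-in for the kernel's __ffs(): index of the lowest set bit,
 * which for a power of two is exactly log2(size). */
static int ffs_bits(unsigned long size)
{
        return __builtin_ctzl(size);
}

int main(void)
{
        unsigned long size;

        for (size = 512; size <= PAGE_SIZE; size <<= 1) {
                printf("%5lu: loop=%d __ffs=%d\n",
                       size, loop_bits(size), ffs_bits(size));
                assert(loop_bits(size) == ffs_bits(size));
        }
        return 0;
}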
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9e1674ef
@@ -219,7 +219,7 @@ long strnlen_user(const char __user *s, long n)
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
-__copy_user_intel(void *to, const void *from,unsigned long size)
+__copy_user_intel(void __user *to, const void *from, unsigned long size)
 {
         int d0, d1;
         __asm__ __volatile__(
@@ -326,7 +326,7 @@ __copy_user_intel(void *to, const void *from,unsigned long size)
 }
 
 static unsigned long
-__copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 {
         int d0, d1;
         __asm__ __volatile__(
@@ -425,9 +425,9 @@ __copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
  * them
  */
 unsigned long
-__copy_user_zeroing_intel(void *to, const void *from, unsigned long size);
+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
 unsigned long
-__copy_user_intel(void *to, const void *from,unsigned long size);
+__copy_user_intel(void __user *to, const void *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
@@ -562,9 +562,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
         }
 #endif
         if (movsl_is_ok(to, from, n))
-                __copy_user((void *)to, from, n);
+                __copy_user(to, from, n);
         else
-                n = __copy_user_intel((void *)to, from, n);
+                n = __copy_user_intel(to, from, n);
         return n;
 }
 
@@ -572,9 +572,9 @@ unsigned long
 __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
 {
         if (movsl_is_ok(to, from, n))
-                __copy_user_zeroing(to, (const void *) from, n);
+                __copy_user_zeroing(to, from, n);
         else
-                n = __copy_user_zeroing_intel(to, (const void *) from, n);
+                n = __copy_user_zeroing_intel(to, from, n);
         return n;
 }
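
The usercopy hunks above change nothing at runtime: __user is sparse's address-space annotation, and plain gcc compiles it away. The following stand-alone sketch, not from the patch, shows what the annotation expands to and why dereferencing an annotated pointer directly draws a sparse warning; copy_from_user() here is a toy stand-in for the real accessor.

/* Toy illustration of sparse's __user annotation; not kernel code. */
#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__                     /* defined when sparse runs */
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user                        /* gcc sees nothing */
#endif

/* Stand-in for the kernel's copy_from_user(); the real one validates
 * the user address range instead of copying blindly. */
static unsigned long copy_from_user(void *to, const void __user *from,
                                    unsigned long n)
{
        /* In real kernel code this cast would need __force to keep
         * sparse quiet; for the toy version it just makes memcpy work. */
        memcpy(to, (const void *)from, n);
        return 0;
}

int main(void)
{
        int kernel_val = 0;
        int pretend_user_val = 42;
        const int __user *uptr = (const int __user *)&pretend_user_val;

        /* "kernel_val = *uptr;" would make sparse warn about
         * dereferencing a noderef pointer; the accessor is the
         * approved route. */
        copy_from_user(&kernel_val, uptr, sizeof(kernel_val));
        printf("%d\n", kernel_val);
        return 0;
}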
@@ -211,7 +211,7 @@ struct request *elv_next_request(request_queue_t *q)
         struct request *rq;
         int ret;
 
-        while ((rq = __elv_next_request(q))) {
+        while ((rq = __elv_next_request(q)) != NULL) {
                 /*
                  * just mark as started even if we don't start it, a request
                  * that has been delayed should not be passed by new incoming
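
This hunk, like the kobject, rb_erase, pagevec and vmlist hunks further down, spells out an assignment-in-condition as an explicit comparison. The two forms behave identically; the explicit test merely states the intent so the checker stops flagging the bare assignment. A minimal sketch, not from the patch:

#include <stdio.h>

static const char *items[] = { "one", "two", NULL };

int main(void)
{
        const char **p = items;
        const char *it;

        while ((it = *p++))             /* truth-tests the assignment */
                printf("implicit: %s\n", it);

        p = items;
        while ((it = *p++) != NULL)     /* same loop, intent spelled out */
                printf("explicit: %s\n", it);
        return 0;
}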
@@ -1176,7 +1176,7 @@ EXPORT_SYMBOL(blk_remove_plug);
 /*
  * remove the plug and let it rip..
  */
-inline void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(request_queue_t *q)
 {
         if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
                 return;
@@ -64,8 +64,6 @@ static void kill_bdev(struct block_device *bdev)
 
 int set_blocksize(struct block_device *bdev, int size)
 {
-        int oldsize;
-
         /* Size must be a power of two, and between 512 and PAGE_SIZE */
         if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
                 return -EINVAL;
@@ -74,15 +72,13 @@ int set_blocksize(struct block_device *bdev, int size)
         if (size < bdev_hardsect_size(bdev))
                 return -EINVAL;
 
-        oldsize = bdev->bd_block_size;
-        if (oldsize == size)
-                return 0;
-
-        /* Ok, we're actually changing the blocksize.. */
-        sync_blockdev(bdev);
-        bdev->bd_block_size = size;
-        bdev->bd_inode->i_blkbits = blksize_bits(size);
-        kill_bdev(bdev);
+        /* Don't change the size if it is same as current */
+        if (bdev->bd_block_size != size) {
+                sync_blockdev(bdev);
+                bdev->bd_block_size = size;
+                bdev->bd_inode->i_blkbits = blksize_bits(size);
+                kill_bdev(bdev);
+        }
 
         return 0;
 }
@@ -90,12 +86,15 @@ EXPORT_SYMBOL(set_blocksize);
 
 int sb_set_blocksize(struct super_block *sb, int size)
 {
-        int bits;
-        if (set_blocksize(sb->s_bdev, size) < 0)
+        int bits = 9; /* 2^9 = 512 */
+
+        if (set_blocksize(sb->s_bdev, size))
                 return 0;
+        /* If we get here, we know size is power of two
+         * and it's value is between 512 and PAGE_SIZE */
         sb->s_blocksize = size;
-        for (bits = 9, size >>= 9; size >>= 1; bits++)
-                ;
+        for (size >>= 10; size; size >>= 1)
+                ++bits;
         sb->s_blocksize_bits = bits;
         return sb->s_blocksize;
 }
@@ -398,7 +398,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd)
         return result;
 }
 
-static inline int __vfs_follow_link(struct nameidata *, const char *);
+static int __vfs_follow_link(struct nameidata *, const char *);
 
 /*
  * This limits recursive symlink follows to 8, while
@@ -2211,8 +2211,7 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
         return res;
 }
 
-static inline int
-__vfs_follow_link(struct nameidata *nd, const char *link)
+static int __vfs_follow_link(struct nameidata *nd, const char *link)
 {
         int res = 0;
         char *name;
@@ -592,7 +592,7 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
-extern inline void __generic_unplug_device(request_queue_t *);
+extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
 int blk_get_queue(request_queue_t *);
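
This declaration change pairs with the __generic_unplug_device() hunk above: the function is defined as a plain out-of-line function, so advertising it as extern inline in the header (or, for the __vfs_follow_link() forward declaration, as static inline) contradicts the definition, which is exactly the kind of inconsistency sparse flags. Under the old GNU C89 rules, extern inline even promises gcc an inline-only body that is never emitted out of line. A self-contained sketch, not kernel code, of the consistent pairing the patch settles on; unplug() is a made-up example function:

#include <stdio.h>

/* The "header": a plain extern declaration, no inline, matching the
 * definition below.  Writing "extern inline" here while defining the
 * function normally is the mismatch being removed. */
extern void unplug(int *queue);

/* The ".c file": an ordinary out-of-line definition. */
void unplug(int *queue)
{
        printf("unplugging queue %d\n", *queue);
}

int main(void)
{
        int q = 7;
        unplug(&q);
        return 0;
}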
@@ -37,7 +37,7 @@ static int populate_dir(struct kobject * kobj)
         int i;
 
         if (t && t->default_attrs) {
-                for (i = 0; (attr = t->default_attrs[i]); i++) {
+                for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
                         if ((error = sysfs_create_file(kobj,attr)))
                                 break;
                 }
@@ -235,7 +235,7 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
                 struct rb_node *old = node, *left;
 
                 node = node->rb_right;
-                while ((left = node->rb_left))
+                while ((left = node->rb_left) != NULL)
                         node = left;
                 child = node->rb_right;
                 parent = node->rb_parent;
@@ -200,7 +200,7 @@ static int wait_on_page_writeback_range(struct address_space *mapping,
         index = start;
         while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                         PAGECACHE_TAG_WRITEBACK,
-                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
                 unsigned i;
 
                 for (i = 0; i < nr_pages; i++) {
@@ -199,7 +199,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
         }
 
         write_lock(&vmlist_lock);
-        for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
+        for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
                 if ((unsigned long)tmp->addr < addr)
                         continue;
                 if ((size + addr) < addr)
@@ -260,7 +260,7 @@ struct vm_struct *remove_vm_area(void *addr)
         struct vm_struct **p, *tmp;
 
         write_lock(&vmlist_lock);
-        for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
+        for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
                 if (tmp->addr == addr)
                         goto found;
         }