Commit a7993279 authored by Linus Torvalds

Make bitops/cpumask functions be "const" where appropriate.

parent 8f77e95e
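This is a const-correctness cleanup: functions that only read through a pointer argument now say so in their prototypes, so callers holding read-only data need no casts and accidental writes through those pointers become compile errors. A minimal user-space sketch of the idea (toy code, not part of the commit; the kernel's own find_next_zero_bit() is the version patched below):

#include <stdio.h>

/* Toy stand-in for find_next_zero_bit(): returns the first clear bit at or
 * after 'offset' in a bitmap of 'size' bits.  The buffer is only read, so
 * the parameter is declared const. */
static unsigned long toy_find_next_zero_bit(const unsigned long *addr,
					    unsigned long size,
					    unsigned long offset)
{
	unsigned long bit, bits_per_long = 8 * sizeof(unsigned long);

	for (bit = offset; bit < size; bit++)
		if (!(addr[bit / bits_per_long] & (1UL << (bit % bits_per_long))))
			return bit;
	return size;			/* no zero bit found */
}

int main(void)
{
	/* Read-only bitmap: with the const prototype this call needs neither
	 * a cast nor a "discards qualifiers" warning. */
	static const unsigned long map[1] = { 0x0fUL };

	/* bits 0-3 are set, so this prints 4 */
	printf("first zero bit: %lu\n",
	       toy_find_next_zero_bit(map, 8 * sizeof(map), 0));
	return 0;
}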
@@ -7,10 +7,10 @@
 #include <asm/bitops.h>
 #include <asm/byteorder.h>
-unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size,
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 				 unsigned long offset)
 {
-	unsigned long *p = addr + (offset >> 6);
+	const unsigned long *p = addr + (offset >> 6);
 	unsigned long result = offset & ~63UL;
 	unsigned long tmp;
@@ -48,10 +48,10 @@ unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size,
 EXPORT_SYMBOL(find_next_zero_bit);
-unsigned long find_next_bit(unsigned long *addr, unsigned long size,
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 			    unsigned long offset)
 {
-	unsigned long *p = addr + (offset >> 6);
+	const unsigned long *p = addr + (offset >> 6);
 	unsigned long result = offset & ~63UL;
 	unsigned long tmp;
@@ -106,10 +106,10 @@ static inline unsigned int ext2_ffz(unsigned int x)
 	return rc;
 }
-unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size,
+unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size,
 				    unsigned long offset)
 {
-	unsigned int *p = ((unsigned int *)addr) + (offset >> 5);
+	const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5);
 	unsigned int result = offset & ~31;
 	unsigned int tmp;
......
@@ -288,15 +288,15 @@ static __inline__ int ffs(int x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
-extern unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
 #define find_first_zero_bit(addr, size) \
 	find_next_zero_bit((addr), (size), 0)
-extern unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
 #define find_first_bit(addr, size) \
 	find_next_bit((addr), (size), 0)
-extern unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset);
+extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
 #define find_first_zero_le_bit(addr, size) \
 	find_next_zero_le_bit((addr), (size), 0)
......
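With the header prototypes above const-qualified, code that keeps its bitmaps as read-only data can call the find_* helpers directly. A hedged in-kernel sketch (hypothetical helper, assuming the patched <asm/bitops.h>; not part of this commit):

#include <asm/bitops.h>

/* Hypothetical helper: report whether a read-only allocation bitmap still
 * has a free slot.  find_first_zero_bit() expands to find_next_zero_bit(),
 * which now accepts 'const unsigned long *' without a cast or a
 * "discards qualifiers" warning. */
static int have_free_slot(const unsigned long *bitmap, unsigned long nslots)
{
	return find_first_zero_bit(bitmap, nslots) < nslots;
}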
@@ -14,7 +14,7 @@
 #ifdef __GNUC__
 #ifdef __s390x__
-static __inline__ __u64 ___arch__swab64p(__u64 *x)
+static __inline__ __u64 ___arch__swab64p(const __u64 *x)
 {
 	__u64 result;
@@ -40,7 +40,7 @@ static __inline__ void ___arch__swab64s(__u64 *x)
 }
 #endif /* __s390x__ */
-static __inline__ __u32 ___arch__swab32p(__u32 *x)
+static __inline__ __u32 ___arch__swab32p(const __u32 *x)
 {
 	__u32 result;
@@ -77,7 +77,7 @@ static __inline__ void ___arch__swab32s(__u32 *x)
 	*x = ___arch__swab32p(x);
 }
-static __inline__ __u16 ___arch__swab16p(__u16 *x)
+static __inline__ __u16 ___arch__swab16p(const __u16 *x)
 {
 	__u16 result;
......
@@ -134,7 +134,7 @@ static __inline__ __attribute_const__ __u16 __fswab16(__u16 x)
 {
 	return __arch__swab16(x);
 }
-static __inline__ __u16 __swab16p(__u16 *x)
+static __inline__ __u16 __swab16p(const __u16 *x)
 {
 	return __arch__swab16p(x);
 }
@@ -147,7 +147,7 @@ static __inline__ __attribute_const__ __u32 __fswab32(__u32 x)
 {
 	return __arch__swab32(x);
 }
-static __inline__ __u32 __swab32p(__u32 *x)
+static __inline__ __u32 __swab32p(const __u32 *x)
 {
 	return __arch__swab32p(x);
 }
@@ -167,7 +167,7 @@ static __inline__ __attribute_const__ __u64 __fswab64(__u64 x)
 	return __arch__swab64(x);
 # endif
 }
-static __inline__ __u64 __swab64p(__u64 *x)
+static __inline__ __u64 __swab64p(const __u64 *x)
 {
 	return __arch__swab64p(x);
 }
......
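The swab*p() variants only dereference their argument to produce a byte-swapped copy, while the swab*s() variants modify it in place, which is why only the former gain const here. A standalone illustration of that distinction in plain C (toy functions, not the kernel macros):

#include <stdint.h>
#include <stdio.h>

/* Swap-by-pointer: reads through 'x' only, hence the const qualifier. */
static uint32_t toy_swab32p(const uint32_t *x)
{
	uint32_t v = *x;

	return (v >> 24) | ((v >> 8) & 0x0000ff00UL) |
	       ((v << 8) & 0x00ff0000UL) | (v << 24);
}

/* Swap-in-place: writes through 'x', so it must keep a non-const pointer. */
static void toy_swab32s(uint32_t *x)
{
	*x = toy_swab32p(x);
}

int main(void)
{
	static const uint32_t wire = 0x12345678;	/* read-only datum */
	uint32_t local = wire;

	printf("%08x\n", toy_swab32p(&wire));	/* prints 78563412, no cast needed */
	toy_swab32s(&local);			/* in-place variant needs a writable copy */
	printf("%08x\n", local);		/* prints 78563412 */
	return 0;
}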
@@ -171,19 +171,19 @@ static inline int __cpus_subset(cpumask_t *src1p,
 }
 #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(cpumask_t *srcp, int nbits)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
 {
 	return bitmap_empty(srcp->bits, nbits);
 }
 #define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(cpumask_t *srcp, int nbits)
+static inline int __cpus_full(const cpumask_t *srcp, int nbits)
 {
 	return bitmap_full(srcp->bits, nbits);
 }
 #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(cpumask_t *srcp, int nbits)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
 {
 	return bitmap_weight(srcp->bits, nbits);
 }
@@ -191,7 +191,7 @@ static inline int __cpus_weight(cpumask_t *srcp, int nbits)
 #define cpus_shift_right(dst, src, n) \
 	__cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
 static inline void __cpus_shift_right(cpumask_t *dstp,
-			cpumask_t *srcp, int n, int nbits)
+			const cpumask_t *srcp, int n, int nbits)
 {
 	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
 }
@@ -199,19 +199,19 @@ static inline void __cpus_shift_right(cpumask_t *dstp,
 #define cpus_shift_left(dst, src, n) \
 	__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
 static inline void __cpus_shift_left(cpumask_t *dstp,
-			cpumask_t *srcp, int n, int nbits)
+			const cpumask_t *srcp, int n, int nbits)
 {
 	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
 #define first_cpu(src) __first_cpu(&(src), NR_CPUS)
-static inline int __first_cpu(cpumask_t *srcp, int nbits)
+static inline int __first_cpu(const cpumask_t *srcp, int nbits)
 {
 	return find_first_bit(srcp->bits, nbits);
 }
 #define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
-static inline int __next_cpu(int n, cpumask_t *srcp, int nbits)
+static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
 {
 	return find_next_bit(srcp->bits, nbits, n+1);
 }
......
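Propagating const into the __cpus_* helpers (together with find_first_bit()/find_next_bit(), patched above) lets code that merely inspects a CPU mask take it as read-only. A hedged in-kernel sketch (hypothetical function, assuming these helpers live in <linux/cpumask.h>; not part of this commit):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Hypothetical helper: print a summary of a CPU mask it never modifies.
 * cpus_empty(), cpus_weight() and first_cpu() expand to the __cpus_*
 * helpers above, which now accept 'const cpumask_t *'. */
static void report_mask(const cpumask_t *mask)
{
	if (cpus_empty(*mask))
		printk(KERN_INFO "cpu mask is empty\n");
	else
		printk(KERN_INFO "cpu mask: %d cpus, first is cpu%d\n",
		       cpus_weight(*mask), first_cpu(*mask));
}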