Commit 1a1d48a4 authored by Denys Vlasenko, committed by Ingo Molnar

linux/bitmap: Force inlining of bitmap weight functions

With this config:

  http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os

gcc-4.7.2 generates many copies of these tiny functions:

	bitmap_weight (55 copies):
	55                      push   %rbp
	48 89 e5                mov    %rsp,%rbp
	e8 3f 3a 8b 00          callq  __bitmap_weight
	5d                      pop    %rbp
	c3                      retq

	hweight_long (23 copies):
	55                      push   %rbp
	e8 b5 65 8e 00          callq  __sw_hweight64
	48 89 e5                mov    %rsp,%rbp
	5d                      pop    %rbp
	c3                      retq

See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122

This patch fixes this via s/inline/__always_inline/

While at it, two "__inline__" uses were replaced with the usual "inline"
(the rest of the source file uses the latter).
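
For reference, here is a minimal standalone sketch (ordinary userspace C,
not kernel code; the my_always_inline macro below simply mirrors how the
kernel's compiler headers define __always_inline) of why the attribute
matters: plain "inline" is only a hint, so gcc at -Os may keep an
out-of-line copy of a small static inline helper in every object file
that calls it, while always_inline forces expansion at each call site:

	/* sketch.c: build with "gcc -Os -c sketch.c", inspect with objdump */
	#define my_always_inline inline __attribute__((__always_inline__))

	/* hint only: gcc at -Os may emit this out of line per object file */
	static inline unsigned int popcount_hint(unsigned long w)
	{
		return (unsigned int)__builtin_popcountl(w);
	}

	/* forced: gcc must expand this at every call site */
	static my_always_inline unsigned int popcount_forced(unsigned long w)
	{
		return (unsigned int)__builtin_popcountl(w);
	}

	unsigned int test_hint(unsigned long w)   { return popcount_hint(w); }
	unsigned int test_forced(unsigned long w) { return popcount_forced(w); }
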

	    text     data      bss       dec  filename
	86971357 17195880 36659200 140826437  vmlinux.before
	86971120 17195912 36659200 140826232  vmlinux
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1438697716-28121-1-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c2f3ba74
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
 	return find_first_zero_bit(src, nbits) == nbits;
 }
 
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
 	     (bit) < (size);					\
 	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 
-static __inline__ int get_bitmask_order(unsigned int count)
+static inline int get_bitmask_order(unsigned int count)
 {
 	int order;
 
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
 	return order;	/* We could be slightly more clever with -1 here... */
 }
 
-static __inline__ int get_count_order(unsigned int count)
+static inline int get_count_order(unsigned int count)
 {
 	int order;
 
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
 	return order;
 }
 
-static inline unsigned long hweight_long(unsigned long w)
+static __always_inline unsigned long hweight_long(unsigned long w)
 {
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
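
As a usage sketch (kernel context assumed; the helper below is a
hypothetical illustration, not part of this patch): when nbits is a
compile-time constant no larger than BITS_PER_LONG, the
small_const_nbits() fast path plus the forced inlining above lets
bitmap_weight() collapse to an inline popcount instead of a call to
__bitmap_weight():

	#include <linux/bitmap.h>

	/* hypothetical helper for illustration, not part of this patch */
	static DECLARE_BITMAP(flags, 16);

	static int count_flags(void)
	{
		bitmap_fill(flags, 16);
		/* nbits is a compile-time constant <= BITS_PER_LONG, so
		 * small_const_nbits() is true and this reduces to
		 * hweight_long() on a single word; with __always_inline
		 * it now reliably becomes an inline popcount */
		return bitmap_weight(flags, 16);
	}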