Commit ad8258e8 authored by Linus Torvalds

Merge tag 'bitmap-6.5-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:
 "Fixes for different bitmap pieces:

   - lib/test_bitmap: increment failure counter properly

     The tests that don't use the expect_eq() macro to flag a failure
     must increment failed_tests explicitly (see the sketch after this
     list for the pattern being fixed).

   - lib/bitmap: drop optimization of bitmap_{from,to}_arr64

     bitmap_{from,to}_arr64() optimization is overly optimistic
     on 32-bit LE architectures when it's wired to
     bitmap_copy_clear_tail().

   - nodemask: Drop duplicate check in for_each_node_mask()

     As the return type of first_node() became unsigned, the
     'node >= 0' check became unnecessary.

   - cpumask: fix function description kernel-doc notation

   - MAINTAINERS: Add bits.h and bitfield.h to the BITMAP API record

     Add linux/bits.h and linux/bitfield.h for visibility"
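
For context, the pattern the first item fixes looks roughly like the
stand-alone user-space sketch below. It only illustrates the counting
convention: the real code lives in lib/test_bitmap.c and uses pr_err()
in __init functions; the check_parse_result() helper here is invented
for the example.

#include <stdio.h>

static int failed_tests;	/* mirrors the counter in lib/test_bitmap.c */

/*
 * Checks built on an expect_eq()-style helper bump the counter inside the
 * helper.  Open-coded checks like this one must bump it themselves, which
 * is what this series adds at every early "continue"/"goto out" path.
 */
static void check_parse_result(int err, int expected_errno)
{
	if (err != expected_errno) {
		fprintf(stderr, "errno is %d, expected %d\n", err, expected_errno);
		failed_tests++;		/* the previously missing increment */
	}
}

int main(void)
{
	check_parse_result(-22, 0);	/* simulate one failing case */
	printf("failed tests: %d\n", failed_tests);
	return failed_tests ? 1 : 0;
}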

* tag 'bitmap-6.5-rc1' of https://github.com/norov/linux:
  MAINTAINERS: Add bitfield.h to the BITMAP API record
  MAINTAINERS: Add bits.h to the BITMAP API record
  cpumask: fix function description kernel-doc notation
  nodemask: Drop duplicate check in for_each_node_mask()
  lib/bitmap: drop optimization of bitmap_{from,to}_arr64
  lib/test_bitmap: increment failure counter properly
parents 8689f4f2 2a3110e3
MAINTAINERS
@@ -3499,18 +3499,24 @@ M: Yury Norov <yury.norov@gmail.com>
 R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
 S: Maintained
+F: include/linux/bitfield.h
 F: include/linux/bitmap.h
+F: include/linux/bits.h
 F: include/linux/cpumask.h
 F: include/linux/find.h
 F: include/linux/nodemask.h
+F: include/vdso/bits.h
 F: lib/bitmap.c
 F: lib/cpumask.c
 F: lib/cpumask_kunit.c
 F: lib/find_bit.c
 F: lib/find_bit_benchmark.c
 F: lib/test_bitmap.c
+F: tools/include/linux/bitfield.h
 F: tools/include/linux/bitmap.h
+F: tools/include/linux/bits.h
 F: tools/include/linux/find.h
+F: tools/include/vdso/bits.h
 F: tools/lib/bitmap.c
 F: tools/lib/find_bit.c
...
include/linux/bitmap.h
@@ -302,12 +302,10 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
 #endif
 /*
- * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
- * machines the order of hi and lo parts of numbers match the bitmap structure.
- * In both cases conversion is not needed when copying data from/to arrays of
- * u64.
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
  */
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
 void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
 void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
 #else
...
include/linux/nodemask.h
@@ -385,7 +385,7 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
 #if MAX_NUMNODES > 1
 #define for_each_node_mask(node, mask)				\
	for ((node) = first_node(mask);				\
-	     (node >= 0) && (node) < MAX_NUMNODES;		\
+	     (node) < MAX_NUMNODES;				\
	     (node) = next_node((node), (mask)))
 #else /* MAX_NUMNODES == 1 */
 #define for_each_node_mask(node, mask)				\
...
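
To see why the dropped check is dead code, here is a stand-alone sketch
assuming only what the message above states: first_node()/next_node()
now return an unsigned type. The *_stub() helpers are invented stand-ins,
not the kernel implementations.

#include <stdio.h>

#define MAX_NUMNODES 4U

/*
 * Stand-ins for first_node()/next_node(); the real ones scan a nodemask_t.
 * Only the unsigned return type matters for this illustration.
 */
static unsigned int first_node_stub(void)          { return 1; }
static unsigned int next_node_stub(unsigned int n) { return n + 2; }

int main(void)
{
	/*
	 * With an unsigned 'node', 'node >= 0' is always true, so the loop
	 * condition only needs the upper bound, exactly as in the patched
	 * for_each_node_mask().
	 */
	for (unsigned int node = first_node_stub();
	     node < MAX_NUMNODES;	/* was: (node >= 0) && (node) < MAX_NUMNODES */
	     node = next_node_stub(node))
		printf("visiting node %u\n", node);

	return 0;
}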
lib/bitmap.c
@@ -1495,7 +1495,7 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
 EXPORT_SYMBOL(bitmap_to_arr32);
 #endif
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
 /**
  * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
  * @bitmap: array of unsigned longs, the destination bitmap
...
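
As I read the change, the problem with the 32-bit LE shortcut is that
bitmap_copy_clear_tail() only clears the tail of the last 32-bit long,
while bitmap_to_arr64() promises that every bit past nbits in the last
64-bit element is cleared; when the bitmap ends in the low half of a u64,
the high half can keep stale data. A user-space sketch of that situation
(all constants are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/*
	 * Pretend BITS_PER_LONG == 32 and nbits == 32: the bitmap occupies a
	 * single 32-bit word, but the destination array element is a full u64.
	 */
	uint32_t bitmap[1] = { 0xdeadbeef };
	uint64_t arr = 0xa5a5a5a5a5a5a5a5ULL;	/* stale destination memory */

	/*
	 * A bitmap_copy_clear_tail()-style shortcut copies whole longs and
	 * masks the tail of the last *long*.  With nbits == 32 there is
	 * nothing to mask, so bits 32..63 of the u64 are never written (on a
	 * little-endian host the copy below lands in the low 4 bytes only).
	 */
	memcpy(&arr, bitmap, sizeof(bitmap));

	/*
	 * bitmap_to_arr64() documents that bits past nbits in the last u64
	 * element are cleared, so the stale high half breaks the contract;
	 * this is what test_bitmap_arr64() reports as
	 * "tail is not safely cleared".
	 */
	printf("arr = 0x%016llx (bits 32..63 should be zero)\n",
	       (unsigned long long)arr);
	return 0;
}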
lib/cpumask.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(cpumask_local_spread);
 static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
 /**
- * Returns an arbitrary cpu within srcp1 & srcp2.
+ * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
  *
  * Iterated calls using the same srcp1 and srcp2 will be distributed within
  * their intersection.
...
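
For reference, kernel-doc expects the first line of such a comment to be
"function_name - short description", which is what the fix above adds.
A generic sketch of the expected shape follows; the @-parameter lines and
the Return: wording are illustrative, not copied from lib/cpumask.c.

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
 * @srcp1: first input cpumask   (description wording illustrative)
 * @srcp2: second input cpumask  (description wording illustrative)
 *
 * Iterated calls using the same srcp1 and srcp2 will be distributed within
 * their intersection.
 *
 * Return: a CPU number (Return-section wording illustrative).
 */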
lib/test_bitmap.c
@@ -470,6 +470,7 @@ static void __init test_bitmap_parselist(void)
 		if (err != ptest.errno) {
 			pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
 					i, ptest.in, err, ptest.errno);
+			failed_tests++;
 			continue;
 		}
@@ -478,6 +479,7 @@ static void __init test_bitmap_parselist(void)
 			pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
 					i, ptest.in, bmap[0],
 					*ptest.expected);
+			failed_tests++;
 			continue;
 		}
@@ -511,11 +513,13 @@ static void __init test_bitmap_printlist(void)
 	if (ret != slen + 1) {
 		pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+		failed_tests++;
 		goto out;
 	}
 	if (strncmp(buf, expected, slen)) {
 		pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+		failed_tests++;
 		goto out;
 	}
@@ -583,6 +587,7 @@ static void __init test_bitmap_parse(void)
 		if (err != test.errno) {
 			pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
 					i, test.in, err, test.errno);
+			failed_tests++;
 			continue;
 		}
@@ -591,6 +596,7 @@ static void __init test_bitmap_parse(void)
 			pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
 					i, test.in, bmap[0],
 					*test.expected);
+			failed_tests++;
 			continue;
 		}
@@ -615,10 +621,12 @@ static void __init test_bitmap_arr32(void)
 		next_bit = find_next_bit(bmap2,
 				round_up(nbits, BITS_PER_LONG), nbits);
-		if (next_bit < round_up(nbits, BITS_PER_LONG))
+		if (next_bit < round_up(nbits, BITS_PER_LONG)) {
 			pr_err("bitmap_copy_arr32(nbits == %d:"
 				" tail is not safely cleared: %d\n",
 				nbits, next_bit);
+			failed_tests++;
+		}
 		if (nbits < EXP1_IN_BITS - 32)
 			expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
@@ -641,15 +649,19 @@ static void __init test_bitmap_arr64(void)
 		expect_eq_bitmap(bmap2, exp1, nbits);
 		next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
-		if (next_bit < round_up(nbits, BITS_PER_LONG))
+		if (next_bit < round_up(nbits, BITS_PER_LONG)) {
 			pr_err("bitmap_copy_arr64(nbits == %d:"
 				" tail is not safely cleared: %d\n", nbits, next_bit);
+			failed_tests++;
+		}
 		if ((nbits % 64) &&
-		    (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
+		    (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0))) {
 			pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
 				nbits, arr[(nbits - 1) / 64],
 				GENMASK_ULL((nbits - 1) % 64, 0));
+			failed_tests++;
+		}
 		if (nbits < EXP1_IN_BITS - 64)
 			expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
...