Commit 45552111 authored by Florian-Ewald Mueller, committed by Song Liu

md/bitmap: Fix bitmap chunk size overflow issues

- limit the bitmap chunk size internal u64 variable to values that do not
  overflow the u32 bitmap superblock structure variable stored on persistent media
- assign the bitmap chunk size internal u64 variable from unsigned values to
  avoid possible sign extension artifacts when assigning from an s32 value

The bug has been present since at least kernel 4.0.
Steps to reproduce it:
1. mdadm -C /dev/mdx -l 1 --bitmap=internal --bitmap-chunk=256M -e 1.2
   -n2 /dev/rnbd1 /dev/rnbd2
2. resize member devices rnbd1 and rnbd2 to 8 TB
3. mdadm --grow /dev/mdx --size=max

Without this patch, the bitmap chunksize overflows.

Cc: stable@vger.kernel.org
Signed-off-by: Florian-Ewald Mueller <florian-ewald.mueller@ionos.com>
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Signed-off-by: Song Liu <song@kernel.org>
parent f97a5528
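
For illustration, a minimal user-space sketch (not kernel code; the variable
names are invented for the example) of the sign-extension artifact the second
bullet above describes: a 32-bit signed intermediate such as "1 << 31" becomes
negative, and widening it into a 64-bit variable sign-extends instead of
producing the intended 2 GiB value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int32_t s32_chunksize = INT32_MIN;      /* bit pattern 0x80000000, as "1 << 31" yields on an int */
        uint64_t bad  = s32_chunksize;          /* sign-extends to 0xffffffff80000000 */
        uint64_t good = 1ULL << 31;             /* unsigned shift: the intended 0x80000000 */

        printf("sign-extended: %#llx\n", (unsigned long long)bad);
        printf("unsigned:      %#llx\n", (unsigned long long)good);
        return 0;
}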
@@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
 	sb = kmap_atomic(bitmap->storage.sb_page);
 	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
 	pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
-	pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+	pr_debug(" version: %u\n", le32_to_cpu(sb->version));
 	pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
 		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
 		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
@@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
 	pr_debug("events cleared: %llu\n",
 		 (unsigned long long) le64_to_cpu(sb->events_cleared));
 	pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
-	pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
-	pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+	pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
+	pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
 	pr_debug(" sync size: %llu KB\n",
 		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
-	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
+	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
 	kunmap_atomic(sb);
 }
@@ -2105,7 +2105,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 			bytes = DIV_ROUND_UP(chunks, 8);
 			if (!bitmap->mddev->bitmap_info.external)
 				bytes += sizeof(bitmap_super_t);
-		} while (bytes > (space << 9));
+		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
+			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
 	} else
 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
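
The new loop bound above caps chunkshift + BITMAP_BLOCK_SHIFT at
BITS_PER_BYTE * sizeof(u32) - 1 = 31, which keeps the resulting chunk size at
or below 2^31 so it still fits the on-disk __u32 chunksize field. A user-space
sketch of that arithmetic (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int max_shift = 8 * sizeof(uint32_t) - 1;      /* BITS_PER_BYTE * sizeof(u32) - 1 = 31 */
        unsigned long long max_chunksize = 1ULL << max_shift;   /* 2147483648, i.e. 2 GiB */

        printf("max shift = %u, max chunk size = %llu\n", max_shift, max_chunksize);
        return 0;
}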
@@ -2150,7 +2151,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 	bitmap->counts.missing_pages = pages;
 	bitmap->counts.chunkshift = chunkshift;
 	bitmap->counts.chunks = chunks;
-	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
+	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

 	blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2176,8 +2177,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 			bitmap->counts.missing_pages = old_counts.pages;
 			bitmap->counts.chunkshift = old_counts.chunkshift;
 			bitmap->counts.chunks = old_counts.chunks;
-			bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
-								     BITMAP_BLOCK_SHIFT);
+			bitmap->mddev->bitmap_info.chunksize =
+				1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
 			blocks = old_counts.chunks << old_counts.chunkshift;
 			pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
 			break;
@@ -2537,6 +2538,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
 	if (csize < 512 ||
 	    !is_power_of_2(csize))
 		return -EINVAL;
+	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
+		sizeof(((bitmap_super_t *)0)->chunksize))))
+		return -EOVERFLOW;
 	mddev->bitmap_info.chunksize = csize;
 	return len;
 }
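
Correspondingly, the sysfs path above now rejects chunk sizes that cannot be
represented in the 32-bit on-disk field. A user-space sketch of the bound that
check enforces on 64-bit builds (illustrative only; bitmap_super_stub stands in
for the kernel's bitmap_super_t):

#include <stdint.h>
#include <stdio.h>

struct bitmap_super_stub { uint32_t chunksize; };   /* the on-disk field is 32 bits wide */

int main(void)
{
        unsigned long long limit =
                1ULL << (8 * sizeof(((struct bitmap_super_stub *)0)->chunksize));

        printf("limit = %llu\n", limit);                       /* 4294967296 */
        printf("2 GiB accepted: %d\n", (2ULL << 30) < limit);  /* 1 */
        printf("4 GiB rejected: %d\n", (4ULL << 30) >= limit); /* 1 -> would return -EOVERFLOW */
        return 0;
}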