Commit 569fa939 authored by Linus Torvalds

Merge tag 'for-6.4-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fix from David Sterba:
 "Unfortunately the recent u32 overflow fix was not complete, there was
  one conversion left, assertion not triggered by my tests but caught by
  Qu's fstests case.

  The "cleanup for later" has been promoted to a proper fix and wraps
  all uses of the stripe left shift so the diffstat has grown but leaves
  no potentially problematic uses.

  We should have done it that way before, sorry"

* tag 'for-6.4-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix remaining u32 overflows when left shifting stripe_nr
parents 9cb38381 cb091225
...@@ -1973,7 +1973,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, ...@@ -1973,7 +1973,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
/* For RAID5/6 adjust to a full IO stripe length */ /* For RAID5/6 adjust to a full IO stripe length */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
io_stripe_size = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT; io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
if (!buf) { if (!buf) {
......
...@@ -1304,7 +1304,7 @@ static int get_raid56_logic_offset(u64 physical, int num, ...@@ -1304,7 +1304,7 @@ static int get_raid56_logic_offset(u64 physical, int num,
u32 stripe_index; u32 stripe_index;
u32 rot; u32 rot;
*offset = last_offset + (i << BTRFS_STRIPE_LEN_SHIFT); *offset = last_offset + btrfs_stripe_nr_to_offset(i);
stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes; stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
...@@ -1319,7 +1319,7 @@ static int get_raid56_logic_offset(u64 physical, int num, ...@@ -1319,7 +1319,7 @@ static int get_raid56_logic_offset(u64 physical, int num,
if (stripe_index < num) if (stripe_index < num)
j++; j++;
} }
*offset = last_offset + (j << BTRFS_STRIPE_LEN_SHIFT); *offset = last_offset + btrfs_stripe_nr_to_offset(j);
return 1; return 1;
} }
...@@ -1715,7 +1715,7 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx) ...@@ -1715,7 +1715,7 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
nr_stripes << BTRFS_STRIPE_LEN_SHIFT); btrfs_stripe_nr_to_offset(nr_stripes));
for (int i = 0; i < nr_stripes; i++) { for (int i = 0; i < nr_stripes; i++) {
stripe = &sctx->stripes[i]; stripe = &sctx->stripes[i];
scrub_submit_initial_read(sctx, stripe); scrub_submit_initial_read(sctx, stripe);
...@@ -1838,7 +1838,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, ...@@ -1838,7 +1838,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
bool all_empty = true; bool all_empty = true;
const int data_stripes = nr_data_stripes(map); const int data_stripes = nr_data_stripes(map);
unsigned long extent_bitmap = 0; unsigned long extent_bitmap = 0;
u64 length = data_stripes << BTRFS_STRIPE_LEN_SHIFT; u64 length = btrfs_stripe_nr_to_offset(data_stripes);
int ret; int ret;
ASSERT(sctx->raid56_data_stripes); ASSERT(sctx->raid56_data_stripes);
...@@ -1853,13 +1853,13 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, ...@@ -1853,13 +1853,13 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
data_stripes) >> BTRFS_STRIPE_LEN_SHIFT; data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
stripe_index = (i + rot) % map->num_stripes; stripe_index = (i + rot) % map->num_stripes;
physical = map->stripes[stripe_index].physical + physical = map->stripes[stripe_index].physical +
(rot << BTRFS_STRIPE_LEN_SHIFT); btrfs_stripe_nr_to_offset(rot);
scrub_reset_stripe(stripe); scrub_reset_stripe(stripe);
set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state); set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
ret = scrub_find_fill_first_stripe(bg, ret = scrub_find_fill_first_stripe(bg,
map->stripes[stripe_index].dev, physical, 1, map->stripes[stripe_index].dev, physical, 1,
full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT), full_stripe_start + btrfs_stripe_nr_to_offset(i),
BTRFS_STRIPE_LEN, stripe); BTRFS_STRIPE_LEN, stripe);
if (ret < 0) if (ret < 0)
goto out; goto out;
...@@ -1869,7 +1869,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, ...@@ -1869,7 +1869,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
*/ */
if (ret > 0) { if (ret > 0) {
stripe->logical = full_stripe_start + stripe->logical = full_stripe_start +
(i << BTRFS_STRIPE_LEN_SHIFT); btrfs_stripe_nr_to_offset(i);
stripe->dev = map->stripes[stripe_index].dev; stripe->dev = map->stripes[stripe_index].dev;
stripe->mirror_num = 1; stripe->mirror_num = 1;
set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
...@@ -2062,7 +2062,7 @@ static u64 simple_stripe_full_stripe_len(const struct map_lookup *map) ...@@ -2062,7 +2062,7 @@ static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)); BTRFS_BLOCK_GROUP_RAID10));
return (map->num_stripes / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT; return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
} }
/* Get the logical bytenr for the stripe */ /* Get the logical bytenr for the stripe */
...@@ -2078,7 +2078,7 @@ static u64 simple_stripe_get_logical(struct map_lookup *map, ...@@ -2078,7 +2078,7 @@ static u64 simple_stripe_get_logical(struct map_lookup *map,
* (stripe_index / sub_stripes) gives how many data stripes we need to * (stripe_index / sub_stripes) gives how many data stripes we need to
* skip. * skip.
*/ */
return ((stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT) + return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
bg->start; bg->start;
} }
...@@ -2204,7 +2204,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -2204,7 +2204,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
} }
if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index); ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
offset = (stripe_index / map->sub_stripes) << BTRFS_STRIPE_LEN_SHIFT; offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
goto out; goto out;
} }
...@@ -2219,7 +2219,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -2219,7 +2219,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
/* Initialize @offset in case we need to go to out: label */ /* Initialize @offset in case we need to go to out: label */
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
increment = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT; increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
/* /*
* Due to the rotation, for RAID56 it's better to iterate each stripe * Due to the rotation, for RAID56 it's better to iterate each stripe
......
...@@ -857,10 +857,10 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, ...@@ -857,10 +857,10 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
* *
* Thus it should be a good way to catch obvious bitflips. * Thus it should be a good way to catch obvious bitflips.
*/ */
if (unlikely(length >= ((u64)U32_MAX << BTRFS_STRIPE_LEN_SHIFT))) { if (unlikely(length >= btrfs_stripe_nr_to_offset(U32_MAX))) {
chunk_err(leaf, chunk, logical, chunk_err(leaf, chunk, logical,
"chunk length too large: have %llu limit %llu", "chunk length too large: have %llu limit %llu",
length, (u64)U32_MAX << BTRFS_STRIPE_LEN_SHIFT); length, btrfs_stripe_nr_to_offset(U32_MAX));
return -EUCLEAN; return -EUCLEAN;
} }
if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK | if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
......
...@@ -5125,7 +5125,7 @@ static void init_alloc_chunk_ctl_policy_regular( ...@@ -5125,7 +5125,7 @@ static void init_alloc_chunk_ctl_policy_regular(
/* We don't want a chunk larger than 10% of writable space */ /* We don't want a chunk larger than 10% of writable space */
ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10), ctl->max_chunk_size = min(mult_perc(fs_devices->total_rw_bytes, 10),
ctl->max_chunk_size); ctl->max_chunk_size);
ctl->dev_extent_min = ctl->dev_stripes << BTRFS_STRIPE_LEN_SHIFT; ctl->dev_extent_min = btrfs_stripe_nr_to_offset(ctl->dev_stripes);
} }
static void init_alloc_chunk_ctl_policy_zoned( static void init_alloc_chunk_ctl_policy_zoned(
...@@ -5801,7 +5801,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, ...@@ -5801,7 +5801,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
if (!WARN_ON(IS_ERR(em))) { if (!WARN_ON(IS_ERR(em))) {
map = em->map_lookup; map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
len = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT; len = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
free_extent_map(em); free_extent_map(em);
} }
return len; return len;
...@@ -5975,12 +5975,12 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, ...@@ -5975,12 +5975,12 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT;
/* stripe_offset is the offset of this block in its stripe */ /* stripe_offset is the offset of this block in its stripe */
stripe_offset = offset - ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT); stripe_offset = offset - btrfs_stripe_nr_to_offset(stripe_nr);
stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >> stripe_nr_end = round_up(offset + length, BTRFS_STRIPE_LEN) >>
BTRFS_STRIPE_LEN_SHIFT; BTRFS_STRIPE_LEN_SHIFT;
stripe_cnt = stripe_nr_end - stripe_nr; stripe_cnt = stripe_nr_end - stripe_nr;
stripe_end_offset = ((u64)stripe_nr_end << BTRFS_STRIPE_LEN_SHIFT) - stripe_end_offset = btrfs_stripe_nr_to_offset(stripe_nr_end) -
(offset + length); (offset + length);
/* /*
* after this, stripe_nr is the number of stripes on this * after this, stripe_nr is the number of stripes on this
...@@ -6023,12 +6023,12 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, ...@@ -6023,12 +6023,12 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
for (i = 0; i < *num_stripes; i++) { for (i = 0; i < *num_stripes; i++) {
stripes[i].physical = stripes[i].physical =
map->stripes[stripe_index].physical + map->stripes[stripe_index].physical +
stripe_offset + ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT); stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
stripes[i].dev = map->stripes[stripe_index].dev; stripes[i].dev = map->stripes[stripe_index].dev;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) { BTRFS_BLOCK_GROUP_RAID10)) {
stripes[i].length = stripes_per_dev << BTRFS_STRIPE_LEN_SHIFT; stripes[i].length = btrfs_stripe_nr_to_offset(stripes_per_dev);
if (i / sub_stripes < remaining_stripes) if (i / sub_stripes < remaining_stripes)
stripes[i].length += BTRFS_STRIPE_LEN; stripes[i].length += BTRFS_STRIPE_LEN;
...@@ -6183,8 +6183,8 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op, ...@@ -6183,8 +6183,8 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
ASSERT(*stripe_offset < U32_MAX); ASSERT(*stripe_offset < U32_MAX);
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
unsigned long full_stripe_len = nr_data_stripes(map) << unsigned long full_stripe_len =
BTRFS_STRIPE_LEN_SHIFT; btrfs_stripe_nr_to_offset(nr_data_stripes(map));
/* /*
* For full stripe start, we use previously calculated * For full stripe start, we use previously calculated
...@@ -6196,8 +6196,8 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op, ...@@ -6196,8 +6196,8 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
* not ensured to be power of 2. * not ensured to be power of 2.
*/ */
*full_stripe_start = *full_stripe_start =
(u64)rounddown(*stripe_nr, nr_data_stripes(map)) << btrfs_stripe_nr_to_offset(
BTRFS_STRIPE_LEN_SHIFT; rounddown(*stripe_nr, nr_data_stripes(map)));
ASSERT(*full_stripe_start + full_stripe_len > offset); ASSERT(*full_stripe_start + full_stripe_len > offset);
ASSERT(*full_stripe_start <= offset); ASSERT(*full_stripe_start <= offset);
...@@ -6223,7 +6223,7 @@ static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup * ...@@ -6223,7 +6223,7 @@ static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *
{ {
dst->dev = map->stripes[stripe_index].dev; dst->dev = map->stripes[stripe_index].dev;
dst->physical = map->stripes[stripe_index].physical + dst->physical = map->stripes[stripe_index].physical +
stripe_offset + ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT); stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
} }
int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
...@@ -6345,7 +6345,8 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, ...@@ -6345,7 +6345,8 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
/* Return the length to the full stripe end */ /* Return the length to the full stripe end */
*length = min(logical + *length, *length = min(logical + *length,
raid56_full_stripe_start + em->start + raid56_full_stripe_start + em->start +
(data_stripes << BTRFS_STRIPE_LEN_SHIFT)) - logical; btrfs_stripe_nr_to_offset(data_stripes)) -
logical;
stripe_index = 0; stripe_index = 0;
stripe_offset = 0; stripe_offset = 0;
} else { } else {
...@@ -6435,7 +6436,7 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, ...@@ -6435,7 +6436,7 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* modulo, to reduce one modulo call. * modulo, to reduce one modulo call.
*/ */
bioc->full_stripe_logical = em->start + bioc->full_stripe_logical = em->start +
((stripe_nr * data_stripes) << BTRFS_STRIPE_LEN_SHIFT); btrfs_stripe_nr_to_offset(stripe_nr * data_stripes);
for (i = 0; i < num_stripes; i++) for (i = 0; i < num_stripes; i++)
set_io_stripe(&bioc->stripes[i], map, set_io_stripe(&bioc->stripes[i], map,
(i + stripe_nr) % num_stripes, (i + stripe_nr) % num_stripes,
...@@ -8032,7 +8033,7 @@ static void map_raid56_repair_block(struct btrfs_io_context *bioc, ...@@ -8032,7 +8033,7 @@ static void map_raid56_repair_block(struct btrfs_io_context *bioc,
for (i = 0; i < data_stripes; i++) { for (i = 0; i < data_stripes; i++) {
u64 stripe_start = bioc->full_stripe_logical + u64 stripe_start = bioc->full_stripe_logical +
(i << BTRFS_STRIPE_LEN_SHIFT); btrfs_stripe_nr_to_offset(i);
if (logical >= stripe_start && if (logical >= stripe_start &&
logical < stripe_start + BTRFS_STRIPE_LEN) logical < stripe_start + BTRFS_STRIPE_LEN)
......
...@@ -574,6 +574,17 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) ...@@ -574,6 +574,17 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
sizeof(struct btrfs_stripe) * (num_stripes - 1); sizeof(struct btrfs_stripe) * (num_stripes - 1);
} }
/*
 * Convert a stripe number to its byte offset inside the chunk.
 *
 * @stripe_nr is only 32 bits wide; shifting it directly by
 * BTRFS_STRIPE_LEN_SHIFT can overflow u32 for chunks larger than 4G,
 * so widen it to u64 before the shift.
 */
static inline u64 btrfs_stripe_nr_to_offset(u32 stripe_nr)
{
	u64 nr = stripe_nr;

	return nr << BTRFS_STRIPE_LEN_SHIFT;
}
void btrfs_get_bioc(struct btrfs_io_context *bioc); void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc); void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment