Commit a5211002 authored by Theodore Ts'o

ext4: pass allocation_request struct to ext4_(alloc,splice)_branch

Instead of initializing the allocation_request structure in
ext4_alloc_branch(), set it up in ext4_ind_map_blocks(), and then pass
it to ext4_alloc_branch() and ext4_splice_branch().

This allows ext4_ind_map_blocks to pass flags in the allocation
request structure without having to add Yet Another argument to
ext4_alloc_branch().
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
parent eb68d0e2
...@@ -318,34 +318,22 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, ...@@ -318,34 +318,22 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
* ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
* as described above and return 0. * as described above and return 0.
*/ */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode, static int ext4_alloc_branch(handle_t *handle,
ext4_lblk_t iblock, int indirect_blks, struct ext4_allocation_request *ar,
int *blks, ext4_fsblk_t goal, int indirect_blks, ext4_lblk_t *offsets,
ext4_lblk_t *offsets, Indirect *branch) Indirect *branch)
{ {
struct ext4_allocation_request ar;
struct buffer_head * bh; struct buffer_head * bh;
ext4_fsblk_t b, new_blocks[4]; ext4_fsblk_t b, new_blocks[4];
__le32 *p; __le32 *p;
int i, j, err, len = 1; int i, j, err, len = 1;
/*
* Set up for the direct block allocation
*/
memset(&ar, 0, sizeof(ar));
ar.inode = inode;
ar.len = *blks;
ar.logical = iblock;
if (S_ISREG(inode->i_mode))
ar.flags = EXT4_MB_HINT_DATA;
for (i = 0; i <= indirect_blks; i++) { for (i = 0; i <= indirect_blks; i++) {
if (i == indirect_blks) { if (i == indirect_blks) {
ar.goal = goal; new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
} else } else
goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode, ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
goal, 0, NULL, &err); ar->inode, ar->goal, 0, NULL, &err);
if (err) { if (err) {
i--; i--;
goto failed; goto failed;
...@@ -354,7 +342,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, ...@@ -354,7 +342,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
if (i == 0) if (i == 0)
continue; continue;
bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]); bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
if (unlikely(!bh)) { if (unlikely(!bh)) {
err = -ENOMEM; err = -ENOMEM;
goto failed; goto failed;
...@@ -372,7 +360,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, ...@@ -372,7 +360,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
b = new_blocks[i]; b = new_blocks[i];
if (i == indirect_blks) if (i == indirect_blks)
len = ar.len; len = ar->len;
for (j = 0; j < len; j++) for (j = 0; j < len; j++)
*p++ = cpu_to_le32(b++); *p++ = cpu_to_le32(b++);
...@@ -381,11 +369,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, ...@@ -381,11 +369,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
unlock_buffer(bh); unlock_buffer(bh);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh); err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
if (err) if (err)
goto failed; goto failed;
} }
*blks = ar.len;
return 0; return 0;
failed: failed:
for (; i >= 0; i--) { for (; i >= 0; i--) {
...@@ -396,10 +383,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, ...@@ -396,10 +383,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
* existing before ext4_alloc_branch() was called. * existing before ext4_alloc_branch() was called.
*/ */
if (i > 0 && i != indirect_blks && branch[i].bh) if (i > 0 && i != indirect_blks && branch[i].bh)
ext4_forget(handle, 1, inode, branch[i].bh, ext4_forget(handle, 1, ar->inode, branch[i].bh,
branch[i].bh->b_blocknr); branch[i].bh->b_blocknr);
ext4_free_blocks(handle, inode, NULL, new_blocks[i], ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
(i == indirect_blks) ? ar.len : 1, 0); (i == indirect_blks) ? ar->len : 1, 0);
} }
return err; return err;
} }
...@@ -419,9 +406,9 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, ...@@ -419,9 +406,9 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
* inode (->i_blocks, etc.). In case of success we end up with the full * inode (->i_blocks, etc.). In case of success we end up with the full
* chain to new block and return 0. * chain to new block and return 0.
*/ */
static int ext4_splice_branch(handle_t *handle, struct inode *inode, static int ext4_splice_branch(handle_t *handle,
ext4_lblk_t block, Indirect *where, int num, struct ext4_allocation_request *ar,
int blks) Indirect *where, int num)
{ {
int i; int i;
int err = 0; int err = 0;
...@@ -446,9 +433,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode, ...@@ -446,9 +433,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
* Update the host buffer_head or inode to point to more just allocated * Update the host buffer_head or inode to point to more just allocated
* direct blocks blocks * direct blocks blocks
*/ */
if (num == 0 && blks > 1) { if (num == 0 && ar->len > 1) {
current_block = le32_to_cpu(where->key) + 1; current_block = le32_to_cpu(where->key) + 1;
for (i = 1; i < blks; i++) for (i = 1; i < ar->len; i++)
*(where->p + i) = cpu_to_le32(current_block++); *(where->p + i) = cpu_to_le32(current_block++);
} }
...@@ -465,14 +452,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode, ...@@ -465,14 +452,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
*/ */
jbd_debug(5, "splicing indirect only\n"); jbd_debug(5, "splicing indirect only\n");
BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, where->bh); err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
if (err) if (err)
goto err_out; goto err_out;
} else { } else {
/* /*
* OK, we spliced it into the inode itself on a direct block. * OK, we spliced it into the inode itself on a direct block.
*/ */
ext4_mark_inode_dirty(handle, inode); ext4_mark_inode_dirty(handle, ar->inode);
jbd_debug(5, "splicing direct\n"); jbd_debug(5, "splicing direct\n");
} }
return err; return err;
...@@ -484,11 +471,11 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode, ...@@ -484,11 +471,11 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
* need to revoke the block, which is why we don't * need to revoke the block, which is why we don't
* need to set EXT4_FREE_BLOCKS_METADATA. * need to set EXT4_FREE_BLOCKS_METADATA.
*/ */
ext4_free_blocks(handle, inode, where[i].bh, 0, 1, ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
EXT4_FREE_BLOCKS_FORGET); EXT4_FREE_BLOCKS_FORGET);
} }
ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
blks, 0); ar->len, 0);
return err; return err;
} }
...@@ -525,11 +512,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, ...@@ -525,11 +512,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, struct ext4_map_blocks *map,
int flags) int flags)
{ {
struct ext4_allocation_request ar;
int err = -EIO; int err = -EIO;
ext4_lblk_t offsets[4]; ext4_lblk_t offsets[4];
Indirect chain[4]; Indirect chain[4];
Indirect *partial; Indirect *partial;
ext4_fsblk_t goal;
int indirect_blks; int indirect_blks;
int blocks_to_boundary = 0; int blocks_to_boundary = 0;
int depth; int depth;
...@@ -579,7 +566,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, ...@@ -579,7 +566,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
return -ENOSPC; return -ENOSPC;
} }
goal = ext4_find_goal(inode, map->m_lblk, partial); /* Set up for the direct block allocation */
memset(&ar, 0, sizeof(ar));
ar.inode = inode;
ar.logical = map->m_lblk;
if (S_ISREG(inode->i_mode))
ar.flags = EXT4_MB_HINT_DATA;
ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
/* the number of blocks need to allocate for [d,t]indirect blocks */ /* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1; indirect_blks = (chain + depth) - partial - 1;
...@@ -588,13 +582,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, ...@@ -588,13 +582,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 * Next look up the indirect map to count the total number of * Next look up the indirect map to count the total number of
* direct blocks to allocate for this branch. * direct blocks to allocate for this branch.
*/ */
count = ext4_blks_to_allocate(partial, indirect_blks, ar.len = ext4_blks_to_allocate(partial, indirect_blks,
map->m_len, blocks_to_boundary); map->m_len, blocks_to_boundary);
/* /*
* Block out ext4_truncate while we alter the tree * Block out ext4_truncate while we alter the tree
*/ */
err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, err = ext4_alloc_branch(handle, &ar, indirect_blks,
&count, goal,
offsets + (partial - chain), partial); offsets + (partial - chain), partial);
/* /*
...@@ -605,14 +599,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, ...@@ -605,14 +599,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
* may need to return -EAGAIN upwards in the worst case. --sct * may need to return -EAGAIN upwards in the worst case. --sct
*/ */
if (!err) if (!err)
err = ext4_splice_branch(handle, inode, map->m_lblk, err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
partial, indirect_blks, count);
if (err) if (err)
goto cleanup; goto cleanup;
map->m_flags |= EXT4_MAP_NEW; map->m_flags |= EXT4_MAP_NEW;
ext4_update_inode_fsync_trans(handle, inode, 1); ext4_update_inode_fsync_trans(handle, inode, 1);
count = ar.len;
got_it: got_it:
map->m_flags |= EXT4_MAP_MAPPED; map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = le32_to_cpu(chain[depth-1].key); map->m_pblk = le32_to_cpu(chain[depth-1].key);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment