Commit 889c65b3 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: remove the imap_valid flag

Simplify the way we check for a valid imap - we know we have a valid
mapping after xfs_map_blocks returned successfully, and we know we can
call xfs_imap_valid on any imap, as it will always fail on a
zero-initialized map.

We can also remove the xfs_imap_valid function and fold it into
xfs_map_blocks now.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 3345746e
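
The claim that the check "will always fail on a zero-initialized map" follows directly from the half-open range test the commit folds into xfs_map_blocks: a zeroed mapping describes the empty interval [0, 0), which no offset can fall into. Below is a minimal userspace sketch of that test, using simplified stand-in types rather than the kernel's struct xfs_bmbt_irec; it illustrates the invariant only, and is not the kernel code itself.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct xfs_bmbt_irec (units: filesystem blocks). */
struct irec {
	uint64_t	br_startoff;	/* start of the mapping */
	uint64_t	br_blockcount;	/* length of the mapping */
};

/* The range test that replaces the imap_valid flag. */
static bool imap_covers(const struct irec *imap, uint64_t offset_fsb)
{
	return offset_fsb >= imap->br_startoff &&
	       offset_fsb < imap->br_startoff + imap->br_blockcount;
}

int main(void)
{
	struct irec zeroed = { 0, 0 };	/* freshly zeroed cached map */
	struct irec mapped = { 8, 4 };	/* blocks [8, 12) are mapped */

	printf("%d\n", imap_covers(&zeroed, 0));	/* 0: [0, 0) is empty */
	printf("%d\n", imap_covers(&mapped, 9));	/* 1: inside [8, 12) */
	printf("%d\n", imap_covers(&mapped, 12));	/* 0: one past the end */
	return 0;
}
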
@@ -30,7 +30,6 @@
  */
 struct xfs_writepage_ctx {
 	struct xfs_bmbt_irec	imap;
-	bool			imap_valid;
 	unsigned int		io_type;
 	struct xfs_ioend	*ioend;
 	sector_t		last_block;
@@ -370,15 +369,47 @@ xfs_map_blocks(
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
 	ssize_t			count = i_blocksize(inode);
-	xfs_fileoff_t		offset_fsb, end_fsb;
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
 	struct xfs_bmbt_irec	imap;
 	int			whichfork = XFS_DATA_FORK;
 	struct xfs_iext_cursor	icur;
+	bool			imap_valid;
 	int			error = 0;
 
+	/*
+	 * We have to make sure the cached mapping is within EOF to protect
+	 * against eofblocks trimming on file release leaving us with a stale
+	 * mapping. Otherwise, a page for a subsequent file extending buffered
+	 * write could get picked up by this writeback cycle and written to the
+	 * wrong blocks.
+	 *
+	 * Note that what we really want here is a generic mapping invalidation
+	 * mechanism to protect us from arbitrary extent modifying contexts, not
+	 * just eofblocks.
+	 */
+	xfs_trim_extent_eof(&wpc->imap, ip);
+
+	/*
+	 * COW fork blocks can overlap data fork blocks even if the blocks
+	 * aren't shared.  COW I/O always takes precedent, so we must always
+	 * check for overlap on reflink inodes unless the mapping is already a
+	 * COW one.
+	 */
+	imap_valid = offset_fsb >= wpc->imap.br_startoff &&
+		     offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
+	if (imap_valid &&
+	    (!xfs_is_reflink_inode(ip) || wpc->io_type == XFS_IO_COW))
+		return 0;
+
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
+	/*
+	 * If we don't have a valid map, now it's time to get a new one for this
+	 * offset.  This will convert delayed allocations (including COW ones)
+	 * into real extents.  If we return without a valid map, it means we
+	 * landed in a hole and we skip the block.
+	 */
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
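
The early return added in the hunk above encodes the COW-precedence rule from the comment: a cached mapping is reused only when it covers the offset and there is no chance an overlapping COW extent should win instead. A small standalone sketch of that decision follows, with stand-in names that mirror only the boolean shape of the kernel logic:

#include <stdbool.h>
#include <stdio.h>

enum io_type { IO_OVERWRITE, IO_COW };	/* stand-ins for the XFS_IO_* values */

/* Reuse the cached map only if it is valid and COW cannot take precedence. */
static bool reuse_cached(bool imap_valid, bool is_reflink, enum io_type type)
{
	return imap_valid && (!is_reflink || type == IO_COW);
}

int main(void)
{
	printf("%d\n", reuse_cached(true,  false, IO_OVERWRITE)); /* 1: no COW fork to race with */
	printf("%d\n", reuse_cached(true,  true,  IO_OVERWRITE)); /* 0: reflink - must recheck COW fork */
	printf("%d\n", reuse_cached(true,  true,  IO_COW));       /* 1: mapping is already a COW one */
	printf("%d\n", reuse_cached(false, true,  IO_COW));       /* 0: stale or empty cached map */
	return 0;
}
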
@@ -387,7 +418,6 @@ xfs_map_blocks(
 	if (offset > mp->m_super->s_maxbytes - count)
 		count = mp->m_super->s_maxbytes - offset;
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
 	/*
 	 * Check if this is offset is covered by a COW extents, and if yes use
@@ -420,7 +450,7 @@ xfs_map_blocks(
 	/*
 	 * Map valid and no COW extent in the way?  We're done.
 	 */
-	if (wpc->imap_valid) {
+	if (imap_valid) {
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 		return 0;
 	}
@@ -465,31 +495,6 @@ xfs_map_blocks(
 	return 0;
 }
 
-STATIC bool
-xfs_imap_valid(
-	struct inode		*inode,
-	struct xfs_bmbt_irec	*imap,
-	xfs_off_t		offset)
-{
-	offset >>= inode->i_blkbits;
-
-	/*
-	 * We have to make sure the cached mapping is within EOF to protect
-	 * against eofblocks trimming on file release leaving us with a stale
-	 * mapping. Otherwise, a page for a subsequent file extending buffered
-	 * write could get picked up by this writeback cycle and written to the
-	 * wrong blocks.
-	 *
-	 * Note that what we really want here is a generic mapping invalidation
-	 * mechanism to protect us from arbitrary extent modifying contexts, not
-	 * just eofblocks.
-	 */
-	xfs_trim_extent_eof(imap, XFS_I(inode));
-	return offset >= imap->br_startoff &&
-		offset < imap->br_startoff + imap->br_blockcount;
-}
-
 STATIC void
 xfs_start_buffer_writeback(
 	struct buffer_head	*bh)
@@ -856,27 +861,10 @@ xfs_writepage_map(
 			continue;
 		}
 
-		if (wpc->imap_valid)
-			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
-							 file_offset);
-
-		/*
-		 * COW fork blocks can overlap data fork blocks even if the
-		 * blocks aren't shared.  COW I/O always takes precedent, so we
-		 * must always check for overlap on reflink inodes unless the
-		 * mapping is already a COW one.
-		 */
-		if (!wpc->imap_valid ||
-		    (xfs_is_reflink_inode(XFS_I(inode)) &&
-		     wpc->io_type != XFS_IO_COW)) {
-			error = xfs_map_blocks(wpc, inode, file_offset);
-			if (error)
-				goto out;
-			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
-							 file_offset);
-		}
-
-		if (!wpc->imap_valid || wpc->io_type == XFS_IO_HOLE)
+		error = xfs_map_blocks(wpc, inode, file_offset);
+		if (error)
+			break;
+		if (wpc->io_type == XFS_IO_HOLE)
 			continue;
 
 		lock_buffer(bh);
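
The rewritten loop above reflects the new contract of xfs_map_blocks: validation of the cached mapping now happens inside the callee, so a successful return is itself the validity signal and the caller only distinguishes error, hole, and usable mapping. A schematic sketch of that contract, with hypothetical names and a fake lookup rather than the kernel code:

#include <stdint.h>
#include <stdio.h>

struct write_ctx {
	uint64_t	start;	/* cached mapping start, in blocks */
	uint64_t	len;	/* cached mapping length; 0 = never valid */
};

/*
 * Stand-in for xfs_map_blocks(): refresh the cache unless it already
 * covers 'off'.  A zeroed cache covers nothing, so the first call
 * always does a lookup.  Returns 0 on success, like the kernel helper.
 */
static int map_blocks(struct write_ctx *ctx, uint64_t off)
{
	if (off < ctx->start || off >= ctx->start + ctx->len) {
		ctx->start = off & ~7ULL;	/* fake lookup: 8-block extents */
		ctx->len = 8;
	}
	return 0;
}

int main(void)
{
	struct write_ctx ctx = { 0, 0 };

	for (uint64_t off = 0; off < 17; off += 4) {
		if (map_blocks(&ctx, off))
			break;	/* error: stop writeback, as the new code does */
		/* a successful return implies a usable mapping */
		printf("block %llu -> extent [%llu, %llu)\n",
		       (unsigned long long)off,
		       (unsigned long long)ctx.start,
		       (unsigned long long)(ctx.start + ctx.len));
	}
	return 0;
}
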
@@ -887,7 +875,6 @@ xfs_writepage_map(
 	ASSERT(wpc->ioend || list_empty(&submit_list));
 
-out:
 	/*
 	 * On error, we have to fail the ioend here because we have locked
 	 * buffers in the ioend.  If we don't do this, we'll deadlock
...