Commit a8414fa3 authored by Linus Torvalds

Merge tag 'xfs-iomap-for-linus-4.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs

Pull xfs and iomap fixes from Dave Chinner:
 "Changes in this update:

  Regression fixes for XFS changes introduced in 4.8-rc1:
   - buffer IO accounting assert failure
   - ENOSPC block accounting reservation issue
   - DAX IO path page cache invalidation fix
   - rmapbt on-disk block count in agf
   - correct classification of rmap block type when updating AGFL
   - iomap support for attribute fork mapping

  Regression fixes for iomap infrastructure in 4.8-rc1:
   - fiemap: honor FIEMAP_FLAG_SYNC
   - fiemap: implement FIEMAP_FLAG_XATTR support to fix XFS regression
   - make mark_page_accessed and pagefault_disable usage consistent with
     other IO paths"

* tag 'xfs-iomap-for-linus-4.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs:
  xfs: remove OWN_AG rmap when allocating a block from the AGFL
  xfs: (re-)implement FIEMAP_FLAG_XATTR
  xfs: simplify xfs_file_iomap_begin
  iomap: mark ->iomap_end as optional
  iomap: prepare iomap_fiemap for attribute mappings
  iomap: fiemap should honor the FIEMAP_FLAG_SYNC flag
  iomap: remove superfluous pagefault_disable from iomap_write_actor
  iomap: remove superfluous mark_page_accessed from iomap_write_actor
  xfs: store rmapbt block count in the AGF
  xfs: don't invalidate whole file on DAX read/write
  xfs: fix bogus space reservation in xfs_iomap_write_allocate
  xfs: don't assert fail on non-async buffers on ioacct decrement
parents 3f318b3c 32438cf9
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -84,8 +84,11 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
 	 * Now the data has been copied, commit the range we've copied.  This
 	 * should not fail unless the filesystem has had a fatal error.
 	 */
-	ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
-			flags, &iomap);
+	if (ops->iomap_end) {
+		ret = ops->iomap_end(inode, pos, length,
+				written > 0 ? written : 0,
+				flags, &iomap);
+	}
 
 	return written ? written : ret;
 }
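
With ->iomap_end now optional, a mapping-only iomap provider can register just ->iomap_begin; the xfs_xattr_iomap_ops added later in this merge relies on exactly that. A minimal sketch of what this enables, using the 4.8 iomap_ops signatures (demo_* names are hypothetical, not part of this merge):

/* hypothetical provider that needs no commit/teardown step */
static int demo_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/* fill out *iomap for [pos, pos + length) here */
	return 0;
}

struct iomap_ops demo_iomap_ops = {
	.iomap_begin	= demo_iomap_begin,
	/* .iomap_end left NULL: iomap_apply() now skips the commit step */
};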
@@ -194,12 +197,9 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		if (mapping_writably_mapped(inode->i_mapping))
 			flush_dcache_page(page);
 
-		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-		pagefault_enable();
 
 		flush_dcache_page(page);
-		mark_page_accessed(page);
 
 		status = iomap_write_end(inode, pos, bytes, copied, page);
 		if (unlikely(status < 0))
...@@ -470,13 +470,18 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, ...@@ -470,13 +470,18 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
if (ret) if (ret)
return ret; return ret;
ret = filemap_write_and_wait(inode->i_mapping); if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
if (ret) ret = filemap_write_and_wait(inode->i_mapping);
return ret; if (ret)
return ret;
}
while (len > 0) { while (len > 0) {
ret = iomap_apply(inode, start, len, 0, ops, &ctx, ret = iomap_apply(inode, start, len, 0, ops, &ctx,
iomap_fiemap_actor); iomap_fiemap_actor);
/* inode with no (attribute) mapping will give ENOENT */
if (ret == -ENOENT)
break;
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ret == 0) if (ret == 0)
......
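
For context, iomap_fiemap() sits underneath the FS_IOC_FIEMAP ioctl, so the FIEMAP_FLAG_SYNC fix is visible from userspace. A hedged sketch of a caller relying on the now-honored flag (illustrative only; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	fm = calloc(1, sizeof(*fm) + sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = ~0ULL;			/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data first */
	fm->fm_extent_count = 1;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		printf("%u extent(s) mapped\n", fm->fm_mapped_extents);
	return 0;
}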
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1582,6 +1582,7 @@ xfs_alloc_ag_vextent_small(
 	xfs_extlen_t	*flenp,	/* result length */
 	int		*stat)	/* status: 0-freelist, 1-normal/none */
 {
+	struct xfs_owner_info	oinfo;
 	int		error;
 	xfs_agblock_t	fbno;
 	xfs_extlen_t	flen;
@@ -1624,6 +1625,18 @@ xfs_alloc_ag_vextent_small(
 				error0);
 			args->wasfromfl = 1;
 			trace_xfs_alloc_small_freelist(args);
+
+			/*
+			 * If we're feeding an AGFL block to something that
+			 * doesn't live in the free space, we need to clear
+			 * out the OWN_AG rmap.
+			 */
+			xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
+					fbno, 1, &oinfo);
+			if (error)
+				goto error0;
+
 			*stat = 0;
 			return 0;
 		}
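
The in-line comment above is the crux of this fix: each allocated block carries exactly one reverse-mapping record, and AGFL blocks are already recorded as owned by OWN_AG, so handing one out to another user must first drop that record. A toy model of the single-owner invariant (plain C, not kernel code; all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

enum owner { OWN_NONE, OWN_AG, OWN_INOBT };

static enum owner rmap[8] = { [3] = OWN_AG };	/* block 3 sits on the AGFL */

/* remove a (block, owner) record; a mismatch would be corruption */
static bool rmap_free_demo(unsigned bno, enum owner o)
{
	if (rmap[bno] != o)
		return false;
	rmap[bno] = OWN_NONE;
	return true;
}

int main(void)
{
	/* give AGFL block 3 to the inode btree: drop OWN_AG, then re-own */
	if (rmap_free_demo(3, OWN_AG))
		rmap[3] = OWN_INOBT;
	printf("block 3 owner: %d\n", rmap[3]);
	return 0;
}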
@@ -2264,6 +2277,7 @@ xfs_alloc_log_agf(
 		offsetof(xfs_agf_t, agf_longest),
 		offsetof(xfs_agf_t, agf_btreeblks),
 		offsetof(xfs_agf_t, agf_uuid),
+		offsetof(xfs_agf_t, agf_rmap_blocks),
 		sizeof(xfs_agf_t)
 	};
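
The offsets table above drives XFS's flags-to-byte-range logging: xfs_alloc_log_agf() turns a mask of XFS_AGF_* bits into one contiguous byte span of the AGF to log, which is why agf_rmap_blocks must be listed here in flag-bit order. A self-contained model of the technique (illustrative only, not the kernel's xfs_btree_offsets()):

#include <stddef.h>
#include <stdio.h>

struct agf_demo { int a; int b; int c; };

/* flag bit i maps to offsets[i]; the final entry is the struct size */
static const size_t offsets[] = {
	offsetof(struct agf_demo, a),
	offsetof(struct agf_demo, b),
	offsetof(struct agf_demo, c),
	sizeof(struct agf_demo),
};

/* log the smallest byte range covering every flagged field */
static void log_range(unsigned fields)
{
	size_t first = sizeof(struct agf_demo), last = 0;

	for (unsigned i = 0; i < 3; i++) {
		if (!(fields & (1u << i)))
			continue;
		if (offsets[i] < first)
			first = offsets[i];
		if (offsets[i + 1] - 1 > last)
			last = offsets[i + 1] - 1;
	}
	printf("log bytes %zu..%zu\n", first, last);
}

int main(void)
{
	log_range((1u << 1) | (1u << 2));	/* fields b and c */
	return 0;
}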
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -640,12 +640,15 @@ typedef struct xfs_agf {
 	__be32		agf_btreeblks;	/* # of blocks held in AGF btrees */
 	uuid_t		agf_uuid;	/* uuid of filesystem */
 
+	__be32		agf_rmap_blocks;	/* rmapbt blocks used */
+	__be32		agf_padding;		/* padding */
+
 	/*
 	 * reserve some contiguous space for future logged fields before we add
 	 * the unlogged fields. This makes the range logging via flags and
 	 * structure offsets much simpler.
 	 */
-	__be64		agf_spare64[16];
+	__be64		agf_spare64[15];
 
 	/* unlogged fields, written during buffer writeback. */
 	__be64		agf_lsn;	/* last write sequence */
@@ -670,7 +673,8 @@ typedef struct xfs_agf {
 #define	XFS_AGF_LONGEST		0x00000400
 #define	XFS_AGF_BTREEBLKS	0x00000800
 #define	XFS_AGF_UUID		0x00001000
-#define	XFS_AGF_NUM_BITS	13
+#define	XFS_AGF_RMAP_BLOCKS	0x00002000
+#define	XFS_AGF_NUM_BITS	14
 #define	XFS_AGF_ALL_BITS	((1 << XFS_AGF_NUM_BITS) - 1)
 
 #define XFS_AGF_FLAGS \
@@ -686,7 +690,8 @@ typedef struct xfs_agf {
 	{ XFS_AGF_FREEBLKS,	"FREEBLKS" }, \
 	{ XFS_AGF_LONGEST,	"LONGEST" }, \
 	{ XFS_AGF_BTREEBLKS,	"BTREEBLKS" }, \
-	{ XFS_AGF_UUID,		"UUID" }
+	{ XFS_AGF_UUID,		"UUID" }, \
+	{ XFS_AGF_RMAP_BLOCKS,	"RMAP_BLOCKS" }
 
 /* disk block (xfs_daddr_t) in the AG */
 #define XFS_AGF_DADDR(mp)	((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
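
The field juggling above is size-neutral: the two new __be32 fields consume exactly one of the sixteen __be64 spare slots, so the on-disk AGF length, and everything after agf_spare64, stays put. A one-line check of that arithmetic, sketched with stand-in typedefs for the kernel's endian types:

#include <stdint.h>

typedef uint32_t be32;	/* stand-in for __be32 */
typedef uint64_t be64;	/* stand-in for __be64 */

_Static_assert(2 * sizeof(be32) + 15 * sizeof(be64) == 16 * sizeof(be64),
	       "agf_rmap_blocks + agf_padding replace exactly one spare64");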
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -98,6 +98,8 @@ xfs_rmapbt_alloc_block(
 	union xfs_btree_ptr	*new,
 	int			*stat)
 {
+	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
 	int			error;
 	xfs_agblock_t		bno;
 
@@ -124,6 +126,8 @@ xfs_rmapbt_alloc_block(
 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
 	new->s = cpu_to_be32(bno);
+	be32_add_cpu(&agf->agf_rmap_blocks, 1);
+	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
 
 	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
 	*stat = 1;
@@ -143,6 +147,8 @@ xfs_rmapbt_free_block(
 	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
 	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
 			bno, 1);
+	be32_add_cpu(&agf->agf_rmap_blocks, -1);
+	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
 	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
 	if (error)
 		return error;
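
be32_add_cpu() updates a big-endian on-disk counter in place, which is what keeps agf_rmap_blocks accurate as the rmapbt grows and shrinks. A userspace approximation of what the helper does, with htonl/ntohl standing in for the kernel's endian conversions:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* approximate model of the kernel's be32_add_cpu() */
static void be32_add_cpu_demo(uint32_t *bep, int32_t n)
{
	*bep = htonl(ntohl(*bep) + n);	/* disk -> cpu, add, cpu -> disk */
}

int main(void)
{
	uint32_t rmap_blocks = htonl(1);	/* as xfs_growfs writes it */

	be32_add_cpu_demo(&rmap_blocks, 1);	/* rmapbt grew by one block */
	printf("count = %u\n", ntohl(rmap_blocks));
	return 0;
}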
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -115,7 +115,6 @@ xfs_buf_ioacct_dec(
 	if (!(bp->b_flags & _XBF_IN_FLIGHT))
 		return;
 
-	ASSERT(bp->b_flags & XBF_ASYNC);
 	bp->b_flags &= ~_XBF_IN_FLIGHT;
 	percpu_counter_dec(&bp->b_target->bt_io_count);
 }
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -741,9 +741,20 @@ xfs_file_dax_write(
 	 * page is inserted into the pagecache when we have to serve a write
	 * fault on a hole. It should never be dirtied and can simply be
 	 * dropped from the pagecache once we get real data for the page.
+	 *
+	 * XXX: This is racy against mmap, and there's nothing we can do about
+	 * it. dax_do_io() should really do this invalidation internally as
+	 * it will know if we've allocated over a hole for this specific IO and
+	 * if so it needs to update the mapping tree and invalidate existing
+	 * PTEs over the newly allocated range. Remove this invalidation when
+	 * dax_do_io() is fixed up.
 	 */
 	if (mapping->nrpages) {
-		ret = invalidate_inode_pages2(mapping);
+		loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
+
+		ret = invalidate_inode_pages2_range(mapping,
+					iocb->ki_pos >> PAGE_SHIFT,
+					end >> PAGE_SHIFT);
 		WARN_ON_ONCE(ret);
 	}
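
The invalidation range is inclusive at both ends, hence the "- 1" before shifting: the last byte actually written determines the last page index to drop. A worked example with 4k pages (PAGE_SHIFT_DEMO is a local stand-in for the kernel's PAGE_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12	/* assume 4k pages for the example */

int main(void)
{
	/* a 100-byte write at offset 8190 touches pages 1 and 2 only */
	uint64_t pos = 8190, count = 100;
	uint64_t end = pos + count - 1;	/* inclusive last byte: 8289 */

	printf("invalidate pages %llu..%llu\n",
	       (unsigned long long)(pos >> PAGE_SHIFT_DEMO),
	       (unsigned long long)(end >> PAGE_SHIFT_DEMO));
	return 0;
}

Without the "- 1", a write ending exactly on a page boundary would needlessly invalidate one extra page past the IO.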
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -248,6 +248,7 @@ xfs_growfs_data_private(
 			agf->agf_roots[XFS_BTNUM_RMAPi] =
 						cpu_to_be32(XFS_RMAP_BLOCK(mp));
 			agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
+			agf->agf_rmap_blocks = cpu_to_be32(1);
 		}
 
 	agf->agf_flfirst = cpu_to_be32(1);
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -715,12 +715,16 @@ xfs_iomap_write_allocate(
 	 * is in the delayed allocation extent on which we sit
 	 * but before our buffer starts.
 	 */
-
 	nimaps = 0;
 	while (nimaps == 0) {
 		nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
-
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
+		/*
+		 * We have already reserved space for the extent and any
+		 * indirect blocks when creating the delalloc extent,
+		 * there is no need to reserve space in this transaction
+		 * again.
+		 */
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
 				0, XFS_TRANS_RESERVE, &tp);
 		if (error)
 			return error;
@@ -1037,20 +1041,14 @@ xfs_file_iomap_begin(
 			return error;
 
 		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
-		xfs_bmbt_to_iomap(ip, iomap, &imap);
-	} else if (nimaps) {
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
-		xfs_bmbt_to_iomap(ip, iomap, &imap);
 	} else {
+		ASSERT(nimaps);
+
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		trace_xfs_iomap_not_found(ip, offset, length, 0, &imap);
-		iomap->blkno = IOMAP_NULL_BLOCK;
-		iomap->type = IOMAP_HOLE;
-		iomap->offset = offset;
-		iomap->length = length;
+		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
 	}
 
+	xfs_bmbt_to_iomap(ip, iomap, &imap);
 	return 0;
 }
@@ -1112,3 +1110,48 @@ struct iomap_ops xfs_iomap_ops = {
 	.iomap_begin		= xfs_file_iomap_begin,
 	.iomap_end		= xfs_file_iomap_end,
 };
+
+static int
+xfs_xattr_iomap_begin(
+	struct inode		*inode,
+	loff_t			offset,
+	loff_t			length,
+	unsigned		flags,
+	struct iomap		*iomap)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
+	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1, error = 0;
+	unsigned		lockmode;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	lockmode = xfs_ilock_data_map_shared(ip);
+
+	/* if there are no attribute fork or extents, return ENOENT */
+	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+		error = -ENOENT;
+		goto out_unlock;
+	}
+
+	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
+	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+
+	if (!error) {
+		ASSERT(nimaps);
+		xfs_bmbt_to_iomap(ip, iomap, &imap);
+	}
+
+	return error;
+}
+
+struct iomap_ops xfs_xattr_iomap_ops = {
+	.iomap_begin		= xfs_xattr_iomap_begin,
+};
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -35,5 +35,6 @@ void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
 		struct xfs_bmbt_irec *);
 
 extern struct iomap_ops xfs_iomap_ops;
+extern struct iomap_ops xfs_xattr_iomap_ops;
 
 #endif /* __XFS_IOMAP_H__*/
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -1009,7 +1009,14 @@ xfs_vn_fiemap(
 	int			error;
 
 	xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
-	error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops);
+	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
+		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
+		error = iomap_fiemap(inode, fieinfo, start, length,
+				&xfs_xattr_iomap_ops);
+	} else {
+		error = iomap_fiemap(inode, fieinfo, start, length,
+				&xfs_iomap_ops);
+	}
 	xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
 
 	return error;
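
Continuing the hypothetical fiemap sketch from the fs/iomap.c hunk above: selecting the attribute fork is just a flag change. For a file without an attr fork, the -ENOENT raised by xfs_xattr_iomap_begin() is swallowed by the break added in iomap_fiemap(), so userspace sees a successful call with zero mapped extents:

	/* follow-on to the earlier sketch: same fd, same fm buffer */
	fm->fm_flags = FIEMAP_FLAG_XATTR;	/* map the attr fork instead */
	fm->fm_extent_count = 1;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		printf("attr fork: %u extent(s)\n", fm->fm_mapped_extents);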
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1298,7 +1298,6 @@ DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
 DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
 DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
 DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_not_found);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),