Commit 161fa27f authored by Linus Torvalds

Merge branch 'iomap-4.19-merge' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull fs iomap refactoring from Darrick Wong:
 "This is the first part of the XFS changes for 4.19.

  Christoph and Andreas coordinated some refactoring work on the iomap
  code in preparation for removing buffer heads from XFS and porting
  gfs2 to iomap. I'm sending this small pull request ahead of the main
  XFS merge to avoid holding up gfs2 unnecessarily"

* 'iomap-4.19-merge' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  iomap: add inline data support to iomap_readpage_actor
  iomap: support direct I/O to inline data
  iomap: refactor iomap_dio_actor
  iomap: add initial support for writes without buffer heads
  iomap: add an iomap-based readpage and readpages implementation
  iomap: add private pointer to struct iomap
  iomap: add a page_done callback
  iomap: generic inline data handling
  iomap: complete partial direct I/O writes synchronously
  iomap: mark newly allocated buffer heads as new
  fs: factor out a __generic_write_end helper
parents a1a4f841 806a1477
@@ -1900,15 +1900,16 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 		break;
 	case IOMAP_UNWRITTEN:
 		/*
-		 * For unwritten regions, we always need to ensure that
-		 * sub-block writes cause the regions in the block we are not
-		 * writing to are zeroed. Set the buffer as new to ensure this.
+		 * For unwritten regions, we always need to ensure that regions
+		 * in the block we are not writing to are zeroed. Mark the
+		 * buffer as new to ensure this.
 		 */
 		set_buffer_new(bh);
 		set_buffer_unwritten(bh);
 		/* FALLTHRU */
 	case IOMAP_MAPPED:
-		if (offset >= i_size_read(inode))
+		if ((iomap->flags & IOMAP_F_NEW) ||
+		    offset >= i_size_read(inode))
 			set_buffer_new(bh);
 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
 				inode->i_blkbits;
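For context: buffers marked new are zeroed around the copied range by the generic write path, which is what keeps stale disk contents from leaking into freshly allocated blocks. A condensed sketch of that logic, paraphrased from __block_write_begin_int() in fs/buffer.c rather than quoted verbatim:

	if (buffer_new(bh) && !PageUptodate(page)) {
		/* zero the sub-block ranges outside [from, to) */
		if (block_end > to || block_start < from)
			zero_user_segments(page, to, block_end,
					   block_start, from);
	}

With the hunk above, blocks a filesystem reports as IOMAP_F_NEW get the same treatment as unwritten ones.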
@@ -2076,6 +2077,40 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 }
 EXPORT_SYMBOL(block_write_begin);
 
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+		struct page *page)
+{
+	loff_t old_size = inode->i_size;
+	bool i_size_changed = false;
+
+	/*
+	 * No need to use i_size_read() here, the i_size cannot change under us
+	 * because we hold i_rwsem.
+	 *
+	 * But it's important to update i_size while still holding page lock:
+	 * page writeout could otherwise come in and zero beyond i_size.
+	 */
+	if (pos + copied > inode->i_size) {
+		i_size_write(inode, pos + copied);
+		i_size_changed = true;
+	}
+
+	unlock_page(page);
+	put_page(page);
+
+	if (old_size < pos)
+		pagecache_isize_extended(inode, old_size, pos);
+	/*
+	 * Don't mark the inode dirty under page lock. First, it unnecessarily
+	 * makes the holding time of page lock longer. Second, it forces lock
+	 * ordering of page lock and transaction start for journaling
+	 * filesystems.
+	 */
+	if (i_size_changed)
+		mark_inode_dirty(inode);
+
+	return copied;
+}
+
 int block_write_end(struct file *file, struct address_space *mapping,
 		loff_t pos, unsigned len, unsigned copied,
 		struct page *page, void *fsdata)
@@ -2116,39 +2151,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 		loff_t pos, unsigned len, unsigned copied,
 		struct page *page, void *fsdata)
 {
-	struct inode *inode = mapping->host;
-	loff_t old_size = inode->i_size;
-	int i_size_changed = 0;
-
 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
-	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_mutex.
-	 *
-	 * But it's important to update i_size while still holding page lock:
-	 * page writeout could otherwise come in and zero beyond i_size.
-	 */
-	if (pos+copied > inode->i_size) {
-		i_size_write(inode, pos+copied);
-		i_size_changed = 1;
-	}
-
-	unlock_page(page);
-	put_page(page);
-
-	if (old_size < pos)
-		pagecache_isize_extended(inode, old_size, pos);
-
-	/*
-	 * Don't mark the inode dirty under page lock. First, it unnecessarily
-	 * makes the holding time of page lock longer. Second, it forces lock
-	 * ordering of page lock and transaction start for journaling
-	 * filesystems.
-	 */
-	if (i_size_changed)
-		mark_inode_dirty(inode);
-
-	return copied;
+	return __generic_write_end(mapping->host, pos, copied, page);
 }
 EXPORT_SYMBOL(generic_write_end);
...
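Factoring the tail of generic_write_end() into __generic_write_end() lets a buffered-write path that never attaches buffer heads reuse the i_size update, the page unlock/put, and the deferred inode dirtying. A hedged sketch of such a caller, with a hypothetical name (the real user is presumably in the collapsed fs/iomap.c diff below):

	static int example_write_end(struct inode *inode, loff_t pos,
			unsigned copied, struct page *page)
	{
		/* page was filled directly, with no buffer heads attached */
		flush_dcache_page(page);
		SetPageUptodate(page);
		set_page_dirty(page);
		return __generic_write_end(inode, pos, copied, page);
	}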
@@ -43,6 +43,8 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
 extern void guard_bio_eod(int rw, struct bio *bio);
 extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block, struct iomap *iomap);
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+		struct page *page);
 
 /*
  * char_dev.c
...
This diff is collapsed.
@@ -626,7 +626,7 @@ xfs_file_iomap_begin_delay(
 	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
 	 * them out if the write happens to fail.
 	 */
-	iomap->flags = IOMAP_F_NEW;
+	iomap->flags |= IOMAP_F_NEW;
 	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
 	if (isnullstartblock(got.br_startblock))
@@ -1032,6 +1032,8 @@ xfs_file_iomap_begin(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
+	iomap->flags |= IOMAP_F_BUFFER_HEAD;
+
 	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
 			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
 		/* Reserve delalloc blocks for regular writeback. */
@@ -1132,7 +1134,7 @@ xfs_file_iomap_begin(
 	if (error)
 		return error;
 
-	iomap->flags = IOMAP_F_NEW;
+	iomap->flags |= IOMAP_F_NEW;
 	trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
 
 out_finish:
...
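The switch from "=" to "|=" is load-bearing: xfs_file_iomap_begin() now sets IOMAP_F_BUFFER_HEAD at function entry, and a plain assignment on the allocation paths would silently discard it. A minimal illustration:

	iomap->flags |= IOMAP_F_BUFFER_HEAD;	/* set at function entry */

	/* ... later, when new blocks are allocated ... */
	iomap->flags = IOMAP_F_NEW;	/* bug: clears IOMAP_F_BUFFER_HEAD */
	iomap->flags |= IOMAP_F_NEW;	/* correct: both flags survive */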
@@ -9,6 +9,7 @@ struct fiemap_extent_info;
 struct inode;
 struct iov_iter;
 struct kiocb;
+struct page;
 struct vm_area_struct;
 struct vm_fault;
@@ -29,6 +30,7 @@ struct vm_fault;
  */
 #define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
 #define IOMAP_F_DIRTY		0x02	/* uncommitted metadata */
+#define IOMAP_F_BUFFER_HEAD	0x04	/* file system requires buffer heads */
 
 /*
  * Flags that only need to be reported for IOMAP_REPORT requests:
@@ -55,6 +57,16 @@ struct iomap {
 	u16			flags;	/* flags for mapping */
 	struct block_device	*bdev;	/* block device for I/O */
 	struct dax_device	*dax_dev; /* dax_dev for dax operations */
+	void			*inline_data;
+	void			*private; /* filesystem private */
+
+	/*
+	 * Called when finished processing a page in the mapping returned in
+	 * this iomap.  At least for now this is only supported in the buffered
+	 * write path.
+	 */
+	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+			struct page *page, struct iomap *iomap);
 };
 
 /*
@@ -88,6 +100,10 @@ struct iomap_ops {
 
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 		const struct iomap_ops *ops);
+int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_readpages(struct address_space *mapping, struct list_head *pages,
+		unsigned nr_pages, const struct iomap_ops *ops);
+int iomap_set_page_dirty(struct page *page);
 int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
...
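For orientation, the new prototypes are shaped for wiring the iomap read path into a filesystem's address_space_operations, and page_done/private give ->iomap_begin a way to hand per-mapping state to the end of a buffered write. A hedged sketch with hypothetical names (example_iomap_ops and the callbacks below are illustrations, not code from this merge):

	static int example_readpage(struct file *unused, struct page *page)
	{
		return iomap_readpage(page, &example_iomap_ops);
	}

	static int example_readpages(struct file *unused,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
	{
		return iomap_readpages(mapping, pages, nr_pages,
				&example_iomap_ops);
	}

	/* Hypothetical page_done callback: release whatever state
	 * ->iomap_begin stashed in iomap->private once the buffered
	 * write is finished with the page. */
	static void example_page_done(struct inode *inode, loff_t pos,
			unsigned copied, struct page *page,
			struct iomap *iomap)
	{
		kfree(iomap->private);
	}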