Commit 3faed667 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: don't look at buffer heads in xfs_add_to_ioend

Calculate all information for the bio based on the passed in information
without requiring a buffer_head structure.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 889c65b3
...@@ -32,7 +32,6 @@ struct xfs_writepage_ctx { ...@@ -32,7 +32,6 @@ struct xfs_writepage_ctx {
struct xfs_bmbt_irec imap; struct xfs_bmbt_irec imap;
unsigned int io_type; unsigned int io_type;
struct xfs_ioend *ioend; struct xfs_ioend *ioend;
sector_t last_block;
}; };
void void
...@@ -534,11 +533,6 @@ xfs_start_page_writeback( ...@@ -534,11 +533,6 @@ xfs_start_page_writeback(
unlock_page(page); unlock_page(page);
} }
/*
 * Append the page fragment backing @bh to @bio.  Returns the number of
 * bytes actually added (bh->b_size on success, less if the bio is full).
 */
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	unsigned int	len = bh->b_size;
	unsigned int	off = bh_offset(bh);

	return bio_add_page(bio, bh->b_page, len, off);
}
/* /*
* Submit the bio for an ioend. We are passed an ioend with a bio attached to * Submit the bio for an ioend. We are passed an ioend with a bio attached to
* it, and we submit that bio. The ioend may be used for multiple bio * it, and we submit that bio. The ioend may be used for multiple bio
...@@ -604,27 +598,20 @@ xfs_submit_ioend( ...@@ -604,27 +598,20 @@ xfs_submit_ioend(
return 0; return 0;
} }
static void
xfs_init_bio_from_bh(
struct bio *bio,
struct buffer_head *bh)
{
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
}
static struct xfs_ioend * static struct xfs_ioend *
xfs_alloc_ioend( xfs_alloc_ioend(
struct inode *inode, struct inode *inode,
unsigned int type, unsigned int type,
xfs_off_t offset, xfs_off_t offset,
struct buffer_head *bh) struct block_device *bdev,
sector_t sector)
{ {
struct xfs_ioend *ioend; struct xfs_ioend *ioend;
struct bio *bio; struct bio *bio;
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset); bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
xfs_init_bio_from_bh(bio, bh); bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = sector;
ioend = container_of(bio, struct xfs_ioend, io_inline_bio); ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
INIT_LIST_HEAD(&ioend->io_list); INIT_LIST_HEAD(&ioend->io_list);
...@@ -649,13 +636,14 @@ static void ...@@ -649,13 +636,14 @@ static void
xfs_chain_bio( xfs_chain_bio(
struct xfs_ioend *ioend, struct xfs_ioend *ioend,
struct writeback_control *wbc, struct writeback_control *wbc,
struct buffer_head *bh) struct block_device *bdev,
sector_t sector)
{ {
struct bio *new; struct bio *new;
new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
xfs_init_bio_from_bh(new, bh); bio_set_dev(new, bdev);
new->bi_iter.bi_sector = sector;
bio_chain(ioend->io_bio, new); bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */ bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
...@@ -665,39 +653,45 @@ xfs_chain_bio( ...@@ -665,39 +653,45 @@ xfs_chain_bio(
} }
/* /*
* Test to see if we've been building up a completion structure for * Test to see if we have an existing ioend structure that we could append to
* earlier buffers -- if so, we try to append to this ioend if we * first, otherwise finish off the current ioend and start another.
* can, otherwise we finish off any current ioend and start another.
* Return the ioend we finished off so that the caller can submit it
* once it has finished processing the dirty page.
*/ */
STATIC void STATIC void
xfs_add_to_ioend( xfs_add_to_ioend(
struct inode *inode, struct inode *inode,
struct buffer_head *bh,
xfs_off_t offset, xfs_off_t offset,
struct page *page,
struct xfs_writepage_ctx *wpc, struct xfs_writepage_ctx *wpc,
struct writeback_control *wbc, struct writeback_control *wbc,
struct list_head *iolist) struct list_head *iolist)
{ {
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
unsigned len = i_blocksize(inode);
unsigned poff = offset & (PAGE_SIZE - 1);
sector_t sector;
sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type || if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
bh->b_blocknr != wpc->last_block + 1 || sector != bio_end_sector(wpc->ioend->io_bio) ||
offset != wpc->ioend->io_offset + wpc->ioend->io_size) { offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
if (wpc->ioend) if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist); list_add(&wpc->ioend->io_list, iolist);
wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh); wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
bdev, sector);
} }
/* /*
* If the buffer doesn't fit into the bio we need to allocate a new * If the block doesn't fit into the bio we need to allocate a new
* one. This shouldn't happen more than once for a given buffer. * one. This shouldn't happen more than once for a given block.
*/ */
while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size) while (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len)
xfs_chain_bio(wpc->ioend, wbc, bh); xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
wpc->ioend->io_size += bh->b_size; wpc->ioend->io_size += len;
wpc->last_block = bh->b_blocknr;
xfs_start_buffer_writeback(bh);
} }
STATIC void STATIC void
...@@ -869,7 +863,9 @@ xfs_writepage_map( ...@@ -869,7 +863,9 @@ xfs_writepage_map(
lock_buffer(bh); lock_buffer(bh);
xfs_map_at_offset(inode, bh, &wpc->imap, file_offset); xfs_map_at_offset(inode, bh, &wpc->imap, file_offset);
xfs_add_to_ioend(inode, bh, file_offset, wpc, wbc, &submit_list); xfs_add_to_ioend(inode, file_offset, page, wpc, wbc,
&submit_list);
xfs_start_buffer_writeback(bh);
count++; count++;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.