Commit b16b1deb authored by Tejun Heo, committed by Jens Axboe

writeback: make writeback_control track the inode being written back

Currently, for cgroup writeback, the IO submission paths directly
associate the bio's with the blkcg from inode_to_wb_blkcg_css();
however, it'd be necessary to keep more writeback context to implement
foreign inode writeback detection.  wbc (writeback_control) is the
natural fit for the extra context - it persists throughout the
writeback of each inode and is passed all the way down to IO
submission paths.

This patch adds wbc_attach_and_unlock_inode(), wbc_detach_inode(), and
wbc_attach_fdatawrite_inode() which are used to associate wbc with the
inode being written back.  IO submission paths now use wbc_init_bio()
instead of directly associating bio's with blkcg themselves.  This
leaves inode_to_wb_blkcg_css() w/o any user.  The function is removed.
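In caller terms, the intended pattern looks like the following condensed
sketch (lifted from the writeback_single_inode() hunk further down, with the
surrounding list_lock handling and error paths left out):

        spin_lock(&inode->i_lock);
        /* ... skip the inode if it doesn't need writeback ... */
        inode->i_state |= I_SYNC;
        wbc_attach_and_unlock_inode(wbc, inode);    /* record inode's wb, drop i_lock */

        ret = __writeback_single_inode(inode, wbc); /* bios submitted here can pick up
                                                       wbc->wb via wbc_init_bio() */

        wbc_detach_inode(wbc);                      /* drop the wb ref, wbc->wb = NULL */

wbc_attach_fdatawrite_inode() is the same operation for the
__filemap_fdatawrite_range() entry point, except that it first calls
inode_attach_wb() so the inode is guaranteed to have a wb to record.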

wbc currently only tracks the associated wb (bdi_writeback).  Future
patches will add more for foreign inode detection.  The association is
established under i_lock which will be depended upon when migrating
foreign inodes to other wb's.

As the inode-to-wb association currently never changes once established,
going through wbc when initializing bio's doesn't cause any behavior
changes.
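In other words, each bio still ends up associated with the same blkcg_css as
before; the lookup is merely staged through wbc.  Condensed from the hunks
below (inode_to_wb_blkcg_css() was simply inode_to_wb(inode)->blkcg_css):

        /* before: submission paths looked the css up directly */
        bio_associate_blkcg(bio, inode_to_wb_blkcg_css(inode));

        /* after: wbc_attach_and_unlock_inode() records the wb ... */
        wbc->wb = inode_to_wb(inode);
        /* ... and wbc_init_bio() applies its css at submission time */
        bio_associate_blkcg(bio, wbc->wb->blkcg_css);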

v2: wbc_init_bio() now checks whether the wbc is associated with a wb
    before dereferencing it.  This can happen when pageout() is writing
    pages directly without going through the usual writeback path.  As
    the pageout() path is single-threaded, we don't want it to be
    blocked behind a slow cgroup and ultimately want it to delegate the
    actual writing to the usual writeback path.
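These are the two guards visible in the diff below: submit_bh_wbc() only
initializes the bio from wbc when it was handed one at all (submit_bh() and
_submit_bh() pass NULL), and wbc_init_bio() only tags the bio when a wb has
actually been attached:

        /* in submit_bh_wbc(): plain submit_bh()/_submit_bh() pass wbc == NULL */
        if (wbc)
                wbc_init_bio(wbc, bio);

        /* in wbc_init_bio(): pageout()'s wbc is never attached, so wbc->wb is NULL */
        if (wbc->wb)
                bio_associate_blkcg(bio, wbc->wb->blkcg_css);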
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 21c6321f
@@ -45,9 +45,9 @@
 #include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_blkcg(int rw, struct buffer_head *bh,
-                           unsigned long bio_flags,
-                           struct cgroup_subsys_state *blkcg_css);
+static int submit_bh_wbc(int rw, struct buffer_head *bh,
+                         unsigned long bio_flags,
+                         struct writeback_control *wbc);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
@@ -1709,7 +1709,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        unsigned int blocksize, bbits;
        int nr_underway = 0;
        int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
-       struct cgroup_subsys_state *blkcg_css = inode_to_wb_blkcg_css(inode);
 
        head = create_page_buffers(page, inode,
                                   (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -1798,7 +1797,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh_blkcg(write_op, bh, 0, blkcg_css);
+                       submit_bh_wbc(write_op, bh, 0, wbc);
                        nr_underway++;
                }
                bh = next;
@@ -1852,7 +1851,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh_blkcg(write_op, bh, 0, blkcg_css);
+                       submit_bh_wbc(write_op, bh, 0, wbc);
                        nr_underway++;
                }
                bh = next;
@@ -3017,11 +3016,11 @@ void guard_bio_eod(int rw, struct bio *bio)
        }
 }
 
-static int submit_bh_blkcg(int rw, struct buffer_head *bh,
-                           unsigned long bio_flags,
-                           struct cgroup_subsys_state *blkcg_css)
+static int submit_bh_wbc(int rw, struct buffer_head *bh,
+                         unsigned long bio_flags, struct writeback_control *wbc)
 {
        struct bio *bio;
+       int ret = 0;
 
        BUG_ON(!buffer_locked(bh));
        BUG_ON(!buffer_mapped(bh));
@@ -3041,8 +3040,8 @@ static int submit_bh_blkcg(int rw, struct buffer_head *bh,
         */
        bio = bio_alloc(GFP_NOIO, 1);
 
-       if (blkcg_css)
-               bio_associate_blkcg(bio, blkcg_css);
+       if (wbc)
+               wbc_init_bio(wbc, bio);
 
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
@@ -3071,13 +3070,13 @@ static int submit_bh_blkcg(int rw, struct buffer_head *bh,
 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 {
-       return submit_bh_blkcg(rw, bh, bio_flags, NULL);
+       return submit_bh_wbc(rw, bh, bio_flags, NULL);
 }
 EXPORT_SYMBOL_GPL(_submit_bh);
 
 int submit_bh(int rw, struct buffer_head *bh)
 {
-       return submit_bh_blkcg(rw, bh, 0, NULL);
+       return submit_bh_wbc(rw, bh, 0, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
...
@@ -244,6 +244,37 @@ void __inode_attach_wb(struct inode *inode, struct page *page)
        wb_put(wb);
 }
 
+/**
+ * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * @inode is locked and about to be written back under the control of @wbc.
+ * Record @inode's writeback context into @wbc and unlock the i_lock.  On
+ * writeback completion, wbc_detach_inode() should be called.  This is used
+ * to track the cgroup writeback context.
+ */
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                struct inode *inode)
+{
+       wbc->wb = inode_to_wb(inode);
+       wb_get(wbc->wb);
+       spin_unlock(&inode->i_lock);
+}
+
+/**
+ * wbc_detach_inode - disassociate wbc from its target inode
+ * @wbc: writeback_control of interest
+ *
+ * To be called after a writeback attempt of an inode finishes and undoes
+ * wbc_attach_and_unlock_inode().  Can be called under any context.
+ */
+void wbc_detach_inode(struct writeback_control *wbc)
+{
+       wb_put(wbc->wb);
+       wbc->wb = NULL;
+}
+
 /**
  * inode_congested - test whether an inode is congested
  * @inode: inode to test for congestion
@@ -877,10 +908,11 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
                      !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
                goto out;
        inode->i_state |= I_SYNC;
-       spin_unlock(&inode->i_lock);
+       wbc_attach_and_unlock_inode(wbc, inode);
 
        ret = __writeback_single_inode(inode, wbc);
 
+       wbc_detach_inode(wbc);
        spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
        /*
@@ -1013,7 +1045,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                        continue;
                }
                inode->i_state |= I_SYNC;
-               spin_unlock(&inode->i_lock);
+               wbc_attach_and_unlock_inode(&wbc, inode);
 
                write_chunk = writeback_chunk_size(wb, work);
                wbc.nr_to_write = write_chunk;
@@ -1025,6 +1057,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                 */
                __writeback_single_inode(inode, &wbc);
 
+               wbc_detach_inode(&wbc);
                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
                spin_lock(&wb->list_lock);
...
@@ -606,7 +606,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                if (bio == NULL)
                        goto confused;
 
-               bio_associate_blkcg(bio, inode_to_wb_blkcg_css(inode));
+               wbc_init_bio(wbc, bio);
        }
 
        /*
...
@@ -332,12 +332,6 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
        return inode->i_wb;
 }
 
-static inline struct cgroup_subsys_state *
-inode_to_wb_blkcg_css(struct inode *inode)
-{
-       return inode_to_wb(inode)->blkcg_css;
-}
-
 struct wb_iter {
        int start_blkcg_id;
        struct radix_tree_iter tree_iter;
@@ -434,12 +428,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
 {
 }
 
-static inline struct cgroup_subsys_state *
-inode_to_wb_blkcg_css(struct inode *inode)
-{
-       return blkcg_root_css;
-}
-
 struct wb_iter {
        int next_id;
 };
...
@@ -86,6 +86,9 @@ struct writeback_control {
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned range_cyclic:1;        /* range_start is cyclic */
        unsigned for_sync:1;            /* sync(2) WB_SYNC_ALL writeback */
+#ifdef CONFIG_CGROUP_WRITEBACK
+       struct bdi_writeback *wb;       /* wb this writeback is issued under */
+#endif
 };
 
 /*
@@ -176,7 +179,14 @@ static inline void wait_on_inode(struct inode *inode)
 #ifdef CONFIG_CGROUP_WRITEBACK
 
+#include <linux/cgroup.h>
+#include <linux/bio.h>
+
 void __inode_attach_wb(struct inode *inode, struct page *page);
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                struct inode *inode)
+       __releases(&inode->i_lock);
+void wbc_detach_inode(struct writeback_control *wbc);
 
 /**
  * inode_attach_wb - associate an inode with its wb
@@ -207,6 +217,44 @@ static inline void inode_detach_wb(struct inode *inode)
        }
 }
 
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by __filemap_fdatawrite_range(), which is an
+ * alternative entry point into writeback code, and first ensures @inode is
+ * associated with a bdi_writeback and attaches it to @wbc.
+ */
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+                                               struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       inode_attach_wb(inode, NULL);
+       wbc_attach_and_unlock_inode(wbc, inode);
+}
+
+/**
+ * wbc_init_bio - writeback specific initializtion of bio
+ * @wbc: writeback_control for the writeback in progress
+ * @bio: bio to be initialized
+ *
+ * @bio is a part of the writeback in progress controlled by @wbc.  Perform
+ * writeback specific initialization.  This is used to apply the cgroup
+ * writeback context.
+ */
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+       /*
+        * pageout() path doesn't attach @wbc to the inode being written
+        * out.  This is intentional as we don't want the function to block
+        * behind a slow cgroup.  Ultimately, we want pageout() to kick off
+        * regular writeback instead of writing things out itself.
+        */
+       if (wbc->wb)
+               bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+}
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static inline void inode_attach_wb(struct inode *inode, struct page *page)
@@ -217,6 +265,26 @@ static inline void inode_detach_wb(struct inode *inode)
 {
 }
 
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+                                               struct inode *inode)
+       __releases(&inode->i_lock)
+{
+       spin_unlock(&inode->i_lock);
+}
+
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+                                               struct inode *inode)
+{
+}
+
+static inline void wbc_detach_inode(struct writeback_control *wbc)
+{
+}
+
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+}
+
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
 /*
...
@@ -290,7 +290,9 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        if (!mapping_cap_writeback_dirty(mapping))
                return 0;
 
+       wbc_attach_fdatawrite_inode(&wbc, mapping->host);
        ret = do_writepages(mapping, &wbc);
+       wbc_detach_inode(&wbc);
        return ret;
 }
...