Commit fee468fd authored by Jan Kara, committed by Linus Torvalds

writeback: reliably update bandwidth estimation

Currently we trigger writeback bandwidth estimation from
balance_dirty_pages() and from wb_writeback().  However neither of these
needs to trigger when the system is relatively idle and writeback is
triggered e.g.  from fsync(2).  Make sure writeback estimates happen
reliably by triggering them from do_writepages().

Link: https://lkml.kernel.org/r/20210713104716.22868-2-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Michael Stapelberg <stapelberg+linux@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 633a2abb
...@@ -2004,7 +2004,6 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, ...@@ -2004,7 +2004,6 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
static long wb_writeback(struct bdi_writeback *wb, static long wb_writeback(struct bdi_writeback *wb,
struct wb_writeback_work *work) struct wb_writeback_work *work)
{ {
unsigned long wb_start = jiffies;
long nr_pages = work->nr_pages; long nr_pages = work->nr_pages;
unsigned long dirtied_before = jiffies; unsigned long dirtied_before = jiffies;
struct inode *inode; struct inode *inode;
...@@ -2058,8 +2057,6 @@ static long wb_writeback(struct bdi_writeback *wb, ...@@ -2058,8 +2057,6 @@ static long wb_writeback(struct bdi_writeback *wb,
progress = __writeback_inodes_wb(wb, work); progress = __writeback_inodes_wb(wb, work);
trace_writeback_written(wb, work); trace_writeback_written(wb, work);
wb_update_bandwidth(wb, wb_start);
/* /*
* Did we write something? Try for more * Did we write something? Try for more
* *
......
...@@ -288,6 +288,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) ...@@ -288,6 +288,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
return inode->i_wb; return inode->i_wb;
} }
/*
 * Pick the wb to account this writeback against: the one attached to
 * @wbc if cgroup writeback was active when the wbc was set up, the
 * bdi's embedded default wb otherwise.
 */
static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * No wb attached to the wbc means cgroup writeback was disabled
	 * when the wbc was initialized; fall back to the default wb.
	 */
	if (wbc->wb)
		return wbc->wb;

	return &inode_to_bdi(inode)->wb;
}
/** /**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode * @inode: target inode
...@@ -366,6 +377,14 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) ...@@ -366,6 +377,14 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
return &inode_to_bdi(inode)->wb; return &inode_to_bdi(inode)->wb;
} }
/*
 * NOTE(review): this looks like the !CONFIG_CGROUP_WRITEBACK variant
 * (the preprocessor conditional is outside this hunk — confirm).  With
 * cgroup writeback compiled out each bdi has a single wb, so @wbc is
 * ignored and the inode's default wb is always returned.
 */
static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}
static inline struct bdi_writeback * static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{ {
......
...@@ -379,7 +379,6 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, ...@@ -379,7 +379,6 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void balance_dirty_pages_ratelimited(struct address_space *mapping); void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb); bool wb_over_bg_thresh(struct bdi_writeback *wb);
......
...@@ -1332,7 +1332,6 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc, ...@@ -1332,7 +1332,6 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc, static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
struct dirty_throttle_control *mdtc, struct dirty_throttle_control *mdtc,
unsigned long start_time,
bool update_ratelimit) bool update_ratelimit)
{ {
struct bdi_writeback *wb = gdtc->wb; struct bdi_writeback *wb = gdtc->wb;
...@@ -1352,13 +1351,6 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc, ...@@ -1352,13 +1351,6 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
written = percpu_counter_read(&wb->stat[WB_WRITTEN]); written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
/*
* Skip quiet periods when disk bandwidth is under-utilized.
* (at least 1s idle time between two flusher runs)
*/
if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
goto snapshot;
if (update_ratelimit) { if (update_ratelimit) {
domain_update_bandwidth(gdtc, now); domain_update_bandwidth(gdtc, now);
wb_update_dirty_ratelimit(gdtc, dirtied, elapsed); wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
...@@ -1374,17 +1366,36 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc, ...@@ -1374,17 +1366,36 @@ static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
} }
wb_update_write_bandwidth(wb, elapsed, written); wb_update_write_bandwidth(wb, elapsed, written);
snapshot:
wb->dirtied_stamp = dirtied; wb->dirtied_stamp = dirtied;
wb->written_stamp = written; wb->written_stamp = written;
wb->bw_time_stamp = now; wb->bw_time_stamp = now;
} }
void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time) static void wb_update_bandwidth(struct bdi_writeback *wb)
{ {
struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
__wb_update_bandwidth(&gdtc, NULL, start_time, false); spin_lock(&wb->list_lock);
__wb_update_bandwidth(&gdtc, NULL, false);
spin_unlock(&wb->list_lock);
}
/* Interval after which we consider wb idle and don't estimate bandwidth */
#define WB_BANDWIDTH_IDLE_JIF (HZ)
/*
 * Prepare @wb for a fresh bandwidth estimation interval at the start of
 * do_writepages().  If the wb has been quiet for longer than
 * WB_BANDWIDTH_IDLE_JIF and no inodes are currently under writeback,
 * resync the dirtied/written counter snapshots and the time stamp to
 * "now", so the long idle period does not get averaged into the
 * bandwidth estimate and drag it towards zero.
 */
static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
{
unsigned long now = jiffies;
/* READ_ONCE pairs with lockless updates of bw_time_stamp elsewhere */
unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
/* skip the reset if writeback is still in flight on this wb */
!atomic_read(&wb->writeback_inodes)) {
/* list_lock protects the stamp fields against concurrent updaters */
spin_lock(&wb->list_lock);
wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
wb->written_stamp = wb_stat(wb, WB_WRITTEN);
wb->bw_time_stamp = now;
spin_unlock(&wb->list_lock);
}
} }
/* /*
...@@ -1713,7 +1724,7 @@ static void balance_dirty_pages(struct bdi_writeback *wb, ...@@ -1713,7 +1724,7 @@ static void balance_dirty_pages(struct bdi_writeback *wb,
if (time_is_before_jiffies(wb->bw_time_stamp + if (time_is_before_jiffies(wb->bw_time_stamp +
BANDWIDTH_INTERVAL)) { BANDWIDTH_INTERVAL)) {
spin_lock(&wb->list_lock); spin_lock(&wb->list_lock);
__wb_update_bandwidth(gdtc, mdtc, start_time, true); __wb_update_bandwidth(gdtc, mdtc, true);
spin_unlock(&wb->list_lock); spin_unlock(&wb->list_lock);
} }
...@@ -2347,9 +2358,12 @@ EXPORT_SYMBOL(generic_writepages); ...@@ -2347,9 +2358,12 @@ EXPORT_SYMBOL(generic_writepages);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc) int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{ {
int ret; int ret;
struct bdi_writeback *wb;
if (wbc->nr_to_write <= 0) if (wbc->nr_to_write <= 0)
return 0; return 0;
wb = inode_to_wb_wbc(mapping->host, wbc);
wb_bandwidth_estimate_start(wb);
while (1) { while (1) {
if (mapping->a_ops->writepages) if (mapping->a_ops->writepages)
ret = mapping->a_ops->writepages(mapping, wbc); ret = mapping->a_ops->writepages(mapping, wbc);
...@@ -2360,6 +2374,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc) ...@@ -2360,6 +2374,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
cond_resched(); cond_resched();
congestion_wait(BLK_RW_ASYNC, HZ/50); congestion_wait(BLK_RW_ASYNC, HZ/50);
} }
wb_update_bandwidth(wb);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment