Commit 7762741e authored by Wu Fengguang

writeback: consolidate variable names in balance_dirty_pages()

Introduce

	nr_dirty = NR_FILE_DIRTY + NR_WRITEBACK + NR_UNSTABLE_NFS

in order to simplify many tests in the following patches.

balance_dirty_pages() will eventually care only about the dirty sums
besides nr_writeback.
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
parent 00821b00
...@@ -565,8 +565,9 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi, ...@@ -565,8 +565,9 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
static void balance_dirty_pages(struct address_space *mapping, static void balance_dirty_pages(struct address_space *mapping,
unsigned long write_chunk) unsigned long write_chunk)
{ {
long nr_reclaimable, bdi_nr_reclaimable; unsigned long nr_reclaimable, bdi_nr_reclaimable;
long nr_writeback, bdi_nr_writeback; unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */
unsigned long bdi_dirty;
unsigned long background_thresh; unsigned long background_thresh;
unsigned long dirty_thresh; unsigned long dirty_thresh;
unsigned long bdi_thresh; unsigned long bdi_thresh;
...@@ -579,7 +580,7 @@ static void balance_dirty_pages(struct address_space *mapping, ...@@ -579,7 +580,7 @@ static void balance_dirty_pages(struct address_space *mapping,
for (;;) { for (;;) {
nr_reclaimable = global_page_state(NR_FILE_DIRTY) + nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS); global_page_state(NR_UNSTABLE_NFS);
nr_writeback = global_page_state(NR_WRITEBACK); nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
global_dirty_limits(&background_thresh, &dirty_thresh); global_dirty_limits(&background_thresh, &dirty_thresh);
...@@ -588,8 +589,7 @@ static void balance_dirty_pages(struct address_space *mapping, ...@@ -588,8 +589,7 @@ static void balance_dirty_pages(struct address_space *mapping,
* catch-up. This avoids (excessively) small writeouts * catch-up. This avoids (excessively) small writeouts
* when the bdi limits are ramping up. * when the bdi limits are ramping up.
*/ */
if (nr_reclaimable + nr_writeback <= if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
(background_thresh + dirty_thresh) / 2)
break; break;
bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
...@@ -607,10 +607,12 @@ static void balance_dirty_pages(struct address_space *mapping, ...@@ -607,10 +607,12 @@ static void balance_dirty_pages(struct address_space *mapping,
*/ */
if (bdi_thresh < 2*bdi_stat_error(bdi)) { if (bdi_thresh < 2*bdi_stat_error(bdi)) {
bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK); bdi_dirty = bdi_nr_reclaimable +
bdi_stat_sum(bdi, BDI_WRITEBACK);
} else { } else {
bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK); bdi_dirty = bdi_nr_reclaimable +
bdi_stat(bdi, BDI_WRITEBACK);
} }
/* /*
...@@ -619,9 +621,8 @@ static void balance_dirty_pages(struct address_space *mapping, ...@@ -619,9 +621,8 @@ static void balance_dirty_pages(struct address_space *mapping,
* bdi or process from holding back light ones; The latter is * bdi or process from holding back light ones; The latter is
* the last resort safeguard. * the last resort safeguard.
*/ */
dirty_exceeded = dirty_exceeded = (bdi_dirty > bdi_thresh) ||
(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh) (nr_dirty > dirty_thresh);
|| (nr_reclaimable + nr_writeback > dirty_thresh);
if (!dirty_exceeded) if (!dirty_exceeded)
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment