Commit d0e1d66b authored by Namjae Jeon's avatar Namjae Jeon Committed by Linus Torvalds

writeback: remove nr_pages_dirtied arg from balance_dirty_pages_ratelimited_nr()

There is no reason to pass the nr_pages_dirtied argument, because the
nr_pages_dirtied value supplied by the caller is unused in
balance_dirty_pages_ratelimited_nr().
Signed-off-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Vivek Trivedi <vtrivedi018@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b58ed041
...@@ -3416,8 +3416,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) ...@@ -3416,8 +3416,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
num_dirty = root->fs_info->dirty_metadata_bytes; num_dirty = root->fs_info->dirty_metadata_bytes;
if (num_dirty > thresh) { if (num_dirty > thresh) {
balance_dirty_pages_ratelimited_nr( balance_dirty_pages_ratelimited(
root->fs_info->btree_inode->i_mapping, 1); root->fs_info->btree_inode->i_mapping);
} }
return; return;
} }
...@@ -3437,8 +3437,8 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) ...@@ -3437,8 +3437,8 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
num_dirty = root->fs_info->dirty_metadata_bytes; num_dirty = root->fs_info->dirty_metadata_bytes;
if (num_dirty > thresh) { if (num_dirty > thresh) {
balance_dirty_pages_ratelimited_nr( balance_dirty_pages_ratelimited(
root->fs_info->btree_inode->i_mapping, 1); root->fs_info->btree_inode->i_mapping);
} }
return; return;
} }
......
...@@ -1346,8 +1346,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, ...@@ -1346,8 +1346,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
cond_resched(); cond_resched();
balance_dirty_pages_ratelimited_nr(inode->i_mapping, balance_dirty_pages_ratelimited(inode->i_mapping);
dirty_pages);
if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1) if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
btrfs_btree_balance_dirty(root, 1); btrfs_btree_balance_dirty(root, 1);
......
...@@ -1225,7 +1225,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, ...@@ -1225,7 +1225,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
} }
defrag_count += ret; defrag_count += ret;
balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); balance_dirty_pages_ratelimited(inode->i_mapping);
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
if (newer_than) { if (newer_than) {
......
...@@ -2513,18 +2513,15 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, ...@@ -2513,18 +2513,15 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
ret = sd.num_spliced; ret = sd.num_spliced;
if (ret > 0) { if (ret > 0) {
unsigned long nr_pages;
int err; int err;
nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
err = generic_write_sync(out, *ppos, ret); err = generic_write_sync(out, *ppos, ret);
if (err) if (err)
ret = err; ret = err;
else else
*ppos += ret; *ppos += ret;
balance_dirty_pages_ratelimited_nr(mapping, nr_pages); balance_dirty_pages_ratelimited(mapping);
} }
return ret; return ret;
......
...@@ -1024,17 +1024,14 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, ...@@ -1024,17 +1024,14 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
ret = sd.num_spliced; ret = sd.num_spliced;
if (ret > 0) { if (ret > 0) {
unsigned long nr_pages;
int err; int err;
nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
err = generic_write_sync(out, *ppos, ret); err = generic_write_sync(out, *ppos, ret);
if (err) if (err)
ret = err; ret = err;
else else
*ppos += ret; *ppos += ret;
balance_dirty_pages_ratelimited_nr(mapping, nr_pages); balance_dirty_pages_ratelimited(mapping);
} }
sb_end_write(inode->i_sb); sb_end_write(inode->i_sb);
......
...@@ -161,14 +161,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, ...@@ -161,14 +161,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
unsigned long start_time); unsigned long start_time);
void page_writeback_init(void); void page_writeback_init(void);
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, void balance_dirty_pages_ratelimited(struct address_space *mapping);
unsigned long nr_pages_dirtied);
static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
balance_dirty_pages_ratelimited_nr(mapping, 1);
}
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data); void *data);
......
...@@ -1069,7 +1069,7 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi, ...@@ -1069,7 +1069,7 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
} }
/* /*
* After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr() * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
* will look to see if it needs to start dirty throttling. * will look to see if it needs to start dirty throttling.
* *
* If dirty_poll_interval is too low, big NUMA machines will call the expensive * If dirty_poll_interval is too low, big NUMA machines will call the expensive
...@@ -1436,9 +1436,8 @@ static DEFINE_PER_CPU(int, bdp_ratelimits); ...@@ -1436,9 +1436,8 @@ static DEFINE_PER_CPU(int, bdp_ratelimits);
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
/** /**
* balance_dirty_pages_ratelimited_nr - balance dirty memory state * balance_dirty_pages_ratelimited - balance dirty memory state
* @mapping: address_space which was dirtied * @mapping: address_space which was dirtied
* @nr_pages_dirtied: number of pages which the caller has just dirtied
* *
* Processes which are dirtying memory should call in here once for each page * Processes which are dirtying memory should call in here once for each page
* which was newly dirtied. The function will periodically check the system's * which was newly dirtied. The function will periodically check the system's
...@@ -1449,8 +1448,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; ...@@ -1449,8 +1448,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
* limit we decrease the ratelimiting by a lot, to prevent individual processes * limit we decrease the ratelimiting by a lot, to prevent individual processes
* from overshooting the limit by (ratelimit_pages) each. * from overshooting the limit by (ratelimit_pages) each.
*/ */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, void balance_dirty_pages_ratelimited(struct address_space *mapping)
unsigned long nr_pages_dirtied)
{ {
struct backing_dev_info *bdi = mapping->backing_dev_info; struct backing_dev_info *bdi = mapping->backing_dev_info;
int ratelimit; int ratelimit;
...@@ -1484,6 +1482,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, ...@@ -1484,6 +1482,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
*/ */
p = &__get_cpu_var(dirty_throttle_leaks); p = &__get_cpu_var(dirty_throttle_leaks);
if (*p > 0 && current->nr_dirtied < ratelimit) { if (*p > 0 && current->nr_dirtied < ratelimit) {
unsigned long nr_pages_dirtied;
nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
*p -= nr_pages_dirtied; *p -= nr_pages_dirtied;
current->nr_dirtied += nr_pages_dirtied; current->nr_dirtied += nr_pages_dirtied;
...@@ -1493,7 +1492,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, ...@@ -1493,7 +1492,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
if (unlikely(current->nr_dirtied >= ratelimit)) if (unlikely(current->nr_dirtied >= ratelimit))
balance_dirty_pages(mapping, current->nr_dirtied); balance_dirty_pages(mapping, current->nr_dirtied);
} }
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
void throttle_vm_writeout(gfp_t gfp_mask) void throttle_vm_writeout(gfp_t gfp_mask)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment