Commit 2a340760 authored by Jaegeuk Kim

f2fs: call f2fs_balance_fs only when node was changed

If the user just updates or reads data, we don't need to call f2fs_balance_fs,
which can trigger f2fs_gc and add unnecessarily long latency.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 3104af35
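
For readers skimming the diff, here is a minimal sketch of the pattern applied at every call site below (example_op and its body are illustrative only, not code from the patch): f2fs_balance_fs(), which may kick off f2fs_gc(), is no longer called unconditionally up front. Instead, each path checks dn.node_changed after f2fs_unlock_op() and rebalances only if the dnode operation actually dirtied a node page, so pure read paths and in-place data updates skip the potential GC latency.

	/* Illustrative sketch only; example_op is not part of f2fs. */
	static void example_op(struct f2fs_sb_info *sbi, struct inode *inode,
								pgoff_t index)
	{
		struct dnode_of_data dn;

		f2fs_lock_op(sbi);

		/* set_new_dnode() zeroes dn, so dn.node_changed starts false */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, index, ALLOC_NODE))
			goto unlock;

		/* ... read or allocate the data block for this index ... */

		f2fs_put_dnode(&dn);
	unlock:
		f2fs_unlock_op(sbi);

		/* rebalance (and possibly trigger GC) only if a node was changed */
		if (dn.node_changed)
			f2fs_balance_fs(sbi);
	}

The key point is the ordering: as in the hunks below, the balance call happens after f2fs_unlock_op(), so a GC pass triggered there does not run under f2fs_lock_op().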
@@ -509,7 +509,6 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
 	u64 end_offset;
 
 	while (len) {
-		f2fs_balance_fs(sbi);
 		f2fs_lock_op(sbi);
 
 		/* When reading holes, we need its node page */
@@ -542,6 +541,9 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
 		f2fs_put_dnode(&dn);
 		f2fs_unlock_op(sbi);
+
+		if (dn.node_changed)
+			f2fs_balance_fs(sbi);
 	}
 	return;
@@ -551,6 +553,8 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
 	f2fs_put_dnode(&dn);
 out:
 	f2fs_unlock_op(sbi);
+	if (dn.node_changed)
+		f2fs_balance_fs(sbi);
 	return;
 }
@@ -649,6 +653,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 		if (create) {
 			f2fs_unlock_op(sbi);
+			if (dn.node_changed)
+				f2fs_balance_fs(sbi);
 			f2fs_lock_op(sbi);
 		}
@@ -706,8 +712,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 put_out:
 	f2fs_put_dnode(&dn);
 unlock_out:
-	if (create)
+	if (create) {
 		f2fs_unlock_op(sbi);
+		if (dn.node_changed)
+			f2fs_balance_fs(sbi);
+	}
 out:
 	trace_f2fs_map_blocks(inode, map, err);
 	return err;
@@ -1415,8 +1424,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
-	f2fs_balance_fs(sbi);
-
 	/*
 	 * We should check this at this moment to avoid deadlock on inode page
 	 * and #0 page. The locking rule for inline_data conversion should be:
@@ -1466,6 +1473,17 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 	f2fs_put_dnode(&dn);
 	f2fs_unlock_op(sbi);
 
+	if (dn.node_changed && has_not_enough_free_secs(sbi, 0)) {
+		unlock_page(page);
+		f2fs_balance_fs(sbi);
+		lock_page(page);
+		if (page->mapping != mapping) {
+			/* The page got truncated from under us */
+			f2fs_put_page(page, 1);
+			goto repeat;
+		}
+	}
+
 	f2fs_wait_on_page_writeback(page, DATA);
 
 	/* wait for GCed encrypted page writeback */

@@ -40,8 +40,6 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 	struct dnode_of_data dn;
 	int err;
 
-	f2fs_balance_fs(sbi);
-
 	sb_start_pagefault(inode->i_sb);
 
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -57,6 +55,9 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 	f2fs_put_dnode(&dn);
 	f2fs_unlock_op(sbi);
 
+	if (dn.node_changed)
+		f2fs_balance_fs(sbi);
+
 	file_update_time(vma->vm_file);
 	lock_page(page);
 	if (unlikely(page->mapping != inode->i_mapping ||
@@ -233,9 +234,6 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		goto out;
 	}
 go_write:
-	/* guarantee free sections for fsync */
-	f2fs_balance_fs(sbi);
-
 	/*
 	 * Both of fdatasync() and fsync() are able to be recovered from
 	 * sudden-power-off.
@@ -267,6 +265,8 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	if (need_inode_block_update(sbi, ino)) {
 		mark_inode_dirty_sync(inode);
 		f2fs_write_inode(inode, NULL);
+		f2fs_balance_fs(sbi);
+
 		goto sync_nodes;
 	}
@@ -945,8 +945,6 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
 		return -EINVAL;
 
-	f2fs_balance_fs(F2FS_I_SB(inode));
-
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
 		return ret;
@@ -993,8 +991,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 	if (ret)
 		return ret;
 
-	f2fs_balance_fs(sbi);
-
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
 		return ret;
@@ -1104,12 +1100,12 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
 		return -EINVAL;
 
-	f2fs_balance_fs(sbi);
-
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
 		return ret;
 
+	f2fs_balance_fs(sbi);
+
 	ret = truncate_blocks(inode, i_size_read(inode), true);
 	if (ret)
 		return ret;
@@ -1152,8 +1148,6 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	loff_t off_start, off_end;
 	int ret = 0;
 
-	f2fs_balance_fs(sbi);
-
 	ret = inode_newsize_ok(inode, (len + offset));
 	if (ret)
 		return ret;
@@ -1162,6 +1156,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	if (ret)
 		return ret;
 
+	f2fs_balance_fs(sbi);
+
 	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
@@ -1349,8 +1345,6 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
 
-	f2fs_balance_fs(F2FS_I_SB(inode));
-
 	if (f2fs_is_atomic_file(inode))
 		return 0;
@@ -1437,8 +1431,6 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
 	if (ret)
 		return ret;
 
-	f2fs_balance_fs(F2FS_I_SB(inode));
-
 	clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 	clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
 	commit_inmem_pages(inode, true);

@@ -202,6 +202,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
 	f2fs_unlock_op(sbi);
 
 	f2fs_put_page(page, 1);
+
+	if (dn.node_changed)
+		f2fs_balance_fs(sbi);
+
 	return err;
 }