Commit 3a2ad567 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: don't interrupt free nids building during nid allocation

Let build_free_nids() support both synchronous and asynchronous modes. In the nid allocation path we build synchronously, so that we avoid looping in alloc_nid() when free memory is low; in unblock_operations() and f2fs_balance_fs_bg() we build asynchronously, where a low-memory condition is allowed to interrupt the build.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent eb0aa4b8
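
As a rough, self-contained sketch of the policy this patch introduces (should_scan_free_nids and memory_is_low_stub are hypothetical stand-ins, not f2fs symbols): only asynchronous callers may skip the free-nid build when memory is tight, while synchronous callers in the allocation path always scan so alloc_nid() cannot spin in its retry loop.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for available_free_memory(sbi, FREE_NIDS);
 * pretend the free-nid cache is over its memory budget. */
static bool memory_is_low_stub(void)
{
	return true;
}

/* Mirrors the new gate in __build_free_nids(): an async caller (sync == false)
 * backs off under memory pressure, a sync caller keeps scanning NAT pages. */
static bool should_scan_free_nids(bool sync)
{
	if (!sync && memory_is_low_stub())
		return false;
	return true;
}

int main(void)
{
	/* background paths (f2fs_balance_fs_bg, unblock_operations) pass false */
	printf("async build scans: %s\n", should_scan_free_nids(false) ? "yes" : "no");
	/* allocation path (alloc_nid) and mount (build_node_manager) pass true */
	printf("sync build scans:  %s\n", should_scan_free_nids(true) ? "yes" : "no");
	return 0;
}

With memory low, the async call prints "no" and the sync call prints "yes", matching how the patch keeps alloc_nid() from looping while still letting background builders yield.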
@@ -987,7 +987,7 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
 {
 	up_write(&sbi->node_write);
-	build_free_nids(sbi);
+	build_free_nids(sbi, false);
 	f2fs_unlock_all(sbi);
 }
@@ -2040,7 +2040,7 @@ void move_node_page(struct page *, int);
 int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
 			struct writeback_control *, bool);
 int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
-void build_free_nids(struct f2fs_sb_info *);
+void build_free_nids(struct f2fs_sb_info *, bool);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
@@ -1733,9 +1733,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 	struct nat_entry *ne;
 	int err;
 
-	if (!available_free_memory(sbi, FREE_NIDS))
-		return -1;
-
 	/* 0 nid should not be used */
 	if (unlikely(nid == 0))
 		return 0;
@@ -1803,14 +1800,12 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
 		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
-		if (blk_addr == NULL_ADDR) {
-			if (add_free_nid(sbi, start_nid, true) < 0)
-				break;
-		}
+		if (blk_addr == NULL_ADDR)
+			add_free_nid(sbi, start_nid, true);
 	}
 }
 
-void __build_free_nids(struct f2fs_sb_info *sbi)
+void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1822,6 +1817,9 @@ void __build_free_nids(struct f2fs_sb_info *sbi)
 	if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
 		return;
 
+	if (!sync && !available_free_memory(sbi, FREE_NIDS))
+		return;
+
 	/* readahead nat pages to be scanned */
 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
 							META_NAT, true);
@@ -1864,10 +1862,10 @@ void __build_free_nids(struct f2fs_sb_info *sbi)
 					nm_i->ra_nid_pages, META_NAT, false);
 }
 
-void build_free_nids(struct f2fs_sb_info *sbi)
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
 {
 	mutex_lock(&NM_I(sbi)->build_lock);
-	__build_free_nids(sbi);
+	__build_free_nids(sbi, sync);
 	mutex_unlock(&NM_I(sbi)->build_lock);
 }
@@ -1906,7 +1904,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 	spin_unlock(&nm_i->nid_list_lock);
 
 	/* Let's scan nat pages and its caches to get free nids */
-	build_free_nids(sbi);
+	build_free_nids(sbi, true);
 	goto retry;
 }
@@ -2343,7 +2341,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
-	build_free_nids(sbi);
+	build_free_nids(sbi, true);
 	return 0;
 }
@@ -382,7 +382,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	if (!available_free_memory(sbi, FREE_NIDS))
 		try_to_free_nids(sbi, MAX_FREE_NIDS);
 	else
-		build_free_nids(sbi);
+		build_free_nids(sbi, false);
 
 	/* checkpoint is the only way to shrink partial cached entries */
 	if (!available_free_memory(sbi, NAT_ENTRIES) ||