Commit 4ac91242 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: introduce free nid bitmap

In a scenario of intensive node allocation, free nids are exhausted
quickly, and then we have to fall back to loading free nids by
traversing NAT blocks; in the worst case, if the NAT blocks are not
cached in memory, this generates IOs which slow down our foreground
operations.

In order to speed up node allocation, this patch introduces a new
free_nid_bitmap array, providing one bitmap for each NAT block. Once a
NAT block is loaded, its bitmap cache is switched on, and its bits are
set while traversing the nat entries in the NAT block; after that, we
can query and update nid usage status entirely in memory.
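
To make the structure concrete, here is a minimal user-space sketch of
the two-level cache described above (an illustration, not the kernel
code): the names mirror the patch, the byte-wise bit helpers are
simplified stand-ins for the kernel's test_bit_le()/set_bit_le()/
clear_bit_le(), and NAT_ENTRY_PER_BLOCK is hard-coded for 4KB NAT
blocks holding 9-byte entries.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define NAT_ENTRY_PER_BLOCK	455	/* assumes 4KB blocks / 9-byte entries */
	#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)

	/* one "scanned" bit per NAT block; one "free" bit per nid in a scanned block */
	static unsigned char *nat_block_bitmap;
	static unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];

	static bool test_bit_byte(unsigned int nr, const unsigned char *map)
	{
		return map[nr / 8] & (1U << (nr % 8));
	}

	static void assign_bit_byte(unsigned int nr, unsigned char *map, bool set)
	{
		if (set)
			map[nr / 8] |= 1U << (nr % 8);
		else
			map[nr / 8] &= ~(1U << (nr % 8));
	}

	/* mirrors init_free_nid_cache(): both bitmaps start zeroed */
	static int init_free_nid_cache(unsigned int nat_blocks)
	{
		free_nid_bitmap = calloc(nat_blocks, NAT_ENTRY_BITMAP_SIZE);
		nat_block_bitmap = calloc((nat_blocks + 7) / 8, 1);
		return (free_nid_bitmap && nat_block_bitmap) ? 0 : -1;
	}

	/* mirrors update_free_nid_bitmap(): a no-op until the block was scanned */
	static void update_free_nid_bitmap(uint32_t nid, bool is_free)
	{
		unsigned int nat_ofs = nid / NAT_ENTRY_PER_BLOCK;	/* NAT_BLOCK_OFFSET() */
		unsigned int nid_ofs = nid % NAT_ENTRY_PER_BLOCK;	/* nid - START_NID() */

		if (!test_bit_byte(nat_ofs, nat_block_bitmap))
			return;
		assign_bit_byte(nid_ofs, free_nid_bitmap[nat_ofs], is_free);
	}

The point of the "scanned" guard is that a bit in free_nid_bitmap is
only meaningful once the corresponding NAT block has actually been read
and scanned.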

With this implementation, I expect node allocation performance to
improve over the long term, once the filesystem image has been mounted.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent ced2c7ea
fs/f2fs/debug.c
@@ -194,6 +194,8 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 	si->base_mem += sizeof(struct f2fs_nm_info);
 	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
 	si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
+	si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+	si->base_mem += NM_I(sbi)->nat_blocks / 8;
 get_cache:
 	si->cache_mem = 0;

fs/f2fs/f2fs.h
@@ -555,6 +555,8 @@ struct f2fs_nm_info {
 	unsigned int nid_cnt[MAX_NID_LIST];	/* the number of free node id */
 	spinlock_t nid_list_lock;	/* protect nid lists ops */
 	struct mutex build_lock;	/* lock for build free nids */
+	unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
+	unsigned char *nat_block_bitmap;
 
 	/* for checkpoint */
 	char *nat_bitmap;		/* NAT bitmap pointer */

fs/f2fs/node.c
@@ -1765,7 +1765,8 @@ static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
-static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+/* return if the nid is recognized as free */
+static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
@@ -1774,14 +1775,14 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 
 	/* 0 nid should not be used */
 	if (unlikely(nid == 0))
-		return 0;
+		return false;
 
 	if (build) {
 		/* do not add allocated nids */
 		ne = __lookup_nat_cache(nm_i, nid);
 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
 				nat_get_blkaddr(ne) != NULL_ADDR))
-			return 0;
+			return false;
 	}
 
 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
@@ -1790,7 +1791,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 
 	if (radix_tree_preload(GFP_NOFS)) {
 		kmem_cache_free(free_nid_slab, i);
-		return 0;
+		return true;
 	}
 
 	spin_lock(&nm_i->nid_list_lock);
@@ -1799,9 +1800,9 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 	radix_tree_preload_end();
 	if (err) {
 		kmem_cache_free(free_nid_slab, i);
-		return 0;
+		return true;
 	}
-	return 1;
+	return true;
 }
 
 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
@@ -1822,17 +1823,36 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 	kmem_cache_free(free_nid_slab, i);
 }
 
+void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+	unsigned int nid_ofs = nid - START_NID(nid);
+
+	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+		return;
+
+	if (set)
+		set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+	else
+		clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+}
+
 static void scan_nat_page(struct f2fs_sb_info *sbi,
 			struct page *nat_page, nid_t start_nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
 	block_t blk_addr;
+	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
 	int i;
 
+	set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+
 	i = start_nid % NAT_ENTRY_PER_BLOCK;
 
 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
+		bool freed = false;
 
 		if (unlikely(start_nid >= nm_i->max_nid))
 			break;
@@ -1840,8 +1860,52 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
 		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
 		if (blk_addr == NULL_ADDR)
-			add_free_nid(sbi, start_nid, true);
+			freed = add_free_nid(sbi, start_nid, true);
+		update_free_nid_bitmap(sbi, start_nid, freed);
+	}
+}
+
+static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+	struct f2fs_journal *journal = curseg->journal;
+	unsigned int i, idx;
+	unsigned int target = FREE_NID_PAGES * NAT_ENTRY_PER_BLOCK;
+
+	down_read(&nm_i->nat_tree_lock);
+
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		if (!test_bit_le(i, nm_i->nat_block_bitmap))
+			continue;
+		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
+			nid_t nid;
+
+			if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
+				continue;
+
+			nid = i * NAT_ENTRY_PER_BLOCK + idx;
+			add_free_nid(sbi, nid, true);
+
+			if (nm_i->nid_cnt[FREE_NID_LIST] >= target)
+				goto out;
+		}
 	}
+out:
+	down_read(&curseg->journal_rwsem);
+	for (i = 0; i < nats_in_cursum(journal); i++) {
+		block_t addr;
+		nid_t nid;
+
+		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+		nid = le32_to_cpu(nid_in_journal(journal, i));
+		if (addr == NULL_ADDR)
+			add_free_nid(sbi, nid, true);
+		else
+			remove_free_nid(sbi, nid);
+	}
+	up_read(&curseg->journal_rwsem);
+	up_read(&nm_i->nat_tree_lock);
 }
 
 static int scan_nat_bits(struct f2fs_sb_info *sbi)
@@ -1912,9 +1976,17 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 	if (!sync && !available_free_memory(sbi, FREE_NIDS))
 		return;
 
-	/* try to find free nids with nat_bits */
-	if (!mount && !scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-		return;
+	if (!mount) {
+		/* try to find free nids in free_nid_bitmap */
+		scan_free_nid_bits(sbi);
+
+		if (nm_i->nid_cnt[FREE_NID_LIST])
+			return;
+
+		/* try to find free nids with nat_bits */
+		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
+			return;
+	}
 
 	/* find next valid candidate */
 	if (enabled_nat_bits(sbi, NULL)) {
@@ -2010,6 +2082,9 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 		i->state = NID_ALLOC;
 		__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
 		nm_i->available_nids--;
+
+		update_free_nid_bitmap(sbi, *nid, false);
+
 		spin_unlock(&nm_i->nid_list_lock);
 		return true;
 	}
@@ -2064,6 +2139,8 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 
 	nm_i->available_nids++;
 
+	update_free_nid_bitmap(sbi, nid, true);
+
 	spin_unlock(&nm_i->nid_list_lock);
 
 	if (need_free)
@@ -2392,6 +2469,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 			add_free_nid(sbi, nid, false);
 			spin_lock(&NM_I(sbi)->nid_list_lock);
 			NM_I(sbi)->available_nids++;
+			update_free_nid_bitmap(sbi, nid, true);
+			spin_unlock(&NM_I(sbi)->nid_list_lock);
+		} else {
+			spin_lock(&NM_I(sbi)->nid_list_lock);
+			update_free_nid_bitmap(sbi, nid, false);
 			spin_unlock(&NM_I(sbi)->nid_list_lock);
 		}
 	}
@@ -2558,6 +2640,22 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	return 0;
 }
 
+int init_free_nid_cache(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+	nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks *
+					NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
+	if (!nm_i->free_nid_bitmap)
+		return -ENOMEM;
+
+	nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8,
+								GFP_KERNEL);
+	if (!nm_i->nat_block_bitmap)
+		return -ENOMEM;
+	return 0;
+}
+
 int build_node_manager(struct f2fs_sb_info *sbi)
 {
 	int err;
@@ -2570,6 +2668,10 @@ int build_node_manager(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
+	err = init_free_nid_cache(sbi);
+	if (err)
+		return err;
+
 	build_free_nids(sbi, true, true);
 	return 0;
 }
@@ -2628,6 +2730,9 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	}
 	up_write(&nm_i->nat_tree_lock);
 
+	kvfree(nm_i->nat_block_bitmap);
+	kvfree(nm_i->free_nid_bitmap);
+
 	kfree(nm_i->nat_bitmap);
 	kfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS

include/linux/f2fs_fs.h
@@ -279,6 +279,7 @@ struct f2fs_node {
  * For NAT entries
  */
 #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
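
For a rough sense of the memory overhead accounted in the debug.c hunk
above: assuming 4KB pages and the packed 9-byte struct f2fs_nat_entry,
NAT_ENTRY_PER_BLOCK is 4096 / 9 = 455 and NAT_ENTRY_BITMAP_SIZE is
(455 + 7) / 8 = 57 bytes. The snippet below reproduces that arithmetic;
the 1024-block figure is only an example, since the real count depends
on the image.

	#include <assert.h>
	#include <stdio.h>

	#define F2FS_PAGE_SIZE		4096	/* assumed page size */
	#define NAT_ENTRY_SIZE		9	/* packed: __u8 version + __le32 ino + __le32 block_addr */
	#define NAT_ENTRY_PER_BLOCK	(F2FS_PAGE_SIZE / NAT_ENTRY_SIZE)
	#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)

	int main(void)
	{
		unsigned int nat_blocks = 1024;	/* example only */

		assert(NAT_ENTRY_PER_BLOCK == 455);
		assert(NAT_ENTRY_BITMAP_SIZE == 57);

		/* the two terms update_mem_info() adds to si->base_mem */
		printf("free_nid_bitmap:  %u bytes\n", nat_blocks * NAT_ENTRY_BITMAP_SIZE);	/* 58368 */
		printf("nat_block_bitmap: %u bytes\n", nat_blocks / 8);			/* 128 */
		return 0;
	}

So the cache costs roughly 57 bytes per NAT block plus one bit per
block, a small price for avoiding NAT block reads on the node
allocation path.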