Commit 1b38dc8e authored by Jaegeuk Kim

f2fs: shrink nat_cache entries

This patch registers shrinking nat_cache entries.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 2658e50d
...@@ -328,11 +328,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, ...@@ -328,11 +328,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{ {
struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink;
if (available_free_memory(sbi, NAT_ENTRIES)) if (!down_write_trylock(&nm_i->nat_tree_lock))
return 0; return 0;
down_write(&nm_i->nat_tree_lock);
while (nr_shrink && !list_empty(&nm_i->nat_entries)) { while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
struct nat_entry *ne; struct nat_entry *ne;
ne = list_first_entry(&nm_i->nat_entries, ne = list_first_entry(&nm_i->nat_entries,
...@@ -341,7 +341,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) ...@@ -341,7 +341,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
nr_shrink--; nr_shrink--;
} }
up_write(&nm_i->nat_tree_lock); up_write(&nm_i->nat_tree_lock);
return nr_shrink; return nr - nr_shrink;
} }
/* /*
......
...@@ -306,8 +306,12 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) ...@@ -306,8 +306,12 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
/* try to shrink extent cache when there is no enough memory */ /* try to shrink extent cache when there is no enough memory */
f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
/* check the # of cached NAT entries and prefree segments */ /* check the # of cached NAT entries */
if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) || if (!available_free_memory(sbi, NAT_ENTRIES))
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
excess_prefree_segs(sbi) || excess_prefree_segs(sbi) ||
!available_free_memory(sbi, INO_ENTRIES)) !available_free_memory(sbi, INO_ENTRIES))
f2fs_sync_fs(sbi->sb, true); f2fs_sync_fs(sbi->sb, true);
......
...@@ -18,6 +18,11 @@ static LIST_HEAD(f2fs_list); ...@@ -18,6 +18,11 @@ static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock); static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no; static unsigned int shrinker_run_no;
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
}
unsigned long f2fs_shrink_count(struct shrinker *shrink, unsigned long f2fs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc) struct shrink_control *sc)
{ {
...@@ -37,7 +42,8 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink, ...@@ -37,7 +42,8 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
} }
spin_unlock(&f2fs_list_lock); spin_unlock(&f2fs_list_lock);
/* TODO: count # of objects */ /* shrink clean nat cache entries */
count += __count_nat_entries(sbi);
spin_lock(&f2fs_list_lock); spin_lock(&f2fs_list_lock);
p = p->next; p = p->next;
...@@ -76,7 +82,8 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink, ...@@ -76,7 +82,8 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
sbi->shrinker_run_no = run_no; sbi->shrinker_run_no = run_no;
/* TODO: shrink caches */ /* shrink clean nat cache entries */
freed += try_to_free_nats(sbi, nr);
spin_lock(&f2fs_list_lock); spin_lock(&f2fs_list_lock);
p = p->next; p = p->next;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment