Commit 5b7ee374 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: use atomic64_t for extent cache hit stat

The extent cache hit statistics increase monotonically until remount, and
the stat variables use the atomic_t type, so they can easily overflow on a
long-running filesystem where the extent cache is queried frequently.

To avoid that, this patch switches the hit stat variables to atomic64_t.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 39307a8e
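
To put the overflow concern in concrete terms: atomic_t wraps a 32-bit int, so about 2^31 (~2.1 billion) recorded lookups are enough to overflow a hit counter, while atomic64_t raises the ceiling to roughly 9.2 x 10^18. Below is a minimal user-space sketch of that arithmetic (plain C, not f2fs code; the 100,000 lookups/s rate is an assumed figure for illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* atomic_t stores a 32-bit int; atomic64_t stores a 64-bit value. */
	const uint64_t limit32 = INT32_MAX;		/* ~2.1e9 */
	const uint64_t limit64 = INT64_MAX;		/* ~9.2e18 */
	const uint64_t lookups_per_sec = 100000;	/* hypothetical query rate */

	printf("32-bit counter overflows after ~%llu hours\n",
		(unsigned long long)(limit32 / lookups_per_sec / 3600));
	printf("64-bit counter overflows after ~%llu years\n",
		(unsigned long long)(limit64 / lookups_per_sec / 3600 / 24 / 365));
	return 0;
}

At that assumed rate the 32-bit counter wraps within a few hours of sustained lookups, whereas the 64-bit counter effectively never does.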
@@ -33,11 +33,11 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	int i;
 
 	/* validation check of the segment numbers */
-	si->hit_largest = atomic_read(&sbi->read_hit_largest);
-	si->hit_cached = atomic_read(&sbi->read_hit_cached);
-	si->hit_rbtree = atomic_read(&sbi->read_hit_rbtree);
+	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
+	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
+	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
 	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
-	si->total_ext = atomic_read(&sbi->total_hit_ext);
+	si->total_ext = atomic64_read(&sbi->total_hit_ext);
 	si->ext_tree = sbi->total_ext_tree;
 	si->ext_node = atomic_read(&sbi->total_ext_node);
 	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -283,12 +283,12 @@ static int stat_show(struct seq_file *s, void *v)
 	seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
 			si->bg_node_blks);
 	seq_puts(s, "\nExtent Cache:\n");
-	seq_printf(s, " - Hit Count: L1-1:%d L1-2:%d L2:%d\n",
+	seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
 			si->hit_largest, si->hit_cached,
 			si->hit_rbtree);
-	seq_printf(s, " - Hit Ratio: %d%% (%d / %d)\n",
+	seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n",
 		!si->total_ext ? 0 :
-			(si->hit_total * 100) / si->total_ext,
+			div64_u64(si->hit_total * 100, si->total_ext),
 		si->hit_total, si->total_ext);
 	seq_printf(s, " - Inner Struct Count: tree: %d, node: %d\n",
 		si->ext_tree, si->ext_node);
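
A side effect of widening the counters is visible in the Hit Ratio line above: si->hit_total * 100 / si->total_ext is now a 64-by-64 division, which 32-bit kernels cannot express as a plain '/' (it would normally be compiled into a libgcc helper call that the kernel does not provide), so the div64_u64() helper from <linux/math64.h> is used instead. A user-space sketch of the same computation follows (the div64_u64_example() wrapper and the counter values are hypothetical stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's div64_u64(); in user space the compiler's
 * runtime supplies 64-bit division, so a plain '/' is fine here. */
static uint64_t div64_u64_example(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Hypothetical counter values, chosen to exceed 32-bit range. */
	uint64_t hit_total = 3000000000ULL;
	uint64_t total_ext = 4000000000ULL;
	uint64_t ratio = total_ext ?
		div64_u64_example(hit_total * 100, total_ext) : 0;

	printf(" - Hit Ratio: %llu%% (%llu / %llu)\n",
		(unsigned long long)ratio,
		(unsigned long long)hit_total,
		(unsigned long long)total_ext);
	return 0;
}

The matching change to struct f2fs_stat_info further down makes these fields unsigned long long so they line up with the %llu format specifiers.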
@@ -378,10 +378,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
 	si->sbi = sbi;
 	sbi->stat_info = si;
-	atomic_set(&sbi->total_hit_ext, 0);
-	atomic_set(&sbi->read_hit_rbtree, 0);
-	atomic_set(&sbi->read_hit_largest, 0);
-	atomic_set(&sbi->read_hit_cached, 0);
+	atomic64_set(&sbi->total_hit_ext, 0);
+	atomic64_set(&sbi->read_hit_rbtree, 0);
+	atomic64_set(&sbi->read_hit_largest, 0);
+	atomic64_set(&sbi->read_hit_cached, 0);
 	atomic_set(&sbi->inline_xattr, 0);
 	atomic_set(&sbi->inline_inode, 0);
@@ -795,10 +795,10 @@ struct f2fs_sb_info {
 	unsigned int segment_count[2];		/* # of allocated segments */
 	unsigned int block_count[2];		/* # of allocated blocks */
 	atomic_t inplace_count;			/* # of inplace update */
-	atomic_t total_hit_ext;			/* # of lookup extent cache */
-	atomic_t read_hit_rbtree;		/* # of hit rbtree extent node */
-	atomic_t read_hit_largest;		/* # of hit largest extent node */
-	atomic_t read_hit_cached;		/* # of hit cached extent node */
+	atomic64_t total_hit_ext;		/* # of lookup extent cache */
+	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
+	atomic64_t read_hit_largest;		/* # of hit largest extent node */
+	atomic64_t read_hit_cached;		/* # of hit cached extent node */
 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 	atomic_t inline_inode;			/* # of inline_data inodes */
 	atomic_t inline_dir;			/* # of inline_dentry inodes */
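
One cost worth noting for the new field types: on 32-bit architectures without native 64-bit atomic instructions, the kernel falls back to a generic atomic64 implementation that serializes each operation with a spinlock, so atomic64_inc() in the lookup hot path is somewhat heavier than atomic_inc(). A simplified, hypothetical user-space sketch of that lock-backed strategy (not the kernel's actual implementation):

#include <pthread.h>
#include <stdio.h>

/* Lock-backed 64-bit counter, mimicking the generic fallback used
 * when the CPU has no native 64-bit atomic operations. */
struct atomic64_sketch {
	long long counter;
	pthread_mutex_t lock;
};

static void atomic64_sketch_inc(struct atomic64_sketch *v)
{
	pthread_mutex_lock(&v->lock);
	v->counter++;
	pthread_mutex_unlock(&v->lock);
}

static long long atomic64_sketch_read(struct atomic64_sketch *v)
{
	pthread_mutex_lock(&v->lock);
	long long val = v->counter;
	pthread_mutex_unlock(&v->lock);
	return val;
}

int main(void)
{
	static struct atomic64_sketch hits = { 0, PTHREAD_MUTEX_INITIALIZER };

	atomic64_sketch_inc(&hits);
	printf("hits = %lld\n", atomic64_sketch_read(&hits));
	return 0;
}

For statistics that are only bumped on cache lookups, that overhead is negligible next to the benefit of counters that never wrap.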
@@ -1848,7 +1848,8 @@ struct f2fs_stat_info {
 	struct f2fs_sb_info *sbi;
 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
 	int main_area_segs, main_area_sections, main_area_zones;
-	int hit_largest, hit_cached, hit_rbtree, hit_total, total_ext;
+	unsigned long long hit_largest, hit_cached, hit_rbtree;
+	unsigned long long hit_total, total_ext;
 	int ext_tree, ext_node;
 	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
 	int nats, dirty_nats, sits, dirty_sits, fnids;
@@ -1885,10 +1886,10 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
 #define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
 #define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
 #define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
-#define stat_inc_total_hit(sbi)		(atomic_inc(&(sbi)->total_hit_ext))
-#define stat_inc_rbtree_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_rbtree))
-#define stat_inc_largest_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_largest))
-#define stat_inc_cached_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_cached))
+#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
+#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
+#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
 #define stat_inc_inline_xattr(inode)					\
 	do {								\
 		if (f2fs_has_inline_xattr(inode))			\
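
For context on how these macros are driven, the pattern is increment in the hot path, read at stat time: the extent cache lookup path bumps the total counter on every query and one of the hit counters on a hit, and the debugfs stat code later reads the totals back with atomic64_read(). A rough user-space analogue of that pattern, using C11 atomics in place of the kernel's atomic64_t (the worker function and iteration counts are made up for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* User-space analogue of stat_inc_total_hit() in the lookup hot path
 * and atomic64_read() when the stats are printed. */
static _Atomic long long total_hit_ext;

static void *lookup_worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		atomic_fetch_add_explicit(&total_hit_ext, 1,
					  memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t workers[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&workers[i], NULL, lookup_worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(workers[i], NULL);

	/* A 64-bit read: no tearing, and no wrap at ~2.1e9. */
	printf("total_hit_ext = %lld\n",
		atomic_load_explicit(&total_hit_ext, memory_order_relaxed));
	return 0;
}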