Commit 7a88ddb5 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: fix inconsistent comments

Lack of maintenance on comments may mislead developers; fix them.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 3addc1ae
@@ -50,9 +50,6 @@ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	return page;
 }
 
-/*
- * We guarantee no failure on the returned page.
- */
 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
 							bool is_meta)
 {
@@ -206,7 +203,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 }
 
 /*
- * Readahead CP/NAT/SIT/SSA pages
+ * Readahead CP/NAT/SIT/SSA/POR pages
  */
 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 							int type, bool sync)
@@ -898,7 +895,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 		return -ENOMEM;
 	/*
 	 * Finding out valid cp block involves read both
-	 * sets( cp pack1 and cp pack 2)
+	 * sets( cp pack 1 and cp pack 2)
 	 */
 	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
 	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
@@ -1385,10 +1382,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	/* Flush all the NAT/SIT pages */
 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
 
-	/*
-	 * modify checkpoint
-	 * version number is already updated
-	 */
+	/* start to update checkpoint, cp ver is already updated previously */
 	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
 	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
@@ -1541,9 +1535,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
 }
 
-/*
- * We guarantee that this checkpoint procedure will not fail.
- */
 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1611,7 +1602,6 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
 	f2fs_flush_sit_entries(sbi, cpc);
 
-	/* unlock all the fs_lock[] in do_checkpoint() */
 	err = do_checkpoint(sbi, cpc);
 	if (err)
 		f2fs_release_discard_addrs(sbi);
@@ -1624,7 +1614,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	if (cpc->reason & CP_RECOVERY)
 		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
 
-	/* do checkpoint periodically */
+	/* update CP_TIME to trigger checkpoint periodically */
 	f2fs_update_time(sbi, CP_TIME);
 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
 out:
@@ -364,9 +364,6 @@ static void f2fs_write_end_io(struct bio *bio)
 	bio_put(bio);
 }
 
-/*
- * Return true, if pre_bio's bdev is same as its target device.
- */
 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
 				block_t blk_addr, struct bio *bio)
 {
@@ -403,6 +400,9 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
 	return 0;
 }
 
+/*
+ * Return true, if pre_bio's bdev is same as its target device.
+ */
 static bool __same_bdev(struct f2fs_sb_info *sbi,
 				block_t blk_addr, struct bio *bio)
 {
@@ -410,9 +410,6 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
 }
 
-/*
- * Low-level block read/write IO operations.
- */
 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
@@ -1388,13 +1385,9 @@ void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
 }
 
 /*
- * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
- * f2fs_map_blocks structure.
- * If original data blocks are allocated, then give them to blockdev.
- * Otherwise,
- *     a. preallocate requested block addresses
- *     b. do not use extent cache for better performance
- *     c. give the block addresses to blockdev
+ * f2fs_map_blocks() tries to find or build mapping relationship which
+ * maps continuous logical blocks to physical blocks, and return such
+ * info via f2fs_map_blocks structure.
  */
 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 						int create, int flag)
@@ -2388,9 +2388,9 @@ static inline block_t datablock_addr(struct inode *inode,
 
 	raw_node = F2FS_NODE(node_page);
 
-	/* from GC path only */
 	if (is_inode) {
 		if (!inode)
+			/* from GC path only */
 			base = offset_in_addr(&raw_node->i);
 		else if (f2fs_has_extra_attr(inode))
 			base = get_extra_isize(inode);
@@ -112,7 +112,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 		}
 	}
 
-	/* fill the page */
 	f2fs_wait_on_page_writeback(page, DATA, false, true);
 
 	/* wait for GCed page writeback via META_MAPPING */
@@ -192,7 +192,10 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 		p->ofs_unit = sbi->segs_per_sec;
 	}
 
-	/* we need to check every dirty segments in the FG_GC case */
+	/*
+	 * adjust candidates range, should select all dirty segments for
+	 * foreground GC and urgent GC cases.
+	 */
 	if (gc_type != FG_GC &&
 			(sbi->gc_mode != GC_URGENT) &&
 			p->max_search > sbi->max_victim_search)
@@ -776,7 +776,7 @@ void f2fs_evict_inode(struct inode *inode)
 	else
 		f2fs_inode_synced(inode);
 
-	/* ino == 0, if f2fs_new_inode() was failed t*/
+	/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
 	if (inode->i_ino)
 		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
 							inode->i_ino);
@@ -177,7 +177,7 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
 }
 
 /*
- * Set multimedia files as cold files for hot/cold data separation
+ * Set file's temperature for hot/cold data separation
  */
 static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
 		const unsigned char *name)
@@ -510,9 +510,6 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 	return nr - nr_shrink;
 }
 
-/*
- * This function always returns success
- */
 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 						struct node_info *ni)
 {
@@ -716,8 +713,7 @@ static int get_node_path(struct inode *inode, long block,
 /*
  * Caller should call f2fs_put_dnode(dn).
  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op() only if ro is not set RDONLY_NODE.
- * In the case of RDONLY_NODE, we don't need to care about mutex.
+ * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
  */
 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
@@ -58,7 +58,7 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
 	/* count extent cache entries */
 	count += __count_extent_cache(sbi);
 
-	/* shrink clean nat cache entries */
+	/* count clean nat cache entries */
 	count += __count_nat_entries(sbi);
 
 	/* count free nids cache entries */
@@ -1658,7 +1658,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 out_unlock:
 	up_write(&sbi->gc_lock);
 restore_flag:
-	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
+	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
 	return err;
 }
 
@@ -3590,7 +3590,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
 	}
 #endif
-	/* if there are nt orphan nodes free them */
+	/* if there are any orphan inodes, free them */
 	err = f2fs_recover_orphan_inodes(sbi);
 	if (err)
 		goto free_meta;