Commit 2caffb6a authored by Kemeng Shi, committed by Theodore Ts'o

ext4: use correct criteria name instead of stale integer number in comment

Use the correct criteria names instead of the stale integer numbers in comments.
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20240424061904.987525-5-shikemeng@huaweicloud.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent d1a3924e
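
For orientation (an editorial sketch, not part of the patch): the comments touched below used to refer to the allocation criteria by their former numeric values, and this change swaps those numbers for the enumerator names of enum criteria. The mapping implied by the old and new comment text is roughly the following, where criteria_sketch is a hypothetical stand-in for the real enum criteria definition in ext4:

	/* Sketch only: stale numbers from the old comments vs. the named criteria. */
	enum criteria_sketch {
		CR_POWER2_ALIGNED,	/* formerly "cr = 0": power-of-2 aligned lookup */
		CR_GOAL_LEN_FAST,	/* formerly "cr = 1": fast goal-length lookup */
		CR_BEST_AVAIL_LEN,	/* formerly "cr = 1.5": best-available-length lookup */
		CR_GOAL_LEN_SLOW,	/* formerly "cr = 2": slow goal-length scan */
	};

Referring to the names keeps these comments accurate even if the numeric ordering of the criteria changes again.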
@@ -213,11 +213,14 @@ enum criteria {
 #define EXT4_MB_USE_RESERVED 0x2000
 /* Do strict check for free blocks while retrying block allocation */
 #define EXT4_MB_STRICT_CHECK 0x4000
-/* Large fragment size list lookup succeeded at least once for cr = 0 */
+/* Large fragment size list lookup succeeded at least once for
+ * CR_POWER2_ALIGNED */
 #define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
+/* Avg fragment size rb tree lookup succeeded at least once for
+ * CR_GOAL_LEN_FAST */
 #define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1.5 */
+/* Avg fragment size rb tree lookup succeeded at least once for
+ * CR_BEST_AVAIL_LEN */
 #define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000

 struct ext4_allocation_request {
@@ -1135,8 +1135,9 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
 		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
 	} else {
 		/*
-		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
-		 * bb_free. But until that happens, we should never come here.
+		 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
+		 * rb tree sorted by bb_free. But until that happens, we should
+		 * never come here.
 		 */
 		WARN_ON(1);
 	}
@@ -2683,7 +2684,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 		int ret;

 		/*
-		 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
+		 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
 		 * search to find large good chunks almost for free. If buddy
 		 * data is not ready, then this optimization makes no sense. But
 		 * we never skip the first block group in a flex_bg, since this
@@ -3448,10 +3449,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	}
 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
-	/* now many real IOs to prefetch within a single allocation at cr=0
-	 * given cr=0 is an CPU-related optimization we shouldn't try to
-	 * load too many groups, at some point we should start to use what
-	 * we've got in memory.
+	/*
+	 * now many real IOs to prefetch within a single allocation at
+	 * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is an CPU-related
+	 * optimization we shouldn't try to load too many groups, at some point
+	 * we should start to use what we've got in memory.
 	 * with an average random access time 5ms, it'd take a second to get
 	 * 200 groups (* N with flex_bg), so let's make this limit 4
 	 */
@@ -187,8 +187,8 @@ struct ext4_allocation_context {
 	struct ext4_free_extent ac_f_ex;

 	/*
-	 * goal len can change in CR1.5, so save the original len. This is
-	 * used while adjusting the PA window and for accounting.
+	 * goal len can change in CR_BEST_AVAIL_LEN, so save the original len.
+	 * This is used while adjusting the PA window and for accounting.
 	 */
 	ext4_grpblk_t ac_orig_goal_len;