@@ -1135,8 +1135,9 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
 		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
 	} else {
 		/*
-		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
-		 * bb_free. But until that happens, we should never come here.
+		 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
+		 * rb tree sorted by bb_free. But until that happens, we should
+		 * never come here.
 		 */
 		WARN_ON(1);
 	}
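For context on the rename in this hunk: the old numeric criteria are being replaced by named constants. A minimal sketch of the criteria enum this series presumably introduces in ext4.h; only the names visible in these hunks are certain, CR_BEST_AVAIL_LEN is an assumed name implied by the ext4_mb_choose_next_group_best_avail() helper above, and the mapping to the old numbers follows the rewritten comments.

/*
 * Sketch of the renamed allocation criteria, assuming an enum roughly like
 * the one this series adds in ext4.h. Mapping to the old numeric levels is
 * taken from the rewritten comments (CR_POWER2_ALIGNED was cr=0,
 * CR_GOAL_LEN_SLOW was cr=2).
 */
enum criteria {
	CR_POWER2_ALIGNED,	/* cheapest pass: power-of-two aligned chunks */
	CR_GOAL_LEN_FAST,	/* fast search for the goal length */
	CR_BEST_AVAIL_LEN,	/* assumed name, implied by *_best_avail() above */
	CR_GOAL_LEN_SLOW,	/* slower pass, was cr=2 in the old comment */
	/* further, more desperate levels omitted */
};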
@@ -2683,7 +2684,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 	int ret;
 
 	/*
-	 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
+	 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
 	 * search to find large good chunks almost for free. If buddy
 	 * data is not ready, then this optimization makes no sense. But
 	 * we never skip the first block group in a flex_bg, since this
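The comment touched in this hunk describes a policy rather than showing the check itself. A minimal standalone sketch of that policy, not ext4_mb_good_group_nolock() itself: optimistic_cr stands for the criteria being CR_POWER2_ALIGNED or CR_GOAL_LEN_FAST, and both boolean flags are hypothetical stand-ins for state the real code derives from the group info.

#include <stdbool.h>

/*
 * Illustration only: at the optimistic criteria, skip groups whose buddy
 * data is not cached yet, except the first group of a flex_bg, which the
 * comment above says is never skipped.
 */
static bool worth_scanning(bool optimistic_cr, bool buddy_cached,
			   bool first_in_flex_bg)
{
	if (optimistic_cr && !buddy_cached && !first_in_flex_bg)
		return false;	/* a cheap pass is not worth extra IO */
	return true;
}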
@@ -3448,10 +3449,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	}
 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
-	/* now many real IOs to prefetch within a single allocation at cr=0
-	 * given cr=0 is an CPU-related optimization we shouldn't try to
-	 * load too many groups, at some point we should start to use what
-	 * we've got in memory.
+	/*
+	 * now many real IOs to prefetch within a single allocation at
+	 * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is an CPU-related
+	 * optimization we shouldn't try to load too many groups, at some point
+	 * we should start to use what we've got in memory.
 	 * with an average random access time 5ms, it'd take a second to get
 	 * 200 groups (* N with flex_bg), so let's make this limit 4
 	 */
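The rewritten comment in the last hunk justifies a numeric cap with a small latency calculation. A worked version of that arithmetic, assuming nothing beyond the figures in the comment itself; the exact variable the limit of 4 is applied to sits outside this hunk, so it is read here simply as a cap on prefetch reads issued per allocation.

#include <stdio.h>

/* Worked arithmetic from the comment: 5ms average random access time,
 * so back-to-back group reads cost 200 per second; the comment settles
 * on a limit of 4.
 */
int main(void)
{
	const int avg_seek_ms = 5;				/* from the comment */
	const int groups_per_second = 1000 / avg_seek_ms;	/* = 200 groups */
	const int prefetch_limit = 4;				/* cap chosen by the comment */

	printf("reading %d groups back-to-back costs about a second\n",
	       groups_per_second);
	printf("a limit of %d keeps prefetch to roughly %d ms per allocation\n",
	       prefetch_limit, prefetch_limit * avg_seek_ms);
	return 0;
}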