@@ -745,6 +745,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
 
 	grp = ext4_get_group_info(sb, e4b->bd_group);
+	if (!grp)
+		return 0;
 	list_for_each(cur, &grp->bb_prealloc_list) {
 		ext4_group_t groupnr;
 		struct ext4_prealloc_space *pa;
@@ -1060,9 +1062,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
 
 static noinline_for_stack
 void ext4_mb_generate_buddy(struct super_block *sb,
-				void *buddy, void *bitmap, ext4_group_t group)
+				void *buddy, void *bitmap, ext4_group_t group,
+				struct ext4_group_info *grp)
 {
-	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
 	ext4_grpblk_t i = 0;
@@ -1181,6 +1183,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 			break;
 
 		grinfo = ext4_get_group_info(sb, group);
+		if (!grinfo)
+			continue;
 		/*
 		 * If page is uptodate then we came here after online resize
 		 * which added some new uninitialized group info structs, so
@@ -1246,6 +1250,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 				 group, page->index, i * blocksize);
 			trace_ext4_mb_buddy_bitmap_load(sb, group);
 			grinfo = ext4_get_group_info(sb, group);
+			if (!grinfo) {
+				err = -EFSCORRUPTED;
+				goto out;
+			}
 			grinfo->bb_fragments = 0;
 			memset(grinfo->bb_counters, 0,
 			       sizeof(*grinfo->bb_counters) *
@@ -1256,7 +1264,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
 			ext4_lock_group(sb, group);
 			/* init the buddy */
 			memset(data, 0xff, blocksize);
-			ext4_mb_generate_buddy(sb, data, incore, group);
+			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
 			ext4_unlock_group(sb, group);
 			incore = NULL;
 		} else {
@@ -1370,6 +1378,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
 	might_sleep();
 	mb_debug(sb, "init group %u\n", group);
 	this_grp = ext4_get_group_info(sb, group);
+	if (!this_grp)
+		return -EFSCORRUPTED;
+
 	/*
 	 * This ensures that we don't reinit the buddy cache
 	 * page which map to the group from which we are already
@@ -1444,6 +1455,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
 
 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
 	grp = ext4_get_group_info(sb, group);
+	if (!grp)
+		return -EFSCORRUPTED;
 
 	e4b->bd_blkbits = sb->s_blocksize_bits;
 	e4b->bd_info = grp;
@@ -2159,6 +2172,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
 	struct ext4_free_extent ex;
 
+	if (!grp)
+		return -EFSCORRUPTED;
 	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
 		return 0;
 	if (grp->bb_free == 0)
@@ -2385,7 +2400,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
 
 	BUG_ON(cr < 0 || cr >= 4);
 
-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
 		return false;
 
 	free = grp->bb_free;
@@ -2454,6 +2469,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 	ext4_grpblk_t free;
 	int ret = 0;
 
+	if (!grp)
+		return -EFSCORRUPTED;
 	if (sbi->s_mb_stats)
 		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
 	if (should_lock) {
@@ -2534,7 +2551,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
 		 * prefetch once, so we avoid getblk() call, which can
 		 * be expensive.
 		 */
-		if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
 		    EXT4_MB_GRP_NEED_INIT(grp) &&
 		    ext4_free_group_clusters(sb, gdp) > 0 &&
 		    !(ext4_has_group_desc_csum(sb) &&
@@ -2578,7 +2595,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
 		gdp = ext4_get_group_desc(sb, group, NULL);
 		grp = ext4_get_group_info(sb, group);
 
-		if (EXT4_MB_GRP_NEED_INIT(grp) &&
+		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
 		    ext4_free_group_clusters(sb, gdp) > 0 &&
 		    !(ext4_has_group_desc_csum(sb) &&
 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
@@ -2837,6 +2854,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 		sizeof(struct ext4_group_info);
 
 	grinfo = ext4_get_group_info(sb, group);
+	if (!grinfo)
+		return 0;
 	/* Load the group info in memory only if not already loaded. */
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
 		err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -2847,7 +2866,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 		buddy_loaded = 1;
 	}
 
-	memcpy(&sg, ext4_get_group_info(sb, group), i);
+	memcpy(&sg, grinfo, i);
 
 	if (buddy_loaded)
 		ext4_mb_unload_buddy(&e4b);
@@ -3208,8 +3227,12 @@ static int ext4_mb_init_backend(struct super_block *sb)
 
 err_freebuddy:
 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
-	while (i-- > 0)
-		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+	while (i-- > 0) {
+		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
+
+		if (grp)
+			kmem_cache_free(cachep, grp);
+	}
 	i = sbi->s_group_info_size;
 	rcu_read_lock();
 	group_info = rcu_dereference(sbi->s_group_info);
@@ -3522,6 +3545,8 @@ int ext4_mb_release(struct super_block *sb)
 		for (i = 0; i < ngroups; i++) {
 			cond_resched();
 			grinfo = ext4_get_group_info(sb, i);
+			if (!grinfo)
+				continue;
 			mb_group_bb_bitmap_free(grinfo);
 			ext4_lock_group(sb, i);
 			count = ext4_mb_cleanup_pa(grinfo);
@@ -4606,6 +4631,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 	struct ext4_free_data *entry;
 
 	grp = ext4_get_group_info(sb, group);
+	if (!grp)
+		return;
 	n = rb_first(&(grp->bb_free_root));
 
 	while (n) {
@@ -4633,6 +4660,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 	int preallocated = 0;
 	int len;
 
+	if (!grp)
+		return;
+
 	/* all form of preallocation discards first load group,
 	 * so the only competing code is preallocation use.
 	 * we don't need any locking here
@@ -4869,6 +4899,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 
 	ei = EXT4_I(ac->ac_inode);
 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+	if (!grp)
+		return;
 
 	pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
 	pa->pa_inode = ac->ac_inode;
@@ -4918,6 +4950,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
 
 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+	if (!grp)
+		return;
 	lg = ac->ac_lg;
 	BUG_ON(lg == NULL);
 
@@ -5043,6 +5077,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 	int err;
 	int free = 0;
 
+	if (!grp)
+		return 0;
 	mb_debug(sb, "discard preallocation for group %u\n", group);
 	if (list_empty(&grp->bb_prealloc_list))
 		goto out_dbg;
@@ -5297,6 +5333,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
 		struct ext4_prealloc_space *pa;
 		ext4_grpblk_t start;
 		struct list_head *cur;
+
+		if (!grp)
+			continue;
 		ext4_lock_group(sb, i);
 		list_for_each(cur, &grp->bb_prealloc_list) {
 			pa = list_entry(cur, struct ext4_prealloc_space,
@@ -6064,6 +6103,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
 	struct buffer_head *bitmap_bh = NULL;
 	struct super_block *sb = inode->i_sb;
 	struct ext4_group_desc *gdp;
+	struct ext4_group_info *grp;
 	unsigned int overflow;
 	ext4_grpblk_t bit;
 	struct buffer_head *gd_bh;
@@ -6089,8 +6129,8 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
 	overflow = 0;
 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
 
-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
-			ext4_get_group_info(sb, block_group))))
+	grp = ext4_get_group_info(sb, block_group);
+	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
 		return;
 
 	/*
@@ -6692,6 +6732,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 
 	for (group = first_group; group <= last_group; group++) {
 		grp = ext4_get_group_info(sb, group);
+		if (!grp)
+			continue;
 		/* We only do this if the grp has never been initialized */
 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
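Taken together, the hunks above apply one defensive pattern at every ext4_get_group_info() call site: the helper may now return NULL for an invalid or corrupted block group, so each caller checks the pointer before dereferencing it, returning -EFSCORRUPTED where the calling convention allows an error code and bailing out or skipping the group otherwise. A minimal sketch of that call-site pattern, with a hypothetical caller name (demo_use_group is not part of the patch):

/*
 * Sketch of the pattern introduced by this patch. ext4_get_group_info()
 * can return NULL, so callers must check before dereferencing.
 * demo_use_group() is a hypothetical function for illustration only;
 * the -EFSCORRUPTED convention mirrors the hunks above.
 */
static int demo_use_group(struct super_block *sb, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);

	if (!grp)
		return -EFSCORRUPTED;	/* missing group info means on-disk corruption */

	/* grp is safe to dereference from here on */
	return 0;
}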